author    Palmer Dabbelt <palmer@rivosinc.com>    2025-05-09 15:07:20 -0700
committer Palmer Dabbelt <palmer@rivosinc.com>    2025-05-09 15:07:29 -0700
commit    91c5822e487aa48a1893a864fe69c2d432244fa2 (patch)
tree      04cf208f6ea3657cbc114bc284720d893f57c1bf
parent    1a3f6980889df3fc90ad3e4a525061d2c138adba (diff)
parent    ee293f4771c97525eb9f2069685f8d3057025a88 (diff)
Merge branch 'sbiv3' into for-next
This is a staging merge, as this branch depends on the SBIv3 freeze and
thus isn't officially ready yet. See
https://lore.kernel.org/all/mhng-4c05b8a8-03ab-4e68-ae73-ed405f196e7c@palmer-ri-x1c9a/

* sbiv3:
  riscv: misaligned: add a function to check misalign trap delegability
  riscv: misaligned: move emulated access uniformity check in a function
  riscv: misaligned: use correct CONFIG_ ifdef for misaligned_access_speed
  riscv: misaligned: use on_each_cpu() for scalar misaligned access probing
  riscv: misaligned: request misaligned exception from SBI
  riscv: sbi: add SBI FWFT extension calls
  riscv: sbi: add FWFT extension interface
  riscv: sbi: add new SBI error mappings
  riscv: sbi: remove useless parenthesis
  riscv: sbi: add Firmware Feature (FWFT) SBI extensions definitions
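As a quick orientation before the diff: a minimal sketch of how a kernel
caller might use the interface this series adds. sbi_fwft_set() and
SBI_FWFT_MISALIGNED_EXC_DELEG are introduced in the hunks below; the
initcall wrapper itself is hypothetical and not part of the series.

/* Hypothetical initcall, not part of this series: request misaligned
 * exception delegation on the calling hart via the new FWFT interface. */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/sbi.h>

static int __init fwft_deleg_demo(void)
{
	/* Returns -EOPNOTSUPP when the FWFT extension was not probed. */
	int ret = sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);

	if (ret)
		pr_info("FWFT misaligned delegation unavailable: %d\n", ret);
	return 0;
}
early_initcall(fwft_deleg_demo);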
 arch/riscv/include/asm/cpufeature.h        |   8
 arch/riscv/include/asm/sbi.h               |  60
 arch/riscv/kernel/sbi.c                    |  81
 arch/riscv/kernel/traps_misaligned.c       | 110
 arch/riscv/kernel/unaligned_access_speed.c |   8
 5 files changed, 252 insertions(+), 15 deletions(-)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index f56b409361fbe0..3a87f612035ca9 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -67,16 +67,22 @@ void __init riscv_user_isa_enable(void);
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
bool __init check_unaligned_access_emulated_all_cpus(void);
+void unaligned_access_init(void);
+int cpu_online_unaligned_access_init(unsigned int cpu);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
-void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
bool unaligned_ctl_available(void);
+bool misaligned_traps_can_delegate(void);
DECLARE_PER_CPU(long, misaligned_access_speed);
#else
static inline bool unaligned_ctl_available(void)
{
return false;
}
+static inline bool misaligned_traps_can_delegate(void)
+{
+ return false;
+}
#endif
bool __init check_vector_unaligned_access_emulated_all_cpus(void);
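The !CONFIG_RISCV_SCALAR_MISALIGNED stubs above keep call sites ifdef-free:
a caller can test delegability unconditionally, and the compiler folds the
branch away when the option is off. A hypothetical call site:

#include <linux/printk.h>
#include <asm/cpufeature.h>

/* Hypothetical consumer: no #ifdef needed here, since the header
 * provides a constant-false stub when the config option is off. */
static void report_misaligned_handling(void)
{
	if (misaligned_traps_can_delegate())
		pr_info("misaligned traps can reach the kernel\n");
	else
		pr_info("misaligned handling stays in firmware/hardware\n");
}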
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 3d250824178bd5..3bbef56bcefcc9 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -35,6 +35,7 @@ enum sbi_ext_id {
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_STA = 0x535441,
SBI_EXT_NACL = 0x4E41434C,
+ SBI_EXT_FWFT = 0x46574654,
/* Experimental extensions must lie within this range */
SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -402,6 +403,33 @@ enum sbi_ext_nacl_feature {
#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i))
#define SBI_NACL_SHMEM_SRET_X_LAST 31
+/* SBI function IDs for FW feature extension */
+#define SBI_EXT_FWFT_SET 0x0
+#define SBI_EXT_FWFT_GET 0x1
+
+enum sbi_fwft_feature_t {
+ SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0,
+ SBI_FWFT_LANDING_PAD = 0x1,
+ SBI_FWFT_SHADOW_STACK = 0x2,
+ SBI_FWFT_DOUBLE_TRAP = 0x3,
+ SBI_FWFT_PTE_AD_HW_UPDATING = 0x4,
+ SBI_FWFT_POINTER_MASKING_PMLEN = 0x5,
+ SBI_FWFT_LOCAL_RESERVED_START = 0x6,
+ SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff,
+ SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000,
+ SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff,
+
+ SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000,
+ SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff,
+ SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000,
+ SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff,
+};
+
+#define SBI_FWFT_PLATFORM_FEATURE_BIT BIT(30)
+#define SBI_FWFT_GLOBAL_FEATURE_BIT BIT(31)
+
+#define SBI_FWFT_SET_FLAG_LOCK BIT(0)
+
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
@@ -419,6 +447,11 @@ enum sbi_ext_nacl_feature {
#define SBI_ERR_ALREADY_STARTED -7
#define SBI_ERR_ALREADY_STOPPED -8
#define SBI_ERR_NO_SHMEM -9
+#define SBI_ERR_INVALID_STATE -10
+#define SBI_ERR_BAD_RANGE -11
+#define SBI_ERR_TIMEOUT -12
+#define SBI_ERR_IO -13
+#define SBI_ERR_DENIED_LOCKED -14
extern unsigned long sbi_spec_version;
struct sbiret {
@@ -470,6 +503,23 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long asid);
long sbi_probe_extension(int ext);
+int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags);
+int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature,
+ unsigned long value, unsigned long flags);
+/**
+ * sbi_fwft_set_online_cpus() - Set a feature on all online cpus
+ * @feature: The feature to be set
+ * @value: The feature value to be set
+ * @flags: FWFT feature set flags
+ *
+ * Return: 0 on success, appropriate linux error code otherwise.
+ */
+static inline int sbi_fwft_set_online_cpus(u32 feature, unsigned long value,
+ unsigned long flags)
+{
+ return sbi_fwft_set_cpumask(cpu_online_mask, feature, value, flags);
+}
+
/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)
{
@@ -503,11 +553,21 @@ static inline int sbi_err_map_linux_errno(int err)
case SBI_SUCCESS:
return 0;
case SBI_ERR_DENIED:
+ case SBI_ERR_DENIED_LOCKED:
return -EPERM;
case SBI_ERR_INVALID_PARAM:
+ case SBI_ERR_INVALID_STATE:
return -EINVAL;
+ case SBI_ERR_BAD_RANGE:
+ return -ERANGE;
case SBI_ERR_INVALID_ADDRESS:
return -EFAULT;
+ case SBI_ERR_NO_SHMEM:
+ return -ENOMEM;
+ case SBI_ERR_TIMEOUT:
+ return -ETIME;
+ case SBI_ERR_IO:
+ return -EIO;
case SBI_ERR_NOT_SUPPORTED:
case SBI_ERR_FAILURE:
default:
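One detail in the sbi.h hunks worth calling out: the feature-ID encoding
reserves bit 30 for platform-specific features and bit 31 for global
(system-wide) ones, which is why sbi_fwft_set_cpumask() in the next file
rejects global IDs — presumably because setting a system-wide value per
cpumask would be ill-defined. A small sketch of that layout, assuming the
enum values above (the helper name is illustrative):

#include <linux/types.h>
#include <asm/sbi.h>

/* Sketch of the feature-ID layout: a feature is global iff bit 31
 * (SBI_FWFT_GLOBAL_FEATURE_BIT) is set in its ID. */
static inline bool fwft_is_global(u32 feature)
{
	return !!(feature & SBI_FWFT_GLOBAL_FEATURE_BIT);
}

/*
 * fwft_is_global(SBI_FWFT_MISALIGNED_EXC_DELEG)  == false (per-hart)
 * fwft_is_global(SBI_FWFT_GLOBAL_RESERVED_START) == true
 */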
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 1989b8cade1b98..070014ff35d49c 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -299,6 +299,76 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
return 0;
}
+static bool sbi_fwft_supported;
+
+/**
+ * sbi_fwft_set() - Set a feature on the local hart
+ * @feature: The feature ID to be set
+ * @value: The feature value to be set
+ * @flags: FWFT feature set flags
+ *
+ * Return: 0 on success, appropriate linux error code otherwise.
+ */
+int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags)
+{
+ struct sbiret ret;
+
+ if (!sbi_fwft_supported)
+ return -EOPNOTSUPP;
+
+ ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
+ feature, value, flags, 0, 0, 0);
+
+ return sbi_err_map_linux_errno(ret.error);
+}
+
+struct fwft_set_req {
+ u32 feature;
+ unsigned long value;
+ unsigned long flags;
+ atomic_t error;
+};
+
+static void cpu_sbi_fwft_set(void *arg)
+{
+ struct fwft_set_req *req = arg;
+ int ret;
+
+ ret = sbi_fwft_set(req->feature, req->value, req->flags);
+ if (ret)
+ atomic_set(&req->error, ret);
+}
+
+/**
+ * sbi_fwft_set_cpumask() - Set a feature for the specified cpumask
+ * @mask: CPU mask of cpus that need the feature to be set
+ * @feature: The feature ID to be set
+ * @value: The feature value to be set
+ * @flags: FWFT feature set flags
+ *
+ * Return: 0 on success, appropriate linux error code otherwise.
+ */
+int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature,
+ unsigned long value, unsigned long flags)
+{
+ struct fwft_set_req req = {
+ .feature = feature,
+ .value = value,
+ .flags = flags,
+ .error = ATOMIC_INIT(0),
+ };
+
+ if (!sbi_fwft_supported)
+ return -EOPNOTSUPP;
+
+ if (feature & SBI_FWFT_GLOBAL_FEATURE_BIT)
+ return -EINVAL;
+
+ on_each_cpu_mask(mask, cpu_sbi_fwft_set, &req, 1);
+
+ return atomic_read(&req.error);
+}
+
/**
* sbi_set_timer() - Program the timer for next timer event.
* @stime_value: The value after which next timer event should fire.
@@ -609,7 +679,7 @@ void __init sbi_init(void)
} else {
__sbi_rfence = __sbi_rfence_v01;
}
- if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+ if (sbi_spec_version >= sbi_mk_version(0, 3) &&
sbi_probe_extension(SBI_EXT_SRST)) {
pr_info("SBI SRST extension detected\n");
pm_power_off = sbi_srst_power_off;
@@ -617,11 +687,16 @@ void __init sbi_init(void)
sbi_srst_reboot_nb.priority = 192;
register_restart_handler(&sbi_srst_reboot_nb);
}
- if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
- (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_DBCN) > 0) {
pr_info("SBI DBCN extension detected\n");
sbi_debug_console_available = true;
}
+ if (sbi_spec_version >= sbi_mk_version(3, 0) &&
+ sbi_probe_extension(SBI_EXT_FWFT)) {
+ pr_info("SBI FWFT extension detected\n");
+ sbi_fwft_supported = true;
+ }
} else {
__sbi_set_timer = __sbi_set_timer_v01;
__sbi_send_ipi = __sbi_send_ipi_v01;
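Note the error-propagation design in sbi_fwft_set_cpumask(): each hart
writes its own failure into the shared atomic, so when several harts fail
only one error value survives — acceptable here since callers only test
for non-zero. A hypothetical wrapper combining the new setter with the
LOCK flag (whose semantics — firmware refusing later changes to the
value — are taken from the FWFT draft specification, not from this diff):

#include <asm/sbi.h>

/* Hypothetical helper: enable misaligned-exception delegation on all
 * online harts and lock the value so firmware refuses later changes. */
static int lock_misaligned_delegation(void)
{
	return sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1,
					SBI_FWFT_SET_FLAG_LOCK);
}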
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7443632445d44f..1f2ba769833c9c 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -16,6 +16,7 @@
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
+#include <asm/sbi.h>
#include <asm/vector.h>
#define INSN_MATCH_LB 0x3
@@ -368,7 +369,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
#endif
@@ -626,6 +627,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
+	/*
+	 * Although schedule_on_each_cpu() is documented as very slow, it is
+	 * used here since kernel_vector_begin() expects irqs to be enabled
+	 * and will panic() otherwise.
+	 */
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
@@ -646,7 +651,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
static bool unaligned_ctl __read_mostly;
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(void *arg __always_unused)
{
int cpu = smp_processor_id();
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +662,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
__asm__ __volatile__ (
" "REG_L" %[tmp], 1(%[ptr])\n"
: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+ check_unaligned_access_emulated(NULL);
/*
* If unaligned_ctl is already set, this means that we detected that all
@@ -665,26 +677,35 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
*/
if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
- while (true)
- cpu_relax();
+ return -EINVAL;
}
+
+ return 0;
}
-bool __init check_unaligned_access_emulated_all_cpus(void)
+static bool all_cpus_unaligned_scalar_access_emulated(void)
{
int cpu;
+ for_each_online_cpu(cpu)
+ if (per_cpu(misaligned_access_speed, cpu) !=
+ RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
+ return false;
+
+ return true;
+}
+
+bool __init check_unaligned_access_emulated_all_cpus(void)
+{
/*
* We can only support PR_UNALIGN controls if all CPUs have misaligned
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
- schedule_on_each_cpu(check_unaligned_access_emulated);
+ on_each_cpu(check_unaligned_access_emulated, NULL, 1);
- for_each_online_cpu(cpu)
- if (per_cpu(misaligned_access_speed, cpu)
- != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
- return false;
+ if (!all_cpus_unaligned_scalar_access_emulated())
+ return false;
unaligned_ctl = true;
return true;
@@ -699,4 +720,73 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
+static bool misaligned_traps_delegated;
+
+#ifdef CONFIG_RISCV_SBI
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+ if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+ misaligned_traps_delegated) {
+ pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+ int ret;
+
+ ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+ if (ret)
+ return;
+
+ misaligned_traps_delegated = true;
+ pr_info("SBI misaligned access exception delegation ok\n");
+	/*
+	 * Note that we don't have to take any specific action here: if the
+	 * delegation is successful, check_unaligned_access_emulated() will
+	 * verify that the platform indeed traps on misaligned accesses.
+	 */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+ return 0;
+}
+
#endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+ int ret;
+
+ ret = cpu_online_sbi_unaligned_setup(cpu);
+ if (ret)
+ return ret;
+
+ return cpu_online_check_unaligned_access_emulated(cpu);
+}
+
+bool misaligned_traps_can_delegate(void)
+{
+	/*
+	 * Either we successfully requested misaligned trap delegation for all
+	 * CPUs, or the SBI does not implement the FWFT extension but delegates
+	 * the exception by default.
+	 */
+ return misaligned_traps_delegated ||
+ all_cpus_unaligned_scalar_access_emulated();
+}
+EXPORT_SYMBOL_GPL(misaligned_traps_can_delegate);
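Since misaligned_traps_can_delegate() is exported GPL, out-of-tree code can
key off it as well; a hypothetical module-side check:

#include <linux/module.h>
#include <linux/printk.h>
#include <asm/cpufeature.h>

/* Hypothetical module init: true means either FWFT delegation succeeded
 * on all harts, or the platform already traps (emulates) every
 * misaligned scalar access. */
static int __init misalign_check_init(void)
{
	pr_info("misaligned traps %s be delegated\n",
		misaligned_traps_can_delegate() ? "can" : "cannot");
	return 0;
}
module_init(misalign_check_init);

static void __exit misalign_check_exit(void) { }
module_exit(misalign_check_exit);
MODULE_LICENSE("GPL");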
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 585d2dcf2dab1c..49daa7b39bfaf3 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -141,6 +141,8 @@ static void __init check_unaligned_access_speed_all_cpus(void)
unsigned int cpu_count = num_possible_cpus();
struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
+ unaligned_access_init();
+
if (!bufs) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
return;
@@ -236,6 +238,11 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
static int riscv_online_cpu(unsigned int cpu)
{
+ int ret = cpu_online_unaligned_access_init(cpu);
+
+ if (ret)
+ return ret;
+
/* We are already set since the last check */
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
goto exit;
@@ -248,7 +255,6 @@ static int riscv_online_cpu(unsigned int cpu)
{
static struct page *buf;
- check_unaligned_access_emulated(NULL);
buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!buf) {
pr_warn("Allocation failure, not measuring misaligned performance\n");