10 files changed, 1056 insertions, 0 deletions
diff --git a/queue-5.15/arm64-bpf-add-bhb-mitigation-to-the-epilogue-for-cbpf-programs.patch b/queue-5.15/arm64-bpf-add-bhb-mitigation-to-the-epilogue-for-cbpf-programs.patch new file mode 100644 index 0000000000..0a49e68354 --- /dev/null +++ b/queue-5.15/arm64-bpf-add-bhb-mitigation-to-the-epilogue-for-cbpf-programs.patch @@ -0,0 +1,155 @@ +From stable+bounces-151851-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:21 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:33 +0000 +Subject: arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-8-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit 0dfefc2ea2f29ced2416017d7e5b1253a54c2735 ] + +A malicious BPF program may manipulate the branch history to influence +what the hardware speculates will happen next. + +On exit from a BPF program, emit the BHB mitigation sequence. + +This is only applied for 'classic' cBPF programs that are loaded by +seccomp. + +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Acked-by: Daniel Borkmann <daniel@iogearbox.net> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/spectre.h | 1 + arch/arm64/kernel/proton-pack.c | 2 - + arch/arm64/net/bpf_jit_comp.c | 55 ++++++++++++++++++++++++++++++++++++--- + 3 files changed, 53 insertions(+), 5 deletions(-) + +--- a/arch/arm64/include/asm/spectre.h ++++ b/arch/arm64/include/asm/spectre.h +@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown + + enum mitigation_state arm64_get_spectre_bhb_state(void); + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); ++extern bool __nospectre_bhb; + u8 get_spectre_bhb_loop_value(void); + bool is_spectre_bhb_fw_mitigated(void); + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -1023,7 +1023,7 @@ static void this_cpu_set_vectors(enum ar + isb(); + } + +-static bool __read_mostly __nospectre_bhb; ++bool __read_mostly __nospectre_bhb; + static int __init parse_spectre_bhb_param(char *str) + { + __nospectre_bhb = true; +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -7,14 +7,17 @@ + + #define pr_fmt(fmt) "bpf_jit: " fmt + ++#include <linux/arm-smccc.h> + #include <linux/bitfield.h> + #include <linux/bpf.h> ++#include <linux/cpu.h> + #include <linux/filter.h> + #include <linux/printk.h> + #include <linux/slab.h> + + #include <asm/byteorder.h> + #include <asm/cacheflush.h> ++#include <asm/cpufeature.h> + #include <asm/debug-monitors.h> + #include <asm/insn.h> + #include <asm/set_memory.h> +@@ -327,7 +330,48 @@ static int emit_bpf_tail_call(struct jit + #undef jmp_offset + } + +-static void build_epilogue(struct jit_ctx *ctx) ++/* Clobbers BPF registers 1-4, aka x0-x3 */ ++static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx) ++{ ++ const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */ ++ u8 k = get_spectre_bhb_loop_value(); ++ ++ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || ++ cpu_mitigations_off() || __nospectre_bhb || ++ arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) ++ return; ++ ++ if
(supports_clearbhb(SCOPE_SYSTEM)) { ++ emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx); ++ return; ++ } ++ ++ if (k) { ++ emit_a64_mov_i64(r1, k, ctx); ++ emit(A64_B(1), ctx); ++ emit(A64_SUBS_I(true, r1, r1, 1), ctx); ++ emit(A64_B_(A64_COND_NE, -2), ctx); ++ emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx); ++ emit(aarch64_insn_get_isb_value(), ctx); ++ } ++ ++ if (is_spectre_bhb_fw_mitigated()) { ++ emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR, ++ ARM_SMCCC_ARCH_WORKAROUND_3), ctx); ++ switch (arm_smccc_1_1_get_conduit()) { ++ case SMCCC_CONDUIT_HVC: ++ emit(aarch64_insn_get_hvc_value(), ctx); ++ break; ++ case SMCCC_CONDUIT_SMC: ++ emit(aarch64_insn_get_smc_value(), ctx); ++ break; ++ default: ++ pr_err_once("Firmware mitigation enabled with unknown conduit\n"); ++ } ++ } ++} ++ ++static void build_epilogue(struct jit_ctx *ctx, bool was_classic) + { + const u8 r0 = bpf2a64[BPF_REG_0]; + const u8 r6 = bpf2a64[BPF_REG_6]; +@@ -346,10 +390,13 @@ static void build_epilogue(struct jit_ct + emit(A64_POP(r8, r9, A64_SP), ctx); + emit(A64_POP(r6, r7, A64_SP), ctx); + ++ if (was_classic) ++ build_bhb_mitigation(ctx); ++ + /* Restore FP/LR registers */ + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + +- /* Set return value */ ++ /* Move the return value from bpf:r0 (aka x7) to x0 */ + emit(A64_MOV(1, A64_R(0), r0), ctx); + + emit(A64_RET(A64_LR), ctx); +@@ -1062,7 +1109,7 @@ struct bpf_prog *bpf_int_jit_compile(str + } + + ctx.epilogue_offset = ctx.idx; +- build_epilogue(&ctx); ++ build_epilogue(&ctx, was_classic); + + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); +@@ -1094,7 +1141,7 @@ skip_init_ctx: + goto out_off; + } + +- build_epilogue(&ctx); ++ build_epilogue(&ctx, was_classic); + + /* 3. Extra pass to validate JITed code. */ + if (validate_code(&ctx)) { diff --git a/queue-5.15/arm64-bpf-only-mitigate-cbpf-programs-loaded-by-unprivileged-users.patch b/queue-5.15/arm64-bpf-only-mitigate-cbpf-programs-loaded-by-unprivileged-users.patch new file mode 100644 index 0000000000..bf3ddc0301 --- /dev/null +++ b/queue-5.15/arm64-bpf-only-mitigate-cbpf-programs-loaded-by-unprivileged-users.patch @@ -0,0 +1,40 @@ +From stable+bounces-151852-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:22 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:34 +0000 +Subject: arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-9-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit f300769ead032513a68e4a02e806393402e626f8 ] + +Support for eBPF programs loaded by unprivileged users is typically +disabled. This means only cBPF programs need to be mitigated for BHB. + +In addition, only mitigate cBPF programs that were loaded by an +unprivileged user. Privileged users can also load the same program +via eBPF, making the mitigation pointless. 
+ +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Acked-by: Daniel Borkmann <daniel@iogearbox.net> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/net/bpf_jit_comp.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/arch/arm64/net/bpf_jit_comp.c ++++ b/arch/arm64/net/bpf_jit_comp.c +@@ -341,6 +341,9 @@ static void __maybe_unused build_bhb_mit + arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) + return; + ++ if (capable(CAP_SYS_ADMIN)) ++ return; ++ + if (supports_clearbhb(SCOPE_SYSTEM)) { + emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx); + return; diff --git a/queue-5.15/arm64-insn-add-encoders-for-atomic-operations.patch b/queue-5.15/arm64-insn-add-encoders-for-atomic-operations.patch new file mode 100644 index 0000000000..106ede91e6 --- /dev/null +++ b/queue-5.15/arm64-insn-add-encoders-for-atomic-operations.patch @@ -0,0 +1,404 @@ +From stable+bounces-151855-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:25 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:28 +0000 +Subject: arm64: insn: add encoders for atomic operations +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-3-pulehui@huaweicloud.com> + +From: Hou Tao <houtao1@huawei.com> + +[ Upstream commit fa1114d9eba5087ba5e81aab4c56f546995e6cd3 ] + +It is a preparation patch for eBPF atomic support under arm64. eBPF +needs to support atomic[64]_fetch_add, atomic[64]_[fetch_]{and,or,xor} and +atomic[64]_{xchg|cmpxchg}. The ordering semantics of eBPF atomics are +the same as the implementations in the Linux kernel. + +Add three helpers to support LDCLR/LDEOR/LDSET/SWP, CAS and DMB +instructions. STADD/STCLR/STEOR/STSET are simply encoded as aliases for +LDADD/LDCLR/LDEOR/LDSET with XZR as the destination register, so no extra +helper is added. atomic_fetch_add() and other atomic ops need support for +the STLXR instruction, so extend enum aarch64_insn_ldst_type to do that. + +LDADD/LDEOR/LDSET/SWP and CAS instructions are only available when LSE +atomics is enabled, so just return AARCH64_BREAK_FAULT directly in +these newly-added helpers if CONFIG_ARM64_LSE_ATOMICS is disabled.
+ +Signed-off-by: Hou Tao <houtao1@huawei.com> +Link: https://lore.kernel.org/r/20220217072232.1186625-3-houtao1@huawei.com +Signed-off-by: Will Deacon <will@kernel.org> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/insn.h | 80 ++++++++++++++++-- + arch/arm64/lib/insn.c | 185 ++++++++++++++++++++++++++++++++++++++---- + arch/arm64/net/bpf_jit.h | 11 ++ + 3 files changed, 253 insertions(+), 23 deletions(-) + +--- a/arch/arm64/include/asm/insn.h ++++ b/arch/arm64/include/asm/insn.h +@@ -206,7 +206,9 @@ enum aarch64_insn_ldst_type { + AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX, + AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX, + AARCH64_INSN_LDST_LOAD_EX, ++ AARCH64_INSN_LDST_LOAD_ACQ_EX, + AARCH64_INSN_LDST_STORE_EX, ++ AARCH64_INSN_LDST_STORE_REL_EX, + }; + + enum aarch64_insn_adsb_type { +@@ -281,6 +283,36 @@ enum aarch64_insn_adr_type { + AARCH64_INSN_ADR_TYPE_ADR, + }; + ++enum aarch64_insn_mem_atomic_op { ++ AARCH64_INSN_MEM_ATOMIC_ADD, ++ AARCH64_INSN_MEM_ATOMIC_CLR, ++ AARCH64_INSN_MEM_ATOMIC_EOR, ++ AARCH64_INSN_MEM_ATOMIC_SET, ++ AARCH64_INSN_MEM_ATOMIC_SWP, ++}; ++ ++enum aarch64_insn_mem_order_type { ++ AARCH64_INSN_MEM_ORDER_NONE, ++ AARCH64_INSN_MEM_ORDER_ACQ, ++ AARCH64_INSN_MEM_ORDER_REL, ++ AARCH64_INSN_MEM_ORDER_ACQREL, ++}; ++ ++enum aarch64_insn_mb_type { ++ AARCH64_INSN_MB_SY, ++ AARCH64_INSN_MB_ST, ++ AARCH64_INSN_MB_LD, ++ AARCH64_INSN_MB_ISH, ++ AARCH64_INSN_MB_ISHST, ++ AARCH64_INSN_MB_ISHLD, ++ AARCH64_INSN_MB_NSH, ++ AARCH64_INSN_MB_NSHST, ++ AARCH64_INSN_MB_NSHLD, ++ AARCH64_INSN_MB_OSH, ++ AARCH64_INSN_MB_OSHST, ++ AARCH64_INSN_MB_OSHLD, ++}; ++ + #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ + static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ + { \ +@@ -304,6 +336,11 @@ __AARCH64_INSN_FUNCS(store_post, 0x3FE00 + __AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400) + __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) + __AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000) ++__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000) ++__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000) ++__AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000) ++__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000) ++__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00) + __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) + __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000) + __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) +@@ -475,13 +512,6 @@ u32 aarch64_insn_gen_load_store_ex(enum + enum aarch64_insn_register state, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type); +-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result, +- enum aarch64_insn_register address, +- enum aarch64_insn_register value, +- enum aarch64_insn_size_type size); +-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address, +- enum aarch64_insn_register value, +- enum aarch64_insn_size_type size); + u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, + enum aarch64_insn_register src, + int imm, enum aarch64_insn_variant variant, +@@ -542,6 +572,42 @@ u32 aarch64_insn_gen_prefetch(enum aarch + enum aarch64_insn_prfm_type type, + enum aarch64_insn_prfm_target target, + enum aarch64_insn_prfm_policy policy); ++#ifdef CONFIG_ARM64_LSE_ATOMICS ++u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_atomic_op op, ++ enum 
aarch64_insn_mem_order_type order); ++u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_order_type order); ++#else ++static inline ++u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_atomic_op op, ++ enum aarch64_insn_mem_order_type order) ++{ ++ return AARCH64_BREAK_FAULT; ++} ++ ++static inline ++u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_order_type order) ++{ ++ return AARCH64_BREAK_FAULT; ++} ++#endif ++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type); ++ + s32 aarch64_get_branch_offset(u32 insn); + u32 aarch64_set_branch_offset(u32 insn, s32 offset); + +--- a/arch/arm64/lib/insn.c ++++ b/arch/arm64/lib/insn.c +@@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum + + switch (type) { + case AARCH64_INSN_LDST_LOAD_EX: ++ case AARCH64_INSN_LDST_LOAD_ACQ_EX: + insn = aarch64_insn_get_load_ex_value(); ++ if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX) ++ insn |= BIT(15); + break; + case AARCH64_INSN_LDST_STORE_EX: ++ case AARCH64_INSN_LDST_STORE_REL_EX: + insn = aarch64_insn_get_store_ex_value(); ++ if (type == AARCH64_INSN_LDST_STORE_REL_EX) ++ insn |= BIT(15); + break; + default: + pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type); +@@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum + state); + } + +-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result, +- enum aarch64_insn_register address, +- enum aarch64_insn_register value, +- enum aarch64_insn_size_type size) ++#ifdef CONFIG_ARM64_LSE_ATOMICS ++static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type, ++ u32 insn) + { +- u32 insn = aarch64_insn_get_ldadd_value(); ++ u32 order; ++ ++ switch (type) { ++ case AARCH64_INSN_MEM_ORDER_NONE: ++ order = 0; ++ break; ++ case AARCH64_INSN_MEM_ORDER_ACQ: ++ order = 2; ++ break; ++ case AARCH64_INSN_MEM_ORDER_REL: ++ order = 1; ++ break; ++ case AARCH64_INSN_MEM_ORDER_ACQREL: ++ order = 3; ++ break; ++ default: ++ pr_err("%s: unknown mem order %d\n", __func__, type); ++ return AARCH64_BREAK_FAULT; ++ } ++ ++ insn &= ~GENMASK(23, 22); ++ insn |= order << 22; ++ ++ return insn; ++} ++ ++u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_atomic_op op, ++ enum aarch64_insn_mem_order_type order) ++{ ++ u32 insn; ++ ++ switch (op) { ++ case AARCH64_INSN_MEM_ATOMIC_ADD: ++ insn = aarch64_insn_get_ldadd_value(); ++ break; ++ case AARCH64_INSN_MEM_ATOMIC_CLR: ++ insn = aarch64_insn_get_ldclr_value(); ++ break; ++ case AARCH64_INSN_MEM_ATOMIC_EOR: ++ insn = aarch64_insn_get_ldeor_value(); ++ break; ++ case AARCH64_INSN_MEM_ATOMIC_SET: ++ insn = aarch64_insn_get_ldset_value(); ++ break; ++ case AARCH64_INSN_MEM_ATOMIC_SWP: ++ insn = aarch64_insn_get_swp_value(); ++ break; ++ default: ++ pr_err("%s: unimplemented mem atomic op %d\n", __func__, op); ++ return AARCH64_BREAK_FAULT; ++ } + + switch (size) { + case AARCH64_INSN_SIZE_32: +@@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_ + + insn = 
aarch64_insn_encode_ldst_size(size, insn); + ++ insn = aarch64_insn_encode_ldst_order(order, insn); ++ + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, + result); + +@@ -631,18 +692,69 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_ + value); + } + +-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address, +- enum aarch64_insn_register value, +- enum aarch64_insn_size_type size) ++static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type, ++ u32 insn) + { +- /* +- * STADD is simply encoded as an alias for LDADD with XZR as +- * the destination register. +- */ +- return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address, +- value, size); ++ u32 order; ++ ++ switch (type) { ++ case AARCH64_INSN_MEM_ORDER_NONE: ++ order = 0; ++ break; ++ case AARCH64_INSN_MEM_ORDER_ACQ: ++ order = BIT(22); ++ break; ++ case AARCH64_INSN_MEM_ORDER_REL: ++ order = BIT(15); ++ break; ++ case AARCH64_INSN_MEM_ORDER_ACQREL: ++ order = BIT(15) | BIT(22); ++ break; ++ default: ++ pr_err("%s: unknown mem order %d\n", __func__, type); ++ return AARCH64_BREAK_FAULT; ++ } ++ ++ insn &= ~(BIT(15) | BIT(22)); ++ insn |= order; ++ ++ return insn; + } + ++u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, ++ enum aarch64_insn_register address, ++ enum aarch64_insn_register value, ++ enum aarch64_insn_size_type size, ++ enum aarch64_insn_mem_order_type order) ++{ ++ u32 insn; ++ ++ switch (size) { ++ case AARCH64_INSN_SIZE_32: ++ case AARCH64_INSN_SIZE_64: ++ break; ++ default: ++ pr_err("%s: unimplemented size encoding %d\n", __func__, size); ++ return AARCH64_BREAK_FAULT; ++ } ++ ++ insn = aarch64_insn_get_cas_value(); ++ ++ insn = aarch64_insn_encode_ldst_size(size, insn); ++ ++ insn = aarch64_insn_encode_cas_order(order, insn); ++ ++ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, ++ result); ++ ++ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, ++ address); ++ ++ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn, ++ value); ++} ++#endif ++ + static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type, + enum aarch64_insn_prfm_target target, + enum aarch64_insn_prfm_policy policy, +@@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_i + insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); + } ++ ++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) ++{ ++ u32 opt; ++ u32 insn; ++ ++ switch (type) { ++ case AARCH64_INSN_MB_SY: ++ opt = 0xf; ++ break; ++ case AARCH64_INSN_MB_ST: ++ opt = 0xe; ++ break; ++ case AARCH64_INSN_MB_LD: ++ opt = 0xd; ++ break; ++ case AARCH64_INSN_MB_ISH: ++ opt = 0xb; ++ break; ++ case AARCH64_INSN_MB_ISHST: ++ opt = 0xa; ++ break; ++ case AARCH64_INSN_MB_ISHLD: ++ opt = 0x9; ++ break; ++ case AARCH64_INSN_MB_NSH: ++ opt = 0x7; ++ break; ++ case AARCH64_INSN_MB_NSHST: ++ opt = 0x6; ++ break; ++ case AARCH64_INSN_MB_NSHLD: ++ opt = 0x5; ++ break; ++ default: ++ pr_err("%s: unknown dmb type %d\n", __func__, type); ++ return AARCH64_BREAK_FAULT; ++ } ++ ++ insn = aarch64_insn_get_dmb_value(); ++ insn &= ~GENMASK(11, 8); ++ insn |= (opt << 8); ++ ++ return insn; ++} +--- a/arch/arm64/net/bpf_jit.h ++++ b/arch/arm64/net/bpf_jit.h +@@ -89,9 +89,16 @@ + #define A64_STXR(sf, Rt, Rn, Rs) \ + A64_LSX(sf, Rt, Rn, Rs, STORE_EX) + +-/* LSE atomics */ ++/* ++ * LSE atomics ++ * ++ * STADD is simply encoded as an alias for LDADD with XZR as ++ * the destination register. 
++ */ + #define A64_STADD(sf, Rn, Rs) \ +- aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf)) ++ aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \ ++ A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_ADD, \ ++ AARCH64_INSN_MEM_ORDER_NONE) + + /* Add/subtract (immediate) */ + #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ diff --git a/queue-5.15/arm64-insn-add-support-for-encoding-dsb.patch b/queue-5.15/arm64-insn-add-support-for-encoding-dsb.patch new file mode 100644 index 0000000000..8ddd4e6bd0 --- /dev/null +++ b/queue-5.15/arm64-insn-add-support-for-encoding-dsb.patch @@ -0,0 +1,130 @@ +From stable+bounces-151856-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:28 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:29 +0000 +Subject: arm64: insn: Add support for encoding DSB +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-4-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit 63de8abd97ddb9b758bd8f915ecbd18e1f1a87a0 ] + +To generate code in the eBPF epilogue that uses the DSB instruction, +insn.c needs a helper to encode the type and domain. + +Re-use the CRm encoding logic from the DMB instruction. + +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/insn.h | 1 + arch/arm64/lib/insn.c | 60 +++++++++++++++++++++++++----------------- + 2 files changed, 38 insertions(+), 23 deletions(-) + +--- a/arch/arm64/include/asm/insn.h ++++ b/arch/arm64/include/asm/insn.h +@@ -607,6 +607,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_in + } + #endif + u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type); ++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type); + + s32 aarch64_get_branch_offset(u32 insn); + u32 aarch64_set_branch_offset(u32 insn, s32 offset); +--- a/arch/arm64/lib/insn.c ++++ b/arch/arm64/lib/insn.c +@@ -5,6 +5,7 @@ + * + * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com> + */ ++#include <linux/bitfield.h> + #include <linux/bitops.h> + #include <linux/bug.h> + #include <linux/printk.h> +@@ -1569,47 +1570,60 @@ u32 aarch64_insn_gen_extr(enum aarch64_i + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); + } + +-u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) ++static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type) + { +- u32 opt; +- u32 insn; +- + switch (type) { + case AARCH64_INSN_MB_SY: +- opt = 0xf; +- break; ++ return 0xf; + case AARCH64_INSN_MB_ST: +- opt = 0xe; +- break; ++ return 0xe; + case AARCH64_INSN_MB_LD: +- opt = 0xd; +- break; ++ return 0xd; + case AARCH64_INSN_MB_ISH: +- opt = 0xb; +- break; ++ return 0xb; + case AARCH64_INSN_MB_ISHST: +- opt = 0xa; +- break; ++ return 0xa; + case AARCH64_INSN_MB_ISHLD: +- opt = 0x9; +- break; ++ return 0x9; + case AARCH64_INSN_MB_NSH: +- opt = 0x7; +- break; ++ return 0x7; + case AARCH64_INSN_MB_NSHST: +- opt = 0x6; +- break; ++ return 0x6; + case AARCH64_INSN_MB_NSHLD: +- opt = 0x5; +- break; ++ return 0x5; + default: +- pr_err("%s: unknown dmb type %d\n", __func__, type); ++ pr_err("%s: unknown barrier type %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } ++} ++ ++u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) ++{ ++ u32 opt; ++ u32 insn; ++ ++ opt =
__get_barrier_crm_val(type); ++ if (opt == AARCH64_BREAK_FAULT) ++ return AARCH64_BREAK_FAULT; + + insn = aarch64_insn_get_dmb_value(); + insn &= ~GENMASK(11, 8); + insn |= (opt << 8); + ++ return insn; ++} ++ ++u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type) ++{ ++ u32 opt, insn; ++ ++ opt = __get_barrier_crm_val(type); ++ if (opt == AARCH64_BREAK_FAULT) ++ return AARCH64_BREAK_FAULT; ++ ++ insn = aarch64_insn_get_dsb_base_value(); ++ insn &= ~GENMASK(11, 8); ++ insn |= (opt << 8); ++ + return insn; + } diff --git a/queue-5.15/arm64-move-aarch64_break_fault-into-insn-def.h.patch b/queue-5.15/arm64-move-aarch64_break_fault-into-insn-def.h.patch new file mode 100644 index 0000000000..7dc3ccf2d5 --- /dev/null +++ b/queue-5.15/arm64-move-aarch64_break_fault-into-insn-def.h.patch @@ -0,0 +1,76 @@ +From stable+bounces-151848-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:16 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:27 +0000 +Subject: arm64: move AARCH64_BREAK_FAULT into insn-def.h +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-2-pulehui@huaweicloud.com> + +From: Hou Tao <houtao1@huawei.com> + +[ Upstream commit 97e58e395e9c074fd096dad13c54e9f4112cf71d ] + +If CONFIG_ARM64_LSE_ATOMICS is off, encoders for LSE-related instructions +can return AARCH64_BREAK_FAULT directly in insn.h. In order to access +AARCH64_BREAK_FAULT in insn.h, we cannot include debug-monitors.h in +insn.h, because debug-monitors.h already depends on insn.h, so just +move AARCH64_BREAK_FAULT into insn-def.h. + +It will be used by the following patch to eliminate unnecessary LSE-related +encoders when CONFIG_ARM64_LSE_ATOMICS is off. + +Signed-off-by: Hou Tao <houtao1@huawei.com> +Link: https://lore.kernel.org/r/20220217072232.1186625-2-houtao1@huawei.com +Signed-off-by: Will Deacon <will@kernel.org> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/debug-monitors.h | 12 ------------ + arch/arm64/include/asm/insn-def.h | 14 ++++++++++++++ + 2 files changed, 14 insertions(+), 12 deletions(-) + +--- a/arch/arm64/include/asm/debug-monitors.h ++++ b/arch/arm64/include/asm/debug-monitors.h +@@ -34,18 +34,6 @@ + */ + #define BREAK_INSTR_SIZE AARCH64_INSN_SIZE + +-/* +- * BRK instruction encoding +- * The #imm16 value should be placed at bits[20:5] within BRK ins +- */ +-#define AARCH64_BREAK_MON 0xd4200000 +- +-/* +- * BRK instruction for provoking a fault on purpose +- * Unlike kgdb, #imm16 value with unallocated handler is used for faulting. +- */ +-#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5)) +- + #define AARCH64_BREAK_KGDB_DYN_DBG \ + (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5)) + +--- a/arch/arm64/include/asm/insn-def.h ++++ b/arch/arm64/include/asm/insn-def.h +@@ -3,7 +3,21 @@ + #ifndef __ASM_INSN_DEF_H + #define __ASM_INSN_DEF_H + ++#include <asm/brk-imm.h> ++ + /* A64 instructions are always 32 bits. */ + #define AARCH64_INSN_SIZE 4 + ++/* ++ * BRK instruction encoding ++ * The #imm16 value should be placed at bits[20:5] within BRK ins ++ */ ++#define AARCH64_BREAK_MON 0xd4200000 ++ ++/* ++ * BRK instruction for provoking a fault on purpose ++ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
++ */ ++#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5)) ++ + #endif /* __ASM_INSN_DEF_H */ diff --git a/queue-5.15/arm64-proton-pack-add-new-cpus-k-values-for-branch-mitigation.patch b/queue-5.15/arm64-proton-pack-add-new-cpus-k-values-for-branch-mitigation.patch new file mode 100644 index 0000000000..c92149106a --- /dev/null +++ b/queue-5.15/arm64-proton-pack-add-new-cpus-k-values-for-branch-mitigation.patch @@ -0,0 +1,56 @@ +From stable+bounces-151857-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:29 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:35 +0000 +Subject: arm64: proton-pack: Add new CPUs 'k' values for branch mitigation +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-10-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit efe676a1a7554219eae0b0dcfe1e0cdcc9ef9aef ] + +Update the list of 'k' values for the branch mitigation from arm's +website. + +Add the values for Cortex-X1C. The MIDR_EL1 value can be found here: +https://developer.arm.com/documentation/101968/0002/Register-descriptions/AArch> + +Link: https://developer.arm.com/documentation/110280/2-0/?lang=en +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/cputype.h | 2 ++ + arch/arm64/kernel/proton-pack.c | 1 + + 2 files changed, 3 insertions(+) + +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -81,6 +81,7 @@ + #define ARM_CPU_PART_CORTEX_A78AE 0xD42 + #define ARM_CPU_PART_CORTEX_X1 0xD44 + #define ARM_CPU_PART_CORTEX_A510 0xD46 ++#define ARM_CPU_PART_CORTEX_X1C 0xD4C + #define ARM_CPU_PART_CORTEX_A520 0xD80 + #define ARM_CPU_PART_CORTEX_A710 0xD47 + #define ARM_CPU_PART_CORTEX_A715 0xD4D +@@ -147,6 +148,7 @@ + #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) + #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) + #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) ++#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C) + #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520) + #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) + #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), diff --git a/queue-5.15/arm64-proton-pack-expose-whether-the-branchy-loop-k-value.patch b/queue-5.15/arm64-proton-pack-expose-whether-the-branchy-loop-k-value.patch new file mode 100644 index 0000000000..8030a9ac43 --- /dev/null +++ b/queue-5.15/arm64-proton-pack-expose-whether-the-branchy-loop-k-value.patch @@ -0,0 +1,48 @@ +From stable+bounces-151854-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:23 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 
Jun 2025 15:35:31 +0000 +Subject: arm64: proton-pack: Expose whether the branchy loop k value +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-6-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit a1152be30a043d2d4dcb1683415f328bf3c51978 ] + +Add a helper to expose the k value of the branchy loop. This is needed +by the BPF JIT to generate the mitigation sequence in BPF programs. + +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/spectre.h | 1 + + arch/arm64/kernel/proton-pack.c | 5 +++++ + 2 files changed, 6 insertions(+) + +--- a/arch/arm64/include/asm/spectre.h ++++ b/arch/arm64/include/asm/spectre.h +@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown + + enum mitigation_state arm64_get_spectre_bhb_state(void); + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); ++u8 get_spectre_bhb_loop_value(void); + bool is_spectre_bhb_fw_mitigated(void); + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); + bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -998,6 +998,11 @@ bool is_spectre_bhb_affected(const struc + return true; + } + ++u8 get_spectre_bhb_loop_value(void) ++{ ++ return max_bhb_k; ++} ++ + static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) + { + const char *v = arm64_get_bp_hardening_vector(slot); diff --git a/queue-5.15/arm64-proton-pack-expose-whether-the-platform-is-mitigated-by-firmware.patch b/queue-5.15/arm64-proton-pack-expose-whether-the-platform-is-mitigated-by-firmware.patch new file mode 100644 index 0000000000..ac1f77c59f --- /dev/null +++ b/queue-5.15/arm64-proton-pack-expose-whether-the-platform-is-mitigated-by-firmware.patch @@ -0,0 +1,61 @@ +From stable+bounces-151850-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:16 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:30 +0000 +Subject: arm64: proton-pack: Expose whether the platform is mitigated by firmware +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-5-pulehui@huaweicloud.com> + +From: James Morse <james.morse@arm.com> + +[ Upstream commit e7956c92f396a44eeeb6eaf7a5b5e1ad24db6748 ] + +is_spectre_bhb_fw_affected() allows the caller to determine if the CPU +is known to need a firmware mitigation. CPUs are either on the list +of CPUs we know about, or firmware has been queried and reported that +the platform is affected - and mitigated by firmware. + +This helper is not useful to determine if the platform is mitigated +by firmware. A CPU could be on the known list, but the firmware mitigation may +not be implemented. It's affected but not mitigated. + +spectre_bhb_enable_mitigation() handles this distinction by checking +the firmware state before enabling the mitigation. + +Add a helper to expose this state. This will be used by the BPF JIT +to determine if calling firmware for a mitigation is necessary and +supported.
+ +Signed-off-by: James Morse <james.morse@arm.com> +Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/include/asm/spectre.h | 1 + + arch/arm64/kernel/proton-pack.c | 5 +++++ + 2 files changed, 6 insertions(+) + +--- a/arch/arm64/include/asm/spectre.h ++++ b/arch/arm64/include/asm/spectre.h +@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown + + enum mitigation_state arm64_get_spectre_bhb_state(void); + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); ++bool is_spectre_bhb_fw_mitigated(void); + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); + bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); + #endif /* __ASSEMBLY__ */ +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -1088,6 +1088,11 @@ void spectre_bhb_enable_mitigation(const + update_mitigation_state(&spectre_bhb_state, state); + } + ++bool is_spectre_bhb_fw_mitigated(void) ++{ ++ return test_bit(BHB_FW, &system_bhb_mitigations); ++} ++ + /* Patched to NOP when enabled */ + void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, + __le32 *origptr, diff --git a/queue-5.15/arm64-spectre-increase-parameters-that-can-be-used-to-turn-off-bhb-mitigation-individually.patch b/queue-5.15/arm64-spectre-increase-parameters-that-can-be-used-to-turn-off-bhb-mitigation-individually.patch new file mode 100644 index 0000000000..7e7c16b6af --- /dev/null +++ b/queue-5.15/arm64-spectre-increase-parameters-that-can-be-used-to-turn-off-bhb-mitigation-individually.patch @@ -0,0 +1,77 @@ +From stable+bounces-151853-greg=kroah.com@vger.kernel.org Sat Jun 7 17:33:21 2025 +From: Pu Lehui <pulehui@huaweicloud.com> +Date: Sat, 7 Jun 2025 15:35:32 +0000 +Subject: arm64: spectre: increase parameters that can be used to turn off bhb mitigation individually +To: stable@vger.kernel.org +Cc: james.morse@arm.com, catalin.marinas@arm.com, daniel@iogearbox.net, ast@kernel.org, andrii@kernel.org, xukuohai@huawei.com, pulehui@huawei.com +Message-ID: <20250607153535.3613861-7-pulehui@huaweicloud.com> + +From: Liu Song <liusong@linux.alibaba.com> + +[ Upstream commit 877ace9eab7de032f954533afd5d1ecd0cf62eaf ] + +In our environment, it was found that the BHB mitigation has a great +impact on benchmark performance. For example, in the lmbench test, +the "process fork && exit" test performance drops by 20%. +So it is necessary to have the ability to turn off the mitigation +individually via the cmdline, thus avoiding having to rebuild the +kernel with an adjusted config.
+ +Signed-off-by: Liu Song <liusong@linux.alibaba.com> +Acked-by: Catalin Marinas <catalin.marinas@arm.com> +Link: https://lore.kernel.org/r/1661514050-22263-1-git-send-email-liusong@linux.alibaba.com +Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> +Signed-off-by: Pu Lehui <pulehui@huawei.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + Documentation/admin-guide/kernel-parameters.txt | 5 +++++ + arch/arm64/kernel/proton-pack.c | 10 +++++++++- + 2 files changed, 14 insertions(+), 1 deletion(-) + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3105,6 +3105,7 @@ + spectre_bhi=off [X86] + spectre_v2_user=off [X86] + ssbd=force-off [ARM64] ++ nospectre_bhb [ARM64] + tsx_async_abort=off [X86] + + Exceptions: +@@ -3526,6 +3527,10 @@ + vulnerability. System may allow data leaks with this + option. + ++ nospectre_bhb [ARM64] Disable all mitigations for Spectre-BHB (branch ++ history injection) vulnerability. System may allow data leaks ++ with this option. ++ + nospec_store_bypass_disable + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + +--- a/arch/arm64/kernel/proton-pack.c ++++ b/arch/arm64/kernel/proton-pack.c +@@ -1023,6 +1023,14 @@ static void this_cpu_set_vectors(enum ar + isb(); + } + ++static bool __read_mostly __nospectre_bhb; ++static int __init parse_spectre_bhb_param(char *str) ++{ ++ __nospectre_bhb = true; ++ return 0; ++} ++early_param("nospectre_bhb", parse_spectre_bhb_param); ++ + void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) + { + bp_hardening_cb_t cpu_cb; +@@ -1036,7 +1044,7 @@ void spectre_bhb_enable_mitigation(const + /* No point mitigating Spectre-BHB alone. */ + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); +- } else if (cpu_mitigations_off()) { ++ } else if (cpu_mitigations_off() || __nospectre_bhb) { + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { + state = SPECTRE_MITIGATED; diff --git a/queue-5.15/series b/queue-5.15/series index eb7f4c980b..d4978b41b4 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -386,3 +386,12 @@ arm-dts-am335x-bone-common-increase-mdio-reset-deassert-time.patch arm-dts-am335x-bone-common-increase-mdio-reset-deassert-delay-to-50ms.patch serial-sh-sci-increment-the-runtime-usage-counter-for-the-earlycon-device.patch revert-cpufreq-tegra186-share-policy-per-cluster.patch +arm64-move-aarch64_break_fault-into-insn-def.h.patch +arm64-insn-add-encoders-for-atomic-operations.patch +arm64-insn-add-support-for-encoding-dsb.patch +arm64-proton-pack-expose-whether-the-platform-is-mitigated-by-firmware.patch +arm64-proton-pack-expose-whether-the-branchy-loop-k-value.patch +arm64-spectre-increase-parameters-that-can-be-used-to-turn-off-bhb-mitigation-individually.patch +arm64-bpf-add-bhb-mitigation-to-the-epilogue-for-cbpf-programs.patch +arm64-bpf-only-mitigate-cbpf-programs-loaded-by-unprivileged-users.patch +arm64-proton-pack-add-new-cpus-k-values-for-branch-mitigation.patch |
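
A note for readers following the insn encoder patches above: the LSE atomic generators are plain bitfield assembly over a fixed base opcode. Below is a minimal user-space sketch of the LDADD case, assuming the base opcode 0x38200000 from the patch and the standard A64 field positions (size at bits [31:30], ordering at [23:22], Rs/Rn/Rt at 16/5/0); it mirrors the shape of aarch64_insn_gen_atomic_ld_op() but is not the kernel code.

/* ldadd_enc.c - hedged sketch of the LDADD encoding used by the
 * "arm64: insn: add encoders for atomic operations" patch above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t gen_ldadd(unsigned rt, unsigned rn, unsigned rs,
			  int is64, unsigned order)
{
	uint32_t insn = 0x38200000u;	/* LDADD base, per the patch */

	insn |= (is64 ? 3u : 2u) << 30;	/* access size: W or X */
	insn |= (order & 3u) << 22;	/* none=0, rel=1, acq=2, acqrel=3 */
	insn |= (rs & 31u) << 16;	/* Rs: the addend register */
	insn |= (rn & 31u) << 5;	/* Rn: the address register */
	insn |= rt & 31u;		/* Rt: the loaded-result register */
	return insn;
}

int main(void)
{
	/* STADD w1, [x2] is the alias LDADD w1, wzr, [x2]: Rt = XZR (31). */
	printf("STADD w1, [x2] -> 0x%08x\n", gen_ldadd(31, 2, 1, 0, 0));
	return 0;
}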
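Similarly, the DMB/DSB generators reduce to masking a 4-bit CRm "option" field (bits [11:8], the GENMASK(11, 8) in the patch) into a fixed base opcode. A standalone sketch follows; the base opcodes 0xd50330bf (DMB) and 0xd503309f (DSB) with CRm cleared are assumptions taken from the A64 ISA tables, not from the patch, and the CRm values mirror __get_barrier_crm_val().

/* barrier_enc.c - hedged sketch of the encoders from the
 * "arm64: insn: Add support for encoding DSB" patch above. */
#include <stdint.h>
#include <stdio.h>

#define A64_DMB_BASE	0xd50330bfu	/* assumed: DMB with CRm = 0 */
#define A64_DSB_BASE	0xd503309fu	/* assumed: DSB with CRm = 0 */
#define CRM_FIELD	(0xfu << 8)	/* GENMASK(11, 8) */

static uint32_t gen_barrier(uint32_t base, uint32_t crm)
{
	return (base & ~CRM_FIELD) | (crm << 8);
}

int main(void)
{
	printf("DSB ISH -> 0x%08x\n", gen_barrier(A64_DSB_BASE, 0xb));
	printf("DMB ISH -> 0x%08x\n", gen_barrier(A64_DMB_BASE, 0xb));
	printf("DSB SY  -> 0x%08x\n", gen_barrier(A64_DSB_BASE, 0xf));
	return 0;
}

DSB ISH should come out as 0xd5033b9f, the word the cBPF epilogue emits via aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH).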
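Finally, when CLEARBHB is absent and k is non-zero, the instruction stream that build_bhb_mitigation() emits corresponds to the pattern below. This is an illustrative aarch64-only inline-asm rendering, not the JIT's actual output; the loop count k is the per-CPU value from get_spectre_bhb_loop_value() in the patches above.

/* bhb_loop.c - illustrative rendering of the emitted mitigation
 * sequence; build on an arm64 host, e.g. gcc -O2 bhb_loop.c. */
static void bhb_clear_loop(unsigned long k)
{
	asm volatile(
	"	mov	x0, %0\n"
	"1:	b	2f\n"		/* taken branch: fills one BHB entry */
	"2:	subs	x0, x0, #1\n"
	"	b.ne	1b\n"		/* repeat k times */
	"	dsb	ish\n"		/* drain before later speculation */
	"	isb\n"
	: : "r" (k) : "x0", "cc", "memory");
}

int main(void)
{
	bhb_clear_loop(8);	/* hypothetical k; real values are per-CPU */
	return 0;
}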