about summary refs log tree commit diff stats
diff options
author    Alexei Starovoitov <ast@kernel.org>  2024-05-20 11:02:46 -0700
committer Alexei Starovoitov <ast@kernel.org>  2024-05-20 11:02:46 -0700
commit    546573f9f5a6c032f6b4836527149dcccdf31db2 (patch)
tree      1d64f748fd866da64a466ca60d813f258b49ba63
parent    6f130e4d4a5f7174f98300376f3994817ad7e21c (diff)
download  bpf-jmp_vs_fallthrough.tar.gz
jmp vs fallthrough hack (branch: jmp_vs_fallthrough)
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--  include/linux/bpf_verifier.h |  4
-rw-r--r--  kernel/bpf/verifier.c        | 49
2 files changed, 49 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 50aa87f8d77ff6..853f4c6eb90941 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -562,6 +562,10 @@ struct bpf_insn_aux_data {
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ u32 no_fallthrough;
+ int jmp_depth;
+ int depth_fallthrough;
+ int depth_jmp;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 77da1f438becce..73f60a50000c69 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15104,9 +15104,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
+ struct bpf_insn_aux_data *aux = cur_aux(env);
struct bpf_reg_state *eq_branch_regs;
struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
+ int prev_depth, new_depth;
bool is_jmp32;
int pred = -1;
int err;
@@ -15219,10 +15221,49 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return 0;
}
- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
- false);
- if (!other_branch)
- return -EFAULT;
+ prev_depth = aux->jmp_depth;
+ new_depth = aux->jmp_depth = env->stack_size;
+ /* 2nd time visiting conditional jmp insn that jumps forward far enough */
+ if (prev_depth && new_depth - prev_depth > 0 && insn->off > 100) {
+ if (aux->no_fallthrough)
+ aux->depth_jmp = new_depth - prev_depth;
+ else
+ aux->depth_fallthrough = new_depth - prev_depth;
+ aux->no_fallthrough = false;
+ if (aux->depth_jmp == 0) {
+ /* haven't measured the number of new jumps seen
+ * on the path to bpf_exit,
+ * and it's large enough.
+ */
+ if (aux->depth_fallthrough > 100)
+ aux->no_fallthrough = true;
+ } else {
+ if (aux->depth_jmp < aux->depth_fallthrough)
+ /* if exploring jmp path leads to less branches
+ * keep it that way.
+ */
+ aux->no_fallthrough = true;
+ }
+ }
+ if (aux->no_fallthrough) {
+ other_branch = push_stack(env, *insn_idx + 1, *insn_idx,
+ false);
+ if (!other_branch)
+ return -EFAULT;
+ *insn_idx += insn->off;
+ swap(this_branch, other_branch);
+ regs = this_branch->frame[this_branch->curframe]->regs;
+ dst_reg = &regs[insn->dst_reg];
+ if (BPF_SRC(insn->code) == BPF_X)
+ src_reg = &regs[insn->src_reg];
+ else
+ src_reg = &fake_reg;
+ } else {
+ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+ false);
+ if (!other_branch)
+ return -EFAULT;
+ }
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
if (BPF_SRC(insn->code) == BPF_X) {