author	Alexei Starovoitov <ast@fb.com>	2017-05-18 23:38:56 -0700
committer	Alexei Starovoitov <ast@fb.com>	2017-10-08 21:33:02 -0700
commit	6ca926dcfc57ffcf975fcc54dd9ef02be49e8010 (patch)
tree	9950c6b34359486441f9dbef76ed320aa8fb7c8b
parent	ca82214144d925155977abe7c0af7c2c252b837f (diff)
download	bpf-calls_v2.tar.gz
bpf: introduce function calls. work in progress (calls_v2)
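
This teaches the verifier about bpf-to-bpf function calls expressed as a
new pseudo call instruction. The smallest program exercising it, in the
selftest macro style used below (insn 0 calls the subprog that starts at
insn 0 + 2 + 1 = 3):

	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), /* call pc+2 */
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2), /* subprog: r0 = 2 */
	BPF_EXIT_INSN(),             /* return to caller */
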
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--	drivers/net/ethernet/netronome/nfp/bpf/verifier.c	4
-rw-r--r--	include/linux/bpf_verifier.h	31
-rw-r--r--	include/uapi/linux/bpf.h	6
-rw-r--r--	kernel/bpf/verifier.c	628
-rw-r--r--	tools/testing/selftests/bpf/test_verifier.c	356
5 files changed, 909 insertions, 116 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 5b783a91b115e3..9e10cd8f500b79 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -78,7 +78,7 @@ static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
const struct bpf_verifier_env *env)
{
- const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+ const struct bpf_reg_state *reg0 = &env->cur_state.frame[env->cur_state.curframe].regs[0];
u64 imm;
if (nfp_prog->act == NN_ACT_XDP)
@@ -115,7 +115,7 @@ static int
nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
const struct bpf_verifier_env *env, u8 reg)
{
- if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ if (env->cur_state.frame[env->cur_state.curframe].regs[reg].type != PTR_TO_CTX)
return -EINVAL;
return 0;
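
Spelling out env->cur_state.frame[env->cur_state.curframe].regs at every
caller is verbose; a small accessor could hide the frame indirection. A
hypothetical sketch (cur_regs() is an assumed name, not part of this patch):

	/* hypothetical helper, not in this patch */
	static inline const struct bpf_reg_state *
	cur_regs(const struct bpf_verifier_env *env)
	{
		const struct bpf_verifier_state *st = &env->cur_state;

		return st->frame[st->curframe].regs;
	}

With it, the check above would read: cur_regs(env)[reg].type != PTR_TO_CTX.
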
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b8d200f60a4090..89f80228c13e7f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,8 @@ enum bpf_reg_liveness {
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
};
+struct bpf_func_state;
+
struct bpf_reg_state {
enum bpf_reg_type type;
union {
@@ -78,6 +80,13 @@ struct bpf_reg_state {
u64 umax_value; /* maximum possible (u64)value */
/* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
+ /* For PTR_TO_STACK 'func' is a pointer to the func_state whose stack
+ * is referenced by this pointer.
+ * A nested function can have R1=fp-8 and R2=fp-8,
+ * but one of them can point to this function's stack and the other
+ * to the caller's stack.
+ */
+ struct bpf_func_state *func;
};
enum bpf_stack_slot_type {
@@ -91,11 +100,27 @@ enum bpf_stack_slot_type {
/* state of the program:
* type of all registers and stack info
*/
-struct bpf_verifier_state {
+struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
u8 stack_slot_type[MAX_BPF_STACK];
struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+ /* max stack depth in this function */
+ u32 stack_depth:10;
+ /* index of call instruction that called into this func */
+ u32 callsite:16;
+ /* stack frame number of this function state from the pov of the
+ * enclosing bpf_verifier_state. It's only used to adjust the
+ * bpf_reg_state->func pointer while copying bpf_verifier_state
+ */
+ u32 frameno:6;
+};
+
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+ /* call stack tracking */
+ struct bpf_func_state frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
+ u32 curframe;
};
/* linked list of verifier states used to prune search */
@@ -121,6 +146,8 @@ struct bpf_ext_analyzer_ops {
int insn_idx, int prev_insn_idx);
};
+#define BPF_MAX_SUBPROGS 64
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -139,6 +166,8 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ u32 subprog_starts[BPF_MAX_SUBPROGS];
+ u32 subprog_cnt;
};
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
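
With up to MAX_CALL_FRAMES func states per verifier state, two registers
can hold the same fp-relative offset yet point into different stacks; the
new 'func' back-pointer is what tells them apart. A sketch, assuming a
populated verifier state 'st' in which frame 0 passed its own fp-8 to the
callee in r1 while the callee built its own fp-8 in r2:

	struct bpf_func_state *caller = &st->frame[0];
	struct bpf_func_state *callee = &st->frame[1];

	/* both registers print as fp-8, but reference different stacks */
	bool r1_is_callers_stack = callee->regs[BPF_REG_1].func == caller;
	bool r2_is_own_stack     = callee->regs[BPF_REG_2].func == callee;
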
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 6082faf5fd2a01..4554a50cd5a283 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -193,8 +193,14 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
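
BPF_PSEUDO_MAP_FD and BPF_PSEUDO_CALL share the value 1, but the former is
only meaningful in a BPF_LD | BPF_IMM | BPF_DW instruction and the latter
only in BPF_JMP | BPF_CALL, so the two cannot collide. A call two
instructions ahead would be encoded roughly as:

	struct bpf_insn call = {
		.code    = BPF_JMP | BPF_CALL,
		.dst_reg = 0,
		.src_reg = BPF_PSEUDO_CALL, /* 1: bpf-to-bpf, not a helper */
		.off     = 0,
		.imm     = 2,               /* target insn = pc + imm + 1 */
	};
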
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 52b022310f6ac9..d977c94bb795e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20,6 +20,8 @@
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
@@ -213,9 +215,9 @@ static const char *func_id_name(int id)
return "unknown";
}
-static void print_verifier_state(struct bpf_verifier_state *state)
+static void print_verifier_state(const struct bpf_func_state *state)
{
- struct bpf_reg_state *reg;
+ const struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
@@ -228,7 +230,9 @@ static void print_verifier_state(struct bpf_verifier_state *state)
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
- verbose("%lld", reg->var_off.value + reg->off);
+ verbose("%+lld", reg->var_off.value + reg->off);
+ if (t == PTR_TO_STACK)
+ verbose("/call_%d", reg->func->callsite);
} else {
verbose("(id=%d", reg->id);
if (t != SCALAR_VALUE)
@@ -437,8 +441,11 @@ static void print_bpf_insn(const struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
- verbose("(%02x) call %s#%d\n", insn->code,
- func_id_name(insn->imm), insn->imm);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ verbose("(%02x) call pc%+d\n", insn->code, insn->imm);
+ else
+ verbose("(%02x) call %s#%d\n", insn->code,
+ func_id_name(insn->imm), insn->imm);
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose("(%02x) goto pc%+d\n",
insn->code, insn->off);
@@ -509,6 +516,10 @@ err:
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
+#define CALLEE_SAVED_REGS 4
+static const int callee_saved[CALLEE_SAVED_REGS] = {
+ BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
+};
static void __mark_reg_not_init(struct bpf_reg_state *reg);
@@ -643,6 +654,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
reg->id = 0;
reg->off = 0;
reg->var_off = tnum_unknown;
+ reg->func = NULL;
__mark_reg_unbounded(reg);
}
@@ -650,8 +662,8 @@ static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose("mark_reg_unknown(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
@@ -668,16 +680,17 @@ static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose("mark_reg_not_init(regs, %u)\n", regno);
- /* Something bad happened, let's kill all regs */
- for (regno = 0; regno < MAX_BPF_REG; regno++)
+ /* Something bad happened, let's kill all regs except FP */
+ for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(regs + regno);
return;
}
__mark_reg_not_init(regs + regno);
}
-static void init_reg_state(struct bpf_reg_state *regs)
+static void init_reg_state(struct bpf_func_state *state)
{
+ struct bpf_reg_state *regs = state->regs;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
@@ -688,37 +701,114 @@ static void init_reg_state(struct bpf_reg_state *regs)
/* frame pointer */
regs[BPF_REG_FP].type = PTR_TO_STACK;
mark_reg_known_zero(regs, BPF_REG_FP);
+ regs[BPF_REG_FP].func = state;
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(regs, BPF_REG_1);
}
+static void clear_stack_state(struct bpf_func_state *state)
+{
+ memset(state->stack_slot_type, 0, sizeof(state->stack_slot_type));
+ memset(state->spilled_regs, 0, sizeof(state->spilled_regs));
+}
+
+static void init_func_state(struct bpf_func_state *state, int frameno)
+{
+ init_reg_state(state);
+ clear_stack_state(state);
+ state->callsite = ~0;
+ state->frameno = frameno;
+}
+
enum reg_arg_type {
SRC_OP, /* register is used as source operand */
DST_OP, /* register is used as destination operand */
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
-static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
+static struct bpf_verifier_state *skip_callee(const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
{
- struct bpf_verifier_state *parent = state->parent;
+ struct bpf_verifier_state *tmp = NULL;
+
+ /* 'parent' could be a state of the caller and
+ * 'state' a state of the callee. In that case
+ * parent->curframe < state->curframe
+ * and it's ok for the r1 - r5 argument registers.
+ *
+ * 'parent' could be a callee's state after it bpf_exit-ed.
+ * In that case parent->curframe > state->curframe
+ * and it's ok for r0 only.
+ */
+ if (parent->curframe == state->curframe ||
+ (parent->curframe < state->curframe &&
+ regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
+ (parent->curframe > state->curframe &&
+ regno == BPF_REG_0))
+ return parent;
+
+ if (parent->curframe > state->curframe &&
+ regno >= BPF_REG_6) {
+ /* for callee saved regs we have to skip the whole chain
+ * of states that belong to callee and mark as LIVE_READ
+ * the registers before the call
+ */
+ tmp = parent;
+ while (tmp && tmp->curframe != state->curframe) {
+/* verbose("tmp %d looking %d\n", tmp->curframe, state->curframe);*/
+ tmp = tmp->parent;
+ }
+ if (!tmp)
+ goto bug;
+ parent = tmp;
+ } else {
+ goto bug;
+ }
+ return parent;
+bug:
+ verbose("verifier bug regno %d tmp %p\n", regno, tmp);
+ verbose("regno %d parent frame %d current frame %d\n",
+ regno, parent->curframe, state->curframe);
+ return NULL;
+}
+
+static int mark_reg_read(const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ u32 regno)
+{
+ bool writes = parent == state->parent; /* Observe write marks */
+
+ if (regno == BPF_REG_FP)
+ /* We don't need to worry about FP liveness because it's read-only */
+ return 0;
while (parent) {
/* if read wasn't screened by an earlier write ... */
- if (state->regs[regno].live & REG_LIVE_WRITTEN)
+/* verbose("ch %p frame %d regno %d live %d\n", state, state->curframe, regno, state->frame[state->curframe].regs[regno].live);*/
+ if (writes && state->frame[state->curframe].regs[regno].live & REG_LIVE_WRITTEN)
break;
+ parent = skip_callee(state, parent, regno);
+ if (!parent)
+ return -EFAULT;
/* ... then we depend on parent's value */
- parent->regs[regno].live |= REG_LIVE_READ;
+/* verbose("marking in %p frame %d regno %d\n", parent, parent->curframe, regno);*/
+ parent->frame[parent->curframe].regs[regno].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
+ return 0;
}
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
if (regno >= MAX_BPF_REG) {
verbose("R%d is invalid\n", regno);
@@ -731,7 +821,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
verbose("R%d !read_ok\n", regno);
return -EACCES;
}
- mark_reg_read(&env->cur_state, regno);
+ return mark_reg_read(vstate, vstate->parent, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
@@ -739,6 +829,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return -EACCES;
}
regs[regno].live |= REG_LIVE_WRITTEN;
+/* verbose("WR %p frame %d regno %d live %d\n", vstate, vstate->curframe, regno, regs[regno].live);*/
if (t == DST_OP)
mark_reg_unknown(regs, regno);
}
@@ -765,16 +856,18 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
-static int check_stack_write(struct bpf_verifier_state *state, int off,
- int size, int value_regno)
+static int check_stack_write(struct bpf_func_state *state, /* state in current function */
+ struct bpf_func_state *reg_state, /* func where register points to */
+ int off, int size, int value_regno)
{
int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
+ enum bpf_reg_type type;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (value_regno >= 0 &&
- is_spillable_regtype(state->regs[value_regno].type)) {
+ is_spillable_regtype((type = state->regs[value_regno].type))) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -782,44 +875,62 @@ static int check_stack_write(struct bpf_verifier_state *state, int off,
return -EACCES;
}
+ if (reg_state != state && type == PTR_TO_STACK) {
+ verbose("cannot spill pointers to stack into stack frame of the caller\n");
+ return -EINVAL;
+ }
+
+ if (type == PTR_TO_STACK)
+ WARN_ON(!state->regs[value_regno].func);
/* save register state */
- state->spilled_regs[spi] = state->regs[value_regno];
- state->spilled_regs[spi].live |= REG_LIVE_WRITTEN;
+ reg_state->spilled_regs[spi] = state->regs[value_regno];
+ reg_state->spilled_regs[spi].live |= REG_LIVE_WRITTEN;
for (i = 0; i < BPF_REG_SIZE; i++)
- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+ reg_state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
} else {
/* regular write of data into stack */
- state->spilled_regs[spi] = (struct bpf_reg_state) {};
+ reg_state->spilled_regs[spi] = (struct bpf_reg_state) {};
for (i = 0; i < size; i++)
- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
+ reg_state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
}
return 0;
}
-static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
+static void mark_stack_slot_read(const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent,
+ int slot)
{
- struct bpf_verifier_state *parent = state->parent;
+ bool writes = parent == state->parent; /* Observe write marks */
while (parent) {
/* if read wasn't screened by an earlier write ... */
- if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN)
+ if (writes && state->frame[state->curframe].spilled_regs[slot].live & REG_LIVE_WRITTEN)
break;
+ if (parent->curframe != state->curframe) {
+ print_verifier_state(&state->frame[state->curframe]);
+ }
/* ... then we depend on parent's value */
- parent->spilled_regs[slot].live |= REG_LIVE_READ;
+ parent->frame[parent->curframe].spilled_regs[slot].live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
+ writes = true;
}
}
-static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
- int value_regno)
+static int check_stack_read(struct bpf_verifier_state *vstate /* current state */,
+ struct bpf_func_state *reg_state /* func where register points to */,
+ int off, int size, int value_regno)
{
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
u8 *slot_type;
int i, spi;
- slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];
+ slot_type = &reg_state->stack_slot_type[MAX_BPF_STACK + off];
if (slot_type[0] == STACK_SPILL) {
if (size != BPF_REG_SIZE) {
@@ -837,8 +948,10 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
if (value_regno >= 0) {
/* restore register state from stack */
- state->regs[value_regno] = state->spilled_regs[spi];
- mark_stack_slot_read(state, spi);
+ state->regs[value_regno] = reg_state->spilled_regs[spi];
+ mark_stack_slot_read(vstate, vstate->parent, spi);
+ if (state->regs[value_regno].type == PTR_TO_STACK)
+ WARN_ON(!state->regs[value_regno].func);
}
return 0;
} else {
@@ -860,7 +973,9 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
- struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_map *map = state->regs[regno].map_ptr;
if (off < 0 || size <= 0 || off + size > map->value_size) {
verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
@@ -872,9 +987,10 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size)
+ int off, int size)
{
- struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int err;
@@ -947,7 +1063,9 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
int off, int size)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
struct bpf_reg_state *reg = &regs[regno];
if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
@@ -961,7 +1079,9 @@ static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
struct bpf_reg_state *reg = &regs[regno];
int err;
@@ -1031,7 +1151,10 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
- return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+
+ return __is_pointer_value(env->allow_ptr_leaks, &state->regs[regno]);
}
static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
@@ -1129,7 +1252,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
int bpf_size, enum bpf_access_type t,
int value_regno)
{
- struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
struct bpf_reg_state *reg = &state->regs[regno];
int size, err = 0;
@@ -1216,14 +1340,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (t == BPF_WRITE) {
if (!env->allow_ptr_leaks &&
- state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
+ reg->func->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
size != BPF_REG_SIZE) {
verbose("attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
- err = check_stack_write(state, off, size, value_regno);
+ err = check_stack_write(state, reg->func, off, size, value_regno);
} else {
- err = check_stack_read(state, off, size, value_regno);
+ err = check_stack_read(vstate, reg->func, off, size, value_regno);
}
} else if (reg_is_pkt_pointer(reg)) {
if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
@@ -1306,7 +1430,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs;
int off, i;
@@ -1348,7 +1473,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
for (i = 0; i < access_size; i++) {
- if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
+ if (regs[regno].func->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
verbose("invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
@@ -1361,7 +1486,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *reg = &regs[regno];
switch (reg->type) {
case PTR_TO_PACKET:
@@ -1379,7 +1506,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
- struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
@@ -1653,7 +1782,8 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
*/
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
- struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *reg;
int i;
@@ -1670,9 +1800,80 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
}
}
-static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ int *insn_idx)
+{
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_func_state *caller, *callee;
+ int i;
+
+ if (state->curframe + 1 >= MAX_CALL_FRAMES)
+ return -E2BIG;
+
+ caller = &state->frame[state->curframe++];
+ callee = &state->frame[state->curframe];
+
+ /* callee cannot access r0, r6 - r9 for reading and has to write
+ * into its own stack before reading from it.
+ * callee can read/write into caller's stack
+ */
+ init_func_state(callee, state->curframe);
+
+ /* copy r1 - r5 args that the callee can access */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ callee->regs[i] = caller->regs[i];
+
+ /* after the call registers r0 - r5 were scratched */
+ for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ mark_reg_not_init(caller->regs, caller_saved[i]);
+ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+ }
+
+ /* remember the callsite, it will be used by callee's bpf_exit */
+ callee->callsite = *insn_idx;
+
+ /* and go analyze first insn of the callee */
+ *insn_idx += insn->imm;
+
+ if (log_level) {
+ verbose("caller:\n");
+ print_verifier_state(caller);
+ verbose("callee:\n");
+ print_verifier_state(callee);
+ }
+ return 0;
+}
+
+static void prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_func_state *caller, *callee;
+
+ callee = &state->frame[state->curframe--];
+ caller = &state->frame[state->curframe];
+ /* TODO: scratch the caller's r0 - r5 here, as after a helper call */
+ /* r0 gets whatever the callee returned */
+ caller->regs[BPF_REG_0] = callee->regs[BPF_REG_0];
+
+ *insn_idx = callee->callsite + 1;
+ if (log_level) {
+ verbose("returning from callee:\n");
+ print_verifier_state(callee);
+ verbose("to caller at %d:\n", *insn_idx);
+ print_verifier_state(caller);
+ }
+ /* clear everything in the callee */
+ init_func_state(callee, state->curframe + 1);
+}
+
+static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+{
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
const struct bpf_func_proto *fn = NULL;
struct bpf_reg_state *regs = state->regs;
struct bpf_call_arg_meta meta;
@@ -1827,7 +2028,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
const struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
- struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
@@ -1839,12 +2042,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg = &regs[dst];
if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
- print_verifier_state(&env->cur_state);
+ print_verifier_state(state);
verbose("verifier internal error: known but bad sbounds\n");
return -EINVAL;
}
if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
- print_verifier_state(&env->cur_state);
+ print_verifier_state(state);
verbose("verifier internal error: known but bad ubounds\n");
return -EINVAL;
}
@@ -1881,6 +2084,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
*/
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
+ if (ptr_reg->type == PTR_TO_STACK)
+ dst_reg->func = ptr_reg->func;
switch (opcode) {
case BPF_ADD:
@@ -2023,7 +2228,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
@@ -2244,7 +2451,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
u8 opcode = BPF_OP(insn->code);
int rc;
@@ -2318,12 +2527,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* Got here implies adding two SCALAR_VALUEs */
if (WARN_ON_ONCE(ptr_reg)) {
- print_verifier_state(&env->cur_state);
+ print_verifier_state(state);
verbose("verifier internal error: unexpected ptr_reg\n");
return -EINVAL;
}
if (WARN_ON(!src_reg)) {
- print_verifier_state(&env->cur_state);
+ print_verifier_state(state);
verbose("verifier internal error: no src_reg\n");
return -EINVAL;
}
@@ -2333,7 +2542,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -2400,6 +2611,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
+ regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
@@ -2475,7 +2687,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+static void find_good_pkt_pointers(struct bpf_func_state *state,
struct bpf_reg_state *dst_reg,
enum bpf_reg_type type)
{
@@ -2786,7 +2998,7 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
-static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+static void mark_map_regs(struct bpf_func_state *state, u32 regno,
bool is_null)
{
struct bpf_reg_state *regs = state->regs;
@@ -2806,7 +3018,10 @@ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
- struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *this_branch = &vstate->frame[vstate->curframe];
+ struct bpf_verifier_state *other_branch_state;
+ struct bpf_func_state *other_branch;
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -2847,11 +3062,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
/* detect if R == 0 where R was initialized to zero earlier */
+ verbose("opcode jne %d dst_reg->type %d imm %d\n",
+ opcode == BPF_JNE, dst_reg->type, insn->imm);
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
- tnum_equals_const(dst_reg->var_off, insn->imm)) {
- if (opcode == BPF_JEQ) {
+ tnum_is_const(dst_reg->var_off)) {
+ if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+ (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
@@ -2866,9 +3084,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
}
- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
- if (!other_branch)
+ other_branch_state = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
+ if (!other_branch_state)
return -EFAULT;
+ other_branch = &other_branch_state->frame[other_branch_state->curframe];
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
@@ -2965,7 +3184,9 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
@@ -3026,7 +3247,9 @@ static bool may_access_skb(enum bpf_prog_type type)
*/
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
u8 mode = BPF_MODE(insn->code);
int i, err;
@@ -3075,6 +3298,9 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
static int check_return_code(struct bpf_verifier_env *env)
{
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ struct bpf_func_state *state = &vstate->frame[vstate->curframe];
+ struct bpf_reg_state *regs = state->regs;
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
@@ -3087,7 +3313,7 @@ static int check_return_code(struct bpf_verifier_env *env)
return 0;
}
- reg = &env->cur_state.regs[BPF_REG_0];
+ reg = &regs[BPF_REG_0];
if (reg->type != SCALAR_VALUE) {
verbose("At program exit the register R0 is not a known value (%s)\n",
reg_type_str[reg->type]);
@@ -3199,6 +3425,103 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
return 0;
}
+static int cmp_subprogs(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int find_subprog(struct bpf_verifier_env *env, int off)
+{
+ u32 *p;
+
+ p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs);
+ if (!p)
+ return -ENOENT;
+ return p - env->subprog_starts;
+}
+
+static int add_subprog(struct bpf_verifier_env *env, int off)
+{
+ int insn_cnt = env->prog->len;
+ int ret;
+
+ if (off >= insn_cnt) {
+ verbose("call to invalid destination\n");
+ return -EINVAL;
+ }
+ ret = find_subprog(env, off);
+ if (ret >= 0)
+ return 0;
+ if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+ verbose("too many subprograms\n");
+ return -E2BIG;
+ }
+ env->subprog_starts[env->subprog_cnt++] = off;
+ sort(env->subprog_starts, env->subprog_cnt,
+ sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+ return 0;
+}
+
+static int check_subprogs(struct bpf_verifier_env *env)
+{
+ int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+
+ /* determine subprog starts. The end is one before the next starts */
+ for (i = 0; i < insn_cnt; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ ret = add_subprog(env, i + insn[i].imm + 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* now check that all jumps are within the same subprog */
+ subprog_start = 0;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ for (i = 0; i < insn_cnt; i++) {
+ u8 code = insn[i].code;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ goto next;
+ if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+ goto next;
+ off = i + insn[i].off + 1;
+ if (off < subprog_start || off >= subprog_end) {
+ verbose("jump out of range from insn %d to %d\n", i, off);
+ return -EINVAL;
+ }
+next:
+ if (i == subprog_end - 1) {
+ /* to avoid fall-through from one subprog into another
+ * the last insn of the subprog should be either exit
+ * or unconditional jump back
+ */
+ if (code != (BPF_JMP | BPF_EXIT) &&
+ code != (BPF_JMP | BPF_JA)) {
+ verbose("last insn is not an exit or jmp\n");
+ return -EINVAL;
+ }
+ subprog_start = subprog_end;
+ if (env->subprog_cnt == cur_subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[cur_subprog++];
+ }
+ }
+ return 0;
+}
+
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
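
check_subprogs() recovers function boundaries purely from the pseudo call
targets: a call at index i with immediate k starts a subprog at i + k + 1,
and each subprog ends where the next one starts. Running that over the
5-insn "calls: basic sanity" selftest added below:

	/* insn 0: call pc+2   -> add_subprog(0 + 2 + 1 = 3)
	 * insn 1: r0 = 1
	 * insn 2: exit        <- last insn of subprog 0 (main prog)
	 * insn 3: r0 = 2      <- subprog 1 starts here
	 * insn 4: exit        <- last insn of subprog 1
	 *
	 * subprog_starts = { 3 }; no jump may cross the 2/3 boundary,
	 * and every subprog must end in BPF_EXIT or BPF_JA
	 */
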
@@ -3209,6 +3532,10 @@ static int check_cfg(struct bpf_verifier_env *env)
int ret = 0;
int i, t;
+ ret = check_subprogs(env);
+ if (ret < 0)
+ return ret;
+
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
@@ -3241,6 +3568,14 @@ peek_stack:
goto err_free;
if (t + 1 < insn_cnt)
env->explored_states[t + 1] = STATE_LIST_MARK;
+ if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+ env->explored_states[t] = STATE_LIST_MARK;
+ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+ if (ret == 1)
+ goto peek_stack;
+ else if (ret < 0)
+ goto err_free;
+ }
} else if (opcode == BPF_JA) {
if (BPF_SRC(insns[t].code) != BPF_K) {
ret = -EINVAL;
@@ -3477,9 +3812,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool states_equal(struct bpf_verifier_env *env,
- struct bpf_verifier_state *old,
- struct bpf_verifier_state *cur)
+static bool func_states_equal(struct bpf_func_state *old,
+ struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
@@ -3532,50 +3866,82 @@ out_free:
return ret;
}
+static bool states_equal(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
+{
+ int i;
+
+ if (old->curframe != cur->curframe)
+ return false;
+
+ /* for states to be equal callsites have to be the same
+ * and all frame states need to be equivalent
+ */
+ for (i = 0; i <= old->curframe; i++) {
+ if (old->frame[i].callsite != cur->frame[i].callsite)
+ return false;
+ if (!func_states_equal(&old->frame[i], &cur->frame[i]))
+ return false;
+ }
+ return true;
+}
+
/* A write screens off any subsequent reads; but write marks come from the
* straight-line code between a state and its parent. When we arrive at a
* jump target (in the first iteration of the propagate_liveness() loop),
* we didn't arrive by the straight-line code, so read marks in state must
* propagate to parent regardless of state's write marks.
*/
-static bool do_propagate_liveness(const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent)
+static int do_propagate_liveness(const struct bpf_verifier_state *state,
+ struct bpf_verifier_state *parent)
{
- bool writes = parent == state->parent; /* Observe write marks */
- bool touched = false; /* any changes made? */
- int i;
+ int i, err = 0;
+
- if (!parent)
- return touched;
+ if (parent->curframe != state->curframe) {
+ WARN(1, "propagate_live: parent frame %d current frame %d\n",
+ parent->curframe, state->curframe);
+ return -EFAULT;
+ }
/* Propagate read liveness of registers... */
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
/* We don't need to worry about FP liveness because it's read-only */
for (i = 0; i < BPF_REG_FP; i++) {
- if (parent->regs[i].live & REG_LIVE_READ)
- continue;
- if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
+ if (parent->frame[parent->curframe].regs[i].live & REG_LIVE_READ)
continue;
- if (state->regs[i].live & REG_LIVE_READ) {
- parent->regs[i].live |= REG_LIVE_READ;
- touched = true;
+ if (state->frame[state->curframe].regs[i].live & REG_LIVE_READ) {
+ err = mark_reg_read(state, parent, i);
+ if (err)
+ return err;
}
}
+
/* ... and stack slots */
for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) {
- if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
+ if (parent->frame[parent->curframe].stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
continue;
- if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
+ if (state->frame[state->curframe].stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL)
continue;
- if (parent->spilled_regs[i].live & REG_LIVE_READ)
+ if (parent->frame[parent->curframe].spilled_regs[i].live & REG_LIVE_READ)
continue;
- if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN))
- continue;
- if (state->spilled_regs[i].live & REG_LIVE_READ) {
- parent->spilled_regs[i].live |= REG_LIVE_READ;
- touched = true;
+ if (state->frame[state->curframe].spilled_regs[i].live & REG_LIVE_READ) {
+ mark_stack_slot_read(state, parent, i);
}
}
- return touched;
+ return err;
}
/* "parent" is "a state from which we reach the current state", but initially
@@ -3587,21 +3953,41 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
* mark_stack_slot_read() on each reg in "parent" that is read in "state",
* though it requires that parent != state->parent in the call arguments.
*/
-static void propagate_liveness(const struct bpf_verifier_state *state,
+static int propagate_liveness(const struct bpf_verifier_state *state,
struct bpf_verifier_state *parent)
{
- while (do_propagate_liveness(state, parent)) {
+ return do_propagate_liveness(state, parent);
- /* Something changed, so we need to feed those changes onward */
- state = parent;
- parent = state->parent;
- }
+}
+
+static void copy_verifier_state(struct bpf_verifier_state *dst,
+ const struct bpf_verifier_state *src)
+{
+ struct bpf_func_state **func;
+ int i, j;
+
+ memcpy(dst, src, sizeof(*src));
+ for (i = 0; i <= dst->curframe; i++)
+ for (j = 0; j < MAX_BPF_REG; j++) {
+ func = &dst->frame[i].regs[j].func;
+ if (!*func)
+ continue;
+ /* func pointer still points to src state. adjust it */
+ WARN_ON_ONCE(*func != &src->frame[(*func)->frameno]);
+ *func = &dst->frame[(*func)->frameno];
+ }
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
- int i;
+ struct bpf_verifier_state *vstate = &env->cur_state;
+ int i, err;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -3622,7 +4008,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* they'll be immediately forgotten as we're pruning
* this state and will pop a new one.
*/
- propagate_liveness(&sl->state, &env->cur_state);
+ err = propagate_liveness(&sl->state, &env->cur_state);
+ if (err)
+ return err;
return 1;
}
sl = sl->next;
@@ -3630,16 +4018,17 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
- * but it will either reach bpf_exit (which means it's safe) or
- * it will be rejected. Since there are no loops, we won't be
- * seeing this 'insn_idx' instruction again on the way to bpf_exit
+ * but it will either reach the outermost bpf_exit (which means it's safe)
+ * or it will be rejected. Since there are no loops, we won't be
+ * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
+ * again on the way to bpf_exit
*/
new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
if (!new_sl)
return -ENOMEM;
/* add new state to the head of linked list */
- memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
+ copy_verifier_state(&new_sl->state, vstate);
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
/* connect new state to parentage chain */
@@ -3651,10 +4040,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* explored_states can get read marks.)
*/
for (i = 0; i < BPF_REG_FP; i++)
- env->cur_state.regs[i].live = REG_LIVE_NONE;
+ vstate->frame[vstate->curframe].regs[i].live = REG_LIVE_NONE;
for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
- if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL)
- env->cur_state.spilled_regs[i].live = REG_LIVE_NONE;
+ if (vstate->frame[vstate->curframe].stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL)
+ vstate->frame[vstate->curframe].spilled_regs[i].live = REG_LIVE_NONE;
return 0;
}
@@ -3671,13 +4060,14 @@ static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
- struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
- init_reg_state(regs);
+ init_func_state(&state->frame[0], 0);
+ state->curframe = 0;
state->parent = NULL;
insn_idx = 0;
for (;;) {
@@ -3724,7 +4114,7 @@ static int do_check(struct bpf_verifier_env *env)
else
verbose("\nfrom %d to %d:",
prev_insn_idx, insn_idx);
- print_verifier_state(&env->cur_state);
+ print_verifier_state(&state->frame[state->curframe]);
do_print_state = false;
}
@@ -3737,6 +4127,7 @@ static int do_check(struct bpf_verifier_env *env)
if (err)
return err;
+ regs = state->frame[state->curframe].regs;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -3854,13 +4245,17 @@ static int do_check(struct bpf_verifier_env *env)
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
- insn->src_reg != BPF_REG_0 ||
+ (insn->src_reg != BPF_REG_0 &&
+ insn->src_reg != BPF_PSEUDO_CALL) ||
insn->dst_reg != BPF_REG_0) {
verbose("BPF_CALL uses reserved fields\n");
return -EINVAL;
}
- err = check_call(env, insn->imm, insn_idx);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ err = check_func_call(env, insn, &insn_idx);
+ else
+ err = check_helper_call(env, insn->imm, insn_idx);
if (err)
return err;
@@ -3885,6 +4280,14 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
+ if (state->curframe) {
+ /* exit from nested function */
+ prev_insn_idx = insn_idx;
+ prepare_func_exit(env, &insn_idx);
+ do_print_state = true;
+ continue;
+ }
+
/* eBPF calling convention is such that R0 is used
* to return the value from eBPF program.
* Make sure that it's readable at this time
@@ -4268,7 +4671,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
-
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
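
The skip matters because insn->imm means two different things for
BPF_JMP | BPF_CALL, and only helper calls get patched here:

	/* for BPF_JMP | BPF_CALL:
	 *   src_reg == BPF_PSEUDO_CALL: imm is a pc-relative offset to
	 *       another bpf function; target = insn_idx + imm + 1
	 *   src_reg == 0: imm is a helper id such as
	 *       BPF_FUNC_map_lookup_elem, handled by the fixups here
	 */
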
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index cc91d0159f436c..50b169702fa3aa 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -277,7 +277,7 @@ static struct bpf_test tests[] = {
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
- .errstr = "jump out of range",
+ .errstr = "not an exit",
.result = REJECT,
},
{
@@ -6964,6 +6964,360 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
+ {
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R0 leaks",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: calle using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not an exit",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot spill",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .result = ACCEPT,
+ },
+ {
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison prohibited",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ },
+ {
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
+ {
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .fixup_map1 = { 23 },
+ .result = ACCEPT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)