Diffstat (limited to 'kernel/bpf/verifier.c')
 kernel/bpf/verifier.c | 175 ++++++++++++++++++++++++++++++++------------
 1 file changed, 123 insertions(+), 52 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 11528bdaa9dc..187ea0cfbcca 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -201,8 +201,7 @@ struct bpf_call_arg_meta {
bool pkt_access;
int regno;
int access_size;
- s64 msize_smax_value;
- u64 msize_umax_value;
+ u64 msize_max_value;
int ref_obj_id;
int func_id;
};
@@ -2657,8 +2656,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
/* remember the mem_size which may be used later
* to refine return values.
*/
- meta->msize_smax_value = reg->smax_value;
- meta->msize_umax_value = reg->umax_value;
+ meta->msize_max_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
 * happens using its boundaries.
 */
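
The bound recorded here is consumed by do_refine_retval_range() further down. As a motivating illustration, consider this hypothetical BPF-C fragment (buf, n, mark and unsafe_ptr are invented for the example): without the refinement the verifier only sees an unknown scalar in R0 and cannot prove the final load in-bounds, even though bpf_probe_read_str() can never return more than the size passed in.

    char buf[64];
    int n, mark;

    n = bpf_probe_read_str(buf, sizeof(buf), unsafe_ptr);
    if (n > 0)
            /* With R0 clamped to [0, sizeof(buf)] at the call
             * site, n - 1 is provably below 64 here and the
             * access stays inside buf.
             */
            mark = buf[n - 1];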
@@ -3141,21 +3139,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
return 0;
}
-static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
- int func_id,
- struct bpf_call_arg_meta *meta)
+static int do_refine_retval_range(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs, int ret_type,
+ int func_id, struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
+ struct bpf_reg_state tmp_reg = *ret_reg;
+ bool ret;
if (ret_type != RET_INTEGER ||
(func_id != BPF_FUNC_get_stack &&
func_id != BPF_FUNC_probe_read_str))
- return;
+ return 0;
+
+ /* Error case where ret is in interval [S32_MIN, -1]. */
+ ret_reg->smin_value = S32_MIN;
+ ret_reg->smax_value = -1;
+
+ __reg_deduce_bounds(ret_reg);
+ __reg_bound_offset(ret_reg);
+ __update_reg_bounds(ret_reg);
+
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
+ if (!ret)
+ return -EFAULT;
+
+ *ret_reg = tmp_reg;
+
+ /* Success case where ret is in range [0, msize_max_value]. */
+ ret_reg->smin_value = 0;
+ ret_reg->smax_value = meta->msize_max_value;
+ ret_reg->umin_value = ret_reg->smin_value;
+ ret_reg->umax_value = ret_reg->smax_value;
- ret_reg->smax_value = meta->msize_smax_value;
- ret_reg->umax_value = meta->msize_umax_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
+ __update_reg_bounds(ret_reg);
+
+ return 0;
}
static int
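
Instead of folding error codes and success lengths into one interval (the old code set only the maxima, leaving the minimum unbounded), the rewrite explores two disjoint register states: push_stack() queues a path where R0 holds an error in [S32_MIN, -1], and the fall-through state carries the success range [0, msize_max_value]. A stand-alone toy model of the split, with plain C types in place of bpf_reg_state:

    #include <stdio.h>

    struct interval { long long min, max; };

    int main(void)
    {
            long long msize_max = 64;       /* e.g. sizeof(buf) above */

            /* Path 1: helper failed, R0 is a negative error code. */
            struct interval err = { -2147483648LL /* S32_MIN */, -1 };
            /* Path 2: helper succeeded, R0 is a length. */
            struct interval ok  = { 0, msize_max };

            printf("error path:   [%lld, %lld]\n", err.min, err.max);
            printf("success path: [%lld, %lld]\n", ok.min, ok.max);
            return 0;
    }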
@@ -3384,7 +3405,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].ref_obj_id = id;
}
- do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
+ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
+ if (err)
+ return err;
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
@@ -4084,9 +4107,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
*/
- dst_reg->smin_value >>= umin_val;
- dst_reg->smax_value >>= umin_val;
- dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+ if (insn_bitness == 32) {
+ dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
+ dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
+ } else {
+ dst_reg->smin_value >>= umin_val;
+ dst_reg->smax_value >>= umin_val;
+ }
+
+ dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
+ insn_bitness);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
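
The removed lines applied a 64-bit arithmetic shift even when insn_bitness is 32, so the simulated bounds could diverge from what a BPF_ALU (32-bit) BPF_ARSH actually computes: a register whose low 32 bits are negative was shifted as a positive 64-bit number. A stand-alone C demonstration of the mismatch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t val = 0xffffff00LL;     /* upper 32 bits clear */

            /* What the old simulation computed: as a 64-bit value
             * this is positive, so zeros get shifted in.
             */
            int64_t old_sim = val >> 8;

            /* What the 32-bit instruction really does: shift the low
             * 32 bits arithmetically (they are negative), then
             * zero-extend the 32-bit result.
             */
            int64_t actual = (uint32_t)((int32_t)val >> 8);

            printf("simulated: %#llx\n", (unsigned long long)old_sim); /* 0xffffff   */
            printf("actual:    %#llx\n", (unsigned long long)actual);  /* 0xffffffff */
            return 0;
    }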
@@ -4580,6 +4610,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
}
+/* Constrain the possible values of @reg with unsigned upper bound @bound.
+ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
+ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
+ * of @reg.
+ */
+static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
+ bool is_exclusive)
+{
+ if (is_exclusive) {
+ /* There are no values for `reg` that make `reg<0` true. */
+ if (bound == 0)
+ return;
+ bound--;
+ }
+ if (is_jmp32) {
+ /* Constrain the register's value in the tnum representation.
+ * For 64-bit comparisons this happens later in
+ * __reg_bound_offset(), but for 32-bit comparisons, we can be
+ * more precise than what can be derived from the updated
+ * numeric bounds.
+ */
+ struct tnum t = tnum_range(0, bound);
+
+ t.mask |= ~0xffffffffULL; /* upper half is unknown */
+ reg->var_off = tnum_intersect(reg->var_off, t);
+
+ /* Compute the 64-bit bound from the 32-bit bound. */
+ bound += gen_hi_max(reg->var_off);
+ }
+ reg->umax_value = min(reg->umax_value, bound);
+}
+
+/* Constrain the possible values of @reg with unsigned lower bound @bound.
+ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
+ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
+ * of @reg.
+ */
+static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
+ bool is_exclusive)
+{
+ if (is_exclusive) {
+ /* There are no values for `reg` that make `reg>MAX` true. */
+ if (bound == (is_jmp32 ? U32_MAX : U64_MAX))
+ return;
+ bound++;
+ }
+ if (is_jmp32) {
+ /* Constrain the register's value in the tnum representation.
+ * For 64-bit comparisons this happens later in
+ * __reg_bound_offset(), but for 32-bit comparisons, we can be
+ * more precise than what can be derived from the updated
+ * numeric bounds.
+ */
+ struct tnum t = tnum_range(bound, U32_MAX);
+
+ t.mask |= ~0xffffffffULL; /* upper half is unknown */
+ reg->var_off = tnum_intersect(reg->var_off, t);
+
+ /* Compute the 64-bit bound from the 32-bit bound. */
+ bound += gen_hi_min(reg->var_off);
+ }
+ reg->umin_value = max(reg->umin_value, bound);
+}
+
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
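
For is_jmp32 the helpers refine the tnum directly: only the low 32 bits are being compared, so the upper half of the mask is forced to "unknown" before intersecting. The sketch below re-implements just enough of kernel/bpf/tnum.c (tnum_range() and tnum_intersect(), simplified and stand-alone) to show what that intersection learns:

    #include <stdio.h>
    #include <stdint.h>

    /* A tnum tracks each register bit as known-0, known-1 or unknown:
     * bit i is unknown iff mask bit i is set, otherwise it equals the
     * corresponding bit of value.
     */
    struct tnum { uint64_t value; uint64_t mask; };

    static struct tnum tnum_range(uint64_t min, uint64_t max)
    {
            uint64_t chi = min ^ max, delta;
            int bits;

            if (chi == 0)
                    return (struct tnum){ min, 0 };     /* single value */
            bits = 64 - __builtin_clzll(chi);           /* fls64(chi) */
            if (bits > 63)
                    return (struct tnum){ 0, ~0ULL };   /* fully unknown */
            delta = (1ULL << bits) - 1;
            return (struct tnum){ min & ~delta, delta };
    }

    static struct tnum tnum_intersect(struct tnum a, struct tnum b)
    {
            uint64_t v = a.value | b.value;
            uint64_t mu = a.mask & b.mask;

            return (struct tnum){ v & ~mu, mu };
    }

    int main(void)
    {
            struct tnum reg = { 0, ~0ULL };         /* nothing known */
            struct tnum t = tnum_range(0, 0xff);    /* low bits in [0, 255] */

            t.mask |= ~0xffffffffULL;               /* upper half unknown */
            reg = tnum_intersect(reg, t);
            /* Prints value=0 mask=0xffffffff000000ff: bits 8-31 are now
             * known to be zero, which plain umax arithmetic on the full
             * 64-bit register could not conclude.
             */
            printf("value=%#llx mask=%#llx\n",
                   (unsigned long long)reg.value,
                   (unsigned long long)reg.mask);
            return 0;
    }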
@@ -4635,15 +4729,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
case BPF_JGE:
case BPF_JGT:
{
- u64 false_umax = opcode == BPF_JGT ? val : val - 1;
- u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
-
- if (is_jmp32) {
- false_umax += gen_hi_max(false_reg->var_off);
- true_umin += gen_hi_min(true_reg->var_off);
- }
- false_reg->umax_value = min(false_reg->umax_value, false_umax);
- true_reg->umin_value = max(true_reg->umin_value, true_umin);
+ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
+ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
break;
}
case BPF_JSGE:
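
The is_exclusive argument encodes which branch owns the strict inequality: when BPF_JGT is taken the register is at least val + 1 (exclusive lower bound), and when it falls through it is at most val (inclusive upper bound); BPF_JGE swaps the two roles. The JLE/JLT case below and the two swapped-operand variants in reg_set_min_max_inv() apply the same mapping mirrored. A stand-alone check of the encoded semantics:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t val = 10;

            for (uint64_t reg = 0; reg < 32; reg++) {
                    if (reg > val)                  /* BPF_JGT taken */
                            assert(reg >= val + 1); /* exclusive lower bound */
                    else                            /* BPF_JGT fall-through */
                            assert(reg <= val);     /* inclusive upper bound */

                    if (reg >= val)                 /* BPF_JGE taken */
                            assert(reg >= val);     /* inclusive lower bound */
                    else                            /* BPF_JGE fall-through */
                            assert(reg <= val - 1); /* exclusive upper bound */
            }
            return 0;
    }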
@@ -4664,15 +4751,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
case BPF_JLE:
case BPF_JLT:
{
- u64 false_umin = opcode == BPF_JLT ? val : val + 1;
- u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
-
- if (is_jmp32) {
- false_umin += gen_hi_min(false_reg->var_off);
- true_umax += gen_hi_max(true_reg->var_off);
- }
- false_reg->umin_value = max(false_reg->umin_value, false_umin);
- true_reg->umax_value = min(true_reg->umax_value, true_umax);
+ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
+ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
break;
}
case BPF_JSLE:
@@ -4747,15 +4827,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
case BPF_JGE:
case BPF_JGT:
{
- u64 false_umin = opcode == BPF_JGT ? val : val + 1;
- u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
-
- if (is_jmp32) {
- false_umin += gen_hi_min(false_reg->var_off);
- true_umax += gen_hi_max(true_reg->var_off);
- }
- false_reg->umin_value = max(false_reg->umin_value, false_umin);
- true_reg->umax_value = min(true_reg->umax_value, true_umax);
+ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
+ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
break;
}
case BPF_JSGE:
@@ -4773,15 +4846,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
case BPF_JLE:
case BPF_JLT:
{
- u64 false_umax = opcode == BPF_JLT ? val : val - 1;
- u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
-
- if (is_jmp32) {
- false_umax += gen_hi_max(false_reg->var_off);
- true_umin += gen_hi_min(true_reg->var_off);
- }
- false_reg->umax_value = min(false_reg->umax_value, false_umax);
- true_reg->umin_value = max(true_reg->umin_value, true_umin);
+ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
+ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
break;
}
case BPF_JSLE:
@@ -5268,6 +5334,7 @@ static bool may_access_skb(enum bpf_prog_type type)
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
+ static const int ctx_reg = BPF_REG_6;
u8 mode = BPF_MODE(insn->code);
int i, err;
@@ -5301,7 +5368,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check whether implicit source operand (register R6) is readable */
- err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+ err = check_reg_arg(env, ctx_reg, SRC_OP);
if (err)
return err;
@@ -5320,7 +5387,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
- if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+ if (regs[ctx_reg].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
@@ -5333,6 +5400,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
+ err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+ if (err < 0)
+ return err;
+
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, regs, caller_saved[i]);
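
BPF_LD_ABS and BPF_LD_IND read the packet through an implicit R6 operand, and the existing checks only verified that R6 is readable and has type PTR_TO_CTX. The added check_ctx_reg() call also rejects a context pointer that carries an offset. A hypothetical instruction sequence of the kind this closes off (macros from include/linux/filter.h):

    struct bpf_insn prog[] = {
            BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),    /* r6 = ctx */
            BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),   /* r6 += 8: still PTR_TO_CTX,
                                                     * but no longer the ctx start
                                                     */
            BPF_LD_ABS(BPF_B, 0),                   /* implicitly uses r6 as skb */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };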