bpf: Cleanup check_refcount_ok
Discussion around a recently-submitted patch provided historical context for check_refcount_ok [0]. Specifically, the function and its helpers - may_be_acquire_function and arg_type_may_be_refcounted - predate the OBJ_RELEASE type flag and the addition of many more helpers with acquire/release semantics.

The purpose of check_refcount_ok is to ensure:
  1) Helper doesn't have multiple uses of return reg's ref_obj_id
  2) Helper with release semantics only has one arg needing to be
     released, since that's tracked using meta->ref_obj_id

With current verifier, it's safe to remove check_refcount_ok and its helpers. Since addition of OBJ_RELEASE type flag, case 2) has been handled by the arg_type_is_release check in check_func_arg. To ensure case 1) won't result in verifier silently prioritizing one use of ref_obj_id, this patch adds a helper_multiple_ref_obj_use check which fails loudly if a helper passes > 1 test for use of ref_obj_id.

  [0]: lore.kernel.org/bpf/20220713234529.4154673-1-davemarchevsky@fb.com

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Joanne Koong <joannelkoong@gmail.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20220808171559.3251090-1-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit b2d8ef19c6
parent 6e116280b4
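For context: the arg_type_is_release check in check_func_arg referenced above keys off the OBJ_RELEASE type flag on a helper argument. A minimal sketch of what that existing check boils down to (it already exists in verifier.c and is not part of this patch's diff; the comment paraphrases the commit message):

    /* A helper argument carrying the OBJ_RELEASE type flag is the one being
     * released; check_func_arg() tracks that argument's ref_obj_id via
     * meta->ref_obj_id, which is why only one release argument is supported.
     */
    static bool arg_type_is_release(enum bpf_arg_type type)
    {
    	return type & OBJ_RELEASE;
    }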
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -467,25 +467,11 @@ static bool type_is_rdonly_mem(u32 type)
 	return type & MEM_RDONLY;
 }
 
-static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
-{
-	return type == ARG_PTR_TO_SOCK_COMMON;
-}
-
 static bool type_may_be_null(u32 type)
 {
 	return type & PTR_MAYBE_NULL;
 }
 
-static bool may_be_acquire_function(enum bpf_func_id func_id)
-{
-	return func_id == BPF_FUNC_sk_lookup_tcp ||
-		func_id == BPF_FUNC_sk_lookup_udp ||
-		func_id == BPF_FUNC_skc_lookup_tcp ||
-		func_id == BPF_FUNC_map_lookup_elem ||
-		func_id == BPF_FUNC_ringbuf_reserve;
-}
-
 static bool is_acquire_function(enum bpf_func_id func_id,
 				const struct bpf_map *map)
 {
@@ -518,6 +504,26 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+static bool is_dynptr_acquire_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_dynptr_data;
+}
+
+static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
+					const struct bpf_map *map)
+{
+	int ref_obj_uses = 0;
+
+	if (is_ptr_cast_function(func_id))
+		ref_obj_uses++;
+	if (is_acquire_function(func_id, map))
+		ref_obj_uses++;
+	if (is_dynptr_acquire_function(func_id))
+		ref_obj_uses++;
+
+	return ref_obj_uses > 1;
+}
+
 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 {
 	return BPF_CLASS(insn->code) == BPF_STX &&
@@ -6453,33 +6459,6 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
 	return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
-{
-	int count = 0;
-
-	if (arg_type_may_be_refcounted(fn->arg1_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg2_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg3_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg4_type))
-		count++;
-	if (arg_type_may_be_refcounted(fn->arg5_type))
-		count++;
-
-	/* A reference acquiring function cannot acquire
-	 * another refcounted ptr.
-	 */
-	if (may_be_acquire_function(func_id) && count)
-		return false;
-
-	/* We only support one arg being unreferenced at the moment,
-	 * which is sufficient for the helper functions we have right now.
-	 */
-	return count <= 1;
-}
-
 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
 {
 	int i;
@@ -6502,8 +6481,7 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 {
 	return check_raw_mode_ok(fn) &&
 	       check_arg_pair_ok(fn) &&
-	       check_btf_id_ok(fn) &&
-	       check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
+	       check_btf_id_ok(fn) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -7473,6 +7451,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	if (type_may_be_null(regs[BPF_REG_0].type))
 		regs[BPF_REG_0].id = ++env->id_gen;
 
+	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
+		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
+			func_id_name(func_id), func_id);
+		return -EFAULT;
+	}
+
 	if (is_ptr_cast_function(func_id)) {
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
@@ -7485,10 +7469,10 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		regs[BPF_REG_0].id = id;
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = id;
-	} else if (func_id == BPF_FUNC_dynptr_data) {
+	} else if (is_dynptr_acquire_function(func_id)) {
 		int dynptr_id = 0, i;
 
-		/* Find the id of the dynptr we're acquiring a reference to */
+		/* Find the id of the dynptr we're tracking the reference of */
 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
 			if (arg_type_is_dynptr(fn->arg_type[i])) {
 				if (dynptr_id) {