Revert "bpf: clean up visit_insn()'s instruction processing"
This reverts commit b1c780ed3c which is
commit 653ae3a874aca6764a4c1f5a8bf1b072ade0d6f4 upstream.
It breaks the Android kernel abi and can be brought back in the future
in an abi-safe way if it is really needed.
Bug: 161946584
Change-Id: I083d407a06bd85594d74aa486969115a74675e1f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent c7b298c1b5
commit 797e6a76ea
@@ -11132,43 +11132,44 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
  */
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
-        struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
+        struct bpf_insn *insns = env->prog->insnsi;
         int ret;
 
-        if (bpf_pseudo_func(insn))
+        if (bpf_pseudo_func(insns + t))
                 return visit_func_call_insn(t, insns, env, true);
 
         /* All non-branch instructions have a single fall-through edge. */
-        if (BPF_CLASS(insn->code) != BPF_JMP &&
-            BPF_CLASS(insn->code) != BPF_JMP32)
+        if (BPF_CLASS(insns[t].code) != BPF_JMP &&
+            BPF_CLASS(insns[t].code) != BPF_JMP32)
                 return push_insn(t, t + 1, FALLTHROUGH, env, false);
 
-        switch (BPF_OP(insn->code)) {
+        switch (BPF_OP(insns[t].code)) {
         case BPF_EXIT:
                 return DONE_EXPLORING;
 
         case BPF_CALL:
-                if (insn->imm == BPF_FUNC_timer_set_callback)
+                if (insns[t].imm == BPF_FUNC_timer_set_callback)
                         /* Mark this call insn as a prune point to trigger
                          * is_state_visited() check before call itself is
                          * processed by __check_func_call(). Otherwise new
                          * async state will be pushed for further exploration.
                          */
                         mark_prune_point(env, t);
-                return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
+                return visit_func_call_insn(t, insns, env,
+                                            insns[t].src_reg == BPF_PSEUDO_CALL);
 
         case BPF_JA:
-                if (BPF_SRC(insn->code) != BPF_K)
+                if (BPF_SRC(insns[t].code) != BPF_K)
                         return -EINVAL;
 
                 /* unconditional jump with single edge */
-                ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
+                ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
                                 true);
                 if (ret)
                         return ret;
 
-                mark_prune_point(env, t + insn->off + 1);
-                mark_jmp_point(env, t + insn->off + 1);
+                mark_prune_point(env, t + insns[t].off + 1);
+                mark_jmp_point(env, t + insns[t].off + 1);
 
                 return ret;
 
@@ -11180,7 +11181,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
                 if (ret)
                         return ret;
 
-                return push_insn(t, t + insn->off + 1, BRANCH, env, true);
+                return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
         }
 }
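For readers comparing the removed and restored lines above: the reverted upstream cleanup only introduced a local instruction pointer in place of repeated insns[t] indexing, and the revert restores the indexed form. A minimal, self-contained C sketch of the two equivalent access styles follows; the struct insn, read_indexed and read_via_pointer names are hypothetical illustrations, not verifier code.

#include <stdio.h>

/* Hypothetical stand-in for struct bpf_insn, reduced to one field. */
struct insn {
        int code;
};

/* Style restored by the revert: index the array at every use site. */
static int read_indexed(const struct insn *insns, int t)
{
        return insns[t].code;
}

/* Style being reverted: take a local pointer once, then dereference it. */
static int read_via_pointer(const struct insn *insns, int t)
{
        const struct insn *in = &insns[t];

        return in->code;
}

int main(void)
{
        struct insn prog[] = { { 10 }, { 20 }, { 30 } };

        /* Both calls read the same element; the result is identical. */
        printf("%d %d\n", read_indexed(prog, 1), read_via_pointer(prog, 1));
        return 0;
}

Either style reads the same instruction; as the commit message notes, the revert is about Android kernel ABI stability, not about a behavioral difference.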