Revert "bpf: Support new 32bit offset jmp instruction"
This reverts commit 2c795ce090, which is
commit 4cd58e9af8b9d9fff6b7145e742abbfcda0af4af upstream.
It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.
Bug: 161946584
Change-Id: Iac907693874b0a3ac47992214c19c41905562e86
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
parent
1699079678
commit
c7b298c1b5
@ -1625,24 +1625,16 @@ st: if (is_imm8(insn->off))
|
||||
break;
|
||||
|
||||
case BPF_JMP | BPF_JA:
|
||||
case BPF_JMP32 | BPF_JA:
|
||||
if (BPF_CLASS(insn->code) == BPF_JMP) {
|
||||
if (insn->off == -1)
|
||||
/* -1 jmp instructions will always jump
|
||||
* backwards two bytes. Explicitly handling
|
||||
* this case avoids wasting too many passes
|
||||
* when there are long sequences of replaced
|
||||
* dead code.
|
||||
*/
|
||||
jmp_offset = -2;
|
||||
else
|
||||
jmp_offset = addrs[i + insn->off] - addrs[i];
|
||||
} else {
|
||||
if (insn->imm == -1)
|
||||
jmp_offset = -2;
|
||||
else
|
||||
jmp_offset = addrs[i + insn->imm] - addrs[i];
|
||||
}
|
||||
if (insn->off == -1)
|
||||
/* -1 jmp instructions will always jump
|
||||
* backwards two bytes. Explicitly handling
|
||||
* this case avoids wasting too many passes
|
||||
* when there are long sequences of replaced
|
||||
* dead code.
|
||||
*/
|
||||
jmp_offset = -2;
|
||||
else
|
||||
jmp_offset = addrs[i + insn->off] - addrs[i];
|
||||
|
||||
if (!jmp_offset) {
|
||||
/*
|
||||
|
@ -367,12 +367,7 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
|
||||
{
|
||||
const s32 off_min = S16_MIN, off_max = S16_MAX;
|
||||
s32 delta = end_new - end_old;
|
||||
s32 off;
|
||||
|
||||
if (insn->code == (BPF_JMP32 | BPF_JA))
|
||||
off = insn->imm;
|
||||
else
|
||||
off = insn->off;
|
||||
s32 off = insn->off;
|
||||
|
||||
if (curr < pos && curr + off + 1 >= end_old)
|
||||
off += delta;
|
||||
@ -380,12 +375,8 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
|
||||
off -= delta;
|
||||
if (off < off_min || off > off_max)
|
||||
return -ERANGE;
|
||||
if (!probe_pass) {
|
||||
if (insn->code == (BPF_JMP32 | BPF_JA))
|
||||
insn->imm = off;
|
||||
else
|
||||
insn->off = off;
|
||||
}
|
||||
if (!probe_pass)
|
||||
insn->off = off;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1595,7 +1586,6 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
|
||||
INSN_3(JMP, JSLE, K), \
|
||||
INSN_3(JMP, JSET, K), \
|
||||
INSN_2(JMP, JA), \
|
||||
INSN_2(JMP32, JA), \
|
||||
/* Store instructions. */ \
|
||||
/* Register based. */ \
|
||||
INSN_3(STX, MEM, B), \
|
||||
@ -1872,9 +1862,6 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
|
||||
JMP_JA:
|
||||
insn += insn->off;
|
||||
CONT;
|
||||
JMP32_JA:
|
||||
insn += insn->imm;
|
||||
CONT;
|
||||
JMP_EXIT:
|
||||
return BPF_R0;
|
||||
/* JMP */
|
||||
|
@ -2254,10 +2254,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
|
||||
goto next;
|
||||
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
|
||||
goto next;
|
||||
if (code == (BPF_JMP32 | BPF_JA))
|
||||
off = i + insn[i].imm + 1;
|
||||
else
|
||||
off = i + insn[i].off + 1;
|
||||
off = i + insn[i].off + 1;
|
||||
if (off < subprog_start || off >= subprog_end) {
|
||||
verbose(env, "jump out of range from insn %d to %d\n", i, off);
|
||||
return -EINVAL;
|
||||
@ -2269,7 +2266,6 @@ static int check_subprogs(struct bpf_verifier_env *env)
|
||||
* or unconditional jump back
|
||||
*/
|
||||
if (code != (BPF_JMP | BPF_EXIT) &&
|
||||
code != (BPF_JMP32 | BPF_JA) &&
|
||||
code != (BPF_JMP | BPF_JA)) {
|
||||
verbose(env, "last insn is not an exit or jmp\n");
|
||||
return -EINVAL;
|
||||
@ -11137,7 +11133,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
|
||||
static int visit_insn(int t, struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
|
||||
int ret, off;
|
||||
int ret;
|
||||
|
||||
if (bpf_pseudo_func(insn))
|
||||
return visit_func_call_insn(t, insns, env, true);
|
||||
@ -11165,19 +11161,14 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
|
||||
if (BPF_SRC(insn->code) != BPF_K)
|
||||
return -EINVAL;
|
||||
|
||||
if (BPF_CLASS(insn->code) == BPF_JMP)
|
||||
off = insn->off;
|
||||
else
|
||||
off = insn->imm;
|
||||
|
||||
/* unconditional jump with single edge */
|
||||
ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
|
||||
ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mark_prune_point(env, t + off + 1);
|
||||
mark_jmp_point(env, t + off + 1);
|
||||
mark_prune_point(env, t + insn->off + 1);
|
||||
mark_jmp_point(env, t + insn->off + 1);
|
||||
|
||||
return ret;
|
||||
|
||||
@ -12713,18 +12704,15 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
return err;
|
||||
} else if (opcode == BPF_JA) {
|
||||
if (BPF_SRC(insn->code) != BPF_K ||
|
||||
insn->imm != 0 ||
|
||||
insn->src_reg != BPF_REG_0 ||
|
||||
insn->dst_reg != BPF_REG_0 ||
|
||||
(class == BPF_JMP && insn->imm != 0) ||
|
||||
(class == BPF_JMP32 && insn->off != 0)) {
|
||||
class == BPF_JMP32) {
|
||||
verbose(env, "BPF_JA uses reserved fields\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (class == BPF_JMP)
|
||||
env->insn_idx += insn->off + 1;
|
||||
else
|
||||
env->insn_idx += insn->imm + 1;
|
||||
env->insn_idx += insn->off + 1;
|
||||
continue;
|
||||
|
||||
} else if (opcode == BPF_EXIT) {
|
||||
@ -13550,13 +13538,13 @@ static bool insn_is_cond_jump(u8 code)
|
||||
{
|
||||
u8 op;
|
||||
|
||||
op = BPF_OP(code);
|
||||
if (BPF_CLASS(code) == BPF_JMP32)
|
||||
return op != BPF_JA;
|
||||
return true;
|
||||
|
||||
if (BPF_CLASS(code) != BPF_JMP)
|
||||
return false;
|
||||
|
||||
op = BPF_OP(code);
|
||||
return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user