riscv, bpf: Implement more atomic operations for RV64
This patch implements more BPF atomic operations for RV64. The newly added
operations are:

  atomic[64]_[fetch_]add
  atomic[64]_[fetch_]and
  atomic[64]_[fetch_]or
  atomic[64]_xchg
  atomic[64]_cmpxchg

Since the RISC-V specification does not provide an AMO instruction for the
CAS operation, cmpxchg is implemented with an lr/sc loop, while the remaining
operations use AMO instructions. The "test_bpf.ko" and "test_progs -t atomic"
tests pass, and "test_verifier" reports no new failures.

Signed-off-by: Pu Lehui <pulehui@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/bpf/20220410101246.232875-1-pulehui@huawei.com
commit dd642ccb45 (parent 33fc250c3e)
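Since cmpxchg cannot use a single AMO instruction, the emit_atomic() hunk below
builds it from an lr/sc retry loop. Below is a minimal, single-threaded C model
of what that sequence computes; the function and parameter names are
illustrative only, and the atomicity of the real code comes from the lr/sc
reservation and the retry branch, not from anything visible in this sketch.

#include <stdint.h>

/*
 * Single-threaded model of the BPF_CMPXCHG lowering: BPF r0 carries the
 * expected value in and the old value out, src_reg carries the new value,
 * and the memory operand is *(u32/u64 *)(dst_reg + off16).
 */
static uint64_t cmpxchg_model(uint64_t *addr, uint64_t expected, uint64_t desired)
{
	uint64_t old = *addr;		/* lr.d/lr.w: load the old value, take a reservation */

	if (old == expected)		/* bne t2, r0, <done>: a mismatch skips the store    */
		*addr = desired;	/* sc.d/sc.w: store src_reg; if the reservation was  */
					/* lost, the JITed code branches back to the lr       */
	/* rv_fence(0x3, 0x3): full fence emitted on the successful-store path */
	return old;			/* the old value is left in BPF r0 */
}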
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -535,6 +535,43 @@ static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
 	return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
 }
 
+static inline u32 rv_amoand_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0xc, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x8, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoxor_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x4, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_amoswap_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x1, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_lr_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x2, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_sc_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x3, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+static inline u32 rv_fence(u8 pred, u8 succ)
+{
+	u16 imm11_0 = pred << 4 | succ;
+
+	return rv_i_insn(imm11_0, 0, 0, 0, 0xf);
+}
+
 /* RVC instrutions. */
 
 static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
@@ -753,6 +790,36 @@ static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
 	return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
 }
 
+static inline u32 rv_amoand_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0xc, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoor_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x8, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoxor_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x4, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_amoswap_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x1, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_lr_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x2, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+static inline u32 rv_sc_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+	return rv_amo_insn(0x3, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
 /* RV64-only RVC instructions. */
 
 static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
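All of the rv_amo*_w/_d, rv_lr_*, and rv_sc_* helpers above differ only in the
funct5 selector and in funct3 (2 for a word access, 3 for a doubleword access),
with opcode 0x2f. As a rough cross-check, the standalone sketch below
reproduces the standard R-type AMO layout those arguments imply; encode_amo()
and the enum are hypothetical names, not part of the kernel code, and this
assumes rv_amo_insn() follows the conventional funct5:aq:rl packing into funct7.

#include <stdint.h>

/* funct5 selectors used by the helpers above (RISC-V "A" extension). */
enum {
	AMO_ADD  = 0x0,
	AMO_SWAP = 0x1,
	AMO_LR   = 0x2,
	AMO_SC   = 0x3,
	AMO_XOR  = 0x4,
	AMO_OR   = 0x8,
	AMO_AND  = 0xc,
};

/*
 * R-type AMO encoding: funct7 = funct5:aq:rl, then rs2, rs1, funct3
 * (2 = .w, 3 = .d), rd, and the AMO major opcode 0x2f.
 */
static uint32_t encode_amo(uint8_t funct5, uint8_t aq, uint8_t rl,
			   uint8_t rs2, uint8_t rs1, uint8_t funct3, uint8_t rd)
{
	uint8_t funct7 = (uint8_t)((funct5 << 2) | (aq << 1) | rl);

	return ((uint32_t)funct7 << 25) | ((uint32_t)rs2 << 20) |
	       ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
	       ((uint32_t)rd << 7) | 0x2f;
}

For example, encode_amo(AMO_ADD, 0, 0, rs, rd, 3, 0) corresponds to
amoadd.d zero, rs, (rd), i.e. the instruction emitted below for a 64-bit
BPF_ADD whose result is discarded.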
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -455,6 +455,90 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
 	return 0;
 }
 
+static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+			struct rv_jit_context *ctx)
+{
+	u8 r0;
+	int jmp_offset;
+
+	if (off) {
+		if (is_12b_int(off)) {
+			emit_addi(RV_REG_T1, rd, off, ctx);
+		} else {
+			emit_imm(RV_REG_T1, off, ctx);
+			emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+		}
+		rd = RV_REG_T1;
+	}
+
+	switch (imm) {
+	/* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
+	case BPF_ADD:
+		emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
+		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+		break;
+	case BPF_AND:
+		emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
+		     rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+		break;
+	case BPF_OR:
+		emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
+		     rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+		break;
+	case BPF_XOR:
+		emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
+		     rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+		break;
+	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
+	case BPF_ADD | BPF_FETCH:
+		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
+		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
+		if (!is64)
+			emit_zext_32(rs, ctx);
+		break;
+	case BPF_AND | BPF_FETCH:
+		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
+		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
+		if (!is64)
+			emit_zext_32(rs, ctx);
+		break;
+	case BPF_OR | BPF_FETCH:
+		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
+		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
+		if (!is64)
+			emit_zext_32(rs, ctx);
+		break;
+	case BPF_XOR | BPF_FETCH:
+		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
+		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
+		if (!is64)
+			emit_zext_32(rs, ctx);
+		break;
+	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
+	case BPF_XCHG:
+		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
+		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
+		if (!is64)
+			emit_zext_32(rs, ctx);
+		break;
+	/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
+	case BPF_CMPXCHG:
+		r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
+		emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
+		     rv_addiw(RV_REG_T2, r0, 0), ctx);
+		emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
+		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
+		jmp_offset = ninsns_rvoff(8);
+		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
+		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
+		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
+		jmp_offset = ninsns_rvoff(-6);
+		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
+		emit(rv_fence(0x3, 0x3), ctx);
+		break;
+	}
+}
+
 #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
 #define BPF_FIXUP_REG_MASK GENMASK(31, 27)
 
@@ -1146,30 +1230,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		break;
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (insn->imm != BPF_ADD) {
-			pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
-			       insn->imm);
-			return -EINVAL;
-		}
-
-		/* atomic_add: lock *(u32 *)(dst + off) += src
-		 * atomic_add: lock *(u64 *)(dst + off) += src
-		 */
-
-		if (off) {
-			if (is_12b_int(off)) {
-				emit_addi(RV_REG_T1, rd, off, ctx);
-			} else {
-				emit_imm(RV_REG_T1, off, ctx);
-				emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
-			}
-
-			rd = RV_REG_T1;
-		}
-
-		emit(BPF_SIZE(code) == BPF_W ?
-		     rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
-		     rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+		emit_atomic(rd, rs, off, imm,
+			    BPF_SIZE(code) == BPF_DW, ctx);
 		break;
 	default:
 		pr_err("bpf-jit: unknown opcode %02x\n", code);
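For context, the selftests mentioned in the commit message exercise these JIT
paths from BPF C code via compiler builtins. A hypothetical program fragment
along the following lines (assuming a clang with BPF atomics support, i.e.
-mcpu=v3, plus the usual libbpf headers; the section name and variable names
are illustrative) would hit the fetch-add, xchg, and cmpxchg cases handled by
emit_atomic() above:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u64 counter;
__u64 slot;

SEC("raw_tp/sys_enter")
int atomic_demo(void *ctx)
{
	__u64 old;

	/* Expected to compile to BPF_ATOMIC with BPF_ADD | BPF_FETCH. */
	old = __sync_fetch_and_add(&counter, 1);

	/* Expected to compile to BPF_XCHG: unconditional swap. */
	old = __sync_lock_test_and_set(&slot, old);

	/* Expected to compile to BPF_CMPXCHG, JITed to the lr/sc loop on RV64. */
	__sync_val_compare_and_swap(&slot, old, old + 1);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";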