* [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 20:33 ` Eduard Zingerman
2023-07-20 0:01 ` [PATCH bpf-next v3 02/17] bpf: Support new sign-extension mov insns Yonghong Song
` (16 subsequent siblings)
17 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add interpreter/jit support for the new sign-extension load insns,
which add a new mode (BPF_MEMSX).
Also add verifier support to recognize these insns and to
do proper verification with them. In the verifier, besides
deducing proper bounds for the dst_reg, probed memory access
is also properly handled.
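For illustration, a sign-extending byte load such as
r1 = *(s8 *)(r2 + 8) would be encoded with the new mode roughly as
below (a hypothetical snippet for this commit message, not code from
the patch; it only uses the BPF_MEMSX mode added here plus existing
definitions from include/uapi/linux/bpf.h):
  /* r1 = *(s8 *)(r2 + 8), sign-extended into the full 64-bit r1 */
  struct bpf_insn sext_ld = {
          .code    = BPF_LDX | BPF_MEMSX | BPF_B,
          .dst_reg = BPF_REG_1,
          .src_reg = BPF_REG_2,
          .off     = 8,
          .imm     = 0,   /* must stay 0, checked by the verifier */
  };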
Signed-off-by: Yonghong Song <yhs@fb.com>
---
arch/x86/net/bpf_jit_comp.c | 42 ++++++++-
include/linux/filter.h | 3 +
include/uapi/linux/bpf.h | 1 +
kernel/bpf/core.c | 21 +++++
kernel/bpf/verifier.c | 150 +++++++++++++++++++++++++++------
tools/include/uapi/linux/bpf.h | 1 +
6 files changed, 191 insertions(+), 27 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 83c4b45dc65f..54478a9c93e1 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -779,6 +779,29 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
*pprog = prog;
}
+/* LDSX: dst_reg = *(s8*)(src_reg + off) */
+static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+ u8 *prog = *pprog;
+
+ switch (size) {
+ case BPF_B:
+ /* Emit 'movsx rax, byte ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
+ break;
+ case BPF_H:
+ /* Emit 'movsx rax, word ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
+ break;
+ case BPF_W:
+ /* Emit 'movsx rax, dword ptr [rax+0x14]' */
+ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
+ break;
+ }
+ emit_insn_suffix(&prog, src_reg, dst_reg, off);
+ *pprog = prog;
+}
+
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
@@ -1370,9 +1393,17 @@ st: if (is_imm8(insn->off))
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /* LDXS: dst_reg = *(s8*)(src_reg + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
insn_off = insn->off;
- if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
/* Conservatively check that src_reg + insn->off is a kernel address:
* src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
* src_reg is used as scratch for src_reg += insn->off and restored
@@ -1415,8 +1446,13 @@ st: if (is_imm8(insn->off))
start_of_ldx = prog;
end_of_jmp[-1] = start_of_ldx - end_of_jmp;
}
- emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
- if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
+ BPF_MODE(insn->code) == BPF_MEMSX)
+ emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
+ else
+ emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
struct exception_table_entry *ex;
u8 *_insn = image + proglen + (start_of_ldx - temp);
s64 delta;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f69114083ec7..a93242b5516b 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -69,6 +69,9 @@ struct ctl_table_header;
/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20
+/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
+#define BPF_PROBE_MEMSX 0x40
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 739c15906a65..651a34511780 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -19,6 +19,7 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
+#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index dc85240a0134..01b72fc001f6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1610,6 +1610,9 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_3(LDX, MEM, H), \
INSN_3(LDX, MEM, W), \
INSN_3(LDX, MEM, DW), \
+ INSN_3(LDX, MEMSX, B), \
+ INSN_3(LDX, MEMSX, H), \
+ INSN_3(LDX, MEMSX, W), \
/* Immediate based. */ \
INSN_3(LD, IMM, DW)
@@ -1666,6 +1669,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
+ [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
+ [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
+ [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
@@ -1942,6 +1948,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
LDST(DW, u64)
#undef LDST
+#define LDSX(SIZEOP, SIZE) \
+ LDX_MEMSX_##SIZEOP: \
+ DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
+ CONT; \
+ LDX_PROBE_MEMSX_##SIZEOP: \
+ bpf_probe_read_kernel(&DST, sizeof(SIZE), \
+ (const void *)(long) (SRC + insn->off)); \
+ DST = *((SIZE *)&DST); \
+ CONT;
+
+ LDSX(B, s8)
+ LDSX(H, s16)
+ LDSX(W, s32)
+#undef LDSX
+
#define ATOMIC_ALU_OP(BOP, KOP) \
case BOP: \
if (BPF_SIZE(insn->code) == BPF_W) \
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 803b91135ca0..79c0cd50ec59 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5809,6 +5809,94 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
__reg_combine_64_into_32(reg);
}
+static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
+{
+ if (size == 1) {
+ reg->smin_value = reg->s32_min_value = S8_MIN;
+ reg->smax_value = reg->s32_max_value = S8_MAX;
+ } else if (size == 2) {
+ reg->smin_value = reg->s32_min_value = S16_MIN;
+ reg->smax_value = reg->s32_max_value = S16_MAX;
+ } else {
+ /* size == 4 */
+ reg->smin_value = reg->s32_min_value = S32_MIN;
+ reg->smax_value = reg->s32_max_value = S32_MAX;
+ }
+ reg->umin_value = reg->u32_min_value = 0;
+ reg->umax_value = U64_MAX;
+ reg->u32_max_value = U32_MAX;
+ reg->var_off = tnum_unknown;
+}
+
+static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
+{
+ s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
+ u64 top_smax_value, top_smin_value;
+ u64 num_bits = size * 8;
+
+ if (tnum_is_const(reg->var_off)) {
+ u64_cval = reg->var_off.value;
+ if (size == 1)
+ reg->var_off = tnum_const((s8)u64_cval);
+ else if (size == 2)
+ reg->var_off = tnum_const((s16)u64_cval);
+ else
+ /* size == 4 */
+ reg->var_off = tnum_const((s32)u64_cval);
+
+ u64_cval = reg->var_off.value;
+ reg->smax_value = reg->smin_value = u64_cval;
+ reg->umax_value = reg->umin_value = u64_cval;
+ reg->s32_max_value = reg->s32_min_value = u64_cval;
+ reg->u32_max_value = reg->u32_min_value = u64_cval;
+ return;
+ }
+
+ top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
+ top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
+
+ if (top_smax_value != top_smin_value)
+ goto out;
+
+ /* find the s64_min and s64_min after sign extension */
+ if (size == 1) {
+ init_s64_max = (s8)reg->smax_value;
+ init_s64_min = (s8)reg->smin_value;
+ } else if (size == 2) {
+ init_s64_max = (s16)reg->smax_value;
+ init_s64_min = (s16)reg->smin_value;
+ } else {
+ init_s64_max = (s32)reg->smax_value;
+ init_s64_min = (s32)reg->smin_value;
+ }
+
+ s64_max = max(init_s64_max, init_s64_min);
+ s64_min = min(init_s64_max, init_s64_min);
+
+ if (s64_max >= 0 && s64_min >= 0) {
+ reg->smin_value = reg->s32_min_value = s64_min;
+ reg->smax_value = reg->s32_max_value = s64_max;
+ reg->umin_value = reg->u32_min_value = s64_min;
+ reg->umax_value = reg->u32_max_value = s64_max;
+ reg->var_off = tnum_range(s64_min, s64_max);
+ return;
+ }
+
+ if (s64_min < 0 && s64_max < 0) {
+ reg->smin_value = reg->s32_min_value = s64_min;
+ reg->smax_value = reg->s32_max_value = s64_max;
+ reg->umin_value = (u64)s64_min;
+ reg->umax_value = (u64)s64_max;
+ reg->u32_min_value = (u32)s64_min;
+ reg->u32_max_value = (u32)s64_max;
+ reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
+ return;
+ }
+
+out:
+ set_sext64_default_val(reg, size);
+}
+
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
/* A map is considered read-only if the following condition are true:
@@ -5829,7 +5917,8 @@ static bool bpf_map_is_rdonly(const struct bpf_map *map)
!bpf_map_write_active(map);
}
-static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
+static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
+ bool is_ldsx)
{
void *ptr;
u64 addr;
@@ -5842,13 +5931,13 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
switch (size) {
case sizeof(u8):
- *val = (u64)*(u8 *)ptr;
+ *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr;
break;
case sizeof(u16):
- *val = (u64)*(u16 *)ptr;
+ *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr;
break;
case sizeof(u32):
- *val = (u64)*(u32 *)ptr;
+ *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr;
break;
case sizeof(u64):
*val = *(u64 *)ptr;
@@ -6267,7 +6356,7 @@ static int check_stack_access_within_bounds(
*/
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
int off, int bpf_size, enum bpf_access_type t,
- int value_regno, bool strict_alignment_once)
+ int value_regno, bool strict_alignment_once, bool is_ldsx)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
@@ -6328,7 +6417,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
u64 val = 0;
err = bpf_map_direct_read(map, map_off, size,
- &val);
+ &val, is_ldsx);
if (err)
return err;
@@ -6498,8 +6587,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
- /* b/h/w load zero-extends, mark upper bits as known 0 */
- coerce_reg_to_size(&regs[value_regno], size);
+ if (!is_ldsx)
+ /* b/h/w load zero-extends, mark upper bits as known 0 */
+ coerce_reg_to_size(&regs[value_regno], size);
+ else
+ coerce_reg_to_size_sx(&regs[value_regno], size);
}
return err;
}
@@ -6591,17 +6683,17 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
* case to simulate the register fill.
*/
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_READ, -1, true);
+ BPF_SIZE(insn->code), BPF_READ, -1, true, false);
if (!err && load_reg >= 0)
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, load_reg,
- true);
+ true, false);
if (err)
return err;
/* Check whether we can write into the same memory. */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
if (err)
return err;
@@ -6847,7 +6939,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
return zero_size_allowed ? 0 : -EACCES;
return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
- atype, -1, false);
+ atype, -1, false, false);
}
fallthrough;
@@ -7219,7 +7311,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
/* we write BPF_DW bits (8 bytes) at a time */
for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
err = check_mem_access(env, insn_idx, regno,
- i, BPF_DW, BPF_WRITE, -1, false);
+ i, BPF_DW, BPF_WRITE, -1, false, false);
if (err)
return err;
}
@@ -7312,7 +7404,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
err = check_mem_access(env, insn_idx, regno,
- i, BPF_DW, BPF_WRITE, -1, false);
+ i, BPF_DW, BPF_WRITE, -1, false, false);
if (err)
return err;
}
@@ -9456,7 +9548,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
*/
for (i = 0; i < meta.access_size; i++) {
err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
- BPF_WRITE, -1, false);
+ BPF_WRITE, -1, false, false);
if (err)
return err;
}
@@ -16184,7 +16276,7 @@ static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type typ
* Have to support a use case when one path through
* the program yields TRUSTED pointer while another
* is UNTRUSTED. Fallback to UNTRUSTED to generate
- * BPF_PROBE_MEM.
+ * BPF_PROBE_MEM/BPF_PROBE_MEMSX.
*/
*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
} else {
@@ -16325,7 +16417,8 @@ static int do_check(struct bpf_verifier_env *env)
*/
err = check_mem_access(env, env->insn_idx, insn->src_reg,
insn->off, BPF_SIZE(insn->code),
- BPF_READ, insn->dst_reg, false);
+ BPF_READ, insn->dst_reg, false,
+ BPF_MODE(insn->code) == BPF_MEMSX);
if (err)
return err;
@@ -16362,7 +16455,7 @@ static int do_check(struct bpf_verifier_env *env)
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
- BPF_WRITE, insn->src_reg, false);
+ BPF_WRITE, insn->src_reg, false, false);
if (err)
return err;
@@ -16387,7 +16480,7 @@ static int do_check(struct bpf_verifier_env *env)
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
- BPF_WRITE, -1, false);
+ BPF_WRITE, -1, false, false);
if (err)
return err;
@@ -16815,7 +16908,8 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
- (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
+ ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
+ insn->imm != 0)) {
verbose(env, "BPF_LDX uses reserved fields\n");
return -EINVAL;
}
@@ -17513,7 +17607,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
- insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
+ insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
+ insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
+ insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
+ insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
type = BPF_READ;
} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
@@ -17572,8 +17669,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
*/
case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
if (type == BPF_READ) {
- insn->code = BPF_LDX | BPF_PROBE_MEM |
- BPF_SIZE((insn)->code);
+ if (BPF_MODE(insn->code) == BPF_MEM)
+ insn->code = BPF_LDX | BPF_PROBE_MEM |
+ BPF_SIZE((insn)->code);
+ else
+ insn->code = BPF_LDX | BPF_PROBE_MEMSX |
+ BPF_SIZE((insn)->code);
env->prog->aux->num_exentries++;
}
continue;
@@ -17761,7 +17862,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
insn = func[i]->insnsi;
for (j = 0; j < func[i]->len; j++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
- BPF_MODE(insn->code) == BPF_PROBE_MEM)
+ (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
num_exentries++;
}
func[i]->aux->num_exentries = num_exentries;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 739c15906a65..651a34511780 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -19,6 +19,7 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
+#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns
2023-07-20 0:01 ` [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns Yonghong Song
@ 2023-07-20 20:33 ` Eduard Zingerman
2023-07-24 4:03 ` [Bpf] " Yonghong Song
0 siblings, 1 reply; 30+ messages in thread
From: Eduard Zingerman @ 2023-07-20 20:33 UTC (permalink / raw)
To: Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
On Wed, 2023-07-19 at 17:01 -0700, Yonghong Song wrote:
> Add interpreter/jit support for new sign-extension load insns
> which adds a new mode (BPF_MEMSX).
> Also add verifier support to recognize these insns and to
> do proper verification with new insns. In verifier, besides
> to deduce proper bounds for the dst_reg, probed memory access
> is also properly handled.
>
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
> arch/x86/net/bpf_jit_comp.c | 42 ++++++++-
> include/linux/filter.h | 3 +
> include/uapi/linux/bpf.h | 1 +
> kernel/bpf/core.c | 21 +++++
> kernel/bpf/verifier.c | 150 +++++++++++++++++++++++++++------
> tools/include/uapi/linux/bpf.h | 1 +
> 6 files changed, 191 insertions(+), 27 deletions(-)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 83c4b45dc65f..54478a9c93e1 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -779,6 +779,29 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
> *pprog = prog;
> }
>
> +/* LDSX: dst_reg = *(s8*)(src_reg + off) */
> +static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
> +{
> + u8 *prog = *pprog;
> +
> + switch (size) {
> + case BPF_B:
> + /* Emit 'movsx rax, byte ptr [rax + off]' */
> + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
> + break;
> + case BPF_H:
> + /* Emit 'movsx rax, word ptr [rax + off]' */
> + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
> + break;
> + case BPF_W:
> + /* Emit 'movsx rax, dword ptr [rax+0x14]' */
> + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
> + break;
> + }
> + emit_insn_suffix(&prog, src_reg, dst_reg, off);
> + *pprog = prog;
> +}
> +
> /* STX: *(u8*)(dst_reg + off) = src_reg */
> static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
> {
> @@ -1370,9 +1393,17 @@ st: if (is_imm8(insn->off))
> case BPF_LDX | BPF_PROBE_MEM | BPF_W:
> case BPF_LDX | BPF_MEM | BPF_DW:
> case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
> + /* LDXS: dst_reg = *(s8*)(src_reg + off) */
> + case BPF_LDX | BPF_MEMSX | BPF_B:
> + case BPF_LDX | BPF_MEMSX | BPF_H:
> + case BPF_LDX | BPF_MEMSX | BPF_W:
> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
> insn_off = insn->off;
>
> - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
> + if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
> + BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
> /* Conservatively check that src_reg + insn->off is a kernel address:
> * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
> * src_reg is used as scratch for src_reg += insn->off and restored
> @@ -1415,8 +1446,13 @@ st: if (is_imm8(insn->off))
> start_of_ldx = prog;
> end_of_jmp[-1] = start_of_ldx - end_of_jmp;
> }
> - emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
> - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
> + if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
> + BPF_MODE(insn->code) == BPF_MEMSX)
> + emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
> + else
> + emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
> + if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
> + BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
> struct exception_table_entry *ex;
> u8 *_insn = image + proglen + (start_of_ldx - temp);
> s64 delta;
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index f69114083ec7..a93242b5516b 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -69,6 +69,9 @@ struct ctl_table_header;
> /* unused opcode to mark special load instruction. Same as BPF_ABS */
> #define BPF_PROBE_MEM 0x20
>
> +/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
> +#define BPF_PROBE_MEMSX 0x40
> +
> /* unused opcode to mark call to interpreter with arguments */
> #define BPF_CALL_ARGS 0xe0
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 739c15906a65..651a34511780 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -19,6 +19,7 @@
>
> /* ld/ldx fields */
> #define BPF_DW 0x18 /* double word (64-bit) */
> +#define BPF_MEMSX 0x80 /* load with sign extension */
> #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
> #define BPF_XADD 0xc0 /* exclusive add - legacy name */
>
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index dc85240a0134..01b72fc001f6 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -1610,6 +1610,9 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
> INSN_3(LDX, MEM, H), \
> INSN_3(LDX, MEM, W), \
> INSN_3(LDX, MEM, DW), \
> + INSN_3(LDX, MEMSX, B), \
> + INSN_3(LDX, MEMSX, H), \
> + INSN_3(LDX, MEMSX, W), \
> /* Immediate based. */ \
> INSN_3(LD, IMM, DW)
>
> @@ -1666,6 +1669,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
> [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
> [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
> [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
> };
> #undef BPF_INSN_3_LBL
> #undef BPF_INSN_2_LBL
> @@ -1942,6 +1948,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
> LDST(DW, u64)
> #undef LDST
>
> +#define LDSX(SIZEOP, SIZE) \
> + LDX_MEMSX_##SIZEOP: \
> + DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
> + CONT; \
> + LDX_PROBE_MEMSX_##SIZEOP: \
> + bpf_probe_read_kernel(&DST, sizeof(SIZE), \
> + (const void *)(long) (SRC + insn->off)); \
> + DST = *((SIZE *)&DST); \
> + CONT;
> +
> + LDSX(B, s8)
> + LDSX(H, s16)
> + LDSX(W, s32)
> +#undef LDSX
> +
> #define ATOMIC_ALU_OP(BOP, KOP) \
> case BOP: \
> if (BPF_SIZE(insn->code) == BPF_W) \
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 803b91135ca0..79c0cd50ec59 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -5809,6 +5809,94 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
> __reg_combine_64_into_32(reg);
> }
>
> +static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
> +{
> + if (size == 1) {
> + reg->smin_value = reg->s32_min_value = S8_MIN;
> + reg->smax_value = reg->s32_max_value = S8_MAX;
> + } else if (size == 2) {
> + reg->smin_value = reg->s32_min_value = S16_MIN;
> + reg->smax_value = reg->s32_max_value = S16_MAX;
> + } else {
> + /* size == 4 */
> + reg->smin_value = reg->s32_min_value = S32_MIN;
> + reg->smax_value = reg->s32_max_value = S32_MAX;
> + }
> + reg->umin_value = reg->u32_min_value = 0;
> + reg->umax_value = U64_MAX;
> + reg->u32_max_value = U32_MAX;
> + reg->var_off = tnum_unknown;
> +}
> +
> +static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
> +{
> + s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
> + u64 top_smax_value, top_smin_value;
> + u64 num_bits = size * 8;
> +
> + if (tnum_is_const(reg->var_off)) {
> + u64_cval = reg->var_off.value;
> + if (size == 1)
> + reg->var_off = tnum_const((s8)u64_cval);
> + else if (size == 2)
> + reg->var_off = tnum_const((s16)u64_cval);
> + else
> + /* size == 4 */
> + reg->var_off = tnum_const((s32)u64_cval);
> +
> + u64_cval = reg->var_off.value;
> + reg->smax_value = reg->smin_value = u64_cval;
> + reg->umax_value = reg->umin_value = u64_cval;
> + reg->s32_max_value = reg->s32_min_value = u64_cval;
> + reg->u32_max_value = reg->u32_min_value = u64_cval;
> + return;
> + }
> +
> + top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
> + top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
> +
> + if (top_smax_value != top_smin_value)
> + goto out;
> +
> + /* find the s64_min and s64_min after sign extension */
> + if (size == 1) {
> + init_s64_max = (s8)reg->smax_value;
> + init_s64_min = (s8)reg->smin_value;
> + } else if (size == 2) {
> + init_s64_max = (s16)reg->smax_value;
> + init_s64_min = (s16)reg->smin_value;
> + } else {
> + init_s64_max = (s32)reg->smax_value;
> + init_s64_min = (s32)reg->smin_value;
> + }
> +
> + s64_max = max(init_s64_max, init_s64_min);
> + s64_min = min(init_s64_max, init_s64_min);
> +
> + if (s64_max >= 0 && s64_min >= 0) {
> + reg->smin_value = reg->s32_min_value = s64_min;
> + reg->smax_value = reg->s32_max_value = s64_max;
> + reg->umin_value = reg->u32_min_value = s64_min;
> + reg->umax_value = reg->u32_max_value = s64_max;
> + reg->var_off = tnum_range(s64_min, s64_max);
> + return;
> + }
> +
> + if (s64_min < 0 && s64_max < 0) {
> + reg->smin_value = reg->s32_min_value = s64_min;
> + reg->smax_value = reg->s32_max_value = s64_max;
> + reg->umin_value = (u64)s64_min;
> + reg->umax_value = (u64)s64_max;
> + reg->u32_min_value = (u32)s64_min;
> + reg->u32_max_value = (u32)s64_max;
> + reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
> + return;
> + }
I think that the bodies for (s64_max >= 0 && s64_min >= 0)
and (s64_min < 0 && s64_max < 0) are now identical.
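A minimal sketch of what the merged branch could look like (just an
illustration, not a concrete patch): since the assignments produce the
same values in either case, one branch guarded by both bounds having
the same sign would suffice:
	if ((s64_min < 0) == (s64_max < 0)) {
		/* same-sign bounds: the unsigned reinterpretation
		 * keeps the ordering, so the ranges coincide
		 */
		reg->smin_value = reg->s32_min_value = s64_min;
		reg->smax_value = reg->s32_max_value = s64_max;
		reg->umin_value = (u64)s64_min;
		reg->umax_value = (u64)s64_max;
		reg->u32_min_value = (u32)s64_min;
		reg->u32_max_value = (u32)s64_max;
		reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
		return;
	}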
> +
> +out:
> + set_sext64_default_val(reg, size);
> +}
> +
> static bool bpf_map_is_rdonly(const struct bpf_map *map)
> {
> /* A map is considered read-only if the following condition are true:
> @@ -5829,7 +5917,8 @@ static bool bpf_map_is_rdonly(const struct bpf_map *map)
> !bpf_map_write_active(map);
> }
>
> -static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
> +static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
> + bool is_ldsx)
> {
> void *ptr;
> u64 addr;
> @@ -5842,13 +5931,13 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
>
> switch (size) {
> case sizeof(u8):
> - *val = (u64)*(u8 *)ptr;
> + *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr;
> break;
> case sizeof(u16):
> - *val = (u64)*(u16 *)ptr;
> + *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr;
> break;
> case sizeof(u32):
> - *val = (u64)*(u32 *)ptr;
> + *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr;
> break;
> case sizeof(u64):
> *val = *(u64 *)ptr;
> @@ -6267,7 +6356,7 @@ static int check_stack_access_within_bounds(
> */
> static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
> int off, int bpf_size, enum bpf_access_type t,
> - int value_regno, bool strict_alignment_once)
> + int value_regno, bool strict_alignment_once, bool is_ldsx)
> {
> struct bpf_reg_state *regs = cur_regs(env);
> struct bpf_reg_state *reg = regs + regno;
> @@ -6328,7 +6417,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
> u64 val = 0;
>
> err = bpf_map_direct_read(map, map_off, size,
> - &val);
> + &val, is_ldsx);
> if (err)
> return err;
>
> @@ -6498,8 +6587,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
>
> if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
> regs[value_regno].type == SCALAR_VALUE) {
> - /* b/h/w load zero-extends, mark upper bits as known 0 */
> - coerce_reg_to_size(&regs[value_regno], size);
> + if (!is_ldsx)
> + /* b/h/w load zero-extends, mark upper bits as known 0 */
> + coerce_reg_to_size(&regs[value_regno], size);
> + else
> + coerce_reg_to_size_sx(&regs[value_regno], size);
> }
> return err;
> }
> @@ -6591,17 +6683,17 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
> * case to simulate the register fill.
> */
> err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> - BPF_SIZE(insn->code), BPF_READ, -1, true);
> + BPF_SIZE(insn->code), BPF_READ, -1, true, false);
> if (!err && load_reg >= 0)
> err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> BPF_SIZE(insn->code), BPF_READ, load_reg,
> - true);
> + true, false);
> if (err)
> return err;
>
> /* Check whether we can write into the same memory. */
> err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
> - BPF_SIZE(insn->code), BPF_WRITE, -1, true);
> + BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
> if (err)
> return err;
>
> @@ -6847,7 +6939,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
> return zero_size_allowed ? 0 : -EACCES;
>
> return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
> - atype, -1, false);
> + atype, -1, false, false);
> }
>
> fallthrough;
> @@ -7219,7 +7311,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
> /* we write BPF_DW bits (8 bytes) at a time */
> for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
> err = check_mem_access(env, insn_idx, regno,
> - i, BPF_DW, BPF_WRITE, -1, false);
> + i, BPF_DW, BPF_WRITE, -1, false, false);
> if (err)
> return err;
> }
> @@ -7312,7 +7404,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id
>
> for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
> err = check_mem_access(env, insn_idx, regno,
> - i, BPF_DW, BPF_WRITE, -1, false);
> + i, BPF_DW, BPF_WRITE, -1, false, false);
> if (err)
> return err;
> }
> @@ -9456,7 +9548,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
> */
> for (i = 0; i < meta.access_size; i++) {
> err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
> - BPF_WRITE, -1, false);
> + BPF_WRITE, -1, false, false);
> if (err)
> return err;
> }
> @@ -16184,7 +16276,7 @@ static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type typ
> * Have to support a use case when one path through
> * the program yields TRUSTED pointer while another
> * is UNTRUSTED. Fallback to UNTRUSTED to generate
> - * BPF_PROBE_MEM.
> + * BPF_PROBE_MEM/BPF_PROBE_MEMSX.
> */
> *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
> } else {
> @@ -16325,7 +16417,8 @@ static int do_check(struct bpf_verifier_env *env)
> */
> err = check_mem_access(env, env->insn_idx, insn->src_reg,
> insn->off, BPF_SIZE(insn->code),
> - BPF_READ, insn->dst_reg, false);
> + BPF_READ, insn->dst_reg, false,
> + BPF_MODE(insn->code) == BPF_MEMSX);
> if (err)
> return err;
>
> @@ -16362,7 +16455,7 @@ static int do_check(struct bpf_verifier_env *env)
> /* check that memory (dst_reg + off) is writeable */
> err = check_mem_access(env, env->insn_idx, insn->dst_reg,
> insn->off, BPF_SIZE(insn->code),
> - BPF_WRITE, insn->src_reg, false);
> + BPF_WRITE, insn->src_reg, false, false);
> if (err)
> return err;
>
> @@ -16387,7 +16480,7 @@ static int do_check(struct bpf_verifier_env *env)
> /* check that memory (dst_reg + off) is writeable */
> err = check_mem_access(env, env->insn_idx, insn->dst_reg,
> insn->off, BPF_SIZE(insn->code),
> - BPF_WRITE, -1, false);
> + BPF_WRITE, -1, false, false);
> if (err)
> return err;
>
> @@ -16815,7 +16908,8 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
>
> for (i = 0; i < insn_cnt; i++, insn++) {
> if (BPF_CLASS(insn->code) == BPF_LDX &&
> - (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
> + ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
> + insn->imm != 0)) {
> verbose(env, "BPF_LDX uses reserved fields\n");
> return -EINVAL;
> }
> @@ -17513,7 +17607,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
> if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
> insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
> insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
> - insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
> + insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
> + insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
> + insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
> + insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
> type = BPF_READ;
> } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
> insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
> @@ -17572,8 +17669,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
> */
> case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
> if (type == BPF_READ) {
> - insn->code = BPF_LDX | BPF_PROBE_MEM |
> - BPF_SIZE((insn)->code);
> + if (BPF_MODE(insn->code) == BPF_MEM)
> + insn->code = BPF_LDX | BPF_PROBE_MEM |
> + BPF_SIZE((insn)->code);
> + else
> + insn->code = BPF_LDX | BPF_PROBE_MEMSX |
> + BPF_SIZE((insn)->code);
> env->prog->aux->num_exentries++;
> }
> continue;
> @@ -17761,7 +17862,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
> insn = func[i]->insnsi;
> for (j = 0; j < func[i]->len; j++, insn++) {
> if (BPF_CLASS(insn->code) == BPF_LDX &&
> - BPF_MODE(insn->code) == BPF_PROBE_MEM)
> + (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
> + BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
> num_exentries++;
> }
> func[i]->aux->num_exentries = num_exentries;
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 739c15906a65..651a34511780 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -19,6 +19,7 @@
>
> /* ld/ldx fields */
> #define BPF_DW 0x18 /* double word (64-bit) */
> +#define BPF_MEMSX 0x80 /* load with sign extension */
> #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
> #define BPF_XADD 0xc0 /* exclusive add - legacy name */
>
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [Bpf] [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns
2023-07-20 20:33 ` Eduard Zingerman
@ 2023-07-24 4:03 ` Yonghong Song
2023-07-24 4:03 ` Yonghong Song
0 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-24 4:03 UTC (permalink / raw)
To: Eduard Zingerman, Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
On 7/20/23 1:33 PM, Eduard Zingerman wrote:
> On Wed, 2023-07-19 at 17:01 -0700, Yonghong Song wrote:
>> Add interpreter/jit support for new sign-extension load insns
>> which adds a new mode (BPF_MEMSX).
>> Also add verifier support to recognize these insns and to
>> do proper verification with new insns. In verifier, besides
>> to deduce proper bounds for the dst_reg, probed memory access
>> is also properly handled.
>>
>> Signed-off-by: Yonghong Song <yhs@fb.com>
>> ---
>> arch/x86/net/bpf_jit_comp.c | 42 ++++++++-
>> include/linux/filter.h | 3 +
>> include/uapi/linux/bpf.h | 1 +
>> kernel/bpf/core.c | 21 +++++
>> kernel/bpf/verifier.c | 150 +++++++++++++++++++++++++++------
>> tools/include/uapi/linux/bpf.h | 1 +
>> 6 files changed, 191 insertions(+), 27 deletions(-)
>>
>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
>> index 83c4b45dc65f..54478a9c93e1 100644
>> --- a/arch/x86/net/bpf_jit_comp.c
>> +++ b/arch/x86/net/bpf_jit_comp.c
>> @@ -779,6 +779,29 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>> *pprog = prog;
>> }
>>
>> +/* LDSX: dst_reg = *(s8*)(src_reg + off) */
>> +static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>> +{
>> + u8 *prog = *pprog;
>> +
>> + switch (size) {
>> + case BPF_B:
>> + /* Emit 'movsx rax, byte ptr [rax + off]' */
>> + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
>> + break;
>> + case BPF_H:
>> + /* Emit 'movsx rax, word ptr [rax + off]' */
>> + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
>> + break;
>> + case BPF_W:
>> + /* Emit 'movsx rax, dword ptr [rax+0x14]' */
>> + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
>> + break;
>> + }
>> + emit_insn_suffix(&prog, src_reg, dst_reg, off);
>> + *pprog = prog;
>> +}
>> +
>> /* STX: *(u8*)(dst_reg + off) = src_reg */
>> static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>> {
>> @@ -1370,9 +1393,17 @@ st: if (is_imm8(insn->off))
>> case BPF_LDX | BPF_PROBE_MEM | BPF_W:
>> case BPF_LDX | BPF_MEM | BPF_DW:
>> case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
>> + /* LDXS: dst_reg = *(s8*)(src_reg + off) */
>> + case BPF_LDX | BPF_MEMSX | BPF_B:
>> + case BPF_LDX | BPF_MEMSX | BPF_H:
>> + case BPF_LDX | BPF_MEMSX | BPF_W:
>> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
>> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
>> + case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
>> insn_off = insn->off;
>>
>> - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> + if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> + BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>> /* Conservatively check that src_reg + insn->off is a kernel address:
>> * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
>> * src_reg is used as scratch for src_reg += insn->off and restored
>> @@ -1415,8 +1446,13 @@ st: if (is_imm8(insn->off))
>> start_of_ldx = prog;
>> end_of_jmp[-1] = start_of_ldx - end_of_jmp;
>> }
>> - emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> + if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
>> + BPF_MODE(insn->code) == BPF_MEMSX)
>> + emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> + else
>> + emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> + if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> + BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>> struct exception_table_entry *ex;
>> u8 *_insn = image + proglen + (start_of_ldx - temp);
>> s64 delta;
>> diff --git a/include/linux/filter.h b/include/linux/filter.h
>> index f69114083ec7..a93242b5516b 100644
>> --- a/include/linux/filter.h
>> +++ b/include/linux/filter.h
>> @@ -69,6 +69,9 @@ struct ctl_table_header;
>> /* unused opcode to mark special load instruction. Same as BPF_ABS */
>> #define BPF_PROBE_MEM 0x20
>>
>> +/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
>> +#define BPF_PROBE_MEMSX 0x40
>> +
>> /* unused opcode to mark call to interpreter with arguments */
>> #define BPF_CALL_ARGS 0xe0
>>
>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>> index 739c15906a65..651a34511780 100644
>> --- a/include/uapi/linux/bpf.h
>> +++ b/include/uapi/linux/bpf.h
>> @@ -19,6 +19,7 @@
>>
>> /* ld/ldx fields */
>> #define BPF_DW 0x18 /* double word (64-bit) */
>> +#define BPF_MEMSX 0x80 /* load with sign extension */
>> #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
>> #define BPF_XADD 0xc0 /* exclusive add - legacy name */
>>
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> index dc85240a0134..01b72fc001f6 100644
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>> @@ -1610,6 +1610,9 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
>> INSN_3(LDX, MEM, H), \
>> INSN_3(LDX, MEM, W), \
>> INSN_3(LDX, MEM, DW), \
>> + INSN_3(LDX, MEMSX, B), \
>> + INSN_3(LDX, MEMSX, H), \
>> + INSN_3(LDX, MEMSX, W), \
>> /* Immediate based. */ \
>> INSN_3(LD, IMM, DW)
>>
>> @@ -1666,6 +1669,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>> [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
>> [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
>> [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
>> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
>> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
>> + [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
>> };
>> #undef BPF_INSN_3_LBL
>> #undef BPF_INSN_2_LBL
>> @@ -1942,6 +1948,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>> LDST(DW, u64)
>> #undef LDST
>>
>> +#define LDSX(SIZEOP, SIZE) \
>> + LDX_MEMSX_##SIZEOP: \
>> + DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
>> + CONT; \
>> + LDX_PROBE_MEMSX_##SIZEOP: \
>> + bpf_probe_read_kernel(&DST, sizeof(SIZE), \
>> + (const void *)(long) (SRC + insn->off)); \
>> + DST = *((SIZE *)&DST); \
>> + CONT;
>> +
>> + LDSX(B, s8)
>> + LDSX(H, s16)
>> + LDSX(W, s32)
>> +#undef LDSX
>> +
>> #define ATOMIC_ALU_OP(BOP, KOP) \
>> case BOP: \
>> if (BPF_SIZE(insn->code) == BPF_W) \
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index 803b91135ca0..79c0cd50ec59 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -5809,6 +5809,94 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
>> __reg_combine_64_into_32(reg);
>> }
>>
>> +static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
>> +{
>> + if (size == 1) {
>> + reg->smin_value = reg->s32_min_value = S8_MIN;
>> + reg->smax_value = reg->s32_max_value = S8_MAX;
>> + } else if (size == 2) {
>> + reg->smin_value = reg->s32_min_value = S16_MIN;
>> + reg->smax_value = reg->s32_max_value = S16_MAX;
>> + } else {
>> + /* size == 4 */
>> + reg->smin_value = reg->s32_min_value = S32_MIN;
>> + reg->smax_value = reg->s32_max_value = S32_MAX;
>> + }
>> + reg->umin_value = reg->u32_min_value = 0;
>> + reg->umax_value = U64_MAX;
>> + reg->u32_max_value = U32_MAX;
>> + reg->var_off = tnum_unknown;
>> +}
>> +
>> +static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
>> +{
>> + s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
>> + u64 top_smax_value, top_smin_value;
>> + u64 num_bits = size * 8;
>> +
>> + if (tnum_is_const(reg->var_off)) {
>> + u64_cval = reg->var_off.value;
>> + if (size == 1)
>> + reg->var_off = tnum_const((s8)u64_cval);
>> + else if (size == 2)
>> + reg->var_off = tnum_const((s16)u64_cval);
>> + else
>> + /* size == 4 */
>> + reg->var_off = tnum_const((s32)u64_cval);
>> +
>> + u64_cval = reg->var_off.value;
>> + reg->smax_value = reg->smin_value = u64_cval;
>> + reg->umax_value = reg->umin_value = u64_cval;
>> + reg->s32_max_value = reg->s32_min_value = u64_cval;
>> + reg->u32_max_value = reg->u32_min_value = u64_cval;
>> + return;
>> + }
>> +
>> + top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
>> + top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
>> +
>> + if (top_smax_value != top_smin_value)
>> + goto out;
>> +
>> + /* find the s64_min and s64_min after sign extension */
>> + if (size == 1) {
>> + init_s64_max = (s8)reg->smax_value;
>> + init_s64_min = (s8)reg->smin_value;
>> + } else if (size == 2) {
>> + init_s64_max = (s16)reg->smax_value;
>> + init_s64_min = (s16)reg->smin_value;
>> + } else {
>> + init_s64_max = (s32)reg->smax_value;
>> + init_s64_min = (s32)reg->smin_value;
>> + }
>> +
>> + s64_max = max(init_s64_max, init_s64_min);
>> + s64_min = min(init_s64_max, init_s64_min);
>> +
>> + if (s64_max >= 0 && s64_min >= 0) {
>> + reg->smin_value = reg->s32_min_value = s64_min;
>> + reg->smax_value = reg->s32_max_value = s64_max;
>> + reg->umin_value = reg->u32_min_value = s64_min;
>> + reg->umax_value = reg->u32_max_value = s64_max;
>> + reg->var_off = tnum_range(s64_min, s64_max);
>> + return;
>> + }
>> +
>> + if (s64_min < 0 && s64_max < 0) {
>> + reg->smin_value = reg->s32_min_value = s64_min;
>> + reg->smax_value = reg->s32_max_value = s64_max;
>> + reg->umin_value = (u64)s64_min;
>> + reg->umax_value = (u64)s64_max;
>> + reg->u32_min_value = (u32)s64_min;
>> + reg->u32_max_value = (u32)s64_max;
>> + reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
>> + return;
>> + }
>
> I think that the bodies for (s64_max >= 0 && s64_min >= 0)
> and (s64_min < 0 && s64_max < 0) are now identical.
Thanks. Will change in the next revision.
>
>> +
>> +out:
>> + set_sext64_default_val(reg, size);
>> +}
>> +
[...]
--
Bpf mailing list
Bpf@ietf.org
https://www.ietf.org/mailman/listinfo/bpf
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 02/17] bpf: Support new sign-extension mov insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 03/17] bpf: Handle sign-extension ctx member accesses Yonghong Song
` (15 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add interpreter/jit support for the new sign-extension mov insns.
The original 'MOV' insn is extended to support a reg-to-reg
signed (sign-extending) version for both ALU and ALU64 operations.
For ALU mode, an insn->off value of 8 or 16 indicates sign-extension
from an 8- or 16-bit value to a 32-bit value. For ALU64 mode,
an insn->off value of 8, 16 or 32 indicates sign-extension
from an 8-, 16- or 32-bit value to a 64-bit value.
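As an illustrative encoding (a hypothetical snippet, not taken from
the patch; it only assumes existing uapi definitions and the off-field
convention described above), a 64-bit mov with 32->64 sign extension,
r1 = (s32)r2, would look like:
  struct bpf_insn movs32 = {
          .code    = BPF_ALU64 | BPF_MOV | BPF_X,
          .dst_reg = BPF_REG_1,
          .src_reg = BPF_REG_2,
          .off     = 32,   /* sign-extend the low 32 bits of r2 */
          .imm     = 0,
  };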
Signed-off-by: Yonghong Song <yhs@fb.com>
---
arch/x86/net/bpf_jit_comp.c | 43 +++++++++-
kernel/bpf/core.c | 28 +++++-
kernel/bpf/verifier.c | 165 ++++++++++++++++++++++++++++++------
3 files changed, 205 insertions(+), 31 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 54478a9c93e1..031ef3c4185d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -701,6 +701,38 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
*pprog = prog;
}
+static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
+ u32 src_reg)
+{
+ u8 *prog = *pprog;
+
+ if (is64) {
+ /* movs[b,w,l]q dst, src */
+ if (num_bits == 8)
+ EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
+ add_2reg(0xC0, src_reg, dst_reg));
+ else if (num_bits == 16)
+ EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
+ add_2reg(0xC0, src_reg, dst_reg));
+ else if (num_bits == 32)
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
+ add_2reg(0xC0, src_reg, dst_reg));
+ } else {
+ /* movs[b,w]l dst, src */
+ if (num_bits == 8) {
+ EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
+ add_2reg(0xC0, src_reg, dst_reg));
+ } else if (num_bits == 16) {
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, src_reg, dst_reg));
+ EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
+ add_2reg(0xC0, src_reg, dst_reg));
+ }
+ }
+
+ *pprog = prog;
+}
+
/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
@@ -1051,9 +1083,14 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
case BPF_ALU64 | BPF_MOV | BPF_X:
case BPF_ALU | BPF_MOV | BPF_X:
- emit_mov_reg(&prog,
- BPF_CLASS(insn->code) == BPF_ALU64,
- dst_reg, src_reg);
+ if (insn->off == 0)
+ emit_mov_reg(&prog,
+ BPF_CLASS(insn->code) == BPF_ALU64,
+ dst_reg, src_reg);
+ else
+ emit_movsx_reg(&prog, insn->off,
+ BPF_CLASS(insn->code) == BPF_ALU64,
+ dst_reg, src_reg);
break;
/* neg dst */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 01b72fc001f6..c37c454b09eb 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -61,6 +61,7 @@
#define AX regs[BPF_REG_AX]
#define ARG1 regs[BPF_REG_ARG1]
#define CTX regs[BPF_REG_CTX]
+#define OFF insn->off
#define IMM insn->imm
struct bpf_mem_alloc bpf_global_ma;
@@ -1739,13 +1740,36 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
DST = -DST;
CONT;
ALU_MOV_X:
- DST = (u32) SRC;
+ switch (OFF) {
+ case 0:
+ DST = (u32) SRC;
+ break;
+ case 8:
+ DST = (u32)(s8) SRC;
+ break;
+ case 16:
+ DST = (u32)(s16) SRC;
+ break;
+ }
CONT;
ALU_MOV_K:
DST = (u32) IMM;
CONT;
ALU64_MOV_X:
- DST = SRC;
+ switch (OFF) {
+ case 0:
+ DST = SRC;
+ break;
+ case 8:
+ DST = (s8) SRC;
+ break;
+ case 16:
+ DST = (s16) SRC;
+ break;
+ case 32:
+ DST = (s32) SRC;
+ break;
+ }
CONT;
ALU64_MOV_K:
DST = IMM;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 79c0cd50ec59..fc82b5e77cbe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3421,7 +3421,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
return 0;
if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
- /* dreg = sreg
+ /* dreg = sreg or dreg = (s8, s16, s32)sreg
* dreg needs precision after this insn
* sreg needs precision before this insn
*/
@@ -5897,6 +5897,77 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
set_sext64_default_val(reg, size);
}
+static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
+{
+ if (size == 1) {
+ reg->s32_min_value = S8_MIN;
+ reg->s32_max_value = S8_MAX;
+ } else {
+ /* size == 2 */
+ reg->s32_min_value = S16_MIN;
+ reg->s32_max_value = S16_MAX;
+ }
+ reg->u32_min_value = 0;
+ reg->u32_max_value = U32_MAX;
+}
+
+static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
+{
+ s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val;
+ u32 top_smax_value, top_smin_value;
+ u32 num_bits = size * 8;
+
+ if (tnum_is_const(reg->var_off)) {
+ u32_val = reg->var_off.value;
+ if (size == 1)
+ reg->var_off = tnum_const((s8)u32_val);
+ else
+ reg->var_off = tnum_const((s16)u32_val);
+
+ u32_val = reg->var_off.value;
+ reg->s32_min_value = reg->s32_max_value = u32_val;
+ reg->u32_min_value = reg->u32_max_value = u32_val;
+ return;
+ }
+
+ top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
+ top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;
+
+ if (top_smax_value != top_smin_value)
+ goto out;
+
+ /* find the s32_min and s32_max after sign extension */
+ if (size == 1) {
+ init_s32_max = (s8)reg->s32_max_value;
+ init_s32_min = (s8)reg->s32_min_value;
+ } else {
+ /* size == 2 */
+ init_s32_max = (s16)reg->s32_max_value;
+ init_s32_min = (s16)reg->s32_min_value;
+ }
+ s32_max = max(init_s32_max, init_s32_min);
+ s32_min = min(init_s32_max, init_s32_min);
+
+ if (s32_min >= 0 && s32_max >= 0) {
+ reg->s32_min_value = s32_min;
+ reg->s32_max_value = s32_max;
+ reg->u32_min_value = 0;
+ reg->u32_max_value = U32_MAX;
+ return;
+ }
+
+ if (s32_min < 0 && s32_max < 0) {
+ reg->s32_min_value = s32_min;
+ reg->s32_max_value = s32_max;
+ reg->u32_min_value = (u32)s32_min;
+ reg->u32_max_value = (u32)s32_max;
+ return;
+ }
+
+out:
+ set_sext32_default_val(reg, size);
+}
+
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
/* A map is considered read-only if the following condition are true:
@@ -13030,11 +13101,24 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
- if (insn->imm != 0 || insn->off != 0) {
+ if (insn->imm != 0) {
verbose(env, "BPF_MOV uses reserved fields\n");
return -EINVAL;
}
+ if (BPF_CLASS(insn->code) == BPF_ALU) {
+ if (insn->off != 0 && insn->off != 8 && insn->off != 16) {
+ verbose(env, "BPF_MOV uses reserved fields\n");
+ return -EINVAL;
+ }
+ } else {
+ if (insn->off != 0 && insn->off != 8 && insn->off != 16 &&
+ insn->off != 32) {
+ verbose(env, "BPF_MOV uses reserved fields\n");
+ return -EINVAL;
+ }
+ }
+
/* check src operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
@@ -13058,18 +13142,33 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
!tnum_is_const(src_reg->var_off);
if (BPF_CLASS(insn->code) == BPF_ALU64) {
- /* case: R1 = R2
- * copy register state to dest reg
- */
- if (need_id)
- /* Assign src and dst registers the same ID
- * that will be used by find_equal_scalars()
- * to propagate min/max range.
+ if (insn->off == 0) {
+ /* case: R1 = R2
+ * copy register state to dest reg
*/
- src_reg->id = ++env->id_gen;
- copy_register_state(dst_reg, src_reg);
- dst_reg->live |= REG_LIVE_WRITTEN;
- dst_reg->subreg_def = DEF_NOT_SUBREG;
+ if (need_id)
+ /* Assign src and dst registers the same ID
+ * that will be used by find_equal_scalars()
+ * to propagate min/max range.
+ */
+ src_reg->id = ++env->id_gen;
+ copy_register_state(dst_reg, src_reg);
+ dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = DEF_NOT_SUBREG;
+ } else {
+ /* case: R1 = (s8, s16 s32)R2 */
+ bool no_sext;
+
+ no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
+ if (no_sext && need_id)
+ src_reg->id = ++env->id_gen;
+ copy_register_state(dst_reg, src_reg);
+ if (!no_sext)
+ dst_reg->id = 0;
+ coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
+ dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = DEF_NOT_SUBREG;
+ }
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
@@ -13078,19 +13177,33 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
insn->src_reg);
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
- bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
-
- if (is_src_reg_u32 && need_id)
- src_reg->id = ++env->id_gen;
- copy_register_state(dst_reg, src_reg);
- /* Make sure ID is cleared if src_reg is not in u32 range otherwise
- * dst_reg min/max could be incorrectly
- * propagated into src_reg by find_equal_scalars()
- */
- if (!is_src_reg_u32)
- dst_reg->id = 0;
- dst_reg->live |= REG_LIVE_WRITTEN;
- dst_reg->subreg_def = env->insn_idx + 1;
+ if (insn->off == 0) {
+ bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
+
+ if (is_src_reg_u32 && need_id)
+ src_reg->id = ++env->id_gen;
+ copy_register_state(dst_reg, src_reg);
+ /* Make sure ID is cleared if src_reg is not in u32
+ * range otherwise dst_reg min/max could be incorrectly
+ * propagated into src_reg by find_equal_scalars()
+ */
+ if (!is_src_reg_u32)
+ dst_reg->id = 0;
+ dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = env->insn_idx + 1;
+ } else {
+ /* case: W1 = (s8, s16)W2 */
+ bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
+
+ if (no_sext && need_id)
+ src_reg->id = ++env->id_gen;
+ copy_register_state(dst_reg, src_reg);
+ if (!no_sext)
+ dst_reg->id = 0;
+ dst_reg->live |= REG_LIVE_WRITTEN;
+ dst_reg->subreg_def = env->insn_idx + 1;
+ coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
+ }
} else {
mark_reg_unknown(env, regs,
insn->dst_reg);
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 03/17] bpf: Handle sign-extension ctx member accesses
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 02/17] bpf: Support new sign-extension mov insns Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 04/17] bpf: Support new unconditional bswap instruction Yonghong Song
` (14 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Currently, if a user accesses a ctx member with a signed type,
the compiler generates an unsigned load followed by the
necessary left and right shifts.
With the introduction of sign-extension loads, the compiler may
just emit a ldsx insn instead. So do a final movsx sign
extension on the unsigned result of the converted ctx load to
satisfy the original sign-extension requirement.
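A minimal sketch of what the appended insn computes (the helper name is
assumed for illustration, this is not kernel API):

#include <stdint.h>

/* Models the appended BPF_ALU64 | BPF_MOV | BPF_X insn with off = size * 8:
 * sign extend the low 'size' bytes of the unsigned ctx load result.
 */
static int64_t ctx_sext_fixup(uint64_t loaded, int size)
{
        switch (size) {
        case 1: return (int8_t)loaded;
        case 2: return (int16_t)loaded;
        case 4: return (int32_t)loaded;
        default: return (int64_t)loaded;
        }
}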
Signed-off-by: Yonghong Song <yhs@fb.com>
---
kernel/bpf/verifier.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fc82b5e77cbe..b9be8706bf48 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -17716,6 +17716,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
+ u8 mode;
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
@@ -17797,6 +17798,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
size = BPF_LDST_BYTES(insn);
+ mode = BPF_MODE(insn->code);
/* If the read access is a narrower load of the field,
* convert to a 4/8-byte load, to minimum program type specific
@@ -17856,6 +17858,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
(1ULL << size * 8) - 1);
}
}
+ if (mode == BPF_MEMSX)
+ insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X,
+ insn->dst_reg, insn->dst_reg,
+ size * 8, 0);
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 04/17] bpf: Support new unconditional bswap instruction
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (2 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 03/17] bpf: Handle sign-extension ctx member accesses Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 05/17] bpf: Support new signed div/mod instructions Yonghong Song
` (13 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
The existing 'be' and 'le' insns do a conditional bswap
depending on host endianness. This patch implements
unconditional bswap insns.
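For reference, a user-space model of the unconditional swap (illustrative
only; it uses compiler builtins rather than the kernel's __swab helpers):

#include <stdint.h>

/* BPF_ALU64 | BPF_END: always swap, regardless of host endianness */
static uint64_t bswap_uncond(uint64_t dst, int imm)
{
        switch (imm) {
        case 16: return __builtin_bswap16((uint16_t)dst);
        case 32: return __builtin_bswap32((uint32_t)dst);
        case 64: return __builtin_bswap64(dst);
        }
        return dst;
}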
Signed-off-by: Yonghong Song <yhs@fb.com>
---
arch/x86/net/bpf_jit_comp.c | 1 +
kernel/bpf/core.c | 14 ++++++++++++++
kernel/bpf/verifier.c | 7 +++++--
3 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 031ef3c4185d..4942a4c188b9 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1322,6 +1322,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm32) {
case 16:
/* Emit 'ror %ax, 8' to swap lower 2 bytes */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c37c454b09eb..ad58697cec4b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1524,6 +1524,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_3(ALU64, DIV, X), \
INSN_3(ALU64, MOD, X), \
INSN_2(ALU64, NEG), \
+ INSN_3(ALU64, END, TO_LE), \
/* Immediate based. */ \
INSN_3(ALU64, ADD, K), \
INSN_3(ALU64, SUB, K), \
@@ -1848,6 +1849,19 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
break;
}
CONT;
+ ALU64_END_TO_LE:
+ switch (IMM) {
+ case 16:
+ DST = (__force u16) __swab16(DST);
+ break;
+ case 32:
+ DST = (__force u32) __swab32(DST);
+ break;
+ case 64:
+ DST = (__force u64) __swab64(DST);
+ break;
+ }
+ CONT;
/* CALL */
JMP_CALL:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b9be8706bf48..b305f88c2569 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3012,8 +3012,10 @@ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
}
+ if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32))
+ return false;
+
if (class == BPF_ALU64 || class == BPF_JMP ||
- /* BPF_END always use BPF_ALU class. */
(class == BPF_ALU && op == BPF_END && insn->imm == 64))
return true;
@@ -13076,7 +13078,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
(insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
- BPF_CLASS(insn->code) == BPF_ALU64) {
+ (BPF_CLASS(insn->code) == BPF_ALU64 &&
+ BPF_SRC(insn->code) != BPF_TO_LE)) {
verbose(env, "BPF_END uses reserved fields\n");
return -EINVAL;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 05/17] bpf: Support new signed div/mod instructions.
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (3 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 04/17] bpf: Support new unconditional bswap instruction Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [Bpf] " Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 06/17] bpf: Fix jit blinding with new sdiv/smov insns Yonghong Song
` (12 subsequent siblings)
17 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add interpreter/jit support for new signed div/mod insns.
The new signed div/mod instructions are encoded using the
unsigned div/mod opcodes, with insn->off == 1.
Also add basic verifier support to ensure new insns get
accepted.
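A user-space model of the intended semantics for the 32-bit case
(truncated division, remainder takes the sign of the dividend; division
by zero is handled separately and omitted here; helper names are made up):

#include <stdint.h>

static int32_t sdiv32(int32_t dst, int32_t src)
{
        return dst / src;               /* C division already truncates */
}

static int32_t smod32(int32_t dst, int32_t src)
{
        return dst - (dst / src) * src; /* equivalent to dst % src */
}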
Signed-off-by: Yonghong Song <yhs@fb.com>
---
arch/x86/net/bpf_jit_comp.c | 27 ++++++---
kernel/bpf/core.c | 110 ++++++++++++++++++++++++++++++------
kernel/bpf/verifier.c | 6 +-
3 files changed, 117 insertions(+), 26 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4942a4c188b9..a89b62eb2b40 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1194,15 +1194,26 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
/* mov rax, dst_reg */
emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
- /*
- * xor edx, edx
- * equivalent to 'xor rdx, rdx', but one byte less
- */
- EMIT2(0x31, 0xd2);
+ if (insn->off == 0) {
+ /*
+ * xor edx, edx
+ * equivalent to 'xor rdx, rdx', but one byte less
+ */
+ EMIT2(0x31, 0xd2);
- /* div src_reg */
- maybe_emit_1mod(&prog, src_reg, is64);
- EMIT2(0xF7, add_1reg(0xF0, src_reg));
+ /* div src_reg */
+ maybe_emit_1mod(&prog, src_reg, is64);
+ EMIT2(0xF7, add_1reg(0xF0, src_reg));
+ } else {
+ if (BPF_CLASS(insn->code) == BPF_ALU)
+ EMIT1(0x99); /* cdq */
+ else
+ EMIT2(0x48, 0x99); /* cqo */
+
+ /* idiv src_reg */
+ maybe_emit_1mod(&prog, src_reg, is64);
+ EMIT2(0xF7, add_1reg(0xF8, src_reg));
+ }
if (BPF_OP(insn->code) == BPF_MOD &&
dst_reg != BPF_REG_3)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ad58697cec4b..3fe895199f6e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1792,36 +1792,114 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
- div64_u64_rem(DST, SRC, &AX);
- DST = AX;
+ switch (OFF) {
+ case 0:
+ div64_u64_rem(DST, SRC, &AX);
+ DST = AX;
+ break;
+ case 1:
+ AX = div64_s64(DST, SRC);
+ DST = DST - AX * SRC;
+ break;
+ }
CONT;
ALU_MOD_X:
- AX = (u32) DST;
- DST = do_div(AX, (u32) SRC);
+ switch (OFF) {
+ case 0:
+ AX = (u32) DST;
+ DST = do_div(AX, (u32) SRC);
+ break;
+ case 1:
+ AX = abs((s32)DST);
+ AX = do_div(AX, abs((s32)SRC));
+ if ((s32)DST < 0)
+ DST = (u32)-AX;
+ else
+ DST = (u32)AX;
+ break;
+ }
CONT;
ALU64_MOD_K:
- div64_u64_rem(DST, IMM, &AX);
- DST = AX;
+ switch (OFF) {
+ case 0:
+ div64_u64_rem(DST, IMM, &AX);
+ DST = AX;
+ break;
+ case 1:
+ AX = div64_s64(DST, IMM);
+ DST = DST - AX * IMM;
+ break;
+ }
CONT;
ALU_MOD_K:
- AX = (u32) DST;
- DST = do_div(AX, (u32) IMM);
+ switch (OFF) {
+ case 0:
+ AX = (u32) DST;
+ DST = do_div(AX, (u32) IMM);
+ break;
+ case 1:
+ AX = abs((s32)DST);
+ AX = do_div(AX, abs((s32)IMM));
+ if ((s32)DST < 0)
+ DST = (u32)-AX;
+ else
+ DST = (u32)AX;
+ break;
+ }
CONT;
ALU64_DIV_X:
- DST = div64_u64(DST, SRC);
+ switch (OFF) {
+ case 0:
+ DST = div64_u64(DST, SRC);
+ break;
+ case 1:
+ DST = div64_s64(DST, SRC);
+ break;
+ }
CONT;
ALU_DIV_X:
- AX = (u32) DST;
- do_div(AX, (u32) SRC);
- DST = (u32) AX;
+ switch (OFF) {
+ case 0:
+ AX = (u32) DST;
+ do_div(AX, (u32) SRC);
+ DST = (u32) AX;
+ break;
+ case 1:
+ AX = abs((s32)DST);
+ do_div(AX, abs((s32)SRC));
+ if ((s32)DST < 0 == (s32)SRC < 0)
+ DST = (u32)AX;
+ else
+ DST = (u32)-AX;
+ break;
+ }
CONT;
ALU64_DIV_K:
- DST = div64_u64(DST, IMM);
+ switch (OFF) {
+ case 0:
+ DST = div64_u64(DST, IMM);
+ break;
+ case 1:
+ DST = div64_s64(DST, IMM);
+ break;
+ }
CONT;
ALU_DIV_K:
- AX = (u32) DST;
- do_div(AX, (u32) IMM);
- DST = (u32) AX;
+ switch (OFF) {
+ case 0:
+ AX = (u32) DST;
+ do_div(AX, (u32) IMM);
+ DST = (u32) AX;
+ break;
+ case 1:
+ AX = abs((s32)DST);
+ do_div(AX, abs((s32)IMM));
+ if ((s32)DST < 0 == (s32)IMM < 0)
+ DST = (u32)AX;
+ else
+ DST = (u32)-AX;
+ break;
+ }
CONT;
ALU_END_TO_BE:
switch (IMM) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b305f88c2569..9436fa795f39 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13237,7 +13237,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else { /* all other ALU ops: and, sub, xor, add, ... */
if (BPF_SRC(insn->code) == BPF_X) {
- if (insn->imm != 0 || insn->off != 0) {
+ if (insn->imm != 0 || insn->off > 1 ||
+ (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
@@ -13246,7 +13247,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
} else {
- if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
+ if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
+ (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
verbose(env, "BPF_ALU uses reserved fields\n");
return -EINVAL;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 06/17] bpf: Fix jit blinding with new sdiv/smov insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (4 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 05/17] bpf: Support new signed div/mod instructions Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 07/17] bpf: Support new 32bit offset jmp instruction Yonghong Song
` (11 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Handle new insns properly in bpf_jit_blind_insn() function.
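For context, constant blinding rewrites an insn carrying a constant imm
into a short sequence that reconstructs the constant at run time in AX,
and for sdiv/smod the insn->off of the original insn must be carried over
to the rebuilt ALU insn. A rough model of what the blinded sequence
computes (assumed helper name, illustrative only):

#include <stdint.h>

static uint64_t blinded_imm(uint64_t imm, uint64_t imm_rnd)
{
        uint64_t ax = imm_rnd ^ imm;    /* BPF_MOV AX, imm_rnd ^ imm */

        ax ^= imm_rnd;                  /* BPF_XOR AX, imm_rnd */
        return ax;                      /* the original ALU op then uses AX */
}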
Signed-off-by: Yonghong Song <yhs@fb.com>
---
include/linux/filter.h | 14 ++++++++++----
kernel/bpf/core.c | 4 ++--
2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a93242b5516b..f5eabe3fa5e8 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -93,22 +93,28 @@ struct ctl_table_header;
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-#define BPF_ALU64_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
-#define BPF_ALU32_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
+
+#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
+
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
#define BPF_ALU64_IMM(OP, DST, IMM) \
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3fe895199f6e..646d2fe537be 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1272,7 +1272,7 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
case BPF_ALU | BPF_MOD | BPF_K:
*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
- *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
+ *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
break;
case BPF_ALU64 | BPF_ADD | BPF_K:
@@ -1286,7 +1286,7 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
case BPF_ALU64 | BPF_MOD | BPF_K:
*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
- *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
+ *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 07/17] bpf: Support new 32bit offset jmp instruction
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (5 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 06/17] bpf: Fix jit blinding with new sdiv/smov insns Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions Yonghong Song
` (10 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add interpreter/jit/verifier support for the 32-bit offset jmp instruction.
If a conditional jmp instruction needs more than a 16-bit offset,
it can be simulated with a conditional jmp plus a 32-bit unconditional jmp insn.
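A minimal model of where the two BPF_JA forms take their branch offsets
from (assumed helper, not kernel code):

#include <linux/bpf.h>

/* BPF_JMP | BPF_JA branches by insn->off (s16); the new BPF_JMP32 | BPF_JA
 * ('gotol') branches by insn->imm (s32), so targets beyond the +-32k insn
 * range become encodable.
 */
static const struct bpf_insn *ja_target(const struct bpf_insn *insn)
{
        if (BPF_CLASS(insn->code) == BPF_JMP32)
                return insn + insn->imm + 1;
        return insn + insn->off + 1;
}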
Signed-off-by: Yonghong Song <yhs@fb.com>
---
arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++----------
kernel/bpf/core.c | 19 ++++++++++++++++---
kernel/bpf/verifier.c | 32 ++++++++++++++++++++++----------
3 files changed, 56 insertions(+), 23 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a89b62eb2b40..a5930042139d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1815,16 +1815,24 @@ st: if (is_imm8(insn->off))
break;
case BPF_JMP | BPF_JA:
- if (insn->off == -1)
- /* -1 jmp instructions will always jump
- * backwards two bytes. Explicitly handling
- * this case avoids wasting too many passes
- * when there are long sequences of replaced
- * dead code.
- */
- jmp_offset = -2;
- else
- jmp_offset = addrs[i + insn->off] - addrs[i];
+ case BPF_JMP32 | BPF_JA:
+ if (BPF_CLASS(insn->code) == BPF_JMP) {
+ if (insn->off == -1)
+ /* -1 jmp instructions will always jump
+ * backwards two bytes. Explicitly handling
+ * this case avoids wasting too many passes
+ * when there are long sequences of replaced
+ * dead code.
+ */
+ jmp_offset = -2;
+ else
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+ } else {
+ if (insn->imm == -1)
+ jmp_offset = -2;
+ else
+ jmp_offset = addrs[i + insn->imm] - addrs[i];
+ }
if (!jmp_offset) {
/*
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 646d2fe537be..db0b631908c2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -373,7 +373,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
{
const s32 off_min = S16_MIN, off_max = S16_MAX;
s32 delta = end_new - end_old;
- s32 off = insn->off;
+ s32 off;
+
+ if (insn->code == (BPF_JMP32 | BPF_JA))
+ off = insn->imm;
+ else
+ off = insn->off;
if (curr < pos && curr + off + 1 >= end_old)
off += delta;
@@ -381,8 +386,12 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
off -= delta;
if (off < off_min || off > off_max)
return -ERANGE;
- if (!probe_pass)
- insn->off = off;
+ if (!probe_pass) {
+ if (insn->code == (BPF_JMP32 | BPF_JA))
+ insn->imm = off;
+ else
+ insn->off = off;
+ }
return 0;
}
@@ -1593,6 +1602,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
INSN_3(JMP, JSLE, K), \
INSN_3(JMP, JSET, K), \
INSN_2(JMP, JA), \
+ INSN_2(JMP32, JA), \
/* Store instructions. */ \
/* Register based. */ \
INSN_3(STX, MEM, B), \
@@ -1989,6 +1999,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
JMP_JA:
insn += insn->off;
CONT;
+ JMP32_JA:
+ insn += insn->imm;
+ CONT;
JMP_EXIT:
return BPF_R0;
/* JMP */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9436fa795f39..daf23c5da2ee 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2855,7 +2855,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
- off = i + insn[i].off + 1;
+ if (code == (BPF_JMP32 | BPF_JA))
+ off = i + insn[i].imm + 1;
+ else
+ off = i + insn[i].off + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
@@ -2867,6 +2870,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
* or unconditional jump back
*/
if (code != (BPF_JMP | BPF_EXIT) &&
+ code != (BPF_JMP32 | BPF_JA) &&
code != (BPF_JMP | BPF_JA)) {
verbose(env, "last insn is not an exit or jmp\n");
return -EINVAL;
@@ -14792,7 +14796,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
static int visit_insn(int t, struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
- int ret;
+ int ret, off;
if (bpf_pseudo_func(insn))
return visit_func_call_insn(t, insns, env, true);
@@ -14840,14 +14844,19 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL;
+ if (BPF_CLASS(insn->code) == BPF_JMP)
+ off = insn->off;
+ else
+ off = insn->imm;
+
/* unconditional jump with single edge */
- ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
+ ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
true);
if (ret)
return ret;
- mark_prune_point(env, t + insn->off + 1);
- mark_jmp_point(env, t + insn->off + 1);
+ mark_prune_point(env, t + off + 1);
+ mark_jmp_point(env, t + off + 1);
return ret;
@@ -16643,15 +16652,18 @@ static int do_check(struct bpf_verifier_env *env)
mark_reg_scratched(env, BPF_REG_0);
} else if (opcode == BPF_JA) {
if (BPF_SRC(insn->code) != BPF_K ||
- insn->imm != 0 ||
insn->src_reg != BPF_REG_0 ||
insn->dst_reg != BPF_REG_0 ||
- class == BPF_JMP32) {
+ (class == BPF_JMP && insn->imm != 0) ||
+ (class == BPF_JMP32 && insn->off != 0)) {
verbose(env, "BPF_JA uses reserved fields\n");
return -EINVAL;
}
- env->insn_idx += insn->off + 1;
+ if (class == BPF_JMP)
+ env->insn_idx += insn->off + 1;
+ else
+ env->insn_idx += insn->imm + 1;
continue;
} else if (opcode == BPF_EXIT) {
@@ -17498,13 +17510,13 @@ static bool insn_is_cond_jump(u8 code)
{
u8 op;
+ op = BPF_OP(code);
if (BPF_CLASS(code) == BPF_JMP32)
- return true;
+ return op != BPF_JA;
if (BPF_CLASS(code) != BPF_JMP)
return false;
- op = BPF_OP(code);
return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (6 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 07/17] bpf: Support new 32bit offset jmp instruction Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-21 14:36 ` Quentin Monnet
2023-07-20 0:01 ` [PATCH bpf-next v3 09/17] selftests/bpf: Fix a test_verifier failure Yonghong Song
` (9 subsequent siblings)
17 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add asm support for the new instructions so that kernel verifier and
bpftool xlated insn dumps show proper asm syntax for them.
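Roughly, the resulting xlated dump lines look like the following
(illustrative examples only, with opcode bytes computed from the uapi
encoding, not taken from a real dump):

  (bf) r3 = (s8)r4
  (d7) r1 = bswap64 r1
  (3f) r2 s/= r5
  (9f) r2 s%= r5
  (06) gotol pc+70000

i.e. sign-extending mov, unconditional bswap, signed div/mod and the
32-bit offset jump, respectively.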
Signed-off-by: Yonghong Song <yhs@fb.com>
---
kernel/bpf/disasm.c | 57 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 51 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 7b4afb7d96db..d7bff608f299 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -87,6 +87,17 @@ const char *const bpf_alu_string[16] = {
[BPF_END >> 4] = "endian",
};
+const char *const bpf_alu_sign_string[16] = {
+ [BPF_DIV >> 4] = "s/=",
+ [BPF_MOD >> 4] = "s%=",
+};
+
+const char *const bpf_movsx_string[4] = {
+ [0] = "(s8)",
+ [1] = "(s16)",
+ [3] = "(s32)",
+};
+
static const char *const bpf_atomic_alu_string[16] = {
[BPF_ADD >> 4] = "add",
[BPF_AND >> 4] = "and",
@@ -101,6 +112,12 @@ static const char *const bpf_ldst_string[] = {
[BPF_DW >> 3] = "u64",
};
+static const char *const bpf_ldsx_string[] = {
+ [BPF_W >> 3] = "s32",
+ [BPF_H >> 3] = "s16",
+ [BPF_B >> 3] = "s8",
+};
+
static const char *const bpf_jmp_string[16] = {
[BPF_JA >> 4] = "jmp",
[BPF_JEQ >> 4] = "==",
@@ -128,6 +145,26 @@ static void print_bpf_end_insn(bpf_insn_print_t verbose,
insn->imm, insn->dst_reg);
}
+static void print_bpf_bswap_insn(bpf_insn_print_t verbose,
+ void *private_data,
+ const struct bpf_insn *insn)
+{
+ verbose(private_data, "(%02x) r%d = bswap%d r%d\n",
+ insn->code, insn->dst_reg,
+ insn->imm, insn->dst_reg);
+}
+
+static bool is_sdiv_smod(const struct bpf_insn *insn)
+{
+ return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&
+ insn->off == 1;
+}
+
+static bool is_movsx(const struct bpf_insn *insn)
+{
+ return BPF_OP(insn->code) == BPF_MOV && insn->off != 0;
+}
+
void print_bpf_insn(const struct bpf_insn_cbs *cbs,
const struct bpf_insn *insn,
bool allow_ptr_leaks)
@@ -138,7 +175,7 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_OP(insn->code) == BPF_END) {
if (class == BPF_ALU64)
- verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code);
+ print_bpf_bswap_insn(verbose, cbs->private_data, insn);
else
print_bpf_end_insn(verbose, cbs->private_data, insn);
} else if (BPF_OP(insn->code) == BPF_NEG) {
@@ -147,17 +184,20 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg);
} else if (BPF_SRC(insn->code) == BPF_X) {
- verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n",
+ verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n",
insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
- bpf_alu_string[BPF_OP(insn->code) >> 4],
+ is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
+ : bpf_alu_string[BPF_OP(insn->code) >> 4],
+ is_movsx(insn) ? bpf_movsx_string[(insn->off >> 3) - 1] : "",
class == BPF_ALU ? 'w' : 'r',
insn->src_reg);
} else {
verbose(cbs->private_data, "(%02x) %c%d %s %d\n",
insn->code, class == BPF_ALU ? 'w' : 'r',
insn->dst_reg,
- bpf_alu_string[BPF_OP(insn->code) >> 4],
+ is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
+ : bpf_alu_string[BPF_OP(insn->code) >> 4],
insn->imm);
}
} else if (class == BPF_STX) {
@@ -218,13 +258,15 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
}
} else if (class == BPF_LDX) {
- if (BPF_MODE(insn->code) != BPF_MEM) {
+ if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {
verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
return;
}
verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
- bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+ BPF_MODE(insn->code) == BPF_MEM ?
+ bpf_ldst_string[BPF_SIZE(insn->code) >> 3] :
+ bpf_ldsx_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
@@ -279,6 +321,9 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose(cbs->private_data, "(%02x) goto pc%+d\n",
insn->code, insn->off);
+ } else if (insn->code == (BPF_JMP32 | BPF_JA)) {
+ verbose(cbs->private_data, "(%02x) gotol pc%+d\n",
+ insn->code, insn->imm);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose(cbs->private_data, "(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions
2023-07-20 0:01 ` [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions Yonghong Song
@ 2023-07-21 14:36 ` Quentin Monnet
2023-07-21 14:36 ` [Bpf] " Quentin Monnet
0 siblings, 1 reply; 30+ messages in thread
From: Quentin Monnet @ 2023-07-21 14:36 UTC (permalink / raw)
To: Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
2023-07-19 17:01 UTC-0700 ~ Yonghong Song <yhs@fb.com>
> Add asm support for new instructions so kernel verifier and bpftool
> xlated insn dumps can have proper asm syntax for new instructions.
>
> Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Quentin Monnet <quentin@isovalent.com>
Thanks!
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 09/17] selftests/bpf: Fix a test_verifier failure
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (7 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:01 ` [PATCH bpf-next v3 10/17] selftests/bpf: Add a cpuv4 test runner for cpu=v4 testing Yonghong Song
` (8 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
The following test_verifier subtest failed due to
the new encoding for BSWAP.
$ ./test_verifier
...
#99/u invalid 64-bit BPF_END FAIL
Unexpected success to load!
verification time 215 usec
stack depth 0
processed 3 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
#99/p invalid 64-bit BPF_END FAIL
Unexpected success to load!
verification time 198 usec
stack depth 0
processed 3 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
Tighten the test so it still reports a failure.
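The new expected error string follows directly from the encoding; a quick
check of the opcode bytes using the uapi definitions:

#include <stdio.h>
#include <linux/bpf.h>

int main(void)
{
        /* prints "d7 df": TO_LE keeps opcode 0xd7, TO_BE yields 0xdf */
        printf("%02x %02x\n",
               BPF_ALU64 | BPF_END | BPF_TO_LE,
               BPF_ALU64 | BPF_END | BPF_TO_BE);
        return 0;
}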
Signed-off-by: Yonghong Song <yhs@fb.com>
---
tools/testing/selftests/bpf/verifier/basic_instr.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index 071dbc889e8c..bd928a72ad73 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -176,11 +176,11 @@
.retval = 1,
},
{
- "invalid 64-bit BPF_END",
+ "invalid 64-bit BPF_END with BPF_TO_BE",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 0),
{
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+ .code = BPF_ALU64 | BPF_END | BPF_TO_BE,
.dst_reg = BPF_REG_0,
.src_reg = 0,
.off = 0,
@@ -188,7 +188,7 @@
},
BPF_EXIT_INSN(),
},
- .errstr = "unknown opcode d7",
+ .errstr = "unknown opcode df",
.result = REJECT,
},
{
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 10/17] selftests/bpf: Add a cpuv4 test runner for cpu=v4 testing
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (8 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 09/17] selftests/bpf: Fix a test_verifier failure Yonghong Song
@ 2023-07-20 0:01 ` Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns Yonghong Song
` (7 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:01 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Similar to the no-alu32 runner, a cpuv4 runner is created to test
bpf programs compiled with -mcpu=v4.
The following are instruction-count statistics for each new
instruction, based on existing selftests and excluding the subsequent
cpuv4-insn-specific tests.
  insn pattern                   # of instructions
  reg = (s8)reg                                  4
  reg = (s16)reg                                 4
  reg = (s32)reg                               144
  reg = *(s8 *)(reg + off)                      13
  reg = *(s16 *)(reg + off)                     14
  reg = *(s32 *)(reg + off)                  15215
  reg = bswap16 reg                            142
  reg = bswap32 reg                             38
  reg = bswap64 reg                             14
  reg s/= reg                                    0
  reg s%= reg                                    0
  gotol <offset>                                58
Note that in the llvm -mcpu=v4 implementation, the compiler is a little
conservative about generating the 'gotol' insn (32-bit branch offset)
since it does not precisely count the number of insns (e.g., some insns
are debug insns, etc.). The newer 'gotol' insn should have verification
states comparable to the old 'goto' insn.
With the current patch set, all selftests passed with -mcpu=v4
when running the test_progs-cpuv4 binary. The -mcpu=v3 and -mcpu=v2 runs
are also successful.
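For instance, building and running the new flavor from the selftests
directory would look roughly like this (paths assumed; requires a clang
new enough to support -mcpu=v4):

  $ cd tools/testing/selftests/bpf
  $ make test_progs-cpuv4
  $ ./test_progs-cpuv4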
Signed-off-by: Yonghong Song <yhs@fb.com>
---
tools/testing/selftests/bpf/.gitignore | 2 ++
tools/testing/selftests/bpf/Makefile | 18 ++++++++++++++----
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 116fecf80ca1..110518ba4804 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -13,6 +13,7 @@ test_dev_cgroup
/test_progs
/test_progs-no_alu32
/test_progs-bpf_gcc
+/test_progs-cpuv4
test_verifier_log
feature
test_sock
@@ -36,6 +37,7 @@ test_cpp
*.lskel.h
/no_alu32
/bpf_gcc
+/cpuv4
/host-tools
/tools
/runqslower
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 882be03b179f..e14da9a4705b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -44,7 +44,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_sockmap get_cgroup_id_user \
test_cgroup_storage \
test_tcpnotify_user test_sysctl \
- test_progs-no_alu32
+ test_progs-no_alu32 test_progs-cpuv4
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
@@ -383,6 +383,11 @@ define CLANG_NOALU32_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
endef
+# Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
+define CLANG_CPUV4_BPF_BUILD_RULE
+ $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
+ $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
+endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
$(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
@@ -425,7 +430,7 @@ LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(ske
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
# Parameters:
# $1 - test runner base binary name (e.g., test_progs)
-# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
define DEFINE_TEST_RUNNER
TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2
@@ -453,7 +458,7 @@ endef
# Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and
# set up by DEFINE_TEST_RUNNER itself, create test runner build rules with:
# $1 - test runner base binary name (e.g., test_progs)
-# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc)
+# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc)
define DEFINE_TEST_RUNNER_RULES
ifeq ($($(TRUNNER_OUTPUT)-dir),)
@@ -584,6 +589,11 @@ TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
$(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
+# Define test_progs-cpuv4 test runner.
+TRUNNER_BPF_BUILD_RULE := CLANG_CPUV4_BPF_BUILD_RULE
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
+$(eval $(call DEFINE_TEST_RUNNER,test_progs,cpuv4))
+
# Define test_progs BPF-GCC-flavored test runner.
ifneq ($(BPF_GCC),)
TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE
@@ -681,7 +691,7 @@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
- no_alu32 bpf_gcc bpf_testmod.ko \
+ no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
liburandom_read.so)
.PHONY: docs docs-clean
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (9 preceding siblings ...)
2023-07-20 0:01 ` [PATCH bpf-next v3 10/17] selftests/bpf: Add a cpuv4 test runner for cpu=v4 testing Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 6:31 ` [Bpf] " Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 12/17] selftests/bpf: Add unit tests for new sign-extension mov insns Yonghong Song
` (6 subsequent siblings)
17 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add unit tests for new ldsx insns. The test includes sign-extension
with a single value or with a value range.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/prog_tests/verifier.c | 2 +
.../selftests/bpf/progs/verifier_ldsx.c | 117 ++++++++++++++++++
2 files changed, 119 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_ldsx.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index c375e59ff28d..6eec6a9463c8 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -31,6 +31,7 @@
#include "verifier_int_ptr.skel.h"
#include "verifier_jeq_infer_not_null.skel.h"
#include "verifier_ld_ind.skel.h"
+#include "verifier_ldsx.skel.h"
#include "verifier_leak_ptr.skel.h"
#include "verifier_loops1.skel.h"
#include "verifier_lwt.skel.h"
@@ -133,6 +134,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access
void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
+void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
void test_verifier_loops1(void) { RUN(verifier_loops1); }
void test_verifier_lwt(void) { RUN(verifier_lwt); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
new file mode 100644
index 000000000000..4163e9ffffe9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("LDSX, S8")
+__success __success_unpriv __retval(-2)
+__naked void ldsx_s8(void)
+{
+ asm volatile (" \
+ r1 = 0x3fe; \
+ *(u64 *)(r10 - 8) = r1; \
+ r0 = *(s8 *)(r10 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S16")
+__success __success_unpriv __retval(-2)
+__naked void ldsx_s16(void)
+{
+ asm volatile (" \
+ r1 = 0x3fffe; \
+ *(u64 *)(r10 - 8) = r1; \
+ r0 = *(s16 *)(r10 - 8); \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S32")
+__success __success_unpriv __retval(-1)
+__naked void ldsx_s32(void)
+{
+ asm volatile (" \
+ r1 = 0xfffffffe; \
+ *(u64 *)(r10 - 8) = r1; \
+ r0 = *(s32 *)(r10 - 8); \
+ r0 >>= 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S8 range checking, privileged")
+__log_level(2) __success __retval(1)
+__msg("R1_w=scalar(smin=-128,smax=127)")
+__naked void ldsx_s8_range_priv(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ *(u64 *)(r10 - 8) = r0; \
+ r1 = *(s8 *)(r10 - 8); \
+ /* r1 with s8 range */ \
+ if r1 s> 0x7f goto l0_%=; \
+ if r1 s< -0x80 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S16 range checking")
+__success __success_unpriv __retval(1)
+__naked void ldsx_s16_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ *(u64 *)(r10 - 8) = r0; \
+ r1 = *(s16 *)(r10 - 8); \
+ /* r1 with s16 range */ \
+ if r1 s> 0x7fff goto l0_%=; \
+ if r1 s< -0x8000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("LDSX, S32 range checking")
+__success __success_unpriv __retval(1)
+__naked void ldsx_s32_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ *(u64 *)(r10 - 8) = r0; \
+ r1 = *(s32 *)(r10 - 8); \
+ /* r1 with s32 range */ \
+ if r1 s> 0x7fffFFFF goto l0_%=; \
+ if r1 s< -0x80000000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [Bpf] [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns
2023-07-20 0:02 ` [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns Yonghong Song
@ 2023-07-20 6:31 ` Yonghong Song
2023-07-20 6:31 ` Yonghong Song
2023-07-20 6:36 ` [Bpf] " Yonghong Song
0 siblings, 2 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 6:31 UTC (permalink / raw)
To: Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
On 7/19/23 5:02 PM, Yonghong Song wrote:
> Add unit tests for new ldsx insns. The test includes sign-extension
> with a single value or with a value range.
>
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
> .../selftests/bpf/prog_tests/verifier.c | 2 +
> .../selftests/bpf/progs/verifier_ldsx.c | 117 ++++++++++++++++++
> 2 files changed, 119 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/progs/verifier_ldsx.c
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
> index c375e59ff28d..6eec6a9463c8 100644
> --- a/tools/testing/selftests/bpf/prog_tests/verifier.c
> +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
> @@ -31,6 +31,7 @@
> #include "verifier_int_ptr.skel.h"
> #include "verifier_jeq_infer_not_null.skel.h"
> #include "verifier_ld_ind.skel.h"
> +#include "verifier_ldsx.skel.h"
> #include "verifier_leak_ptr.skel.h"
> #include "verifier_loops1.skel.h"
> #include "verifier_lwt.skel.h"
> @@ -133,6 +134,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access
> void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); }
> void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); }
> void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
> +void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
> void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
> void test_verifier_loops1(void) { RUN(verifier_loops1); }
> void test_verifier_lwt(void) { RUN(verifier_lwt); }
> diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
> new file mode 100644
> index 000000000000..4163e9ffffe9
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
> @@ -0,0 +1,117 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/bpf.h>
> +#include <bpf/bpf_helpers.h>
> +#include "bpf_misc.h"
> +
> +SEC("socket")
> +__description("LDSX, S8")
> +__success __success_unpriv __retval(-2)
> +__naked void ldsx_s8(void)
> +{
> + asm volatile (" \
> + r1 = 0x3fe; \
> + *(u64 *)(r10 - 8) = r1; \
> + r0 = *(s8 *)(r10 - 8); \
> + exit; \
> +" ::: __clobber_all);
Looks like latest llvm17 is okay with the above asm syntax
but llvm16 is not okay.
https://github.com/kernel-patches/bpf/pull/5377
Will check and fix the problem in the next revision.
> +}
> +
> +SEC("socket")
> +__description("LDSX, S16")
> +__success __success_unpriv __retval(-2)
> +__naked void ldsx_s16(void)
> +{
> + asm volatile (" \
> + r1 = 0x3fffe; \
> + *(u64 *)(r10 - 8) = r1; \
> + r0 = *(s16 *)(r10 - 8); \
> + exit; \
> +" ::: __clobber_all);
> +}
> +
[...]
--
Bpf mailing list
Bpf@ietf.org
https://www.ietf.org/mailman/listinfo/bpf
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [Bpf] [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns
2023-07-20 6:31 ` [Bpf] " Yonghong Song
2023-07-20 6:31 ` Yonghong Song
@ 2023-07-20 6:36 ` Yonghong Song
2023-07-20 6:36 ` Yonghong Song
1 sibling, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 6:36 UTC (permalink / raw)
To: Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
On 7/19/23 11:31 PM, Yonghong Song wrote:
>
>
> On 7/19/23 5:02 PM, Yonghong Song wrote:
>> Add unit tests for new ldsx insns. The test includes sign-extension
>> with a single value or with a value range.
>>
>> Signed-off-by: Yonghong Song <yhs@fb.com>
>> ---
>> .../selftests/bpf/prog_tests/verifier.c | 2 +
>> .../selftests/bpf/progs/verifier_ldsx.c | 117 ++++++++++++++++++
>> 2 files changed, 119 insertions(+)
>> create mode 100644 tools/testing/selftests/bpf/progs/verifier_ldsx.c
>>
>> diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c
>> b/tools/testing/selftests/bpf/prog_tests/verifier.c
>> index c375e59ff28d..6eec6a9463c8 100644
>> --- a/tools/testing/selftests/bpf/prog_tests/verifier.c
>> +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
>> @@ -31,6 +31,7 @@
>> #include "verifier_int_ptr.skel.h"
>> #include "verifier_jeq_infer_not_null.skel.h"
>> #include "verifier_ld_ind.skel.h"
>> +#include "verifier_ldsx.skel.h"
>> #include "verifier_leak_ptr.skel.h"
>> #include "verifier_loops1.skel.h"
>> #include "verifier_lwt.skel.h"
>> @@ -133,6 +134,7 @@ void test_verifier_helper_value_access(void) {
>> RUN(verifier_helper_value_access
>> void test_verifier_int_ptr(void) {
>> RUN(verifier_int_ptr); }
>> void test_verifier_jeq_infer_not_null(void) {
>> RUN(verifier_jeq_infer_not_null); }
>> void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
>> +void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
>> void test_verifier_leak_ptr(void) {
>> RUN(verifier_leak_ptr); }
>> void test_verifier_loops1(void) { RUN(verifier_loops1); }
>> void test_verifier_lwt(void) { RUN(verifier_lwt); }
>> diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
>> b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
>> new file mode 100644
>> index 000000000000..4163e9ffffe9
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
>> @@ -0,0 +1,117 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +
>> +#include <linux/bpf.h>
>> +#include <bpf/bpf_helpers.h>
>> +#include "bpf_misc.h"
>> +
>> +SEC("socket")
>> +__description("LDSX, S8")
>> +__success __success_unpriv __retval(-2)
>> +__naked void ldsx_s8(void)
>> +{
>> + asm volatile (" \
>> + r1 = 0x3fe; \
>> + *(u64 *)(r10 - 8) = r1; \
>> + r0 = *(s8 *)(r10 - 8); \
>> + exit; \
>> +" ::: __clobber_all);
>
> It looks like the latest llvm17 is okay with the above asm syntax,
> but llvm16 is not:
>
> https://github.com/kernel-patches/bpf/pull/5377
>
> I will check and fix the problem in the next revision.
I may need to guard this and other similar tests
based on the llvm compiler version.
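For illustration, one shape such a guard could take (an editor's sketch, not
part of this series; the clang version cutoff and the idea of falling back to
a stub that still returns the expected value are assumptions based on the
llvm16/llvm17 observation above):

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical guard sketch for verifier_ldsx.c-style tests. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("LDSX, S8")
__success __success_unpriv __retval(-2)
__naked void ldsx_s8(void)
{
#if defined(__clang__) && __clang_major__ >= 17
	/* new-syntax sign-extending load, assumed accepted by llvm17+ */
	asm volatile ("					\
	r1 = 0x3fe;					\
	*(u64 *)(r10 - 8) = r1;				\
	r0 = *(s8 *)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
#else
	/* stub for older compilers: return the same value directly */
	asm volatile ("					\
	r0 = -2;					\
	exit;						\
"	::: __clobber_all);
#endif
}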
>
>
>> +}
>> +
>> +SEC("socket")
>> +__description("LDSX, S16")
>> +__success __success_unpriv __retval(-2)
>> +__naked void ldsx_s16(void)
>> +{
>> + asm volatile (" \
>> + r1 = 0x3fffe; \
>> + *(u64 *)(r10 - 8) = r1; \
>> + r0 = *(s16 *)(r10 - 8); \
>> + exit; \
>> +" ::: __clobber_all);
>> +}
>> +
> [...]
* [PATCH bpf-next v3 12/17] selftests/bpf: Add unit tests for new sign-extension mov insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (10 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 13/17] selftests/bpf: Add unit tests for new bswap insns Yonghong Song
` (5 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add unit tests for movsx insns.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/prog_tests/verifier.c | 2 +
.../selftests/bpf/progs/verifier_movsx.c | 199 ++++++++++++++++++
2 files changed, 201 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_movsx.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 6eec6a9463c8..037af7799cdf 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -41,6 +41,7 @@
#include "verifier_map_ret_val.skel.h"
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
+#include "verifier_movsx.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
#include "verifier_prevent_map_lookup.skel.h"
@@ -144,6 +145,7 @@ void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); }
void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); }
void test_verifier_masking(void) { RUN(verifier_masking); }
void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
+void test_verifier_movsx(void) { RUN(verifier_movsx); }
void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
new file mode 100644
index 000000000000..5ee7d004f8ba
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("MOV32SX, S8")
+__success __success_unpriv __retval(0x23)
+__naked void mov32sx_s8(void)
+{
+ asm volatile (" \
+ w0 = 0xff23; \
+ w0 = (s8)w0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16")
+__success __success_unpriv __retval(0xFFFFff23)
+__naked void mov32sx_s16(void)
+{
+ asm volatile (" \
+ w0 = 0xff23; \
+ w0 = (s16)w0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S8")
+__success __success_unpriv __retval(-2)
+__naked void mov64sx_s8(void)
+{
+ asm volatile (" \
+ r0 = 0x1fe; \
+ r0 = (s8)r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S16")
+__success __success_unpriv __retval(0xf23)
+__naked void mov64sx_s16(void)
+{
+ asm volatile (" \
+ r0 = 0xf0f23; \
+ r0 = (s16)r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S32")
+__success __success_unpriv __retval(-1)
+__naked void mov64sx_s32(void)
+{
+ asm volatile (" \
+ r0 = 0xfffffffe; \
+ r0 = (s32)w0; \
+ r0 >>= 1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S8, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s8_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w1 = (s8)w0; \
+ /* w1 with s8 range */ \
+ if w1 s> 0x7f goto l0_%=; \
+ if w1 s< -0x80 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s16_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ w1 = (s16)w0; \
+ /* w1 with s16 range */ \
+ if w1 s> 0x7fff goto l0_%=; \
+ if w1 s< -0x80ff goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV32SX, S16, range_check 2")
+__success __success_unpriv __retval(1)
+__naked void mov32sx_s16_range_2(void)
+{
+ asm volatile (" \
+ r1 = 65535; \
+ w2 = (s16)w1; \
+ r2 >>= 1; \
+ if r2 != 0x7fffFFFF goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 0; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S8, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s8_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s8)r0; \
+ /* r1 with s8 range */ \
+ if r1 s> 0x7f goto l0_%=; \
+ if r1 s< -0x80 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S16, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s16_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s16)r0; \
+ /* r1 with s16 range */ \
+ if r1 s> 0x7fff goto l0_%=; \
+ if r1 s< -0x8000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("MOV64SX, S32, range_check")
+__success __success_unpriv __retval(1)
+__naked void mov64sx_s32_range(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r1 = (s32)w0; \
+ /* r1 with s32 range */ \
+ if r1 s> 0x7fffffff goto l0_%=; \
+ if r1 s< -0x80000000 goto l0_%=; \
+ r0 = 1; \
+l1_%=: \
+ exit; \
+l0_%=: \
+ r0 = 2; \
+ goto l1_%=; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
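As an editor's aside (not part of the patch): the naked-asm casts above are
the assembly-level counterparts of ordinary C sign extensions, so a C-level
analogue of the "MOV64SX, S32, range_check" test could look like the sketch
below. Whether the cast is really lowered to the new movsx instruction
depends on the compiler and its cpu v4 support, so the mapping is an
assumption, not a guarantee.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int movsx_s32_range_c(void *ctx)
{
	/* sign extend a random u32 to 64 bits; with -mcpu=v4 this cast is
	 * a natural candidate for MOV64SX, S32
	 */
	long long v = (int)bpf_get_prandom_u32();

	/* never taken at runtime; with s32 range tracking the verifier
	 * can prove it
	 */
	if (v > 0x7fffffffLL || v < -0x80000000LL)
		return 2;
	return 1;
}

char _license[] SEC("license") = "GPL";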
* [PATCH bpf-next v3 13/17] selftests/bpf: Add unit tests for new bswap insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (11 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 12/17] selftests/bpf: Add unit tests for new sign-extension mov insns Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 14/17] selftests/bpf: Add unit tests for new sdiv/smod insns Yonghong Song
` (4 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add unit tests for bswap insns.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/prog_tests/verifier.c | 2 +
.../selftests/bpf/progs/verifier_bswap.c | 45 +++++++++++++++++++
2 files changed, 47 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_bswap.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 037af7799cdf..885532540bc3 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -11,6 +11,7 @@
#include "verifier_bounds_deduction_non_const.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_bpf_get_stack.skel.h"
+#include "verifier_bswap.skel.h"
#include "verifier_btf_ctx_access.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
@@ -115,6 +116,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction);
void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
+void test_verifier_bswap(void) { RUN(verifier_bswap); }
void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
new file mode 100644
index 000000000000..b85474a46296
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("BSWAP, 16")
+__success __success_unpriv __retval(0x23ff)
+__naked void bswap_16(void)
+{
+ asm volatile (" \
+ r0 = 0xff23; \
+ r0 = bswap16 r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("BSWAP, 32")
+__success __success_unpriv __retval(0x23ff0000)
+__naked void bswap_32(void)
+{
+ asm volatile (" \
+ r0 = 0xff23; \
+ r0 = bswap32 r0; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("BSWAP, 64")
+__success __success_unpriv __retval(0x34ff12ff)
+__naked void bswap_64(void)
+{
+ asm volatile (" \
+ r0 = %[u64_val] ll; \
+ r0 = bswap64 r0; \
+ exit; \
+" :
+ : [u64_val]"i"(0xff12ff34ff56ff78ull)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
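(Editor's aside, not part of the patch: the expected values above can be
cross-checked on the host with the compiler byte-swap builtins. Note that the
"BSWAP, 64" test compares only the low 32 bits of the swapped value, which is
why its __retval is 0x34ff12ff.)

#include <assert.h>
#include <stdint.h>

/* Host-side sanity check of the constants used in the bswap tests above. */
int main(void)
{
	assert(__builtin_bswap16((uint16_t)0xff23) == 0x23ff);
	assert(__builtin_bswap32((uint32_t)0xff23) == 0x23ff0000u);
	/* full result is 0x78ff56ff34ff12ff; low 32 bits are 0x34ff12ff */
	assert(__builtin_bswap64(0xff12ff34ff56ff78ull) ==
	       0x78ff56ff34ff12ffull);
	return 0;
}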
* [PATCH bpf-next v3 14/17] selftests/bpf: Add unit tests for new sdiv/smod insns
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (12 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 13/17] selftests/bpf: Add unit tests for new bswap insns Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 15/17] selftests/bpf: Add unit tests for new gotol insn Yonghong Song
` (3 subsequent siblings)
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add unit tests for sdiv/smod insns.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/prog_tests/verifier.c | 2 +
.../selftests/bpf/progs/verifier_sdiv.c | 767 ++++++++++++++++++
2 files changed, 769 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_sdiv.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 885532540bc3..a591d7b673f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -54,6 +54,7 @@
#include "verifier_ringbuf.skel.h"
#include "verifier_runtime_jit.skel.h"
#include "verifier_scalar_ids.skel.h"
+#include "verifier_sdiv.skel.h"
#include "verifier_search_pruning.skel.h"
#include "verifier_sock.skel.h"
#include "verifier_spill_fill.skel.h"
@@ -159,6 +160,7 @@ void test_verifier_regalloc(void) { RUN(verifier_regalloc); }
void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); }
void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); }
void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); }
+void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
new file mode 100644
index 000000000000..381b0d73fd58
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 7")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_imm_7(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero imm divisor, check 8")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_imm_8(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv32_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv32_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = -2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 7")
+__success __success_unpriv __retval(21)
+__naked void sdiv32_non_zero_reg_7(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, non-zero reg divisor, check 8")
+__success __success_unpriv __retval(20)
+__naked void sdiv32_non_zero_reg_8(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = 2; \
+ w0 s/= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv64_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s/= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv64_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s/= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = 2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(-20)
+__naked void sdiv64_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(20)
+__naked void sdiv64_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = 2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(-21)
+__naked void sdiv64_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(21)
+__naked void sdiv64_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = -2; \
+ r0 s/= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod32_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = 2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod32_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ w0 = 41; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod32_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ w0 = -41; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = 2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod32_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ w0 = -42; \
+ w1 = -2; \
+ w0 s%%= w1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_imm_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_imm_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_imm_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 5")
+__success __success_unpriv __retval(-0)
+__naked void smod64_non_zero_imm_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r0 s%%= -2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 7")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_imm_7(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero imm divisor, check 8")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_imm_8(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r0 s%%= 2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 1")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_reg_1(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 2")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_reg_2(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 3")
+__success __success_unpriv __retval(-1)
+__naked void smod64_non_zero_reg_3(void)
+{
+ asm volatile (" \
+ r0 = -41; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 4")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_4(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 5")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_5(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 6")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_6(void)
+{
+ asm volatile (" \
+ r0 = -42; \
+ r1 = -2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 7")
+__success __success_unpriv __retval(0)
+__naked void smod64_non_zero_reg_7(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, non-zero reg divisor, check 8")
+__success __success_unpriv __retval(1)
+__naked void smod64_non_zero_reg_8(void)
+{
+ asm volatile (" \
+ r0 = 41; \
+ r1 = 2; \
+ r0 s%%= r1; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV32, zero divisor")
+__success __success_unpriv __retval(0)
+__naked void sdiv32_zero_divisor(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = -1; \
+ w2 s/= w1; \
+ w0 = w2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SDIV64, zero divisor")
+__success __success_unpriv __retval(0)
+__naked void sdiv64_zero_divisor(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 0; \
+ r2 = -1; \
+ r2 s/= r1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD32, zero divisor")
+__success __success_unpriv __retval(-1)
+__naked void smod32_zero_divisor(void)
+{
+ asm volatile (" \
+ w0 = 42; \
+ w1 = 0; \
+ w2 = -1; \
+ w2 s%%= w1; \
+ w0 = w2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+SEC("socket")
+__description("SMOD64, zero divisor")
+__success __success_unpriv __retval(-1)
+__naked void smod64_zero_divisor(void)
+{
+ asm volatile (" \
+ r0 = 42; \
+ r1 = 0; \
+ r2 = -1; \
+ r2 s%%= r1; \
+ r0 = r2; \
+ exit; \
+" ::: __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
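One property the retval annotations above pin down is the rounding and sign
behaviour: BPF signed division truncates toward zero and the remainder takes
the sign of the dividend, matching C. The zero-divisor tests at the end rely
on BPF-defined results (0 for s/, the unchanged dividend for s%), which plain
C does not have, so only the non-zero cases are mirrored in this small
host-side check (an editor's illustration, not part of the patch):

#include <assert.h>

int main(void)
{
	assert(-41 / 2 == -20);   /* truncation toward zero, not flooring */
	assert(41 / -2 == -20);
	assert(-41 % 2 == -1);    /* remainder keeps the dividend's sign  */
	assert(41 % -2 == 1);
	return 0;
}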
* [PATCH bpf-next v3 15/17] selftests/bpf: Add unit tests for new gotol insn
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (13 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 14/17] selftests/bpf: Add unit tests for new sdiv/smod insns Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 0:02 ` [Bpf] " Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 16/17] selftests/bpf: Test ldsx with more complex cases Yonghong Song
` (2 subsequent siblings)
17 siblings, 1 reply; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add unit tests for gotol insn.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/prog_tests/verifier.c | 2 ++
.../selftests/bpf/progs/verifier_gotol.c | 30 +++++++++++++++++++
2 files changed, 32 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/verifier_gotol.c
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index a591d7b673f1..e3e68c97b40c 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -25,6 +25,7 @@
#include "verifier_direct_stack_access_wraparound.skel.h"
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
+#include "verifier_gotol.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
#include "verifier_helper_restricted.skel.h"
@@ -131,6 +132,7 @@ void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_acces
void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); }
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
+void test_verifier_gotol(void) { RUN(verifier_gotol); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
new file mode 100644
index 000000000000..78870ea4d468
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("socket")
+__description("gotol, small_imm")
+__success __success_unpriv __retval(1)
+__naked void gotol_small_imm(void)
+{
+ asm volatile (" \
+ call %[bpf_ktime_get_ns]; \
+ if r0 == 0 goto l0_%=; \
+ gotol l1_%=; \
+l2_%=: \
+ gotol l3_%=; \
+l1_%=: \
+ r0 = 1; \
+ gotol l2_%=; \
+l0_%=: \
+ r0 = 2; \
+l3_%=: \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
* [PATCH bpf-next v3 16/17] selftests/bpf: Test ldsx with more complex cases
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (14 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 15/17] selftests/bpf: Add unit tests for new gotol insn Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 0:02 ` [PATCH bpf-next v3 17/17] docs/bpf: Add documentation for new instructions Yonghong Song
2023-07-20 20:35 ` [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Eduard Zingerman
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
The following ldsx cases are tested:
- signed readonly map value
- read/write map value
- probed memory
- not-narrowed ctx field access
- narrowed ctx field access.
Without the proper verifier/jit handling from the previous patches, the test will fail.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
.../selftests/bpf/bpf_testmod/bpf_testmod.c | 9 +-
.../selftests/bpf/prog_tests/test_ldsx_insn.c | 124 ++++++++++++++++++
.../selftests/bpf/progs/test_ldsx_insn.c | 112 ++++++++++++++++
3 files changed, 244 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
create mode 100644 tools/testing/selftests/bpf/progs/test_ldsx_insn.c
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index a6f991b56345..cefc5dd72573 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -98,6 +98,12 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
return bpf_testmod_test_struct_arg_result;
}
+noinline int
+bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
+ bpf_testmod_test_struct_arg_result = a->a;
+ return bpf_testmod_test_struct_arg_result;
+}
+
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
@@ -240,7 +246,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
.off = off,
.len = len,
};
- struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
+ struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
struct bpf_testmod_struct_arg_3 *struct_arg3;
struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
@@ -259,6 +265,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
(void *)20, struct_arg4, 23);
+ (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
sizeof(int)), GFP_KERNEL);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
new file mode 100644
index 000000000000..8180a3bb3b52
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.*/
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_ldsx_insn.skel.h"
+
+static void test_map_val_and_probed_memory(void)
+{
+ struct test_ldsx_insn *skel;
+ int err;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.rdonly_map_prog, true);
+ bpf_program__set_autoload(skel->progs.map_val_prog, true);
+ bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto out;
+
+ err = test_ldsx_insn__attach(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__attach"))
+ goto out;
+
+ ASSERT_OK(trigger_module_test_read(256), "trigger_read");
+
+ ASSERT_EQ(skel->bss->done1, 1, "done1");
+ ASSERT_EQ(skel->bss->ret1, 1, "ret1");
+ ASSERT_EQ(skel->bss->done2, 1, "done2");
+ ASSERT_EQ(skel->bss->ret2, 1, "ret2");
+ ASSERT_EQ(skel->bss->int_member, -1, "int_member");
+
+out:
+ test_ldsx_insn__destroy(skel);
+}
+
+static void test_ctx_member_sign_ext(void)
+{
+ struct test_ldsx_insn *skel;
+ int err, fd, cgroup_fd;
+ char buf[16] = {0};
+ socklen_t optlen;
+
+ cgroup_fd = test__join_cgroup("/ldsx_test");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test"))
+ return;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ goto close_cgroup_fd;
+
+ bpf_program__set_autoload(skel->progs._getsockopt, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto destroy_skel;
+
+ skel->links._getsockopt =
+ bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+ goto destroy_skel;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket"))
+ goto destroy_skel;
+
+ optlen = sizeof(buf);
+ (void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen);
+
+ ASSERT_EQ(skel->bss->set_optlen, -1, "optlen");
+ ASSERT_EQ(skel->bss->set_retval, -1, "retval");
+
+ close(fd);
+destroy_skel:
+ test_ldsx_insn__destroy(skel);
+close_cgroup_fd:
+ close(cgroup_fd);
+}
+
+static void test_ctx_member_narrow_sign_ext(void)
+{
+ struct test_ldsx_insn *skel;
+ struct __sk_buff skb = {};
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+ int err, prog_fd;
+
+ skel = test_ldsx_insn__open();
+ if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs._tc, true);
+
+ err = test_ldsx_insn__load(skel);
+ if (!ASSERT_OK(err, "test_ldsx_insn__load"))
+ goto destroy_skel;
+
+ prog_fd = bpf_program__fd(skel->progs._tc);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+
+ ASSERT_EQ(skel->bss->set_mark, -2, "set_mark");
+
+destroy_skel:
+ test_ldsx_insn__destroy(skel);
+}
+
+void test_ldsx_insn(void)
+{
+ if (test__start_subtest("map_val and probed_memory"))
+ test_map_val_and_probed_memory();
+ if (test__start_subtest("ctx_member_sign_ext"))
+ test_ctx_member_sign_ext();
+ if (test__start_subtest("ctx_member_narrow_sign_ext"))
+ test_ctx_member_narrow_sign_ext();
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
new file mode 100644
index 000000000000..92394fe939f4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+volatile const short val1 = -1;
+volatile const int val2 = -1;
+short val3 = -1;
+int val4 = -1;
+int done1, done2, ret1, ret2;
+
+SEC("?raw_tp/sys_enter")
+int rdonly_map_prog(const void *ctx)
+{
+ if (done1)
+ return 0;
+
+ done1 = 1;
+ /* val1/val2 readonly map */
+ if (val1 == val2)
+ ret1 = 1;
+ return 0;
+
+}
+
+SEC("?raw_tp/sys_enter")
+int map_val_prog(const void *ctx)
+{
+ if (done2)
+ return 0;
+
+ done2 = 1;
+ /* val3/val4 regular read/write map */
+ if (val3 == val4)
+ ret2 = 1;
+ return 0;
+
+}
+
+struct bpf_testmod_struct_arg_1 {
+ int a;
+};
+
+long long int_member;
+
+SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
+int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
+{
+ /* probed memory access */
+ int_member = p->a;
+ return 0;
+}
+
+long long set_optlen, set_retval;
+
+SEC("?cgroup/getsockopt")
+int _getsockopt(volatile struct bpf_sockopt *ctx)
+{
+ int old_optlen, old_retval;
+
+ old_optlen = ctx->optlen;
+ old_retval = ctx->retval;
+
+ ctx->optlen = -1;
+ ctx->retval = -1;
+
+ /* sign extension for ctx member */
+ set_optlen = ctx->optlen;
+ set_retval = ctx->retval;
+
+ ctx->optlen = old_optlen;
+ ctx->retval = old_retval;
+
+ return 0;
+}
+
+long long set_mark;
+
+SEC("?tc")
+int _tc(volatile struct __sk_buff *skb)
+{
+ long long tmp_mark;
+ int old_mark;
+
+ old_mark = skb->mark;
+
+ skb->mark = 0xf6fe;
+
+ /* narrowed sign extension for ctx member */
+#if defined(__TARGET_ARCH_x86)
+ /* force narrow one-byte signed load. Otherwise, compiler may
+ * generate a 32-bit unsigned load followed by an s8 movsx.
+ */
+ asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
+ "%[tmp_mark] = r1"
+ : [tmp_mark]"=r"(tmp_mark)
+ : [ctx]"r"(skb),
+ [off_mark]"i"(offsetof(struct __sk_buff, mark))
+ : "r1");
+#else
+ tmp_mark = (char)skb->mark;
+#endif
+ set_mark = tmp_mark;
+
+ skb->mark = old_mark;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.34.1
* [PATCH bpf-next v3 17/17] docs/bpf: Add documentation for new instructions
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (15 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 16/17] selftests/bpf: Test ldsx with more complex cases Yonghong Song
@ 2023-07-20 0:02 ` Yonghong Song
2023-07-20 20:35 ` [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Eduard Zingerman
17 siblings, 0 replies; 30+ messages in thread
From: Yonghong Song @ 2023-07-20 0:02 UTC (permalink / raw)
To: bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
Add documentation in instruction-set.rst for the new instruction encodings
and their corresponding operations. Also remove the question
related to 'no BPF_SDIV' in bpf_design_QA.rst since we have a
BPF_SDIV insn now.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
Documentation/bpf/bpf_design_QA.rst | 5 -
.../bpf/standardization/instruction-set.rst | 105 ++++++++++++------
2 files changed, 70 insertions(+), 40 deletions(-)
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 38372a956d65..eb19c945f4d5 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -140,11 +140,6 @@ A: Because if we picked one-to-one relationship to x64 it would have made
it more complicated to support on arm64 and other archs. Also it
needs div-by-zero runtime check.
-Q: Why there is no BPF_SDIV for signed divide operation?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-A: Because it would be rarely used. llvm errors in such case and
-prints a suggestion to use unsigned divide instead.
-
Q: Why BPF has implicit prologue and epilogue?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A: Because architectures like sparc have register windows and in general
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 751e657973f0..88c5b9f39af2 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -154,24 +154,27 @@ otherwise identical operations.
The 'code' field encodes the operation as below, where 'src' and 'dst' refer
to the values of the source and destination registers, respectively.
-======== ===== ==========================================================
-code value description
-======== ===== ==========================================================
-BPF_ADD 0x00 dst += src
-BPF_SUB 0x10 dst -= src
-BPF_MUL 0x20 dst \*= src
-BPF_DIV 0x30 dst = (src != 0) ? (dst / src) : 0
-BPF_OR 0x40 dst \|= src
-BPF_AND 0x50 dst &= src
-BPF_LSH 0x60 dst <<= (src & mask)
-BPF_RSH 0x70 dst >>= (src & mask)
-BPF_NEG 0x80 dst = -src
-BPF_MOD 0x90 dst = (src != 0) ? (dst % src) : dst
-BPF_XOR 0xa0 dst ^= src
-BPF_MOV 0xb0 dst = src
-BPF_ARSH 0xc0 sign extending dst >>= (src & mask)
-BPF_END 0xd0 byte swap operations (see `Byte swap instructions`_ below)
-======== ===== ==========================================================
+======== ===== ======= ==========================================================
+code value offset description
+======== ===== ======= ==========================================================
+BPF_ADD 0x00 0 dst += src
+BPF_SUB 0x10 0 dst -= src
+BPF_MUL 0x20 0 dst \*= src
+BPF_DIV 0x30 0 dst = (src != 0) ? (dst / src) : 0
+BPF_SDIV 0x30 1 dst = (src != 0) ? (dst s/ src) : 0
+BPF_OR 0x40 0 dst \|= src
+BPF_AND 0x50 0 dst &= src
+BPF_LSH 0x60 0 dst <<= (src & mask)
+BPF_RSH 0x70 0 dst >>= (src & mask)
+BPF_NEG 0x80 0 dst = -src
+BPF_MOD 0x90 0 dst = (src != 0) ? (dst % src) : dst
+BPF_SMOD 0x90 1 dst = (src != 0) ? (dst s% src) : dst
+BPF_XOR 0xa0 0 dst ^= src
+BPF_MOV 0xb0 0 dst = src
+BPF_MOVSX 0xb0 8/16/32 dst = (s8,s16,s32)src
+BPF_ARSH 0xc0 0 sign extending dst >>= (src & mask)
+BPF_END 0xd0 0 byte swap operations (see `Byte swap instructions`_ below)
+======== ===== ======= ==========================================================
Underflow and overflow are allowed during arithmetic operations, meaning
the 64-bit or 32-bit value will wrap. If eBPF program execution would
@@ -198,11 +201,20 @@ where '(u32)' indicates that the upper 32 bits are zeroed.
dst = dst ^ imm32
-Also note that the division and modulo operations are unsigned. Thus, for
-``BPF_ALU``, 'imm' is first interpreted as an unsigned 32-bit value, whereas
-for ``BPF_ALU64``, 'imm' is first sign extended to 64 bits and the result
-interpreted as an unsigned 64-bit value. There are no instructions for
-signed division or modulo.
+Note that most instructions have an instruction offset of 0, but three
+instructions (BPF_SDIV, BPF_SMOD, BPF_MOVSX) have a non-zero offset.
+
+The division and modulo operations support both unsigned and signed flavors.
+For unsigned operation (BPF_DIV and BPF_MOD), for ``BPF_ALU``, 'imm' is first
+interpreted as an unsigned 32-bit value, whereas for ``BPF_ALU64``, 'imm' is
+first sign extended to 64 bits and the result interpreted as an unsigned 64-bit
+value. For signed operation (BPF_SDIV and BPF_SMOD), for ``BPF_ALU``, 'imm' is
+interpreted as a signed value. For ``BPF_ALU64``, the 'imm' is sign extended
+from 32 to 64 and interpreted as a signed 64-bit value.
+
+The BPF_MOVSX instruction does a move operation with sign extension.
+``BPF_ALU | MOVSX`` sign extends 8-bit and 16-bit operands into 32 bits and zeroes the upper 32 bits.
+``BPF_ALU64 | MOVSX`` sign extends 8-bit, 16-bit and 32-bit operands into 64 bits.
Shift operations use a mask of 0x3F (63) for 64-bit operations and 0x1F (31)
for 32-bit operations.
@@ -210,21 +222,23 @@ for 32-bit operations.
Byte swap instructions
~~~~~~~~~~~~~~~~~~~~~~
-The byte swap instructions use an instruction class of ``BPF_ALU`` and a 4-bit
-'code' field of ``BPF_END``.
+The byte swap instructions use instruction classes of ``BPF_ALU`` and ``BPF_ALU64``
+and a 4-bit 'code' field of ``BPF_END``.
The byte swap instructions operate on the destination register
only and do not use a separate source register or immediate value.
-The 1-bit source operand field in the opcode is used to select what byte
-order the operation convert from or to:
+For ``BPF_ALU``, the 1-bit source operand field in the opcode is used to select what byte
+order the operation converts from or to. For ``BPF_ALU64``, the 1-bit source operand
+field in the opcode is not used.
-========= ===== =================================================
-source value description
-========= ===== =================================================
-BPF_TO_LE 0x00 convert between host byte order and little endian
-BPF_TO_BE 0x08 convert between host byte order and big endian
-========= ===== =================================================
+=========  =========  =====  =================================================
+class      source     value  description
+=========  =========  =====  =================================================
+BPF_ALU    BPF_TO_LE  0x00   convert between host byte order and little endian
+BPF_ALU    BPF_TO_BE  0x08   convert between host byte order and big endian
+BPF_ALU64  BPF_TO_LE  0x00   do byte swap unconditionally
+=========  =========  =====  =================================================
The 'imm' field encodes the width of the swap operations. The following widths
are supported: 16, 32 and 64.
@@ -239,6 +253,12 @@ Examples:
dst = htobe64(dst)
+``BPF_ALU64 | BPF_TO_LE | BPF_END`` with imm = 16/32/64 means::
+
+ dst = bswap16(dst)
+ dst = bswap32(dst)
+ dst = bswap64(dst)
+
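To make the difference between the two flavors concrete, here is a hedged C sketch
(not from the patch); end_to_be32 and end_bswap32 are made-up helper names and rely on
the glibc <endian.h> macros::

  #include <endian.h>
  #include <stdint.h>

  /* BPF_ALU | BPF_TO_BE | BPF_END, imm = 32: a no-op on big-endian hosts,
   * a byte swap on little-endian hosts.
   */
  static uint64_t end_to_be32(uint64_t dst)
  {
          return htobe32((uint32_t)dst);
  }

  /* BPF_ALU64 | BPF_TO_LE | BPF_END, imm = 32: swaps the bytes regardless
   * of the host byte order.
   */
  static uint64_t end_bswap32(uint64_t dst)
  {
          return __builtin_bswap32((uint32_t)dst);
  }
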
Jump instructions
-----------------
@@ -249,7 +269,8 @@ The 'code' field encodes the operation as below:
======== ===== === =========================================== =========================================
code     value src description                                 notes
======== ===== === =========================================== =========================================
-BPF_JA  0x0   0x0 PC += offset                                BPF_JMP only
+BPF_JA  0x0   0x0 PC += offset                                BPF_JMP class
+BPF_JA  0x0   0x0 PC += imm                                   BPF_JMP32 class
BPF_JEQ  0x1   any PC += offset if dst == src
BPF_JGT  0x2   any PC += offset if dst > src                   unsigned
BPF_JGE  0x3   any PC += offset if dst >= src                  unsigned
@@ -278,6 +299,10 @@ Example:
where 's>=' indicates a signed '>=' comparison.
+Note there are two flavors of BPF_JA instructions. The BPF_JMP class permits a 16-bit
+jump offset, while BPF_JMP32 permits a 32-bit jump offset. A conditional jump whose
+offset does not fit in 16 bits can be converted to a conditional jump with a short
+offset plus a 32-bit unconditional jump.
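
For illustration only, a small C sketch of how the target instruction index could be
computed for the two BPF_JA flavors, using the uapi struct bpf_insn layout; bpf_ja_target
is a made-up helper, not verifier or interpreter code::

  #include <linux/bpf.h>  /* struct bpf_insn, BPF_CLASS(), BPF_JMP32 */

  static int bpf_ja_target(const struct bpf_insn *insn, int pc)
  {
          if (BPF_CLASS(insn->code) == BPF_JMP32)
                  return pc + insn->imm + 1;      /* 32-bit displacement in 'imm' */

          return pc + insn->off + 1;              /* BPF_JMP: 16-bit displacement in 'off' */
  }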
+
Helper functions
~~~~~~~~~~~~~~~~
@@ -320,6 +345,7 @@ The mode modifier is one of:
BPF_ABS 0x20 legacy BPF packet access (absolute) `Legacy BPF Packet access instructions`_
BPF_IND 0x40 legacy BPF packet access (indirect) `Legacy BPF Packet access instructions`_
BPF_MEM 0x60 regular load and store operations `Regular load and store operations`_
+ BPF_MEMSX 0x80 sign-extension load operations `Sign-extension load operations`_
BPF_ATOMIC 0xc0 atomic operations `Atomic operations`_
============= ===== ==================================== =============
@@ -350,10 +376,19 @@ instructions that transfer data between a register and memory.
``BPF_MEM | <size> | BPF_LDX`` means::
- dst = *(size *) (src + offset)
+ dst = *(unsigned size *) (src + offset)
Where size is one of: ``BPF_B``, ``BPF_H``, ``BPF_W``, or ``BPF_DW``.
+The ``BPF_MEMSX`` mode modifier is used to encode sign-extension load
+instructions that transfer data between a register and memory.
+
+``BPF_MEMSX | <size> | BPF_LDX`` means::
+
+ dst = *(signed size *) (src + offset)
+
+Where size is one of: ``BPF_B``, ``BPF_H`` or ``BPF_W``.
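
As a hedged illustration (not from the patch), the contrast between the two load modes
for a byte-sized load in C; ldx_b and ldsx_b are made-up helper names::

  #include <stdint.h>

  /* BPF_MEM | BPF_B | BPF_LDX: the loaded byte is zero extended */
  static uint64_t ldx_b(const void *src, int16_t off)
  {
          return *(const uint8_t *)((const char *)src + off);
  }

  /* BPF_MEMSX | BPF_B | BPF_LDX: the loaded byte is sign extended to 64 bits */
  static uint64_t ldsx_b(const void *src, int16_t off)
  {
          return (uint64_t)(int64_t)*(const int8_t *)((const char *)src + off);
  }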
+
Atomic operations
-----------------
--
2.34.1
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4
2023-07-20 0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
` (16 preceding siblings ...)
2023-07-20 0:02 ` [PATCH bpf-next v3 17/17] docs/bpf: Add documentation for new instructions Yonghong Song
@ 2023-07-20 20:35 ` Eduard Zingerman
17 siblings, 0 replies; 30+ messages in thread
From: Eduard Zingerman @ 2023-07-20 20:35 UTC (permalink / raw)
To: Yonghong Song, bpf
Cc: Alexei Starovoitov, Andrii Nakryiko, bpf, Daniel Borkmann,
Fangrui Song, kernel-team
On Wed, 2023-07-19 at 17:01 -0700, Yonghong Song wrote:
> In previous discussion ([1]), it is agreed that we should introduce
> cpu version 4 (llvm flag -mcpu=v4) which contains some instructions
> which can simplify code, make code easier to understand, fix the
> existing problem, or simply for feature completeness. More specifically,
> the following new insns are proposed:
> . sign extended load
> . sign extended mov
> . bswap
> . signed div/mod
> . ja with 32-bit offset
>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
^ permalink raw reply [flat|nested] 30+ messages in thread