All of lore.kernel.org
 help / color / mirror / Atom feed
From: Yonghong Song <yhs=40meta.com@dmarc.ietf.org>
To: Eduard Zingerman <eddyz87@gmail.com>, Yonghong Song <yhs@fb.com>,
	bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	 bpf@ietf.org, Daniel Borkmann <daniel@iogearbox.net>,
	Fangrui Song <maskray@google.com>,
	kernel-team@fb.com
Subject: Re: [Bpf] [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns
Date: Sun, 23 Jul 2023 21:03:49 -0700	[thread overview]
Message-ID: <694b67de-702e-eca5-aa03-ef84cf1a0d2a@meta.com> (raw)
In-Reply-To: <c3156f3e7769f779e9fb0dd09edf0e8cd00a5b42.camel@gmail.com>



On 7/20/23 1:33 PM, Eduard Zingerman wrote:
> On Wed, 2023-07-19 at 17:01 -0700, Yonghong Song wrote:
>> Add interpreter/jit support for new sign-extension load insns
>> which adds a new mode (BPF_MEMSX).
>> Also add verifier support to recognize these insns and to
>> do proper verification with new insns. In verifier, besides
>> to deduce proper bounds for the dst_reg, probed memory access
>> is also properly handled.
>>
>> Signed-off-by: Yonghong Song <yhs@fb.com>
>> ---
>>   arch/x86/net/bpf_jit_comp.c    |  42 ++++++++-
>>   include/linux/filter.h         |   3 +
>>   include/uapi/linux/bpf.h       |   1 +
>>   kernel/bpf/core.c              |  21 +++++
>>   kernel/bpf/verifier.c          | 150 +++++++++++++++++++++++++++------
>>   tools/include/uapi/linux/bpf.h |   1 +
>>   6 files changed, 191 insertions(+), 27 deletions(-)
>>
>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
>> index 83c4b45dc65f..54478a9c93e1 100644
>> --- a/arch/x86/net/bpf_jit_comp.c
>> +++ b/arch/x86/net/bpf_jit_comp.c
>> @@ -779,6 +779,29 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>>   	*pprog = prog;
>>   }
>>   
>> +/* LDSX: dst_reg = *(s8*)(src_reg + off) */
>> +static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>> +{
>> +	u8 *prog = *pprog;
>> +
>> +	switch (size) {
>> +	case BPF_B:
>> +		/* Emit 'movsx rax, byte ptr [rax + off]' */
>> +		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
>> +		break;
>> +	case BPF_H:
>> +		/* Emit 'movsx rax, word ptr [rax + off]' */
>> +		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
>> +		break;
>> +	case BPF_W:
>> +		/* Emit 'movsx rax, dword ptr [rax + off]' */
>> +		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
>> +		break;
>> +	}
>> +	emit_insn_suffix(&prog, src_reg, dst_reg, off);
>> +	*pprog = prog;
>> +}
>> +
>>   /* STX: *(u8*)(dst_reg + off) = src_reg */
>>   static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>>   {
>> @@ -1370,9 +1393,17 @@ st:			if (is_imm8(insn->off))
>>   		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
>>   		case BPF_LDX | BPF_MEM | BPF_DW:
>>   		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
>> +			/* LDSX: dst_reg = *(s8*)(src_reg + off) */
>> +		case BPF_LDX | BPF_MEMSX | BPF_B:
>> +		case BPF_LDX | BPF_MEMSX | BPF_H:
>> +		case BPF_LDX | BPF_MEMSX | BPF_W:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
>>   			insn_off = insn->off;
>>   
>> -			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> +			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>>   				/* Conservatively check that src_reg + insn->off is a kernel address:
>>   				 *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
>>   				 * src_reg is used as scratch for src_reg += insn->off and restored
>> @@ -1415,8 +1446,13 @@ st:			if (is_imm8(insn->off))
>>   				start_of_ldx = prog;
>>   				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
>>   			}
>> -			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> -			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
>> +			    BPF_MODE(insn->code) == BPF_MEMSX)
>> +				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> +			else
>> +				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> +			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>>   				struct exception_table_entry *ex;
>>   				u8 *_insn = image + proglen + (start_of_ldx - temp);
>>   				s64 delta;
>> diff --git a/include/linux/filter.h b/include/linux/filter.h
>> index f69114083ec7..a93242b5516b 100644
>> --- a/include/linux/filter.h
>> +++ b/include/linux/filter.h
>> @@ -69,6 +69,9 @@ struct ctl_table_header;
>>   /* unused opcode to mark special load instruction. Same as BPF_ABS */
>>   #define BPF_PROBE_MEM	0x20
>>   
>> +/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
>> +#define BPF_PROBE_MEMSX	0x40
>> +
>>   /* unused opcode to mark call to interpreter with arguments */
>>   #define BPF_CALL_ARGS	0xe0
>>   
>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>> index 739c15906a65..651a34511780 100644
>> --- a/include/uapi/linux/bpf.h
>> +++ b/include/uapi/linux/bpf.h
>> @@ -19,6 +19,7 @@
>>   
>>   /* ld/ldx fields */
>>   #define BPF_DW		0x18	/* double word (64-bit) */
>> +#define BPF_MEMSX	0x80	/* load with sign extension */
>>   #define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
>>   #define BPF_XADD	0xc0	/* exclusive add - legacy name */
>>   
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> index dc85240a0134..01b72fc001f6 100644
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>> @@ -1610,6 +1610,9 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
>>   	INSN_3(LDX, MEM, H),			\
>>   	INSN_3(LDX, MEM, W),			\
>>   	INSN_3(LDX, MEM, DW),			\
>> +	INSN_3(LDX, MEMSX, B),			\
>> +	INSN_3(LDX, MEMSX, H),			\
>> +	INSN_3(LDX, MEMSX, W),			\
>>   	/*   Immediate based. */		\
>>   	INSN_3(LD, IMM, DW)
>>   
>> @@ -1666,6 +1669,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
>>   	};
>>   #undef BPF_INSN_3_LBL
>>   #undef BPF_INSN_2_LBL
>> @@ -1942,6 +1948,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>>   	LDST(DW, u64)
>>   #undef LDST
>>   
>> +#define LDSX(SIZEOP, SIZE)						\
>> +	LDX_MEMSX_##SIZEOP:						\
>> +		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
>> +		CONT;							\
>> +	LDX_PROBE_MEMSX_##SIZEOP:					\
>> +		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
>> +				      (const void *)(long) (SRC + insn->off));	\
>> +		DST = *((SIZE *)&DST);					\
>> +		CONT;
>> +
>> +	LDSX(B,   s8)
>> +	LDSX(H,  s16)
>> +	LDSX(W,  s32)
>> +#undef LDSX
>> +
>>   #define ATOMIC_ALU_OP(BOP, KOP)						\
>>   		case BOP:						\
>>   			if (BPF_SIZE(insn->code) == BPF_W)		\
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index 803b91135ca0..79c0cd50ec59 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -5809,6 +5809,94 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
>>   	__reg_combine_64_into_32(reg);
>>   }
>>   
>> +static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
>> +{
>> +	if (size == 1) {
>> +		reg->smin_value = reg->s32_min_value = S8_MIN;
>> +		reg->smax_value = reg->s32_max_value = S8_MAX;
>> +	} else if (size == 2) {
>> +		reg->smin_value = reg->s32_min_value = S16_MIN;
>> +		reg->smax_value = reg->s32_max_value = S16_MAX;
>> +	} else {
>> +		/* size == 4 */
>> +		reg->smin_value = reg->s32_min_value = S32_MIN;
>> +		reg->smax_value = reg->s32_max_value = S32_MAX;
>> +	}
>> +	reg->umin_value = reg->u32_min_value = 0;
>> +	reg->umax_value = U64_MAX;
>> +	reg->u32_max_value = U32_MAX;
>> +	reg->var_off = tnum_unknown;
>> +}
>> +
>> +static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
>> +{
>> +	s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
>> +	u64 top_smax_value, top_smin_value;
>> +	u64 num_bits = size * 8;
>> +
>> +	if (tnum_is_const(reg->var_off)) {
>> +		u64_cval = reg->var_off.value;
>> +		if (size == 1)
>> +			reg->var_off = tnum_const((s8)u64_cval);
>> +		else if (size == 2)
>> +			reg->var_off = tnum_const((s16)u64_cval);
>> +		else
>> +			/* size == 4 */
>> +			reg->var_off = tnum_const((s32)u64_cval);
>> +
>> +		u64_cval = reg->var_off.value;
>> +		reg->smax_value = reg->smin_value = u64_cval;
>> +		reg->umax_value = reg->umin_value = u64_cval;
>> +		reg->s32_max_value = reg->s32_min_value = u64_cval;
>> +		reg->u32_max_value = reg->u32_min_value = u64_cval;
>> +		return;
>> +	}
>> +
>> +	top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
>> +	top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
>> +
>> +	if (top_smax_value != top_smin_value)
>> +		goto out;
>> +
>> +	/* find the s64_max and s64_min after sign extension */
>> +	if (size == 1) {
>> +		init_s64_max = (s8)reg->smax_value;
>> +		init_s64_min = (s8)reg->smin_value;
>> +	} else if (size == 2) {
>> +		init_s64_max = (s16)reg->smax_value;
>> +		init_s64_min = (s16)reg->smin_value;
>> +	} else {
>> +		init_s64_max = (s32)reg->smax_value;
>> +		init_s64_min = (s32)reg->smin_value;
>> +	}
>> +
>> +	s64_max = max(init_s64_max, init_s64_min);
>> +	s64_min = min(init_s64_max, init_s64_min);
>> +
>> +	if (s64_max >= 0 && s64_min >= 0) {
>> +		reg->smin_value = reg->s32_min_value = s64_min;
>> +		reg->smax_value = reg->s32_max_value = s64_max;
>> +		reg->umin_value = reg->u32_min_value = s64_min;
>> +		reg->umax_value = reg->u32_max_value = s64_max;
>> +		reg->var_off = tnum_range(s64_min, s64_max);
>> +		return;
>> +	}
>> +
>> +	if (s64_min < 0 && s64_max < 0) {
>> +		reg->smin_value = reg->s32_min_value = s64_min;
>> +		reg->smax_value = reg->s32_max_value = s64_max;
>> +		reg->umin_value = (u64)s64_min;
>> +		reg->umax_value = (u64)s64_max;
>> +		reg->u32_min_value = (u32)s64_min;
>> +		reg->u32_max_value = (u32)s64_max;
>> +		reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
>> +		return;
>> +	}
> 
> I think that the bodies for (s64_max >= 0 && s64_min >= 0)
> and (s64_min < 0 && s64_max < 0) are now identical.

Thanks. Will change in the next revision.

> 
>> +
>> +out:
>> +	set_sext64_default_val(reg, size);
>> +}
>> +
[...]

-- 
Bpf mailing list
Bpf@ietf.org
https://www.ietf.org/mailman/listinfo/bpf

WARNING: multiple messages have this Message-ID (diff)
From: Yonghong Song <yhs@meta.com>
To: Eduard Zingerman <eddyz87@gmail.com>, Yonghong Song <yhs@fb.com>,
	bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	bpf@ietf.org, Daniel Borkmann <daniel@iogearbox.net>,
	Fangrui Song <maskray@google.com>,
	kernel-team@fb.com
Subject: Re: [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns
Date: Sun, 23 Jul 2023 21:03:49 -0700	[thread overview]
Message-ID: <694b67de-702e-eca5-aa03-ef84cf1a0d2a@meta.com> (raw)
Message-ID: <20230724040349.NCx3m5f3mm7QWULS3LVCQFetyRAR6Z2oprV2Soy-gRY@z> (raw)
In-Reply-To: <c3156f3e7769f779e9fb0dd09edf0e8cd00a5b42.camel@gmail.com>



On 7/20/23 1:33 PM, Eduard Zingerman wrote:
> On Wed, 2023-07-19 at 17:01 -0700, Yonghong Song wrote:
>> Add interpreter/jit support for new sign-extension load insns
>> which adds a new mode (BPF_MEMSX).
>> Also add verifier support to recognize these insns and to
>> do proper verification with new insns. In verifier, besides
>> to deduce proper bounds for the dst_reg, probed memory access
>> is also properly handled.
>>
>> Signed-off-by: Yonghong Song <yhs@fb.com>
>> ---
>>   arch/x86/net/bpf_jit_comp.c    |  42 ++++++++-
>>   include/linux/filter.h         |   3 +
>>   include/uapi/linux/bpf.h       |   1 +
>>   kernel/bpf/core.c              |  21 +++++
>>   kernel/bpf/verifier.c          | 150 +++++++++++++++++++++++++++------
>>   tools/include/uapi/linux/bpf.h |   1 +
>>   6 files changed, 191 insertions(+), 27 deletions(-)
>>
>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
>> index 83c4b45dc65f..54478a9c93e1 100644
>> --- a/arch/x86/net/bpf_jit_comp.c
>> +++ b/arch/x86/net/bpf_jit_comp.c
>> @@ -779,6 +779,29 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>>   	*pprog = prog;
>>   }
>>   
>> +/* LDSX: dst_reg = *(s8*)(src_reg + off) */
>> +static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>> +{
>> +	u8 *prog = *pprog;
>> +
>> +	switch (size) {
>> +	case BPF_B:
>> +		/* Emit 'movsx rax, byte ptr [rax + off]' */
>> +		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
>> +		break;
>> +	case BPF_H:
>> +		/* Emit 'movsx rax, word ptr [rax + off]' */
>> +		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
>> +		break;
>> +	case BPF_W:
>> +		/* Emit 'movsx rax, dword ptr [rax + off]' */
>> +		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
>> +		break;
>> +	}
>> +	emit_insn_suffix(&prog, src_reg, dst_reg, off);
>> +	*pprog = prog;
>> +}
>> +
>>   /* STX: *(u8*)(dst_reg + off) = src_reg */
>>   static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>>   {
>> @@ -1370,9 +1393,17 @@ st:			if (is_imm8(insn->off))
>>   		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
>>   		case BPF_LDX | BPF_MEM | BPF_DW:
>>   		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
>> +			/* LDSX: dst_reg = *(s8*)(src_reg + off) */
>> +		case BPF_LDX | BPF_MEMSX | BPF_B:
>> +		case BPF_LDX | BPF_MEMSX | BPF_H:
>> +		case BPF_LDX | BPF_MEMSX | BPF_W:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
>> +		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
>>   			insn_off = insn->off;
>>   
>> -			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> +			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>>   				/* Conservatively check that src_reg + insn->off is a kernel address:
>>   				 *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
>>   				 * src_reg is used as scratch for src_reg += insn->off and restored
>> @@ -1415,8 +1446,13 @@ st:			if (is_imm8(insn->off))
>>   				start_of_ldx = prog;
>>   				end_of_jmp[-1] = start_of_ldx - end_of_jmp;
>>   			}
>> -			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> -			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
>> +			    BPF_MODE(insn->code) == BPF_MEMSX)
>> +				emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> +			else
>> +				emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
>> +			if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
>> +			    BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
>>   				struct exception_table_entry *ex;
>>   				u8 *_insn = image + proglen + (start_of_ldx - temp);
>>   				s64 delta;
>> diff --git a/include/linux/filter.h b/include/linux/filter.h
>> index f69114083ec7..a93242b5516b 100644
>> --- a/include/linux/filter.h
>> +++ b/include/linux/filter.h
>> @@ -69,6 +69,9 @@ struct ctl_table_header;
>>   /* unused opcode to mark special load instruction. Same as BPF_ABS */
>>   #define BPF_PROBE_MEM	0x20
>>   
>> +/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
>> +#define BPF_PROBE_MEMSX	0x40
>> +
>>   /* unused opcode to mark call to interpreter with arguments */
>>   #define BPF_CALL_ARGS	0xe0
>>   
>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>> index 739c15906a65..651a34511780 100644
>> --- a/include/uapi/linux/bpf.h
>> +++ b/include/uapi/linux/bpf.h
>> @@ -19,6 +19,7 @@
>>   
>>   /* ld/ldx fields */
>>   #define BPF_DW		0x18	/* double word (64-bit) */
>> +#define BPF_MEMSX	0x80	/* load with sign extension */
>>   #define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
>>   #define BPF_XADD	0xc0	/* exclusive add - legacy name */
>>   
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> index dc85240a0134..01b72fc001f6 100644
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>> @@ -1610,6 +1610,9 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
>>   	INSN_3(LDX, MEM, H),			\
>>   	INSN_3(LDX, MEM, W),			\
>>   	INSN_3(LDX, MEM, DW),			\
>> +	INSN_3(LDX, MEMSX, B),			\
>> +	INSN_3(LDX, MEMSX, H),			\
>> +	INSN_3(LDX, MEMSX, W),			\
>>   	/*   Immediate based. */		\
>>   	INSN_3(LD, IMM, DW)
>>   
>> @@ -1666,6 +1669,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
>>   		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
>> +		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
>>   	};
>>   #undef BPF_INSN_3_LBL
>>   #undef BPF_INSN_2_LBL
>> @@ -1942,6 +1948,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
>>   	LDST(DW, u64)
>>   #undef LDST
>>   
>> +#define LDSX(SIZEOP, SIZE)						\
>> +	LDX_MEMSX_##SIZEOP:						\
>> +		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
>> +		CONT;							\
>> +	LDX_PROBE_MEMSX_##SIZEOP:					\
>> +		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
>> +				      (const void *)(long) (SRC + insn->off));	\
>> +		DST = *((SIZE *)&DST);					\
>> +		CONT;
>> +
>> +	LDSX(B,   s8)
>> +	LDSX(H,  s16)
>> +	LDSX(W,  s32)
>> +#undef LDSX
>> +
>>   #define ATOMIC_ALU_OP(BOP, KOP)						\
>>   		case BOP:						\
>>   			if (BPF_SIZE(insn->code) == BPF_W)		\
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index 803b91135ca0..79c0cd50ec59 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -5809,6 +5809,94 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
>>   	__reg_combine_64_into_32(reg);
>>   }
>>   
>> +static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
>> +{
>> +	if (size == 1) {
>> +		reg->smin_value = reg->s32_min_value = S8_MIN;
>> +		reg->smax_value = reg->s32_max_value = S8_MAX;
>> +	} else if (size == 2) {
>> +		reg->smin_value = reg->s32_min_value = S16_MIN;
>> +		reg->smax_value = reg->s32_max_value = S16_MAX;
>> +	} else {
>> +		/* size == 4 */
>> +		reg->smin_value = reg->s32_min_value = S32_MIN;
>> +		reg->smax_value = reg->s32_max_value = S32_MAX;
>> +	}
>> +	reg->umin_value = reg->u32_min_value = 0;
>> +	reg->umax_value = U64_MAX;
>> +	reg->u32_max_value = U32_MAX;
>> +	reg->var_off = tnum_unknown;
>> +}
>> +
>> +static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
>> +{
>> +	s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
>> +	u64 top_smax_value, top_smin_value;
>> +	u64 num_bits = size * 8;
>> +
>> +	if (tnum_is_const(reg->var_off)) {
>> +		u64_cval = reg->var_off.value;
>> +		if (size == 1)
>> +			reg->var_off = tnum_const((s8)u64_cval);
>> +		else if (size == 2)
>> +			reg->var_off = tnum_const((s16)u64_cval);
>> +		else
>> +			/* size == 4 */
>> +			reg->var_off = tnum_const((s32)u64_cval);
>> +
>> +		u64_cval = reg->var_off.value;
>> +		reg->smax_value = reg->smin_value = u64_cval;
>> +		reg->umax_value = reg->umin_value = u64_cval;
>> +		reg->s32_max_value = reg->s32_min_value = u64_cval;
>> +		reg->u32_max_value = reg->u32_min_value = u64_cval;
>> +		return;
>> +	}
>> +
>> +	top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
>> +	top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
>> +
>> +	if (top_smax_value != top_smin_value)
>> +		goto out;
>> +
>> +	/* find the s64_max and s64_min after sign extension */
>> +	if (size == 1) {
>> +		init_s64_max = (s8)reg->smax_value;
>> +		init_s64_min = (s8)reg->smin_value;
>> +	} else if (size == 2) {
>> +		init_s64_max = (s16)reg->smax_value;
>> +		init_s64_min = (s16)reg->smin_value;
>> +	} else {
>> +		init_s64_max = (s32)reg->smax_value;
>> +		init_s64_min = (s32)reg->smin_value;
>> +	}
>> +
>> +	s64_max = max(init_s64_max, init_s64_min);
>> +	s64_min = min(init_s64_max, init_s64_min);
>> +
>> +	if (s64_max >= 0 && s64_min >= 0) {
>> +		reg->smin_value = reg->s32_min_value = s64_min;
>> +		reg->smax_value = reg->s32_max_value = s64_max;
>> +		reg->umin_value = reg->u32_min_value = s64_min;
>> +		reg->umax_value = reg->u32_max_value = s64_max;
>> +		reg->var_off = tnum_range(s64_min, s64_max);
>> +		return;
>> +	}
>> +
>> +	if (s64_min < 0 && s64_max < 0) {
>> +		reg->smin_value = reg->s32_min_value = s64_min;
>> +		reg->smax_value = reg->s32_max_value = s64_max;
>> +		reg->umin_value = (u64)s64_min;
>> +		reg->umax_value = (u64)s64_max;
>> +		reg->u32_min_value = (u32)s64_min;
>> +		reg->u32_max_value = (u32)s64_max;
>> +		reg->var_off = tnum_range((u64)s64_min, (u64)s64_max);
>> +		return;
>> +	}
> 
> I think that the bodies for (s64_max >= 0 && s64_min >= 0)
> and (s64_min < 0 && s64_max < 0) are now identical.

Thanks. Will change in the next revision.

> 
>> +
>> +out:
>> +	set_sext64_default_val(reg, size);
>> +}
>> +
[...]

  reply	other threads:[~2023-07-24  4:04 UTC|newest]

Thread overview: 30+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-07-20  0:01 [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 01/17] bpf: Support new sign-extension load insns Yonghong Song
2023-07-20 20:33   ` Eduard Zingerman
2023-07-24  4:03     ` Yonghong Song [this message]
2023-07-24  4:03       ` Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 02/17] bpf: Support new sign-extension mov insns Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 03/17] bpf: Handle sign-extenstin ctx member accesses Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 04/17] bpf: Support new unconditional bswap instruction Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 05/17] bpf: Support new signed div/mod instructions Yonghong Song
2023-07-20  0:01   ` [Bpf] " Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 06/17] bpf: Fix jit blinding with new sdiv/smov insns Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 07/17] bpf: Support new 32bit offset jmp instruction Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 08/17] bpf: Add kernel/bpftool asm support for new instructions Yonghong Song
2023-07-21 14:36   ` Quentin Monnet
2023-07-21 14:36     ` [Bpf] " Quentin Monnet
2023-07-20  0:01 ` [PATCH bpf-next v3 09/17] selftests/bpf: Fix a test_verifier failure Yonghong Song
2023-07-20  0:01 ` [PATCH bpf-next v3 10/17] selftests/bpf: Add a cpuv4 test runner for cpu=v4 testing Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 11/17] selftests/bpf: Add unit tests for new sign-extension load insns Yonghong Song
2023-07-20  6:31   ` [Bpf] " Yonghong Song
2023-07-20  6:31     ` Yonghong Song
2023-07-20  6:36     ` [Bpf] " Yonghong Song
2023-07-20  6:36       ` Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 12/17] selftests/bpf: Add unit tests for new sign-extension mov insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 13/17] selftests/bpf: Add unit tests for new bswap insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 14/17] selftests/bpf: Add unit tests for new sdiv/smod insns Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 15/17] selftests/bpf: Add unit tests for new gotol insn Yonghong Song
2023-07-20  0:02   ` [Bpf] " Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 16/17] selftests/bpf: Test ldsx with more complex cases Yonghong Song
2023-07-20  0:02 ` [PATCH bpf-next v3 17/17] docs/bpf: Add documentation for new instructions Yonghong Song
2023-07-20 20:35 ` [PATCH bpf-next v3 00/17] bpf: Support new insns from cpu v4 Eduard Zingerman

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=694b67de-702e-eca5-aa03-ef84cf1a0d2a@meta.com \
    --to=yhs=40meta.com@dmarc.ietf.org \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@ietf.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=eddyz87@gmail.com \
    --cc=kernel-team@fb.com \
    --cc=maskray@google.com \
    --cc=yhs@fb.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.