All of lore.kernel.org
* [PATCH bpf] riscv, bpf: Emit fixed-length imm64 for BPF_PSEUDO_FUNC
@ 2022-11-30  3:38 ` Pu Lehui
  0 siblings, 0 replies; 8+ messages in thread
From: Pu Lehui @ 2022-11-30  3:38 UTC (permalink / raw)
  To: bpf, linux-riscv, linux-kernel
  Cc: Björn Töpel, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa,
	Paul Walmsley, Palmer Dabbelt, Albert Ou, Pu Lehui, Pu Lehui

From: Pu Lehui <pulehui@huawei.com>

For the BPF_PSEUDO_FUNC instruction, the verifier refills imm with the
correct addresses of the bpf_calls and then runs the last pass of the
JIT. Since emit_imm of RV64 is variable-length, emitting the
appropriate number of instructions according to the imm, it may break
ctx->offset and lead to unpredictable problems, such as inaccurate
jumps. So let's fix it with fixed-length imm64 instructions.

Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
Signed-off-by: Pu Lehui <pulehui@huawei.com>
---
 arch/riscv/net/bpf_jit_comp64.c | 31 ++++++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index eb99df41fa33..f984d5fa014b 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -139,6 +139,30 @@ static bool in_auipc_jalr_range(s64 val)
 		val < ((1L << 31) - (1L << 11));
 }
 
+/* Emit fixed-length instructions for 32-bit imm */
+static void emit_fixed_imm32(u8 rd, s32 val, struct rv_jit_context *ctx)
+{
+	s32 upper = (val + (1U << 11)) >> 12;
+	s32 lower = ((val & 0xfff) << 20) >> 20;
+
+	emit(rv_lui(rd, upper), ctx);
+	emit(rv_addi(rd, rd, lower), ctx);
+}
+
+/* Emit fixed-length instructions for 64-bit imm */
+static void emit_fixed_imm64(u8 rd, s64 val, struct rv_jit_context *ctx)
+{
+	/* Compensation for sign-extension of rv_addi */
+	s32 imm_hi = (val + (1U << 31)) >> 32;
+	s32 imm_lo = val;
+
+	emit_fixed_imm32(rd, imm_hi, ctx);
+	emit_fixed_imm32(RV_REG_T1, imm_lo, ctx);
+	emit(rv_slli(rd, rd, 32), ctx);
+	emit(rv_add(rd, rd, RV_REG_T1), ctx);
+}
+
+/* Emit variable-length instructions for 32-bit and 64-bit imm */
 static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 {
 	/* Note that the immediate from the add is sign-extended,
@@ -1053,7 +1077,12 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		u64 imm64;
 
 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
-		emit_imm(rd, imm64, ctx);
+		if (bpf_pseudo_func(insn))
+			/* fixed-length insns for extra jit pass */
+			emit_fixed_imm64(rd, imm64, ctx);
+		else
+			emit_imm(rd, imm64, ctx);
+
 		return 1;
 	}
 
-- 
2.25.1


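A minimal userspace sketch of what the patch emits (illustration only, not
part of the patch; build_imm32/build_imm64 and the test values are mine):
the +(1 << 11) rounding in emit_fixed_imm32 compensates for the
sign-extended 12-bit addi, and the +(1 << 31) rounding in emit_fixed_imm64
compensates for the sign-extended low half added after the slli by 32. An
arithmetic right shift for signed values is assumed, as in the kernel code.

#include <assert.h>
#include <stdint.h>

/* Model of "lui rd, upper; addi rd, rd, lower" for a 32-bit value. */
static int32_t build_imm32(int32_t val)
{
	int32_t upper = (val + (1 << 11)) >> 12;	/* rounded upper 20 bits */
	int32_t lower = (int32_t)((uint32_t)(val & 0xfff) << 20) >> 20; /* sign-extended low 12 bits */
	int32_t rd;

	rd = (int32_t)((uint32_t)upper << 12);	/* lui  rd, upper     */
	rd += lower;				/* addi rd, rd, lower */
	return rd;
}

/* Model of emit_fixed_imm64: hi and lo halves, then slli + add. */
static uint64_t build_imm64(int64_t val)
{
	int32_t imm_hi = (int32_t)((val + (1LL << 31)) >> 32);
	int32_t imm_lo = (int32_t)val;
	uint64_t rd = (uint64_t)(int64_t)build_imm32(imm_hi);
	uint64_t t1 = (uint64_t)(int64_t)build_imm32(imm_lo);

	rd <<= 32;	/* slli rd, rd, 32 */
	rd += t1;	/* add  rd, rd, t1 */
	return rd;
}

int main(void)
{
	assert(build_imm32(0x12345678) == 0x12345678);
	assert(build_imm32(-1) == -1);
	assert(build_imm64((int64_t)0xffffffff12345678ULL) == 0xffffffff12345678ULL);
	assert(build_imm64(0x12345678ffff0000LL) == 0x12345678ffff0000ULL);
	return 0;
}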

* Re: [PATCH bpf] riscv, bpf: Emit fixed-length imm64 for BPF_PSEUDO_FUNC
  2022-11-30  3:38 ` Pu Lehui
@ 2022-11-30 11:38   ` Björn Töpel
  -1 siblings, 0 replies; 8+ messages in thread
From: Björn Töpel @ 2022-11-30 11:38 UTC (permalink / raw)
  To: Pu Lehui, bpf, linux-riscv, linux-kernel
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Paul Walmsley,
	Palmer Dabbelt, Albert Ou, Pu Lehui, Pu Lehui

Pu Lehui <pulehui@huaweicloud.com> writes:

> From: Pu Lehui <pulehui@huawei.com>
>
> For the BPF_PSEUDO_FUNC instruction, the verifier refills imm with the
> correct addresses of the bpf_calls and then runs the last pass of the
> JIT. Since emit_imm of RV64 is variable-length, emitting the
> appropriate number of instructions according to the imm, it may break
> ctx->offset and lead to unpredictable problems, such as inaccurate
> jumps. So let's fix it with fixed-length imm64 instructions.

Ah, nice one! So, the invariant doesn't hold (the image grows in the
last pass).

> Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")

This is odd? This can't be the right Fixes-tag...

> Signed-off-by: Pu Lehui <pulehui@huawei.com>
> ---
>  arch/riscv/net/bpf_jit_comp64.c | 31 ++++++++++++++++++++++++++++++-
>  1 file changed, 30 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
> index eb99df41fa33..f984d5fa014b 100644
> --- a/arch/riscv/net/bpf_jit_comp64.c
> +++ b/arch/riscv/net/bpf_jit_comp64.c
> @@ -139,6 +139,30 @@ static bool in_auipc_jalr_range(s64 val)
>  		val < ((1L << 31) - (1L << 11));
>  }
>  
> +/* Emit fixed-length instructions for 32-bit imm */
> +static void emit_fixed_imm32(u8 rd, s32 val, struct rv_jit_context *ctx)
> +{
> +	s32 upper = (val + (1U << 11)) >> 12;
> +	s32 lower = ((val & 0xfff) << 20) >> 20;
> +
> +	emit(rv_lui(rd, upper), ctx);
> +	emit(rv_addi(rd, rd, lower), ctx);
> +}
> +
> +/* Emit fixed-length instructions for 64-bit imm */
> +static void emit_fixed_imm64(u8 rd, s64 val, struct rv_jit_context *ctx)
> +{
> +	/* Compensation for sign-extension of rv_addi */
> +	s32 imm_hi = (val + (1U << 31)) >> 32;
> +	s32 imm_lo = val;
> +
> +	emit_fixed_imm32(rd, imm_hi, ctx);
> +	emit_fixed_imm32(RV_REG_T1, imm_lo, ctx);
> +	emit(rv_slli(rd, rd, 32), ctx);
> +	emit(rv_add(rd, rd, RV_REG_T1), ctx);
> +}

Hmm, will this really be fixed-length? We can end up with compressed
instructions, which can then become non-compressed in the last pass, and
then we have the same problem?

The range of valid addresses for RV64 (sv39 to sv57) is
0xffffffff00000000 to 0xffffffffffffffff, so I think we can do better
than 6 insns, no? My gut feeling (I need to tinker a bit) is that 4
should be sufficient.

Note that the worst case for an imm64 load is 8 instructions, but this
is not the general case.


Björn

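To make the length instability concrete, here is a simplified userspace
model of a variable-length immediate load (my own sketch, not the kernel's
emit_imm; it only models the instruction count). A placeholder imm during
the normal passes and the final kernel address patched in for the extra
pass can need different numbers of instructions, which is what breaks
ctx->offset; the same argument applies if an encoding is compressible in
one pass but not in the next.

#include <stdint.h>
#include <stdio.h>

/*
 * Values that fit a sign-extended 32-bit range need only lui+addi;
 * anything larger builds the upper bits first, then shifts and adds
 * 12 bits at a time (assumes an arithmetic right shift for int64_t).
 */
static int model_imm_insns(int64_t val)
{
	if (val == (int32_t)val)
		return 2;                       /* lui + addi        */
	return model_imm_insns(val >> 12) + 2;  /* ... + slli + addi */
}

int main(void)
{
	/* placeholder imm before the verifier patches in the address */
	printf("imm 1                  -> %d insns\n", model_imm_insns(1));
	/* a plausible kernel address seen only in the extra pass */
	printf("imm 0xffffffff12345678 -> %d insns\n",
	       model_imm_insns((int64_t)0xffffffff12345678ULL));
	return 0;
}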

* Re: [PATCH bpf] riscv, bpf: Emit fixed-length imm64 for BPF_PSEUDO_FUNC
  2022-11-30 11:38   ` Björn Töpel
@ 2022-11-30 16:04     ` Björn Töpel
  -1 siblings, 0 replies; 8+ messages in thread
From: Björn Töpel @ 2022-11-30 16:04 UTC (permalink / raw)
  To: Pu Lehui, bpf, linux-riscv, linux-kernel
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Paul Walmsley,
	Palmer Dabbelt, Albert Ou, Pu Lehui, Pu Lehui

Björn Töpel <bjorn@kernel.org> writes:

> The range of valid address for RV64 (sv39 to sv57) are
> 0xffffffff00000000 to 0xffffffffffffffff, so I think we can do better
> than 6 insn, no? My gut feeling (I need to tinker a bit) is that 4
> should be sufficient.

Ok, thinking a bit more about it; a proper address will have at least
2B alignment, so that means we can construct an address with lui,
addi, and slli (31 bits).

u64 addr = 0xffffffff12345678;

u32 imm = (addr & 0xffffffffUL) >> 1;
u32 upper = (imm + (1 << 11)) >> 12;
u32 lower = imm & 0xfff;
u32 rupper = upper | 0x80000; // for sign extend

NB! Make sure these are !C (non-compressed) insns, and emit:

lui xx, rupper
addi xx, xx, lower
slli xx, xx, 1

Now we'll have a fixed sequence of three insns. WDYT?


Björn

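A quick userspace check of the three-insn construction above (illustration
only; the sample address, the 2-byte-alignment assumption and the casts
are mine):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0xffffffff12345678ULL;		/* sample 2-byte-aligned address */
	uint32_t imm = (uint32_t)(addr & 0xffffffffULL) >> 1;
	uint32_t upper = (imm + (1u << 11)) >> 12;
	uint32_t lower = imm & 0xfff;
	uint32_t rupper = upper | 0x80000;		/* force sign extension from lui */
	int32_t simm12 = (int32_t)lower - ((lower & 0x800) ? 0x1000 : 0);
	uint64_t rd;

	rd = (uint64_t)(int64_t)(int32_t)(rupper << 12);	/* lui  rd, rupper    */
	rd += (uint64_t)(int64_t)simm12;			/* addi rd, rd, lower */
	rd <<= 1;						/* slli rd, rd, 1     */

	assert(rd == addr);
	return 0;
}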

* Re: [PATCH bpf] riscv, bpf: Emit fixed-length imm64 for BPF_PSEUDO_FUNC
  2022-11-30 11:38   ` Björn Töpel
@ 2022-12-02  9:58     ` Pu Lehui
  -1 siblings, 0 replies; 8+ messages in thread
From: Pu Lehui @ 2022-12-02  9:58 UTC (permalink / raw)
  To: Björn Töpel, bpf, linux-riscv, linux-kernel
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, Paul Walmsley,
	Palmer Dabbelt, Albert Ou, Pu Lehui



On 2022/11/30 19:38, Björn Töpel wrote:
> Pu Lehui <pulehui@huaweicloud.com> writes:
> 
>> From: Pu Lehui <pulehui@huawei.com>
>>
>> For the BPF_PSEUDO_FUNC instruction, the verifier refills imm with the
>> correct addresses of the bpf_calls and then runs the last pass of the
>> JIT. Since emit_imm of RV64 is variable-length, emitting the
>> appropriate number of instructions according to the imm, it may break
>> ctx->offset and lead to unpredictable problems, such as inaccurate
>> jumps. So let's fix it with fixed-length imm64 instructions.
> 
> Ah, nice one! So, the invariant doesn't hold (the image grows in the
> last pass).
> 
>> Fixes: 69c087ba6225 ("bpf: Add bpf_for_each_map_elem() helper")
> 
> This is odd? This can't be the right Fixes-tag...
> 

Only the BPF_PSEUDO_FUNC instruction needs the extra jit pass after the
imm is refilled in jit_subprogs. Others, like bpf helper calls, update
ctx->offset during the jit iterations. So the Fixes tag is 69c087ba6225.

>> Signed-off-by: Pu Lehui <pulehui@huawei.com>
>> ---
>>   arch/riscv/net/bpf_jit_comp64.c | 31 ++++++++++++++++++++++++++++++-
>>   1 file changed, 30 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
>> index eb99df41fa33..f984d5fa014b 100644
>> --- a/arch/riscv/net/bpf_jit_comp64.c
>> +++ b/arch/riscv/net/bpf_jit_comp64.c
>> @@ -139,6 +139,30 @@ static bool in_auipc_jalr_range(s64 val)
>>   		val < ((1L << 31) - (1L << 11));
>>   }
>>   
>> +/* Emit fixed-length instructions for 32-bit imm */
>> +static void emit_fixed_imm32(u8 rd, s32 val, struct rv_jit_context *ctx)
>> +{
>> +	s32 upper = (val + (1U << 11)) >> 12;
>> +	s32 lower = ((val & 0xfff) << 20) >> 20;
>> +
>> +	emit(rv_lui(rd, upper), ctx);
>> +	emit(rv_addi(rd, rd, lower), ctx);
>> +}
>> +
>> +/* Emit fixed-length instructions for 64-bit imm */
>> +static void emit_fixed_imm64(u8 rd, s64 val, struct rv_jit_context *ctx)
>> +{
>> +	/* Compensation for sign-extension of rv_addi */
>> +	s32 imm_hi = (val + (1U << 31)) >> 32;
>> +	s32 imm_lo = val;
>> +
>> +	emit_fixed_imm32(rd, imm_hi, ctx);
>> +	emit_fixed_imm32(RV_REG_T1, imm_lo, ctx);
>> +	emit(rv_slli(rd, rd, 32), ctx);
>> +	emit(rv_add(rd, rd, RV_REG_T1), ctx);
>> +}
> 
> Hmm, will this really be fixed-length? We can end up with compressed
> instructions, which can then become non-compressed in the last pass,
> and then we have the same problem?
> 
> The range of valid addresses for RV64 (sv39 to sv57) is
> 0xffffffff00000000 to 0xffffffffffffffff, so I think we can do better
> than 6 insns, no? My gut feeling (I need to tinker a bit) is that 4
> should be sufficient.
> 
> Note that the worst case for an imm64 load is 8 instructions, but this
> is not the general case.
> 
> 
> Björn


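For reference, the only ld_imm64 whose imm is rewritten between the normal
JIT passes and the extra pass is the one marked with BPF_PSEUDO_FUNC in
src_reg. The helper the patch calls to detect it looks roughly like this
(a sketch based on the kernel's definition; the exact header it lives in
is not shown in this thread):

#include <linux/filter.h>

/* BPF_PSEUDO_FUNC ld_imm64: imm holds the address of a bpf function,
 * filled in by the verifier after the subprogs have been jited. */
static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}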

end of thread

Thread overview: 4 messages
2022-11-30  3:38 [PATCH bpf] riscv, bpf: Emit fixed-length imm64 for BPF_PSEUDO_FUNC Pu Lehui
2022-11-30 11:38 ` Björn Töpel
2022-11-30 16:04   ` Björn Töpel
2022-12-02  9:58   ` Pu Lehui
