From: Jiong Wang <jiong.wang@netronome.com>
To: Luke Nelson <luke.r.nels@gmail.com>
Cc: "Song Liu" <songliubraving@fb.com>,
	"Albert Ou" <aou@eecs.berkeley.edu>,
	bpf@vger.kernel.org, "Daniel Borkmann" <daniel@iogearbox.net>,
	"Björn Töpel" <bjorn.topel@gmail.com>,
	"Palmer Dabbelt" <palmer@sifive.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
	"Yonghong Song" <yhs@fb.com>,
	linux-riscv@lists.infradead.org,
	"Martin KaFai Lau" <kafai@fb.com>, "Xi Wang" <xi.wang@gmail.com>
Subject: Re: [PATCH 2/2] bpf: test_bpf: add tests for upper bits of 32-bit operations
Date: Thu, 30 May 2019 21:30:33 +0100
Message-ID: <87lfyn4rdy.fsf@netronome.com>
In-Reply-To: <20190530190800.7633-2-luke.r.nels@gmail.com>


Luke Nelson writes:

> This commit introduces tests that validate the upper 32 bits
> of the result of 32-bit BPF ALU operations.
>
> The existing tests for 32-bit operations do not check the upper 32
> bits of the result, because the exit instruction truncates it.
> The new tests perform a 32-bit ALU operation followed by a right
> shift by 32, so they can catch subtle bugs in the zero-extension
> behavior of JITed instructions, including several bugs in the
> RISC-V BPF JIT fixed in another patch.

Hi Luke,

  Have you seen the following?

    https://www.spinics.net/lists/netdev/msg573355.html

  It has been merged into the bpf tree and should provide full test
  coverage of all BPF insns that can write to a sub-register and are
  exposed to the JIT back-end.
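
  (As background on why the upper half needs checking at all: on RV64 the
  W-form instructions such as addw sign-extend their 32-bit result, while
  BPF_ALU32 semantics require the upper 32 bits to be zeroed.  Below is a
  minimal C model of that difference, purely for illustration; the two
  function names are made up and are not kernel code:)

	#include <stdint.h>

	/* What RV64 addw produces: the 32-bit sum, sign-extended to 64 bits. */
	static uint64_t rv64_addw(uint64_t rs1, uint64_t rs2)
	{
		return (uint64_t)(int64_t)(int32_t)((uint32_t)rs1 + (uint32_t)rs2);
	}

	/* What BPF_ALU32 | BPF_ADD requires: the 32-bit sum, zero-extended. */
	static uint64_t bpf_alu32_add(uint64_t dst, uint64_t src)
	{
		return (uint32_t)dst + (uint32_t)src;
	}

	/*
	 * For 1 + 0xfffffffe:
	 *   rv64_addw()     -> 0xffffffffffffffff
	 *   bpf_alu32_add() -> 0x00000000ffffffff
	 * A 64-bit right shift by 32 turns that into all-ones vs 0, which
	 * is exactly the difference the tests in this patch expose.
	 */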

  And AFAIK, new unit tests are normally added to test_verifier, which is
  a userspace test infrastructure that offers more test functionality and
  whose tests also go through the verifier.
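
  For example, the ADD case from this patch could look roughly like the
  entry below as a test_verifier test.  This is only a sketch: the field
  names (.insns, .result, .retval) and the test name are written from
  memory, so double-check them against the existing entries under
  tools/testing/selftests/bpf/verifier/ before use.

	{
		"alu32: add must zero the upper 32 bits",
		.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		/* lower 32 bits = 0xfffffffe, upper 32 bits cleared */
		BPF_ALU32_IMM(BPF_MOV, BPF_REG_1, -2),
		BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* move the upper half down so the exit value can show it */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 0,
	},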

Regards,
Jiong

> The added tests pass the JIT and interpreter on x86, as well as the
> JIT and interpreter on RISC-V once the zero-extension bugs are fixed.
>
> Cc: Xi Wang <xi.wang@gmail.com>
> Signed-off-by: Luke Nelson <luke.r.nels@gmail.com>
> ---
>  lib/test_bpf.c | 164 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 164 insertions(+)
>
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index 0845f635f404..4580dc0220f1 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -2461,6 +2461,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 1 } },
>  	},
> +	{
> +		"ALU_ADD_X: (1 + 4294967294) >> 32 + 4294967294 = 4294967294",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1U),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 4294967294U } },
> +	},
>  	{
>  		"ALU64_ADD_X: 1 + 2 = 3",
>  		.u.insns_int = {
> @@ -2812,6 +2826,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 1 } },
>  	},
> +	{
> +		"ALU_SUB_X: (4294967295 - 1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 4294967295U),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 1U),
> +			BPF_ALU32_REG(BPF_SUB, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_SUB_X: 3 - 1 = 2",
>  		.u.insns_int = {
> @@ -3391,6 +3419,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 0xffffffff } },
>  	},
> +	{
> +		"ALU_AND_X: (-1 & -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, -1UL),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_AND, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_AND_X: 3 & 2 = 2",
>  		.u.insns_int = {
> @@ -3533,6 +3575,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 0xffffffff } },
>  	},
> +	{
> +		"ALU_OR_X: (0 | -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_OR, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_OR_X: 1 | 2 = 3",
>  		.u.insns_int = {
> @@ -3675,6 +3731,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 0xfffffffe } },
>  	},
> +	{
> +		"ALU_XOR_X: (0 ^ -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_XOR, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_XOR_X: 5 ^ 6 = 3",
>  		.u.insns_int = {
> @@ -3817,6 +3887,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 0x80000000 } },
>  	},
> +	{
> +		"ALU_LSH_X: (1 << 31) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 31),
> +			BPF_ALU32_REG(BPF_LSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_LSH_X: 1 << 1 = 2",
>  		.u.insns_int = {
> @@ -3842,6 +3926,19 @@ static struct bpf_test tests[] = {
>  		{ { 0, 0x80000000 } },
>  	},
>  	/* BPF_ALU | BPF_LSH | BPF_K */
> +	{
> +		"ALU_LSH_K: (1 << 31) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1),
> +			BPF_ALU32_IMM(BPF_LSH, R0, 31),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU_LSH_K: 1 << 1 = 2",
>  		.u.insns_int = {
> @@ -3911,6 +4008,20 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 1 } },
>  	},
> +	{
> +		"ALU_RSH_X: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 0),
> +			BPF_ALU32_REG(BPF_RSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_RSH_X: 2 >> 1 = 1",
>  		.u.insns_int = {
> @@ -3936,6 +4047,19 @@ static struct bpf_test tests[] = {
>  		{ { 0, 1 } },
>  	},
>  	/* BPF_ALU | BPF_RSH | BPF_K */
> +	{
> +		"ALU_RSH_K: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_RSH, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU_RSH_K: 2 >> 1 = 1",
>  		.u.insns_int = {
> @@ -3993,7 +4117,34 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 0xffff00ff } },
>  	},
> +	{
> +		"ALU_ARSH_X: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 0),
> +			BPF_ALU32_REG(BPF_ARSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	/* BPF_ALU | BPF_ARSH | BPF_K */
> +	{
> +		"ALU_ARSH_K: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_ARSH, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
>  		.u.insns_int = {
> @@ -4028,6 +4179,19 @@ static struct bpf_test tests[] = {
>  		{ },
>  		{ { 0, 3 } },
>  	},
> +	{
> +		"ALU_NEG: -(1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_ALU32_IMM(BPF_MOV, R0, 1),
> +			BPF_ALU32_IMM(BPF_NEG, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU64_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
>  	{
>  		"ALU64_NEG: -(3) = -3",
>  		.u.insns_int = {

