From: Johan Almbladh <johan.almbladh@anyfinetworks.com>
To: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org
Cc: kafai@fb.com, songliubraving@fb.com, yhs@fb.com,
	john.fastabend@gmail.com, kpsingh@kernel.org, iii@linux.ibm.com,
	paul@cilium.io, yangtiezhu@loongson.cn, netdev@vger.kernel.org,
	bpf@vger.kernel.org,
	Johan Almbladh <johan.almbladh@anyfinetworks.com>
Subject: [PATCH bpf-next 03/10] bpf/tests: Add exhaustive tests of BPF_ATOMIC magnitudes
Date: Fri,  1 Oct 2021 15:03:41 +0200
Message-ID: <20211001130348.3670534-4-johan.almbladh@anyfinetworks.com>
In-Reply-To: <20211001130348.3670534-1-johan.almbladh@anyfinetworks.com>

This patch adds a series of tests to verify the operation of BPF_ATOMIC
with BPF_DW and BPF_W sizes, for all power-of-two magnitudes of the
register value operand.
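
For reference, each generated case is roughly equivalent to the C sketch
below, shown here for BPF_ADD | BPF_FETCH. This is illustrative only: the
actual test emits BPF instructions, checks each condition with a JEQ/EXIT
pair, and also verifies that an unrelated scratch register is preserved.
The helper name is hypothetical, and __sync_fetch_and_add() is the
GCC/Clang builtin standing in for the emitted BPF_ATOMIC_OP.

	/* Sketch only, not part of the patch. */
	static int check_atomic64_add_fetch(u64 dst, u64 src)
	{
		u64 mem = dst;                             /* BPF_STX_MEM   */
		u64 old = __sync_fetch_and_add(&mem, src); /* BPF_ATOMIC_OP */

		if (mem != dst + src)  /* memory must hold the ALU result */
			return 0;
		if (old != dst)        /* FETCH must return the old value */
			return 0;
		return 1;
	}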

Also fixes a confusing typo in the comment for a related test.

Signed-off-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
---
 lib/test_bpf.c | 504 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 503 insertions(+), 1 deletion(-)
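
Note for reviewers: the CMPXCHG cases exercise both a failing and a
succeeding comparison against the same memory word. Below is a hedged C
sketch of the intent, assuming the kernel's cmpxchg(ptr, old, new) macro
that returns the previous value; the emitted BPF code instead uses R0 as
the comparand, as defined for BPF_CMPXCHG, and the helper name here is
hypothetical.

	/* Sketch only; the real logic is emitted by __bpf_emit_cmpxchg64(). */
	static int check_cmpxchg64(u64 dst, u64 src)
	{
		u64 mem = dst;
		u64 old;

		old = cmpxchg(&mem, ~dst, src); /* comparand mismatches ...   */
		if (mem != dst || old != dst)   /* ... so memory is unchanged */
			return 0;

		old = cmpxchg(&mem, dst, src);  /* comparand matches ...      */
		if (mem != src || old != dst)   /* ... so memory is updated   */
			return 0;
		return 1;
	}

Once applied, the new cases can also be run individually via the test_bpf
module's test_name parameter, e.g. test_name="ATOMIC_DW_CMPXCHG: all
operand magnitudes".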

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f6983ad7b981..84efb23e09d0 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -796,7 +796,7 @@ static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
 /*
  * Exhaustive tests of ALU operations for all combinations of power-of-two
  * magnitudes of the operands, both for positive and negative values. The
- * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
  * emit different code depending on the magnitude of the immediate value.
  */
 
@@ -1137,6 +1137,306 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
 	return __bpf_fill_alu32_reg(self, BPF_MOD);
 }
 
+/*
+ * Exhaustive tests of atomic operations for all power-of-two operand
+ * magnitudes, both for positive and negative values.
+ */
+
+static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
+			       struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	u64 keep, fetch, res;
+	int i = 0;
+
+	if (!insns)
+		return 21;
+
+	switch (op) {
+	case BPF_XCHG:
+		res = src;
+		break;
+	default:
+		__bpf_alu_result(&res, dst, src, BPF_OP(op));
+	}
+
+	keep = 0x0123456789abcdefULL;
+	if (op & BPF_FETCH)
+		fetch = dst;
+	else
+		fetch = src;
+
+	i += __bpf_ld_imm64(&insns[i], R0, keep);
+	i += __bpf_ld_imm64(&insns[i], R1, dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+	i += __bpf_ld_imm64(&insns[i], R3, res);
+	i += __bpf_ld_imm64(&insns[i], R4, fetch);
+	i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
+			       struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int op = *(int *)arg;
+	u64 keep, fetch, res;
+	int i = 0;
+
+	if (!insns)
+		return 21;
+
+	switch (op) {
+	case BPF_XCHG:
+		res = src;
+		break;
+	default:
+		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+	}
+
+	keep = 0x0123456789abcdefULL;
+	if (op & BPF_FETCH)
+		fetch = (u32)dst;
+	else
+		fetch = src;
+
+	i += __bpf_ld_imm64(&insns[i], R0, keep);
+	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+	i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+	i += __bpf_ld_imm64(&insns[i], R4, fetch);
+	i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
+	insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int i = 0;
+
+	if (!insns)
+		return 23;
+
+	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+	i += __bpf_ld_imm64(&insns[i], R1, dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+
+	/* Result unsuccessful */
+	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	/* Result successful */
+	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
+				struct bpf_insn *insns, s64 dst, s64 src)
+{
+	int i = 0;
+
+	if (!insns)
+		return 27;
+
+	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+	i += __bpf_ld_imm64(&insns[i], R2, src);
+
+	/* Result unsuccessful */
+	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	/* Result successful */
+	i += __bpf_ld_imm64(&insns[i], R0, dst);
+	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+	insns[i++] = BPF_EXIT_INSN();
+
+	return i;
+}
+
+static int __bpf_fill_atomic64(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  0, PATTERN_BLOCK2,
+				  &__bpf_emit_atomic64);
+}
+
+static int __bpf_fill_atomic32(struct bpf_test *self, int op)
+{
+	return __bpf_fill_pattern(self, &op, 64, 64,
+				  0, PATTERN_BLOCK2,
+				  &__bpf_emit_atomic32);
+}
+
+/* 64-bit atomic operations */
+static int bpf_fill_atomic64_add(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg(struct bpf_test *self)
+{
+	return __bpf_fill_atomic64(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg64(struct bpf_test *self)
+{
+	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+				  &__bpf_emit_cmpxchg64);
+}
+
+/* 32-bit atomic operations */
+static int bpf_fill_atomic32_add(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg(struct bpf_test *self)
+{
+	return __bpf_fill_atomic32(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg32(struct bpf_test *self)
+{
+	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+				  &__bpf_emit_cmpxchg32);
+}
+
 /*
  * Test the two-instruction 64-bit immediate load operation for all
  * power-of-two magnitudes of the immediate operand. For each MSB, a block
@@ -10721,6 +11021,208 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_ld_imm64,
 	},
+	/* 64-bit ATOMIC magnitudes */
+	{
+		"ATOMIC_DW_ADD: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_AND: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_OR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_XOR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_ADD_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_add_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_AND_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_and_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_OR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_or_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xor_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_XCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic64_xchg,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_DW_CMPXCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_cmpxchg64,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	/* 32-bit ATOMIC magnitudes */
+	{
+		"ATOMIC_W_ADD: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_AND: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_OR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_XOR: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_ADD_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_add_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_AND_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_and_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_OR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_or_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_XOR_FETCH: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xor_fetch,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_XCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_atomic32_xchg,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ATOMIC_W_CMPXCHG: all operand magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_cmpxchg32,
+		.stack_depth = 8,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* JMP immediate magnitudes */
 	{
 		"JMP_JSET_K: all immediate value magnitudes",
-- 
2.30.2

