linux-mips.vger.kernel.org archive mirror
From: Tony Ambardar <tony.ambardar@gmail.com>
To: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Paul Burton <paulburton@kernel.org>
Cc: Tony Ambardar <Tony.Ambardar@gmail.com>,
	netdev@vger.kernel.org, bpf@vger.kernel.org,
	linux-mips@vger.kernel.org,
	Johan Almbladh <johan.almbladh@anyfinetworks.com>,
	Tiezhu Yang <yangtiezhu@loongson.cn>,
	Hassan Naveed <hnaveed@wavecomp.com>,
	David Daney <ddaney@caviumnetworks.com>,
	Luke Nelson <luke.r.nels@gmail.com>,
	Serge Semin <fancer.lancer@gmail.com>,
	Martin KaFai Lau <kafai@fb.com>, Song Liu <songliubraving@fb.com>,
	Yonghong Song <yhs@fb.com>,
	John Fastabend <john.fastabend@gmail.com>,
	KP Singh <kpsingh@kernel.org>
Subject: [RFC PATCH bpf-next v2 14/16] MIPS: eBPF64: implement all BPF_ATOMIC ops
Date: Tue,  5 Oct 2021 01:26:58 -0700
Message-ID: <3499e013eb8994077c8f642fb1907a49d6a0afd7.1633392335.git.Tony.Ambardar@gmail.com>
In-Reply-To: <cover.1633392335.git.Tony.Ambardar@gmail.com>

Reorganize the BPF_MEM and BPF_ATOMIC store handling into separate cases,
and add the atomic ops AND, OR, XOR, XCHG and CMPXCHG, with support for
BPF_FETCH.

Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
---
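Notes (not part of the patch): as a reference for reviewers, a minimal C
model of the semantics the JIT must reproduce for the new ops, assuming
the usual eBPF rules that CMPXCHG compares against R0 and returns the old
value in R0, while the BPF_FETCH variants return the old value in the
source register. The helper names below are made up for illustration.

#include <stdint.h>

/* BPF_CMPXCHG on a 64-bit word: compare *addr with *r0, store src on a
 * match, and always hand the old value back through R0.
 */
static uint64_t model_cmpxchg64(uint64_t *addr, uint64_t *r0, uint64_t src)
{
	uint64_t old = *addr;

	if (old == *r0)
		*addr = src;
	*r0 = old;
	return old;
}

/* BPF_ADD | BPF_FETCH on a 64-bit word; AND/OR/XOR/XCHG are analogous,
 * with the old value returned in the source register.
 */
static uint64_t model_fetch_add64(uint64_t *addr, uint64_t src)
{
	uint64_t old = *addr;

	*addr = old + src;
	return old;
}
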
 arch/mips/net/ebpf_jit_comp64.c | 181 +++++++++++++++++++++-----------
 1 file changed, 119 insertions(+), 62 deletions(-)
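
For testing, a small program built from the insn macros in
include/linux/filter.h can exercise the 64-bit CMPXCHG path, in the same
spirit as the insn-array cases in lib/test_bpf.c. The sketch below is
illustrative only; the array name and constants are arbitrary.

#include <linux/filter.h>

static const struct bpf_insn cmpxchg64_example[] = {
	/* *(u64 *)(fp - 8) = 3 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
	/* r0 = 3 (expected old value) */
	BPF_MOV64_IMM(BPF_REG_0, 3),
	/* r1 = 4 (new value) */
	BPF_MOV64_IMM(BPF_REG_1, 4),
	/* r2 = fp */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	/* r0 = cmpxchg_64(r2 - 8, r0, r1): values match, so 4 is
	 * stored and the old value 3 lands in r0
	 */
	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_2, BPF_REG_1, -8),
	BPF_EXIT_INSN(),
};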

diff --git a/arch/mips/net/ebpf_jit_comp64.c b/arch/mips/net/ebpf_jit_comp64.c
index 842e516ce749..35c8c8307b64 100644
--- a/arch/mips/net/ebpf_jit_comp64.c
+++ b/arch/mips/net/ebpf_jit_comp64.c
@@ -167,7 +167,15 @@ static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		   int this_idx, int exit_idx)
 {
+	/*
+	 * CMPXCHG uses R0 implicitly, and R0 is never encoded in the
+	 * passed bpf_insn, so fake a lookup to get its MIPS register.
+	 */
+	const struct bpf_insn r0_insn = {.src_reg = BPF_REG_0};
+	const int r0 = ebpf_to_mips_reg(ctx, &r0_insn,
+					REG_SRC_NO_FP);
 	const int bpf_class = BPF_CLASS(insn->code);
+	const int bpf_size = BPF_SIZE(insn->code);
 	const int bpf_op = BPF_OP(insn->code);
 	bool need_swap, did_move, cmp_eq;
 	unsigned int target = 0;
@@ -944,6 +952,32 @@ int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_STX | BPF_H | BPF_MEM:
 	case BPF_STX | BPF_W | BPF_MEM:
 	case BPF_STX | BPF_DW | BPF_MEM:
+		dst = ebpf_to_mips_reg(ctx, insn, REG_DST_FP_OK);
+		src = ebpf_to_mips_reg(ctx, insn, REG_SRC_FP_OK);
+		if (src < 0 || dst < 0)
+			return -EINVAL;
+		mem_off = insn->off;
+		switch (BPF_SIZE(insn->code)) {
+		case BPF_B:
+			emit_instr(ctx, sb, src, mem_off, dst);
+			break;
+		case BPF_H:
+			emit_instr(ctx, sh, src, mem_off, dst);
+			break;
+		case BPF_W:
+			emit_instr(ctx, sw, src, mem_off, dst);
+			break;
+		case BPF_DW:
+			if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+				emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+				emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+				src = MIPS_R_AT;
+			}
+			emit_instr(ctx, sd, src, mem_off, dst);
+			break;
+		}
+		break;
+
 	case BPF_STX | BPF_W | BPF_ATOMIC:
 	case BPF_STX | BPF_DW | BPF_ATOMIC:
 		dst = ebpf_to_mips_reg(ctx, insn, REG_DST_FP_OK);
@@ -951,71 +985,94 @@ int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (src < 0 || dst < 0)
 			return -EINVAL;
 		mem_off = insn->off;
-		if (BPF_MODE(insn->code) == BPF_ATOMIC) {
-			if (insn->imm != BPF_ADD) {
-				pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
-				return -EINVAL;
+		/*
+		 * If mem_off does not fit within the 9-bit ll/sc
+		 * instruction immediate field, use a temp reg.
+		 */
+		if (MIPS_ISA_REV >= 6 &&
+		    (mem_off >= BIT(8) || mem_off < -BIT(8))) {
+			emit_instr(ctx, daddiu, MIPS_R_T6, dst, mem_off);
+			mem_off = 0;
+			dst = MIPS_R_T6;
+		}
+		/* Copy or adjust 32-bit src regs based on BPF op size. */
+		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+		if (bpf_size == BPF_W) {
+			if (ts == REG_32BIT) {
+				emit_instr(ctx, sll, MIPS_R_T9, src, 0);
+				src = MIPS_R_T9;
 			}
+			/* Sign-extend R0 for a proper old == R0 comparison. */
+			if (insn->imm == BPF_CMPXCHG)
+				emit_instr(ctx, sll, r0, r0, 0);
+		}
+		if (bpf_size == BPF_DW && ts == REG_32BIT) {
+			emit_instr(ctx, move, MIPS_R_T9, src);
+			emit_instr(ctx, dinsu, MIPS_R_T9, MIPS_R_ZERO, 32, 32);
+			src = MIPS_R_T9;
+		}
+
+/* Helper to emit the 64-bit (BPF_DW) or 32-bit (BPF_W) instruction variant. */
+#define emit_instr_size(ctx, func64, func32, ...)                              \
+do {                                                                           \
+	if (bpf_size == BPF_DW)                                                \
+		emit_instr(ctx, func64, ##__VA_ARGS__);                        \
+	else                                                                   \
+		emit_instr(ctx, func32, ##__VA_ARGS__);                        \
+} while (0)
+
+		/* Record LL insn index; loop length varies with CMPXCHG. */
+		b_off = ctx->idx;
+		emit_instr_size(ctx, lld, ll, MIPS_R_AT, mem_off, dst);
+		switch (insn->imm) {
+		case BPF_AND | BPF_FETCH:
+		case BPF_AND:
+			emit_instr(ctx, and, MIPS_R_T8, MIPS_R_AT, src);
+			break;
+		case BPF_OR | BPF_FETCH:
+		case BPF_OR:
+			emit_instr(ctx, or, MIPS_R_T8, MIPS_R_AT, src);
+			break;
+		case BPF_XOR | BPF_FETCH:
+		case BPF_XOR:
+			emit_instr(ctx, xor, MIPS_R_T8, MIPS_R_AT, src);
+			break;
+		case BPF_ADD | BPF_FETCH:
+		case BPF_ADD:
+			emit_instr_size(ctx, daddu, addu, MIPS_R_T8, MIPS_R_AT, src);
+			break;
+		case BPF_XCHG:
+			emit_instr_size(ctx, daddu, addu, MIPS_R_T8, MIPS_R_ZERO, src);
+			break;
+		case BPF_CMPXCHG:
 			/*
-			 * If mem_off does not fit within the 9 bit ll/sc
-			 * instruction immediate field, use a temp reg.
+			 * If R0 != old_val then break out of the LL/SC loop.
 			 */
-			if (MIPS_ISA_REV >= 6 &&
-			    (mem_off >= BIT(8) || mem_off < -BIT(8))) {
-				emit_instr(ctx, daddiu, MIPS_R_T6,
-						dst, mem_off);
-				mem_off = 0;
-				dst = MIPS_R_T6;
-			}
-			switch (BPF_SIZE(insn->code)) {
-			case BPF_W:
-				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
-					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
-					src = MIPS_R_AT;
-				}
-				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
-				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
-				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
-				/*
-				 * On failure back up to LL (-4
-				 * instructions of 4 bytes each
-				 */
-				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
-				emit_instr(ctx, nop);
-				break;
-			case BPF_DW:
-				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
-					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
-					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
-					src = MIPS_R_AT;
-				}
-				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
-				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
-				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
-				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
-				emit_instr(ctx, nop);
-				break;
-			}
-		} else { /* BPF_MEM */
-			switch (BPF_SIZE(insn->code)) {
-			case BPF_B:
-				emit_instr(ctx, sb, src, mem_off, dst);
-				break;
-			case BPF_H:
-				emit_instr(ctx, sh, src, mem_off, dst);
-				break;
-			case BPF_W:
-				emit_instr(ctx, sw, src, mem_off, dst);
-				break;
-			case BPF_DW:
-				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
-					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
-					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
-					src = MIPS_R_AT;
-				}
-				emit_instr(ctx, sd, src, mem_off, dst);
-				break;
-			}
+			emit_instr(ctx, bne, r0, MIPS_R_AT, 4 * 4);
+			/* Delay slot */
+			emit_instr_size(ctx, daddu, addu, MIPS_R_T8, MIPS_R_ZERO, src);
+			/* Return old_val in R0 */
+			src = r0;
+			break;
+		default:
+			pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+			return -EINVAL;
+		}
+		emit_instr_size(ctx, scd, sc, MIPS_R_T8, mem_off, dst);
+#undef emit_instr_size
+		/*
+		 * On SC failure, branch back to the LL insn (offset computed below).
+		 */
+		b_off = (b_off - ctx->idx - 1) * 4;
+		emit_instr(ctx, beqz, MIPS_R_T8, b_off);
+		emit_instr(ctx, nop);
+		/*
+		 * BPF_FETCH returns the old value in src (R0 for CMPXCHG).
+		 */
+		if (insn->imm & BPF_FETCH) {
+			if (bpf_size == BPF_W)
+				emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+			emit_instr(ctx, move, src, MIPS_R_AT);
 		}
 		break;
 
-- 
2.25.1


Thread overview: 32+ messages
2021-07-12  0:34 [RFC PATCH bpf-next v1 00/14] MIPS: eBPF: refactor code, add MIPS32 JIT Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 01/14] MIPS: eBPF: support BPF_TAIL_CALL in JIT static analysis Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 02/14] MIPS: eBPF: mask 32-bit index for tail calls Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 03/14] MIPS: eBPF: fix BPF_ALU|ARSH handling in JIT static analysis Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 04/14] MIPS: eBPF: support BPF_JMP32 " Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 05/14] MIPS: eBPF: fix system hang with verifier dead-code patching Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 06/14] MIPS: eBPF: fix JIT static analysis hang with bounded loops Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 07/14] MIPS: eBPF: fix MOD64 insn on R6 ISA Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 08/14] MIPS: eBPF: support long jump for BPF_JMP|EXIT Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 09/14] MIPS: eBPF: drop src_reg restriction in BPF_LD|BPF_DW|BPF_IMM Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 10/14] MIPS: eBPF: improve and clarify enum 'which_ebpf_reg' Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 11/14] MIPS: eBPF: add core support for 32/64-bit systems Tony Ambardar
2021-07-12  0:34 ` [RFC PATCH bpf-next v1 13/14] MIPS: uasm: Enable muhu opcode for MIPS R6 Tony Ambardar
2021-07-12  0:35 ` [RFC PATCH bpf-next v1 14/14] MIPS: eBPF: add MIPS32 JIT Tony Ambardar
2021-07-20  1:25 ` [RFC PATCH bpf-next v1 00/14] MIPS: eBPF: refactor code, " Johan Almbladh
2021-07-20 17:47   ` Alexei Starovoitov
2021-10-05  8:26 ` [RFC PATCH bpf-next v2 00/16] " Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 01/16] MIPS: eBPF: support BPF_TAIL_CALL in JIT static analysis Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 02/16] MIPS: eBPF: mask 32-bit index for tail calls Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 03/16] MIPS: eBPF: fix BPF_ALU|ARSH handling in JIT static analysis Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 04/16] MIPS: eBPF: support BPF_JMP32 " Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 05/16] MIPS: eBPF: fix system hang with verifier dead-code patching Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 06/16] MIPS: eBPF: fix JIT static analysis hang with bounded loops Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 07/16] MIPS: eBPF: fix MOD64 insn on R6 ISA Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 08/16] MIPS: eBPF: support long jump for BPF_JMP|EXIT Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 09/16] MIPS: eBPF: drop src_reg restriction in BPF_LD|BPF_DW|BPF_IMM Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 10/16] MIPS: eBPF: add core support for 32/64-bit systems Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 11/16] bpf: allow tailcalls in subprograms for MIPS64/MIPS32 Tony Ambardar
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 13/16] MIPS: eBPF64: support BPF_JMP32 conditionals Tony Ambardar
2021-10-05  8:26   ` Tony Ambardar [this message]
2021-10-05  8:26   ` [RFC PATCH bpf-next v2 15/16] MIPS: uasm: Enable muhu opcode for MIPS R6 Tony Ambardar
2021-10-05  8:27   ` [RFC PATCH bpf-next v2 16/16] MIPS: eBPF: add MIPS32 JIT Tony Ambardar
