From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
To: Michael Ellerman <mpe@ellerman.id.au>
Cc: Daniel Borkmann <daniel@iogearbox.net>,
	Alexei Starovoitov <alexei.starovoitov@gmail.com>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Yauheni Kaliuta <yauheni.kaliuta@redhat.com>,
	Hari Bathini <hbathini@linux.ibm.com>,
	<linuxppc-dev@lists.ozlabs.org>, <bpf@vger.kernel.org>
Subject: [PATCH powerpc/next 15/17] powerpc/bpf: Use _Rn macros for GPRs
Date: Mon, 14 Feb 2022 16:11:49 +0530
Message-ID: <7df626b8cdc6141d4295ac16137c82ad570b6637.1644834730.git.naveen.n.rao@linux.vnet.ibm.com>
In-Reply-To: <cover.1644834730.git.naveen.n.rao@linux.vnet.ibm.com>

Use the _Rn macros when specifying GPRs so that register usage in the JIT code is immediately clear.
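
(An illustrative before/after, not part of the patch itself: with bare numbers,
an operand such as the "1" in PPC_RAW_STDU(1, 1, ...) gives no hint that it
names GPR r1 rather than an immediate. The _Rn macros, which the b2p[] changes
below show are plain integer aliases for the GPR numbers, make that explicit.
The sketch reuses the stack-frame store changed later in this patch.)

	/* Before: both "1"s are GPR r1 (the stack pointer), but nothing says so */
	EMIT(PPC_RAW_STDU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));

	/* After: _R1 unambiguously names GPR r1 */
	EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));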

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp32.c | 30 +++++++-------
 arch/powerpc/net/bpf_jit_comp64.c | 68 +++++++++++++++----------------
 2 files changed, 49 insertions(+), 49 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 063e3a1be9270d..fe4e0eca017ede 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -41,23 +41,23 @@
 /* BPF to ppc register mappings */
 const int b2p[MAX_BPF_JIT_REG + 1] = {
 	/* function return value */
-	[BPF_REG_0] = 12,
+	[BPF_REG_0] = _R12,
 	/* function arguments */
-	[BPF_REG_1] = 4,
-	[BPF_REG_2] = 6,
-	[BPF_REG_3] = 8,
-	[BPF_REG_4] = 10,
-	[BPF_REG_5] = 22,
+	[BPF_REG_1] = _R4,
+	[BPF_REG_2] = _R6,
+	[BPF_REG_3] = _R8,
+	[BPF_REG_4] = _R10,
+	[BPF_REG_5] = _R22,
 	/* non volatile registers */
-	[BPF_REG_6] = 24,
-	[BPF_REG_7] = 26,
-	[BPF_REG_8] = 28,
-	[BPF_REG_9] = 30,
+	[BPF_REG_6] = _R24,
+	[BPF_REG_7] = _R26,
+	[BPF_REG_8] = _R28,
+	[BPF_REG_9] = _R30,
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = 18,
+	[BPF_REG_FP] = _R18,
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = 20,
-	[TMP_REG] = 31,		/* 32 bits */
+	[BPF_REG_AX] = _R20,
+	[TMP_REG] = _R31,		/* 32 bits */
 };
 
 static int bpf_to_ppc(struct codegen_context *ctx, int reg)
@@ -66,8 +66,8 @@ static int bpf_to_ppc(struct codegen_context *ctx, int reg)
 }
 
 /* PPC NVR range -- update this if we ever use NVRs below r17 */
-#define BPF_PPC_NVR_MIN		17
-#define BPF_PPC_TC		16
+#define BPF_PPC_NVR_MIN		_R17
+#define BPF_PPC_TC		_R16
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 3e4ed556094770..ac06efa7022379 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -48,28 +48,28 @@
 /* BPF to ppc register mappings */
 const int b2p[MAX_BPF_JIT_REG + 2] = {
 	/* function return value */
-	[BPF_REG_0] = 8,
+	[BPF_REG_0] = _R8,
 	/* function arguments */
-	[BPF_REG_1] = 3,
-	[BPF_REG_2] = 4,
-	[BPF_REG_3] = 5,
-	[BPF_REG_4] = 6,
-	[BPF_REG_5] = 7,
+	[BPF_REG_1] = _R3,
+	[BPF_REG_2] = _R4,
+	[BPF_REG_3] = _R5,
+	[BPF_REG_4] = _R6,
+	[BPF_REG_5] = _R7,
 	/* non volatile registers */
-	[BPF_REG_6] = 27,
-	[BPF_REG_7] = 28,
-	[BPF_REG_8] = 29,
-	[BPF_REG_9] = 30,
+	[BPF_REG_6] = _R27,
+	[BPF_REG_7] = _R28,
+	[BPF_REG_8] = _R29,
+	[BPF_REG_9] = _R30,
 	/* frame pointer aka BPF_REG_10 */
-	[BPF_REG_FP] = 31,
+	[BPF_REG_FP] = _R31,
 	/* eBPF jit internal registers */
-	[BPF_REG_AX] = 12,
-	[TMP_REG_1] = 9,
-	[TMP_REG_2] = 10
+	[BPF_REG_AX] = _R12,
+	[TMP_REG_1] = _R9,
+	[TMP_REG_2] = _R10
 };
 
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
-#define BPF_PPC_NVR_MIN		27
+#define BPF_PPC_NVR_MIN		_R27
 
 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 {
@@ -136,7 +136,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & SEEN_TAILCALL) {
 		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
 		/* this goes in the redzone */
-		EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, -(BPF_PPC_STACK_SAVE + 8)));
 	} else {
 		EMIT(PPC_RAW_NOP());
 		EMIT(PPC_RAW_NOP());
@@ -149,10 +149,10 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		 */
 		if (ctx->seen & SEEN_FUNC) {
 			EMIT(PPC_RAW_MFLR(_R0));
-			EMIT(PPC_RAW_STD(0, 1, PPC_LR_STKOFF));
+			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 		}
 
-		EMIT(PPC_RAW_STDU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
+		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 	}
 
 	/*
@@ -162,11 +162,11 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	 */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_STD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+			EMIT(PPC_RAW_STD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
 
 	/* Setup frame pointer to point to the bpf stack area */
 	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
-		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
+		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], _R1,
 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
 }
 
@@ -177,14 +177,14 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 		if (bpf_is_seen_register(ctx, b2p[i]))
-			EMIT(PPC_RAW_LD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));
+			EMIT(PPC_RAW_LD(b2p[i], _R1, bpf_jit_stack_offsetof(ctx, b2p[i])));
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
-		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
+		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
 		if (ctx->seen & SEEN_FUNC) {
-			EMIT(PPC_RAW_LD(0, 1, PPC_LR_STKOFF));
-			EMIT(PPC_RAW_MTLR(0));
+			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
+			EMIT(PPC_RAW_MTLR(_R0));
 		}
 	}
 }
@@ -194,7 +194,7 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	/* Move result to r3 */
-	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
+	EMIT(PPC_RAW_MR(_R3, b2p[BPF_REG_0]));
 
 	EMIT(PPC_RAW_BLR());
 }
@@ -232,7 +232,7 @@ int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func
 	func += FUNCTION_DESCR_SIZE;
 
 	/* Load function address into r12 */
-	PPC_LI64(12, func);
+	PPC_LI64(_R12, func);
 
 	/* For bpf-to-bpf function calls, the callee's address is unknown
 	 * until the last extra pass. As seen above, we use PPC_LI64() to
@@ -247,7 +247,7 @@ int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func
 	for (i = ctx->idx - ctx_idx; i < 5; i++)
 		EMIT(PPC_RAW_NOP());
 
-	EMIT(PPC_RAW_MTCTR(12));
+	EMIT(PPC_RAW_MTCTR(_R12));
 	EMIT(PPC_RAW_BCTRL());
 
 	return 0;
@@ -281,7 +281,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
 	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
 	PPC_BCC_SHORT(COND_GE, out);
 
@@ -289,7 +289,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 * tail_call_cnt++;
 	 */
 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
-	EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));
+	EMIT(PPC_RAW_STD(b2p[TMP_REG_1], _R1, bpf_jit_stack_tailcallcnt(ctx)));
 
 	/* prog = array->ptrs[index]; */
 	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
@@ -680,8 +680,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 				break;
 			case 64:
 				/* Store the value to stack and then use byte-reverse loads */
-				EMIT(PPC_RAW_STD(dst_reg, 1, bpf_jit_stack_local(ctx)));
-				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
+				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
+				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], _R1, bpf_jit_stack_local(ctx)));
 				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 					EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
 				} else {
@@ -736,8 +736,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 				break;
 			case STF_BARRIER_FALLBACK:
 				ctx->seen |= SEEN_FUNC;
-				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
-				EMIT(PPC_RAW_MTCTR(12));
+				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+				EMIT(PPC_RAW_MTCTR(_R12));
 				EMIT(PPC_RAW_BCTRL());
 				break;
 			case STF_BARRIER_NONE:
@@ -952,7 +952,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 				return ret;
 
 			/* move return value from r3 to BPF_REG_0 */
-			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
+			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], _R3));
 			break;
 
 		/*
-- 
2.35.1


Thread overview: 19+ messages
2022-02-14 10:41 [PATCH powerpc/next 00/17] powerpc/bpf: Some updates and cleanups Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 01/17] powerpc/bpf: Skip branch range validation during first pass Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 02/17] powerpc/bpf: Emit a single branch instruction for known short branch ranges Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 03/17] powerpc/bpf: Handle large branch ranges with BPF_EXIT Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 04/17] powerpc64/bpf: Do not save/restore LR on each call to bpf_stf_barrier() Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 05/17] powerpc64/bpf: Use r12 for constant blinding Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 06/17] powerpc64: Set PPC64_ELF_ABI_v[1|2] macros to 1 Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 07/17] powerpc64/bpf elfv2: Setup kernel TOC in r2 on entry Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 08/17] powerpc64/bpf elfv1: Do not load TOC before calling functions Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 09/17] powerpc64/bpf: Optimize instruction sequence used for function calls Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 10/17] powerpc/bpf: Rename PPC_BL_ABS() to PPC_BL() Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 11/17] powerpc64/bpf: Convert some of the uses of PPC_BPF_[LL|STL] to PPC_BPF_[LD|STD] Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 12/17] powerpc64/bpf: Get rid of PPC_BPF_[LL|STL|STLU] macros Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 13/17] powerpc/bpf: Cleanup bpf_jit.h Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 14/17] powerpc/bpf: Move bpf_jit64.h into bpf_jit_comp64.c Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 15/17] powerpc/bpf: Use _Rn macros for GPRs Naveen N. Rao [this message]
2022-02-14 10:41 ` [PATCH powerpc/next 16/17] powerpc64/bpf: Store temp registers' bpf to ppc mapping Naveen N. Rao
2022-02-14 10:41 ` [PATCH powerpc/next 17/17] powerpc/bpf: Simplify bpf_to_ppc() and adopt it for powerpc64 Naveen N. Rao
2022-03-08 12:08 ` [PATCH powerpc/next 00/17] powerpc/bpf: Some updates and cleanups Michael Ellerman
