* [PATCH RFCv3 00/14] arm64: eBPF JIT compiler
@ 2014-07-15  6:24 Zi Shen Lim
  2014-07-15  6:24 ` [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm() Zi Shen Lim
                   ` (14 more replies)
  0 siblings, 15 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:24 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov,
	Chema Gonzalez
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

This series implements an eBPF JIT compiler for arm64.
See [14/14] for the change log.

Patches [1-13/14] implement code generation functions.
Patch [14/14] implements the actual eBPF JIT compiler.

Zi Shen Lim (14):
  arm64: introduce aarch64_insn_gen_comp_branch_imm()
  arm64: introduce aarch64_insn_gen_branch_reg()
  arm64: introduce aarch64_insn_gen_cond_branch_imm()
  arm64: introduce aarch64_insn_gen_load_store_reg()
  arm64: introduce aarch64_insn_gen_load_store_pair()
  arm64: introduce aarch64_insn_gen_add_sub_imm()
  arm64: introduce aarch64_insn_gen_bitfield()
  arm64: introduce aarch64_insn_gen_movewide()
  arm64: introduce aarch64_insn_gen_add_sub_shifted_reg()
  arm64: introduce aarch64_insn_gen_data1()
  arm64: introduce aarch64_insn_gen_data2()
  arm64: introduce aarch64_insn_gen_data3()
  arm64: introduce aarch64_insn_gen_logical_shifted_reg()
  arm64: eBPF JIT compiler

 Documentation/networking/filter.txt |   2 +-
 arch/arm64/Kconfig                  |   1 +
 arch/arm64/Makefile                 |   1 +
 arch/arm64/include/asm/insn.h       | 249 +++++++++++++
 arch/arm64/kernel/insn.c            | 644 +++++++++++++++++++++++++++++++++-
 arch/arm64/net/Makefile             |   4 +
 arch/arm64/net/bpf_jit.h            | 169 +++++++++
 arch/arm64/net/bpf_jit_comp.c       | 677 ++++++++++++++++++++++++++++++++++++
 8 files changed, 1739 insertions(+), 8 deletions(-)
 create mode 100644 arch/arm64/net/Makefile
 create mode 100644 arch/arm64/net/bpf_jit.h
 create mode 100644 arch/arm64/net/bpf_jit_comp.c

-- 
1.9.1



* [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
@ 2014-07-15  6:24 ` Zi Shen Lim
  2014-07-16 16:04   ` Will Deacon
  2014-07-15  6:25 ` [PATCH RFCv3 02/14] arm64: introduce aarch64_insn_gen_branch_reg() Zi Shen Lim
                   ` (13 subsequent siblings)
  14 siblings, 1 reply; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:24 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate compare & branch (immediate)
instructions.
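
For example, a JIT could emit "cbnz x7, tgt" as follows (an
illustrative sketch; "pc" and "tgt" stand for hypothetical
4-byte-aligned addresses within the +/-1MB range enforced by the
code below):

	/* cbnz x7, tgt -- pc/tgt are assumed, not defined here */
	u32 insn = aarch64_insn_gen_comp_branch_imm(pc, tgt,
			AARCH64_INSN_REG_7,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_BRANCH_COMP_NONZERO);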

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 57 ++++++++++++++++++++++++++++
 arch/arm64/kernel/insn.c      | 86 ++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 138 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b..a98c495 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -67,9 +69,58 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_MAX
 };
 
+enum aarch64_insn_register_type {
+	AARCH64_INSN_REGTYPE_RT,
+};
+
+enum aarch64_insn_register {
+	AARCH64_INSN_REG_0  = 0,
+	AARCH64_INSN_REG_1  = 1,
+	AARCH64_INSN_REG_2  = 2,
+	AARCH64_INSN_REG_3  = 3,
+	AARCH64_INSN_REG_4  = 4,
+	AARCH64_INSN_REG_5  = 5,
+	AARCH64_INSN_REG_6  = 6,
+	AARCH64_INSN_REG_7  = 7,
+	AARCH64_INSN_REG_8  = 8,
+	AARCH64_INSN_REG_9  = 9,
+	AARCH64_INSN_REG_10 = 10,
+	AARCH64_INSN_REG_11 = 11,
+	AARCH64_INSN_REG_12 = 12,
+	AARCH64_INSN_REG_13 = 13,
+	AARCH64_INSN_REG_14 = 14,
+	AARCH64_INSN_REG_15 = 15,
+	AARCH64_INSN_REG_16 = 16,
+	AARCH64_INSN_REG_17 = 17,
+	AARCH64_INSN_REG_18 = 18,
+	AARCH64_INSN_REG_19 = 19,
+	AARCH64_INSN_REG_20 = 20,
+	AARCH64_INSN_REG_21 = 21,
+	AARCH64_INSN_REG_22 = 22,
+	AARCH64_INSN_REG_23 = 23,
+	AARCH64_INSN_REG_24 = 24,
+	AARCH64_INSN_REG_25 = 25,
+	AARCH64_INSN_REG_26 = 26,
+	AARCH64_INSN_REG_27 = 27,
+	AARCH64_INSN_REG_28 = 28,
+	AARCH64_INSN_REG_29 = 29,
+	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+	AARCH64_INSN_REG_30 = 30,
+	AARCH64_INSN_REG_LR = 30, /* Link register */
+	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+	AARCH64_INSN_VARIANT_32BIT,
+	AARCH64_INSN_VARIANT_64BIT
+};
+
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
+	AARCH64_INSN_BRANCH_COMP_ZERO,
+	AARCH64_INSN_BRANCH_COMP_NONZERO,
 };
 
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
@@ -80,6 +131,8 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
@@ -97,6 +150,10 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 92f3683..d01bb4e 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -264,10 +266,36 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 	return insn;
 }
 
-u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
-					  enum aarch64_insn_branch_type type)
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+					u32 insn,
+					enum aarch64_insn_register reg)
+{
+	int shift;
+
+	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+		pr_err("%s: unknown register encoding %d\n", __func__, reg);
+		return 0;
+	}
+
+	switch (type) {
+	case AARCH64_INSN_REGTYPE_RT:
+		shift = 0;
+		break;
+	default:
+		pr_err("%s: unknown register type encoding %d\n", __func__,
+		       type);
+		return 0;
+	}
+
+	insn &= ~(GENMASK(4, 0) << shift);
+	insn |= reg << shift;
+
+	return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+				     long range)
 {
-	u32 insn;
 	long offset;
 
 	/*
@@ -276,13 +304,24 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 	 */
 	BUG_ON((pc & 0x3) || (addr & 0x3));
 
+	offset = ((long)addr - (long)pc);
+	BUG_ON(offset < -range || offset >= range);
+
+	return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+					  enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
 	/*
 	 * B/BL support [-128M, 128M) offset
 	 * ARM64 virtual address arrangement guarantees all kernel and module
 	 * texts are within +/-128M.
 	 */
-	offset = ((long)addr - (long)pc);
-	BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+	offset = branch_imm_common(pc, addr, SZ_128M);
 
 	if (type == AARCH64_INSN_BRANCH_LINK)
 		insn = aarch64_insn_get_bl_value();
@@ -293,6 +332,43 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 					     offset >> 2);
 }
 
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
+	offset = branch_imm_common(pc, addr, SZ_1M);
+
+	switch (type) {
+	case AARCH64_INSN_BRANCH_COMP_ZERO:
+		insn = aarch64_insn_get_cbz_value();
+		break;
+	case AARCH64_INSN_BRANCH_COMP_NONZERO:
+		insn = aarch64_insn_get_cbnz_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+					     offset >> 2);
+}
+
 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
 {
 	return aarch64_insn_get_hint_value() | op;
-- 
1.9.1



* [PATCH RFCv3 02/14] arm64: introduce aarch64_insn_gen_branch_reg()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
  2014-07-15  6:24 ` [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 03/14] arm64: introduce aarch64_insn_gen_cond_branch_imm() Zi Shen Lim
                   ` (12 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate unconditional branch (register)
instructions.
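
For example (an illustrative sketch of the intended usage):

	/* blr x16 -- indirect call through x16 */
	u32 call = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
					       AARCH64_INSN_BRANCH_LINK);

	/* ret -- returns via x30, the link register */
	u32 ret = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
					      AARCH64_INSN_BRANCH_RETURN);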

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h |  7 +++++++
 arch/arm64/kernel/insn.c      | 35 +++++++++++++++++++++++++++++++++--
 2 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index a98c495..5080962 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -71,6 +71,7 @@ enum aarch64_insn_imm_type {
 
 enum aarch64_insn_register_type {
 	AARCH64_INSN_REGTYPE_RT,
+	AARCH64_INSN_REGTYPE_RN,
 };
 
 enum aarch64_insn_register {
@@ -119,6 +120,7 @@ enum aarch64_insn_variant {
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
+	AARCH64_INSN_BRANCH_RETURN,
 	AARCH64_INSN_BRANCH_COMP_ZERO,
 	AARCH64_INSN_BRANCH_COMP_NONZERO,
 };
@@ -138,6 +140,9 @@ __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
 __AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
 __AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
+__AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
 
 #undef	__AARCH64_INSN_FUNCS
 
@@ -156,6 +161,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
 				     enum aarch64_insn_branch_type type);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+				enum aarch64_insn_branch_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index d01bb4e..50193d5 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -281,6 +281,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 	case AARCH64_INSN_REGTYPE_RT:
 		shift = 0;
 		break;
+	case AARCH64_INSN_REGTYPE_RN:
+		shift = 5;
+		break;
 	default:
 		pr_err("%s: unknown register type encoding %d\n", __func__,
 		       type);
@@ -323,10 +326,16 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 	 */
 	offset = branch_imm_common(pc, addr, SZ_128M);
 
-	if (type == AARCH64_INSN_BRANCH_LINK)
+	switch (type) {
+	case AARCH64_INSN_BRANCH_LINK:
 		insn = aarch64_insn_get_bl_value();
-	else
+		break;
+	case AARCH64_INSN_BRANCH_NOLINK:
 		insn = aarch64_insn_get_b_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 					     offset >> 2);
@@ -378,3 +387,25 @@ u32 __kprobes aarch64_insn_gen_nop(void)
 {
 	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
 }
+
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+				enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_BRANCH_NOLINK:
+		insn = aarch64_insn_get_br_value();
+		break;
+	case AARCH64_INSN_BRANCH_LINK:
+		insn = aarch64_insn_get_blr_value();
+		break;
+	case AARCH64_INSN_BRANCH_RETURN:
+		insn = aarch64_insn_get_ret_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
+}
-- 
1.9.1



* [PATCH RFCv3 03/14] arm64: introduce aarch64_insn_gen_cond_branch_imm()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
  2014-07-15  6:24 ` [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm() Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 02/14] arm64: introduce aarch64_insn_gen_branch_reg() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 04/14] arm64: introduce aarch64_insn_gen_load_store_reg() Zi Shen Lim
                   ` (11 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate conditional branch (immediate)
instructions.
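
For example, "b.eq tgt" could be emitted as follows (an illustrative
sketch; "pc" and "tgt" stand for hypothetical 4-byte-aligned
addresses within the +/-1MB range enforced below):

	/* b.eq tgt -- pc/tgt are assumed, not defined here */
	u32 insn = aarch64_insn_gen_cond_branch_imm(pc, tgt,
						    AARCH64_INSN_COND_EQ);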

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 21 +++++++++++++++++++++
 arch/arm64/kernel/insn.c      | 17 +++++++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 5080962..86a8a9c 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -117,6 +117,24 @@ enum aarch64_insn_variant {
 	AARCH64_INSN_VARIANT_64BIT
 };
 
+enum aarch64_insn_condition {
+	AARCH64_INSN_COND_EQ = 0x0, /* == */
+	AARCH64_INSN_COND_NE = 0x1, /* != */
+	AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
+	AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
+	AARCH64_INSN_COND_MI = 0x4, /* < 0 */
+	AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
+	AARCH64_INSN_COND_VS = 0x6, /* overflow */
+	AARCH64_INSN_COND_VC = 0x7, /* no overflow */
+	AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
+	AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
+	AARCH64_INSN_COND_GE = 0xa, /* signed >= */
+	AARCH64_INSN_COND_LT = 0xb, /* signed < */
+	AARCH64_INSN_COND_GT = 0xc, /* signed > */
+	AARCH64_INSN_COND_LE = 0xd, /* signed <= */
+	AARCH64_INSN_COND_AL = 0xe, /* always */
+};
+
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
@@ -135,6 +153,7 @@ __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
 __AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(bcond,	0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
@@ -159,6 +178,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
 				     enum aarch64_insn_register reg,
 				     enum aarch64_insn_variant variant,
 				     enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_condition cond);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 50193d5..f2cf8ab 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -378,6 +378,23 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
 					     offset >> 2);
 }
 
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_condition cond)
+{
+	u32 insn;
+	long offset;
+
+	offset = branch_imm_common(pc, addr, SZ_1M);
+
+	insn = aarch64_insn_get_bcond_value();
+
+	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+	insn |= cond;
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+					     offset >> 2);
+}
+
 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
 {
 	return aarch64_insn_get_hint_value() | op;
-- 
1.9.1



* [PATCH RFCv3 04/14] arm64: introduce aarch64_insn_gen_load_store_reg()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (2 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 03/14] arm64: introduce aarch64_insn_gen_cond_branch_imm() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 05/14] arm64: introduce aarch64_insn_gen_load_store_pair() Zi Shen Lim
                   ` (10 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate load/store (register offset)
instructions.
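
For example, "ldr x0, [x1, x2]" could be emitted as follows (an
illustrative sketch of the intended usage):

	/* ldr x0, [x1, x2] -- 64-bit load, register offset */
	u32 insn = aarch64_insn_gen_load_store_reg(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1, AARCH64_INSN_REG_2,
			AARCH64_INSN_SIZE_64,
			AARCH64_INSN_LDST_LOAD_REG_OFFSET);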

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 20 ++++++++++++++
 arch/arm64/kernel/insn.c      | 62 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 86a8a9c..5bc1cc3 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -72,6 +72,7 @@ enum aarch64_insn_imm_type {
 enum aarch64_insn_register_type {
 	AARCH64_INSN_REGTYPE_RT,
 	AARCH64_INSN_REGTYPE_RN,
+	AARCH64_INSN_REGTYPE_RM,
 };
 
 enum aarch64_insn_register {
@@ -143,12 +144,26 @@ enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_COMP_NONZERO,
 };
 
+enum aarch64_insn_size_type {
+	AARCH64_INSN_SIZE_8,
+	AARCH64_INSN_SIZE_16,
+	AARCH64_INSN_SIZE_32,
+	AARCH64_INSN_SIZE_64,
+};
+
+enum aarch64_insn_ldst_type {
+	AARCH64_INSN_LDST_LOAD_REG_OFFSET,
+	AARCH64_INSN_LDST_STORE_REG_OFFSET,
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
 static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 { return (val); }
 
+__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -184,6 +199,11 @@ u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+				    enum aarch64_insn_register base,
+				    enum aarch64_insn_register offset,
+				    enum aarch64_insn_size_type size,
+				    enum aarch64_insn_ldst_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index f2cf8ab..447ad63 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -284,6 +284,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 	case AARCH64_INSN_REGTYPE_RN:
 		shift = 5;
 		break;
+	case AARCH64_INSN_REGTYPE_RM:
+		shift = 16;
+		break;
 	default:
 		pr_err("%s: unknown register type encoding %d\n", __func__,
 		       type);
@@ -296,6 +299,35 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 	return insn;
 }
 
+static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
+					 u32 insn)
+{
+	u32 size;
+
+	switch (type) {
+	case AARCH64_INSN_SIZE_8:
+		size = 0;
+		break;
+	case AARCH64_INSN_SIZE_16:
+		size = 1;
+		break;
+	case AARCH64_INSN_SIZE_32:
+		size = 2;
+		break;
+	case AARCH64_INSN_SIZE_64:
+		size = 3;
+		break;
+	default:
+		pr_err("%s: unknown size encoding %d\n", __func__, type);
+		return 0;
+	}
+
+	insn &= ~GENMASK(31, 30);
+	insn |= size << 30;
+
+	return insn;
+}
+
 static inline long branch_imm_common(unsigned long pc, unsigned long addr,
 				     long range)
 {
@@ -426,3 +458,33 @@ u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
 }
+
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+				    enum aarch64_insn_register base,
+				    enum aarch64_insn_register offset,
+				    enum aarch64_insn_size_type size,
+				    enum aarch64_insn_ldst_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
+		insn = aarch64_insn_get_ldr_reg_value();
+		break;
+	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
+		insn = aarch64_insn_get_str_reg_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_ldst_size(size, insn);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    base);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+					    offset);
+}
-- 
1.9.1



* [PATCH RFCv3 05/14] arm64: introduce aarch64_insn_gen_load_store_pair()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (3 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 04/14] arm64: introduce aarch64_insn_gen_load_store_reg() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 06/14] arm64: introduce aarch64_insn_gen_add_sub_imm() Zi Shen Lim
                   ` (9 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate load/store pair instructions.
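
For example, a prologue could save the frame pointer and link
register with "stp x29, x30, [sp, #-16]!" (an illustrative sketch
of the intended usage):

	/* stp x29, x30, [sp, #-16]! -- pre-index; the offset must
	 * be a multiple of 8 within [-512, 504], as checked below
	 */
	u32 insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
			AARCH64_INSN_REG_LR, AARCH64_INSN_REG_SP, -16,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);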

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 16 +++++++++++
 arch/arm64/kernel/insn.c      | 65 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 5bc1cc3..eef8f1e 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -66,12 +66,14 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_14,
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
+	AARCH64_INSN_IMM_7,
 	AARCH64_INSN_IMM_MAX
 };
 
 enum aarch64_insn_register_type {
 	AARCH64_INSN_REGTYPE_RT,
 	AARCH64_INSN_REGTYPE_RN,
+	AARCH64_INSN_REGTYPE_RT2,
 	AARCH64_INSN_REGTYPE_RM,
 };
 
@@ -154,6 +156,10 @@ enum aarch64_insn_size_type {
 enum aarch64_insn_ldst_type {
 	AARCH64_INSN_LDST_LOAD_REG_OFFSET,
 	AARCH64_INSN_LDST_STORE_REG_OFFSET,
+	AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
 };
 
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
@@ -164,6 +170,10 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 
 __AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
 __AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
+__AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
+__AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
+__AARCH64_INSN_FUNCS(ldp_pre,	0x7FC00000, 0x29C00000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -204,6 +214,12 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
 				    enum aarch64_insn_register offset,
 				    enum aarch64_insn_size_type size,
 				    enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+				     enum aarch64_insn_register reg2,
+				     enum aarch64_insn_register base,
+				     int offset,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_ldst_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 447ad63..e01e789 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -253,6 +253,10 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 		mask = BIT(9) - 1;
 		shift = 12;
 		break;
+	case AARCH64_INSN_IMM_7:
+		mask = BIT(7) - 1;
+		shift = 15;
+		break;
 	default:
 		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
 			type);
@@ -284,6 +288,9 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 	case AARCH64_INSN_REGTYPE_RN:
 		shift = 5;
 		break;
+	case AARCH64_INSN_REGTYPE_RT2:
+		shift = 10;
+		break;
 	case AARCH64_INSN_REGTYPE_RM:
 		shift = 16;
 		break;
@@ -488,3 +495,61 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
 					    offset);
 }
+
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+				     enum aarch64_insn_register reg2,
+				     enum aarch64_insn_register base,
+				     int offset,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_ldst_type type)
+{
+	u32 insn;
+	int shift;
+
+	switch (type) {
+	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
+		insn = aarch64_insn_get_ldp_pre_value();
+		break;
+	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
+		insn = aarch64_insn_get_stp_pre_value();
+		break;
+	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
+		insn = aarch64_insn_get_ldp_post_value();
+		break;
+	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
+		insn = aarch64_insn_get_stp_post_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		/* offset must be a multiple of 4 in the range [-256, 252] */
+		BUG_ON(offset & 0x3);
+		BUG_ON(offset < -256 || offset > 252);
+		shift = 2;
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		/* offset must be a multiple of 8 in the range [-512, 504] */
+		BUG_ON(offset & 0x7);
+		BUG_ON(offset < -512 || offset > 504);
+		shift = 3;
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+					    reg1);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
+					    reg2);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    base);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
+					     offset >> shift);
+}
-- 
1.9.1



* [PATCH RFCv3 06/14] arm64: introduce aarch64_insn_gen_add_sub_imm()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (4 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 05/14] arm64: introduce aarch64_insn_gen_load_store_pair() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 07/14] arm64: introduce aarch64_insn_gen_bitfield() Zi Shen Lim
                   ` (8 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate add/subtract (immediate) instructions.
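
For example, "add x0, x1, #16" could be emitted as follows (an
illustrative sketch; the immediate must fall in [0, 4095], as
checked below):

	/* add x0, x1, #16 */
	u32 insn = aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1, 16,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_ADSB_ADD);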

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 16 ++++++++++++++++
 arch/arm64/kernel/insn.c      | 44 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index eef8f1e..29386aa 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -75,6 +75,7 @@ enum aarch64_insn_register_type {
 	AARCH64_INSN_REGTYPE_RN,
 	AARCH64_INSN_REGTYPE_RT2,
 	AARCH64_INSN_REGTYPE_RM,
+	AARCH64_INSN_REGTYPE_RD,
 };
 
 enum aarch64_insn_register {
@@ -162,6 +163,13 @@ enum aarch64_insn_ldst_type {
 	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
 };
 
+enum aarch64_insn_adsb_type {
+	AARCH64_INSN_ADSB_ADD,
+	AARCH64_INSN_ADSB_SUB,
+	AARCH64_INSN_ADSB_ADD_SETFLAGS,
+	AARCH64_INSN_ADSB_SUB_SETFLAGS
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -174,6 +182,10 @@ __AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
 __AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
 __AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
 __AARCH64_INSN_FUNCS(ldp_pre,	0x7FC00000, 0x29C00000)
+__AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
+__AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
+__AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
+__AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -220,6 +232,10 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
 				     int offset,
 				     enum aarch64_insn_variant variant,
 				     enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+				 enum aarch64_insn_register src,
+				 int imm, enum aarch64_insn_variant variant,
+				 enum aarch64_insn_adsb_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e01e789..e11acb7 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -283,6 +283,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 
 	switch (type) {
 	case AARCH64_INSN_REGTYPE_RT:
+	case AARCH64_INSN_REGTYPE_RD:
 		shift = 0;
 		break;
 	case AARCH64_INSN_REGTYPE_RN:
@@ -553,3 +554,46 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
 					     offset >> shift);
 }
+
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+				 enum aarch64_insn_register src,
+				 int imm, enum aarch64_insn_variant variant,
+				 enum aarch64_insn_adsb_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_ADSB_ADD:
+		insn = aarch64_insn_get_add_imm_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB:
+		insn = aarch64_insn_get_sub_imm_value();
+		break;
+	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+		insn = aarch64_insn_get_adds_imm_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+		insn = aarch64_insn_get_subs_imm_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	BUG_ON(imm < 0 || imm > 4095);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
+}
-- 
1.9.1



* [PATCH RFCv3 07/14] arm64: introduce aarch64_insn_gen_bitfield()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (5 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 06/14] arm64: introduce aarch64_insn_gen_add_sub_imm() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide() Zi Shen Lim
                   ` (7 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate bitfield instructions.
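
For example, a logical shift right by 8 ("lsr x0, x1, #8", an alias
of "ubfm x0, x1, #8, #63") could be emitted as follows (an
illustrative sketch of the intended usage):

	/* lsr x0, x1, #8 == ubfm x0, x1, #8, #63 */
	u32 insn = aarch64_insn_gen_bitfield(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1, 8, 63,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_BITFIELD_MOVE_UNSIGNED);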

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 16 +++++++++++++
 arch/arm64/kernel/insn.c      | 56 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 29386aa..8fd31fc 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -67,6 +67,8 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
 	AARCH64_INSN_IMM_7,
+	AARCH64_INSN_IMM_S,
+	AARCH64_INSN_IMM_R,
 	AARCH64_INSN_IMM_MAX
 };
 
@@ -170,6 +172,12 @@ enum aarch64_insn_adsb_type {
 	AARCH64_INSN_ADSB_SUB_SETFLAGS
 };
 
+enum aarch64_insn_bitfield_type {
+	AARCH64_INSN_BITFIELD_MOVE,
+	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
+	AARCH64_INSN_BITFIELD_MOVE_SIGNED
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -186,6 +194,9 @@ __AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
 __AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
 __AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
 __AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
+__AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -236,6 +247,11 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
 				 enum aarch64_insn_register src,
 				 int imm, enum aarch64_insn_variant variant,
 				 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+			      enum aarch64_insn_register src,
+			      int immr, int imms,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_bitfield_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e11acb7..01ed35c 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -257,6 +257,14 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 		mask = BIT(7) - 1;
 		shift = 15;
 		break;
+	case AARCH64_INSN_IMM_S:
+		mask = BIT(6) - 1;
+		shift = 10;
+		break;
+	case AARCH64_INSN_IMM_R:
+		mask = BIT(6) - 1;
+		shift = 16;
+		break;
 	default:
 		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
 			type);
@@ -597,3 +605,51 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
 }
+
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+			      enum aarch64_insn_register src,
+			      int immr, int imms,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_bitfield_type type)
+{
+	u32 insn;
+	u32 mask;
+
+	switch (type) {
+	case AARCH64_INSN_BITFIELD_MOVE:
+		insn = aarch64_insn_get_bfm_value();
+		break;
+	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
+		insn = aarch64_insn_get_ubfm_value();
+		break;
+	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
+		insn = aarch64_insn_get_sbfm_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		mask = GENMASK(4, 0);
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		insn |= BIT(22);
+		mask = GENMASK(5, 0);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	BUG_ON(immr & ~mask);
+	BUG_ON(imms & ~mask);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
-- 
1.9.1



* [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (6 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 07/14] arm64: introduce aarch64_insn_gen_bitfield() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-16 16:17   ` Will Deacon
  2014-07-15  6:25 ` [PATCH RFCv3 09/14] arm64: introduce aarch64_insn_gen_add_sub_shifted_reg() Zi Shen Lim
                   ` (6 subsequent siblings)
  14 siblings, 1 reply; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate move wide (immediate) instructions.
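
For example, loading bits [31:16] of a 64-bit register while zeroing
the rest ("movz x0, #0x1234, lsl #16") could be emitted as follows
(an illustrative sketch; the immediate must fall in [0, 65535] and
the shift must be a multiple of 16 permitted by the variant):

	/* movz x0, #0x1234, lsl #16 */
	u32 insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
			0x1234, 16,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_MOVEWIDE_ZERO);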

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 13 +++++++++++++
 arch/arm64/kernel/insn.c      | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 8fd31fc..49dec28 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -172,6 +172,12 @@ enum aarch64_insn_adsb_type {
 	AARCH64_INSN_ADSB_SUB_SETFLAGS
 };
 
+enum aarch64_insn_movewide_type {
+	AARCH64_INSN_MOVEWIDE_ZERO,
+	AARCH64_INSN_MOVEWIDE_KEEP,
+	AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
 enum aarch64_insn_bitfield_type {
 	AARCH64_INSN_BITFIELD_MOVE,
 	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
@@ -194,9 +200,12 @@ __AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
 __AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
 __AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
 __AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn,	0x7F800000, 0x12800000)
 __AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
 __AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
 __AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -252,6 +261,10 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
 			      int immr, int imms,
 			      enum aarch64_insn_variant variant,
 			      enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 01ed35c..1cb94b4 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -653,3 +653,46 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
 }
+
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_MOVEWIDE_ZERO:
+		insn = aarch64_insn_get_movz_value();
+		break;
+	case AARCH64_INSN_MOVEWIDE_KEEP:
+		insn = aarch64_insn_get_movk_value();
+		break;
+	case AARCH64_INSN_MOVEWIDE_INVERSE:
+		insn = aarch64_insn_get_movn_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	BUG_ON(imm < 0 || imm > 65535);
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		BUG_ON(shift != 0 && shift != 16);
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
+		       shift != 48);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn |= (shift >> 4) << 21;
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+}
-- 
1.9.1



* [PATCH RFCv3 09/14] arm64: introduce aarch64_insn_gen_add_sub_shifted_reg()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (7 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 10/14] arm64: introduce aarch64_insn_gen_data1() Zi Shen Lim
                   ` (5 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate add/subtract (shifted register)
instructions.
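
For example, "add x0, x1, x2" (with no shift applied to the second
source register) could be emitted as follows (an illustrative sketch
of the intended usage):

	/* add x0, x1, x2 */
	u32 insn = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1, AARCH64_INSN_REG_2, 0,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_ADSB_ADD);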

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 11 ++++++++++
 arch/arm64/kernel/insn.c      | 49 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 49dec28..c0a765d 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -67,6 +67,7 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
 	AARCH64_INSN_IMM_7,
+	AARCH64_INSN_IMM_6,
 	AARCH64_INSN_IMM_S,
 	AARCH64_INSN_IMM_R,
 	AARCH64_INSN_IMM_MAX
@@ -206,6 +207,10 @@ __AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
 __AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
 __AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
 __AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -265,6 +270,12 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
 			      int imm, int shift,
 			      enum aarch64_insn_variant variant,
 			      enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 1cb94b4..e24cb13 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -257,6 +257,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 		mask = BIT(7) - 1;
 		shift = 15;
 		break;
+	case AARCH64_INSN_IMM_6:
 	case AARCH64_INSN_IMM_S:
 		mask = BIT(6) - 1;
 		shift = 10;
@@ -696,3 +697,51 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
 }
+
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_ADSB_ADD:
+		insn = aarch64_insn_get_add_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB:
+		insn = aarch64_insn_get_sub_value();
+		break;
+	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+		insn = aarch64_insn_get_adds_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+		insn = aarch64_insn_get_subs_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		BUG_ON(shift < 0 || shift > 31);
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		BUG_ON(shift < 0 || shift > 63);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
-- 
1.9.1



* [PATCH RFCv3 10/14] arm64: introduce aarch64_insn_gen_data1()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (8 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 09/14] arm64: introduce aarch64_insn_gen_add_sub_shifted_reg() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 11/14] arm64: introduce aarch64_insn_gen_data2() Zi Shen Lim
                   ` (4 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate data-processing (1 source) instructions.
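
For example, byte-swapping a full 64-bit register ("rev x0, x1")
could be emitted as follows (an illustrative sketch of the intended
usage):

	/* rev x0, x1 -- reverse all eight bytes */
	u32 insn = aarch64_insn_gen_data1(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_DATA1_REVERSE_64);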

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 13 +++++++++++++
 arch/arm64/kernel/insn.c      | 37 +++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index c0a765d..246d214 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -185,6 +185,12 @@ enum aarch64_insn_bitfield_type {
 	AARCH64_INSN_BITFIELD_MOVE_SIGNED
 };
 
+enum aarch64_insn_data1_type {
+	AARCH64_INSN_DATA1_REVERSE_16,
+	AARCH64_INSN_DATA1_REVERSE_32,
+	AARCH64_INSN_DATA1_REVERSE_64,
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -211,6 +217,9 @@ __AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
 __AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
 __AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
 __AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
+__AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
+__AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -276,6 +285,10 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
 					 int shift,
 					 enum aarch64_insn_variant variant,
 					 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data1_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e24cb13..49a9528 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -745,3 +745,40 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
 }
+
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data1_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_DATA1_REVERSE_16:
+		insn = aarch64_insn_get_rev16_value();
+		break;
+	case AARCH64_INSN_DATA1_REVERSE_32:
+		insn = aarch64_insn_get_rev32_value();
+		break;
+	case AARCH64_INSN_DATA1_REVERSE_64:
+		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
+		insn = aarch64_insn_get_rev64_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+}
-- 
1.9.1



* [PATCH RFCv3 11/14] arm64: introduce aarch64_insn_gen_data2()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (9 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 10/14] arm64: introduce aarch64_insn_gen_data1() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 12/14] arm64: introduce aarch64_insn_gen_data3() Zi Shen Lim
                   ` (3 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate data-processing (2 source) instructions.
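
For example, "udiv x0, x1, x2" could be emitted as follows (an
illustrative sketch of the intended usage):

	/* udiv x0, x1, x2 -- unsigned x0 = x1 / x2 */
	u32 insn = aarch64_insn_gen_data2(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_1, AARCH64_INSN_REG_2,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_DATA2_UDIV);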

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 20 ++++++++++++++++++
 arch/arm64/kernel/insn.c      | 48 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 246d214..367245f 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -191,6 +191,15 @@ enum aarch64_insn_data1_type {
 	AARCH64_INSN_DATA1_REVERSE_64,
 };
 
+enum aarch64_insn_data2_type {
+	AARCH64_INSN_DATA2_UDIV,
+	AARCH64_INSN_DATA2_SDIV,
+	AARCH64_INSN_DATA2_LSLV,
+	AARCH64_INSN_DATA2_LSRV,
+	AARCH64_INSN_DATA2_ASRV,
+	AARCH64_INSN_DATA2_RORV,
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -217,6 +226,12 @@ __AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
 __AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
 __AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
 __AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(udiv,	0x7FE0FC00, 0x1AC00800)
+__AARCH64_INSN_FUNCS(sdiv,	0x7FE0FC00, 0x1AC00C00)
+__AARCH64_INSN_FUNCS(lslv,	0x7FE0FC00, 0x1AC02000)
+__AARCH64_INSN_FUNCS(lsrv,	0x7FE0FC00, 0x1AC02400)
+__AARCH64_INSN_FUNCS(asrv,	0x7FE0FC00, 0x1AC02800)
+__AARCH64_INSN_FUNCS(rorv,	0x7FE0FC00, 0x1AC02C00)
 __AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
 __AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
 __AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
@@ -289,6 +304,11 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
 			   enum aarch64_insn_register src,
 			   enum aarch64_insn_variant variant,
 			   enum aarch64_insn_data1_type type);
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data2_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 49a9528..7778b18 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -782,3 +782,51 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
 }
+
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data2_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_DATA2_UDIV:
+		insn = aarch64_insn_get_udiv_value();
+		break;
+	case AARCH64_INSN_DATA2_SDIV:
+		insn = aarch64_insn_get_sdiv_value();
+		break;
+	case AARCH64_INSN_DATA2_LSLV:
+		insn = aarch64_insn_get_lslv_value();
+		break;
+	case AARCH64_INSN_DATA2_LSRV:
+		insn = aarch64_insn_get_lsrv_value();
+		break;
+	case AARCH64_INSN_DATA2_ASRV:
+		insn = aarch64_insn_get_asrv_value();
+		break;
+	case AARCH64_INSN_DATA2_RORV:
+		insn = aarch64_insn_get_rorv_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+}
-- 
1.9.1



* [PATCH RFCv3 12/14] arm64: introduce aarch64_insn_gen_data3()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (10 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 11/14] arm64: introduce aarch64_insn_gen_data2() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 13/14] arm64: introduce aarch64_insn_gen_logical_shifted_reg() Zi Shen Lim
                   ` (2 subsequent siblings)
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce a function to generate data-processing (3 source) instructions.
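
For example, a plain multiply ("mul x0, x1, x2", an alias of
"madd x0, x1, x2, xzr") could be emitted as follows (an illustrative
sketch; note that the second argument maps to Ra):

	/* mul x0, x1, x2 == madd x0, x1, x2, xzr */
	u32 insn = aarch64_insn_gen_data3(AARCH64_INSN_REG_0,
			AARCH64_INSN_REG_ZR,
			AARCH64_INSN_REG_1, AARCH64_INSN_REG_2,
			AARCH64_INSN_VARIANT_64BIT,
			AARCH64_INSN_DATA3_MADD);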

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 14 ++++++++++++++
 arch/arm64/kernel/insn.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 367245f..36e8465 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -79,6 +79,7 @@ enum aarch64_insn_register_type {
 	AARCH64_INSN_REGTYPE_RT2,
 	AARCH64_INSN_REGTYPE_RM,
 	AARCH64_INSN_REGTYPE_RD,
+	AARCH64_INSN_REGTYPE_RA,
 };
 
 enum aarch64_insn_register {
@@ -200,6 +201,11 @@ enum aarch64_insn_data2_type {
 	AARCH64_INSN_DATA2_RORV,
 };
 
+enum aarch64_insn_data3_type {
+	AARCH64_INSN_DATA3_MADD,
+	AARCH64_INSN_DATA3_MSUB,
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -226,6 +232,8 @@ __AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
 __AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
 __AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
 __AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(madd,	0x7FE08000, 0x1B000000)
+__AARCH64_INSN_FUNCS(msub,	0x7FE08000, 0x1B008000)
 __AARCH64_INSN_FUNCS(udiv,	0x7FE0FC00, 0x1AC00800)
 __AARCH64_INSN_FUNCS(sdiv,	0x7FE0FC00, 0x1AC00C00)
 __AARCH64_INSN_FUNCS(lslv,	0x7FE0FC00, 0x1AC02000)
@@ -309,6 +317,12 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
 			   enum aarch64_insn_register reg,
 			   enum aarch64_insn_variant variant,
 			   enum aarch64_insn_data2_type type);
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg1,
+			   enum aarch64_insn_register reg2,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data3_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7778b18..01664e3 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -299,6 +299,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 		shift = 5;
 		break;
 	case AARCH64_INSN_REGTYPE_RT2:
+	case AARCH64_INSN_REGTYPE_RA:
 		shift = 10;
 		break;
 	case AARCH64_INSN_REGTYPE_RM:
@@ -830,3 +831,44 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
 }
+
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg1,
+			   enum aarch64_insn_register reg2,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data3_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_DATA3_MADD:
+		insn = aarch64_insn_get_madd_value();
+		break;
+	case AARCH64_INSN_DATA3_MSUB:
+		insn = aarch64_insn_get_msub_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    reg1);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
+					    reg2);
+}
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread
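
For illustration (not part of the patch): the JIT later builds MUL from
this generator by passing the zero register as the addend, i.e.
Rd = ZR + Rn * Rm:

	/* mul x0, x1, x2  ==  madd x0, x1, x2, xzr */
	u32 insn = aarch64_insn_gen_data3(AARCH64_INSN_REG_0,
					  AARCH64_INSN_REG_ZR,
					  AARCH64_INSN_REG_1,
					  AARCH64_INSN_REG_2,
					  AARCH64_INSN_VARIANT_64BIT,
					  AARCH64_INSN_DATA3_MADD);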

* [PATCH RFCv3 13/14] arm64: introduce aarch64_insn_gen_logical_shifted_reg()
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (11 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 12/14] arm64: introduce aarch64_insn_gen_data3() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-15  6:25 ` [PATCH RFCv3 14/14] arm64: eBPF JIT compiler Zi Shen Lim
  2014-07-16 10:41 ` [PATCH RFCv3 00/14] " Will Deacon
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, Alexei Starovoitov
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

Introduce function to generate logical (shifted register)
instructions.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 25 ++++++++++++++++++
 arch/arm64/kernel/insn.c      | 60 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 36e8465..56a9e63 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -206,6 +206,17 @@ enum aarch64_insn_data3_type {
 	AARCH64_INSN_DATA3_MSUB,
 };
 
+enum aarch64_insn_logic_type {
+	AARCH64_INSN_LOGIC_AND,
+	AARCH64_INSN_LOGIC_BIC,
+	AARCH64_INSN_LOGIC_ORR,
+	AARCH64_INSN_LOGIC_ORN,
+	AARCH64_INSN_LOGIC_EOR,
+	AARCH64_INSN_LOGIC_EON,
+	AARCH64_INSN_LOGIC_AND_SETFLAGS,
+	AARCH64_INSN_LOGIC_BIC_SETFLAGS
+};
+
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
 static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 { return (code & (mask)) == (val); } \
@@ -243,6 +254,14 @@ __AARCH64_INSN_FUNCS(rorv,	0x7FE0FC00, 0x1AC02C00)
 __AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
 __AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
 __AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
+__AARCH64_INSN_FUNCS(and,	0x7F200000, 0x0A000000)
+__AARCH64_INSN_FUNCS(bic,	0x7F200000, 0x0A200000)
+__AARCH64_INSN_FUNCS(orr,	0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(orn,	0x7F200000, 0x2A200000)
+__AARCH64_INSN_FUNCS(eor,	0x7F200000, 0x4A000000)
+__AARCH64_INSN_FUNCS(eon,	0x7F200000, 0x4A200000)
+__AARCH64_INSN_FUNCS(ands,	0x7F200000, 0x6A000000)
+__AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -323,6 +342,12 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
 			   enum aarch64_insn_register reg2,
 			   enum aarch64_insn_variant variant,
 			   enum aarch64_insn_data3_type type);
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_logic_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 01664e3..22b07dc 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -872,3 +872,63 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
 					    reg2);
 }
+
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_logic_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_LOGIC_AND:
+		insn = aarch64_insn_get_and_value();
+		break;
+	case AARCH64_INSN_LOGIC_BIC:
+		insn = aarch64_insn_get_bic_value();
+		break;
+	case AARCH64_INSN_LOGIC_ORR:
+		insn = aarch64_insn_get_orr_value();
+		break;
+	case AARCH64_INSN_LOGIC_ORN:
+		insn = aarch64_insn_get_orn_value();
+		break;
+	case AARCH64_INSN_LOGIC_EOR:
+		insn = aarch64_insn_get_eor_value();
+		break;
+	case AARCH64_INSN_LOGIC_EON:
+		insn = aarch64_insn_get_eon_value();
+		break;
+	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+		insn = aarch64_insn_get_ands_value();
+		break;
+	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
+		insn = aarch64_insn_get_bics_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		BUG_ON(shift < 0 || shift > 31);
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		BUG_ON(shift < 0 || shift > 63);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread
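
For illustration (not part of the patch): the JIT builds TST from this
generator as ANDS with the zero register as the destination:

	/* tst x1, x2  ==  ands xzr, x1, x2 */
	u32 insn = aarch64_insn_gen_logical_shifted_reg(AARCH64_INSN_REG_ZR,
					AARCH64_INSN_REG_1,
					AARCH64_INSN_REG_2, 0,
					AARCH64_INSN_VARIANT_64BIT,
					AARCH64_INSN_LOGIC_AND_SETFLAGS);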

* [PATCH RFCv3 14/14] arm64: eBPF JIT compiler
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (12 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 13/14] arm64: introduce aarch64_insn_gen_logical_shifted_reg() Zi Shen Lim
@ 2014-07-15  6:25 ` Zi Shen Lim
  2014-07-16 10:41 ` [PATCH RFCv3 00/14] " Will Deacon
  14 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-15  6:25 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, David S. Miller, Daniel Borkmann,
	Alexei Starovoitov, Chema Gonzalez
  Cc: Zi Shen Lim, linux-kernel, linux-arm-kernel, netdev

The JIT compiler emits A64 instructions. It supports eBPF only.
Legacy BPF is supported thanks to conversion by BPF core.

JIT is enabled in the same way as for other architectures:

	echo 1 > /proc/sys/net/core/bpf_jit_enable

Or for additional compiler output:

	echo 2 > /proc/sys/net/core/bpf_jit_enable

See Documentation/networking/filter.txt for more information.

The implementation passes all 57 tests in lib/test_bpf.c
on ARMv8 Foundation Model :)

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>

---
RFCv2->RFCv3:

  - clarify 16B stack alignment requirement - I missed one reference
  - fixed a couple checks for immediate bits
  - make bpf_jit.h checkpatch clean
  - remove stale DW case in LD_IND and LD_ABS (good catch by Alexei)
  - add Alexei's Acked-by
  - rebase onto net-next

  Also, per discussion with Will, consolidated bpf_jit.h into
  arch/arm64/.../insn.{c,h}:
  - instruction encoding stuff moved into arch/arm64/kernel/insn.c
  - bpf_jit.h uses arch/arm64/include/asm/insn.h

RFCv1->RFCv2:

  Addressed review comments from Alexei:
  - use core-$(CONFIG_NET)
  - use GENMASK
  - lower-case function names in header file
  - drop LD_ABS+DW and LD_IND+DW, which do not exist in eBPF yet
  - use pr_xxx_once() to prevent spamming logs
  - clarify 16B stack alignment requirement
  - drop usage of EMIT macro which was saving just one argument,
    turns out having additional argument wasn't too much of an eyesore

  Also, per discussion with Alexei, and additional suggestion from
  Daniel:
  - moved load_pointer() from net/core/filter.c into filter.h
    as bpf_load_pointer()
  which is done as a separate preparatory patch. [1]

[1] http://patchwork.ozlabs.org/patch/366906/

NOTES:

* The preparatory patch [1] has been merged into net-next
  9f12fbe603f7 ("net: filter: move load_pointer() into filter.h").

* This patch applies on top of net-next @ 0854a7f13206
  ("Merge branch 'amd811e-cleanups'")

* bpf_jit_comp.c and bpf_jit.h is checkpatch clean.

* The following sparse warning is not applicable:
  warning: symbol 'bpf_jit_enable' was not declared. Should it be static?

FUTURE WORK:

1. Implement remaining classes of eBPF instructions: ST|MEM, STX|XADD
   which currently do not have corresponding test cases in test_bpf.

2. Further compiler optimization, such as optimization for small
   immediates.
---
 Documentation/networking/filter.txt |   2 +-
 arch/arm64/Kconfig                  |   1 +
 arch/arm64/Makefile                 |   1 +
 arch/arm64/net/Makefile             |   4 +
 arch/arm64/net/bpf_jit.h            | 169 +++++++++
 arch/arm64/net/bpf_jit_comp.c       | 677 ++++++++++++++++++++++++++++++++++++
 6 files changed, 853 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/net/Makefile
 create mode 100644 arch/arm64/net/bpf_jit.h
 create mode 100644 arch/arm64/net/bpf_jit_comp.c

diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index ee78eba..d71e616 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -462,7 +462,7 @@ JIT compiler
 ------------
 
 The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
-ARM and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
+ARM, ARM64 and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler is
 transparently invoked for each attached filter from user space or for internal
 kernel users if it has been previously enabled by root:
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de34..b0a4ff8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -32,6 +32,7 @@ config ARM64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_C_RECORDMCOUNT
+	select HAVE_BPF_JIT
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 8185a91..9820fa7 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -43,6 +43,7 @@ TEXT_OFFSET := 0x00080000
 export	TEXT_OFFSET GZFLAGS
 
 core-y		+= arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_NET) += arch/arm64/net/
 core-$(CONFIG_KVM) += arch/arm64/kvm/
 core-$(CONFIG_XEN) += arch/arm64/xen/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile
new file mode 100644
index 0000000..da97633
--- /dev/null
+++ b/arch/arm64/net/Makefile
@@ -0,0 +1,4 @@
+#
+# ARM64 networking code
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
new file mode 100644
index 0000000..2134f7e
--- /dev/null
+++ b/arch/arm64/net/bpf_jit.h
@@ -0,0 +1,169 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <asm/insn.h>
+
+/* 5-bit Register Operand */
+#define A64_R(x)	AARCH64_INSN_REG_##x
+#define A64_FP		AARCH64_INSN_REG_FP
+#define A64_LR		AARCH64_INSN_REG_LR
+#define A64_ZR		AARCH64_INSN_REG_ZR
+#define A64_SP		AARCH64_INSN_REG_SP
+
+#define A64_VARIANT(sf) \
+	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
+
+/* Compare & branch (immediate) */
+#define A64_COMP_BRANCH(sf, Rt, offset, type) \
+	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+		AARCH64_INSN_BRANCH_COMP_##type)
+#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+
+/* Conditional branch (immediate) */
+#define A64_COND_BRANCH(cond, offset) \
+	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
+#define A64_COND_EQ	AARCH64_INSN_COND_EQ /* == */
+#define A64_COND_NE	AARCH64_INSN_COND_NE /* != */
+#define A64_COND_CS	AARCH64_INSN_COND_CS /* unsigned >= */
+#define A64_COND_HI	AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_GE	AARCH64_INSN_COND_GE /* signed >= */
+#define A64_COND_GT	AARCH64_INSN_COND_GT /* signed > */
+#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
+
+/* Unconditional branch (immediate) */
+#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
+	AARCH64_INSN_BRANCH_##type)
+#define A64_B(imm26)  A64_BRANCH((imm26) << 2, NOLINK)
+#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
+
+/* Unconditional branch (register) */
+#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
+#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
+
+/* Load/store register (register offset) */
+#define A64_LS_REG(Rt, Rn, Rm, size, type) \
+	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
+		AARCH64_INSN_SIZE_##size, \
+		AARCH64_INSN_LDST_##type##_REG_OFFSET)
+#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
+#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
+#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
+#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
+#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
+#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
+#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
+#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
+
+/* Load/store register pair */
+#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
+	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
+		AARCH64_INSN_VARIANT_64BIT, \
+		AARCH64_INSN_LDST_##ls##_PAIR_##type)
+/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
+#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
+/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
+#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
+
+/* Add/subtract (immediate) */
+#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP imm12 */
+#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
+#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+/* Rd = Rn */
+#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
+
+/* Bitfield move */
+#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
+	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
+		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
+/* Signed, with sign replication to left and zeros to right */
+#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
+/* Unsigned, with zeros to left and right */
+#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
+
+/* Rd = Rn << shift */
+#define A64_LSL(sf, Rd, Rn, shift) ({	\
+	int sz = (sf) ? 64 : 32;	\
+	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
+})
+/* Rd = Rn >> shift */
+#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+/* Rd = Rn >> shift; signed */
+#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+
+/* Move wide (immediate) */
+#define A64_MOVEW(sf, Rd, imm16, shift, type) \
+	aarch64_insn_gen_movewide(Rd, imm16, shift, \
+		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
+/* Rd = Zeros (for MOVZ);
+ * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
+ * Rd = ~Rd; (for MOVN); */
+#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
+#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
+#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
+
+/* Add/subtract (shifted register) */
+#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
+	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
+		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
+/* Rd = Rn OP Rm */
+#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
+#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
+#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
+/* Rd = -Rm */
+#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
+/* Rn - Rm; set condition flags */
+#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
+
+/* Data-processing (1 source) */
+#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
+	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
+/* Rd = BSWAPx(Rn) */
+#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
+#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
+#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)
+
+/* Data-processing (2 source) */
+/* Rd = Rn OP Rm */
+#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \
+	A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV)
+
+/* Data-processing (3 source) */
+/* Rd = Ra + Rn * Rm */
+#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
+	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
+/* Rd = Rn * Rm */
+#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
+
+/* Logical (shifted register) */
+#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
+	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
+		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
+/* Rd = Rn OP Rm */
+#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
+#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
+#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
+#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
+/* Rn & Rm; set condition flags */
+#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+
+#endif /* _BPF_JIT_H */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
new file mode 100644
index 0000000..85f318f
--- /dev/null
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -0,0 +1,677 @@
+/*
+ * BPF JIT compiler for ARM64
+ *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "bpf_jit: " fmt
+
+#include <linux/filter.h>
+#include <linux/moduleloader.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+
+#include "bpf_jit.h"
+
+int bpf_jit_enable __read_mostly;
+
+#define TMP_REG_1 (MAX_BPF_REG + 0)
+#define TMP_REG_2 (MAX_BPF_REG + 1)
+
+/* Map BPF registers to A64 registers */
+static const int bpf2a64[] = {
+	/* return value from in-kernel function, and exit value from eBPF */
+	[BPF_REG_0] = A64_R(7),
+	/* arguments from eBPF program to in-kernel function */
+	[BPF_REG_1] = A64_R(0),
+	[BPF_REG_2] = A64_R(1),
+	[BPF_REG_3] = A64_R(2),
+	[BPF_REG_4] = A64_R(3),
+	[BPF_REG_5] = A64_R(4),
+	/* callee saved registers that in-kernel function will preserve */
+	[BPF_REG_6] = A64_R(19),
+	[BPF_REG_7] = A64_R(20),
+	[BPF_REG_8] = A64_R(21),
+	[BPF_REG_9] = A64_R(22),
+	/* read-only frame pointer to access stack */
+	[BPF_REG_FP] = A64_FP,
+	/* temporary register for internal BPF JIT */
+	[TMP_REG_1] = A64_R(23),
+	[TMP_REG_2] = A64_R(24),
+};
+
+struct jit_ctx {
+	const struct sk_filter *prog;
+	int idx;
+	int tmp_used;
+	int body_offset;
+	int *offset;
+	u32 *image;
+};
+
+static inline void emit(const u32 insn, struct jit_ctx *ctx)
+{
+	if (ctx->image != NULL)
+		ctx->image[ctx->idx] = cpu_to_le32(insn);
+
+	ctx->idx++;
+}
+
+static inline void emit_a64_mov_i64(const int reg, const u64 val,
+				    struct jit_ctx *ctx)
+{
+	u64 tmp = val;
+	int shift = 0;
+
+	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+	tmp >>= 16;
+	shift += 16;
+	while (tmp) {
+		if (tmp & 0xffff)
+			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+		tmp >>= 16;
+		shift += 16;
+	}
+}
+
+static inline void emit_a64_mov_i(const int is64, const int reg,
+				  const s32 val, struct jit_ctx *ctx)
+{
+	u16 hi = val >> 16;
+	u16 lo = val & 0xffff;
+
+	if (hi & 0x8000) {
+		if (hi == 0xffff) {
+			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
+		} else {
+			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
+			emit(A64_MOVK(is64, reg, lo, 0), ctx);
+		}
+	} else {
+		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
+		if (hi)
+			emit(A64_MOVK(is64, reg, hi, 16), ctx);
+	}
+}
+
+static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+				 const struct jit_ctx *ctx)
+{
+	int to = ctx->offset[bpf_to + 1];
+	/* -1 to account for the Branch instruction */
+	int from = ctx->offset[bpf_from + 1] - 1;
+
+	return to - from;
+}
+
+static inline int epilogue_offset(const struct jit_ctx *ctx)
+{
+	int to = ctx->offset[ctx->prog->len - 1];
+	int from = ctx->idx - ctx->body_offset;
+
+	return to - from;
+}
+
+/* Stack must be multiples of 16B */
+#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+	const u8 r6 = bpf2a64[BPF_REG_6];
+	const u8 r7 = bpf2a64[BPF_REG_7];
+	const u8 r8 = bpf2a64[BPF_REG_8];
+	const u8 r9 = bpf2a64[BPF_REG_9];
+	const u8 fp = bpf2a64[BPF_REG_FP];
+	const u8 ra = bpf2a64[BPF_REG_A];
+	const u8 rx = bpf2a64[BPF_REG_X];
+	const u8 tmp1 = bpf2a64[TMP_REG_1];
+	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	int stack_size = MAX_BPF_STACK;
+
+	stack_size += 4; /* extra for skb_copy_bits buffer */
+	stack_size = STACK_ALIGN(stack_size);
+
+	/* Save callee-saved register */
+	emit(A64_PUSH(r6, r7, A64_SP), ctx);
+	emit(A64_PUSH(r8, r9, A64_SP), ctx);
+	if (ctx->tmp_used)
+		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
+
+	/* Set up BPF stack */
+	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+	/* Set up frame pointer */
+	emit(A64_MOV(1, fp, A64_SP), ctx);
+
+	/* Clear registers A and X */
+	emit_a64_mov_i64(ra, 0, ctx);
+	emit_a64_mov_i64(rx, 0, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+	const u8 r0 = bpf2a64[BPF_REG_0];
+	const u8 r6 = bpf2a64[BPF_REG_6];
+	const u8 r7 = bpf2a64[BPF_REG_7];
+	const u8 r8 = bpf2a64[BPF_REG_8];
+	const u8 r9 = bpf2a64[BPF_REG_9];
+	const u8 fp = bpf2a64[BPF_REG_FP];
+	const u8 tmp1 = bpf2a64[TMP_REG_1];
+	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	int stack_size = MAX_BPF_STACK;
+
+	stack_size += 4; /* extra for skb_copy_bits buffer */
+	stack_size = STACK_ALIGN(stack_size);
+
+	/* We're done with BPF stack */
+	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+
+	/* Restore callee-saved register */
+	if (ctx->tmp_used)
+		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
+	emit(A64_POP(r8, r9, A64_SP), ctx);
+	emit(A64_POP(r6, r7, A64_SP), ctx);
+
+	/* Restore frame pointer */
+	emit(A64_MOV(1, fp, A64_SP), ctx);
+
+	/* Set return value */
+	emit(A64_MOV(1, A64_R(0), r0), ctx);
+
+	emit(A64_RET(A64_LR), ctx);
+}
+
+static int build_insn(const struct sock_filter_int *insn, struct jit_ctx *ctx)
+{
+	const u8 code = insn->code;
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const s16 off = insn->off;
+	const s32 imm = insn->imm;
+	const int i = insn - ctx->prog->insnsi;
+	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+	u8 jmp_cond;
+	s32 jmp_offset;
+
+	switch (code) {
+	/* dst = src */
+	case BPF_ALU | BPF_MOV | BPF_X:
+	case BPF_ALU64 | BPF_MOV | BPF_X:
+		emit(A64_MOV(is64, dst, src), ctx);
+		break;
+	/* dst = dst OP src */
+	case BPF_ALU | BPF_ADD | BPF_X:
+	case BPF_ALU64 | BPF_ADD | BPF_X:
+		emit(A64_ADD(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_SUB | BPF_X:
+	case BPF_ALU64 | BPF_SUB | BPF_X:
+		emit(A64_SUB(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_AND | BPF_X:
+	case BPF_ALU64 | BPF_AND | BPF_X:
+		emit(A64_AND(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_OR | BPF_X:
+	case BPF_ALU64 | BPF_OR | BPF_X:
+		emit(A64_ORR(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_XOR | BPF_X:
+	case BPF_ALU64 | BPF_XOR | BPF_X:
+		emit(A64_EOR(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_MUL | BPF_X:
+	case BPF_ALU64 | BPF_MUL | BPF_X:
+		emit(A64_MUL(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_DIV | BPF_X:
+	case BPF_ALU64 | BPF_DIV | BPF_X:
+		emit(A64_UDIV(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_MOD | BPF_X:
+	case BPF_ALU64 | BPF_MOD | BPF_X:
+		ctx->tmp_used = 1;
+		emit(A64_UDIV(is64, tmp, dst, src), ctx);
+		emit(A64_MUL(is64, tmp, tmp, src), ctx);
+		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+		break;
+	/* dst = -dst */
+	case BPF_ALU | BPF_NEG:
+	case BPF_ALU64 | BPF_NEG:
+		emit(A64_NEG(is64, dst, dst), ctx);
+		break;
+	/* dst = BSWAP##imm(dst) */
+	case BPF_ALU | BPF_END | BPF_FROM_LE:
+	case BPF_ALU | BPF_END | BPF_FROM_BE:
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		if (BPF_SRC(code) == BPF_FROM_BE)
+			break;
+#else /* !CONFIG_CPU_BIG_ENDIAN */
+		if (BPF_SRC(code) == BPF_FROM_LE)
+			break;
+#endif
+		switch (imm) {
+		case 16:
+			emit(A64_REV16(is64, dst, dst), ctx);
+			break;
+		case 32:
+			emit(A64_REV32(is64, dst, dst), ctx);
+			break;
+		case 64:
+			emit(A64_REV64(dst, dst), ctx);
+			break;
+		}
+		break;
+	/* dst = imm */
+	case BPF_ALU | BPF_MOV | BPF_K:
+	case BPF_ALU64 | BPF_MOV | BPF_K:
+		emit_a64_mov_i(is64, dst, imm, ctx);
+		break;
+	/* dst = dst OP imm */
+	case BPF_ALU | BPF_ADD | BPF_K:
+	case BPF_ALU64 | BPF_ADD | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ADD(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_SUB | BPF_K:
+	case BPF_ALU64 | BPF_SUB | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_AND | BPF_K:
+	case BPF_ALU64 | BPF_AND | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_AND(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_OR | BPF_K:
+	case BPF_ALU64 | BPF_OR | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ORR(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_XOR | BPF_K:
+	case BPF_ALU64 | BPF_XOR | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_EOR(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_MUL | BPF_K:
+	case BPF_ALU64 | BPF_MUL | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_MUL(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_DIV | BPF_K:
+	case BPF_ALU64 | BPF_DIV | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_MOD | BPF_K:
+	case BPF_ALU64 | BPF_MOD | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(is64, tmp2, imm, ctx);
+		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
+		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
+		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+		break;
+	case BPF_ALU | BPF_LSH | BPF_K:
+	case BPF_ALU64 | BPF_LSH | BPF_K:
+		emit(A64_LSL(is64, dst, dst, imm), ctx);
+		break;
+	case BPF_ALU | BPF_RSH | BPF_K:
+	case BPF_ALU64 | BPF_RSH | BPF_K:
+		emit(A64_LSR(is64, dst, dst, imm), ctx);
+		break;
+	case BPF_ALU | BPF_ARSH | BPF_K:
+	case BPF_ALU64 | BPF_ARSH | BPF_K:
+		emit(A64_ASR(is64, dst, dst, imm), ctx);
+		break;
+
+#define check_imm(bits, imm) do {				\
+	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+			i, imm, imm);				\
+		return -EINVAL;					\
+	}							\
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
+	/* JUMP off */
+	case BPF_JMP | BPF_JA:
+		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		break;
+	/* IF (dst COND src) JUMP off */
+	case BPF_JMP | BPF_JEQ | BPF_X:
+	case BPF_JMP | BPF_JGT | BPF_X:
+	case BPF_JMP | BPF_JGE | BPF_X:
+	case BPF_JMP | BPF_JNE | BPF_X:
+	case BPF_JMP | BPF_JSGT | BPF_X:
+	case BPF_JMP | BPF_JSGE | BPF_X:
+		emit(A64_CMP(1, dst, src), ctx);
+emit_cond_jmp:
+		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+		check_imm19(jmp_offset);
+		switch (BPF_OP(code)) {
+		case BPF_JEQ:
+			jmp_cond = A64_COND_EQ;
+			break;
+		case BPF_JGT:
+			jmp_cond = A64_COND_HI;
+			break;
+		case BPF_JGE:
+			jmp_cond = A64_COND_CS;
+			break;
+		case BPF_JNE:
+			jmp_cond = A64_COND_NE;
+			break;
+		case BPF_JSGT:
+			jmp_cond = A64_COND_GT;
+			break;
+		case BPF_JSGE:
+			jmp_cond = A64_COND_GE;
+			break;
+		default:
+			return -EFAULT;
+		}
+		emit(A64_B_(jmp_cond, jmp_offset), ctx);
+		break;
+	case BPF_JMP | BPF_JSET | BPF_X:
+		emit(A64_TST(1, dst, src), ctx);
+		goto emit_cond_jmp;
+	/* IF (dst COND imm) JUMP off */
+	case BPF_JMP | BPF_JEQ | BPF_K:
+	case BPF_JMP | BPF_JGT | BPF_K:
+	case BPF_JMP | BPF_JGE | BPF_K:
+	case BPF_JMP | BPF_JNE | BPF_K:
+	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP | BPF_JSGE | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(1, tmp, imm, ctx);
+		emit(A64_CMP(1, dst, tmp), ctx);
+		goto emit_cond_jmp;
+	case BPF_JMP | BPF_JSET | BPF_K:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(1, tmp, imm, ctx);
+		emit(A64_TST(1, dst, tmp), ctx);
+		goto emit_cond_jmp;
+	/* function call */
+	case BPF_JMP | BPF_CALL:
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0];
+		const u64 func = (u64)__bpf_call_base + imm;
+
+		ctx->tmp_used = 1;
+		emit_a64_mov_i64(tmp, func, ctx);
+		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+		emit(A64_BLR(tmp), ctx);
+		emit(A64_MOV(1, r0, A64_R(0)), ctx);
+		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+		break;
+	}
+	/* function return */
+	case BPF_JMP | BPF_EXIT:
+		if (i == ctx->prog->len - 1)
+			break;
+		jmp_offset = epilogue_offset(ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		break;
+
+	/* LDX: dst = *(size *)(src + off) */
+	case BPF_LDX | BPF_MEM | BPF_W:
+	case BPF_LDX | BPF_MEM | BPF_H:
+	case BPF_LDX | BPF_MEM | BPF_B:
+	case BPF_LDX | BPF_MEM | BPF_DW:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(1, tmp, off, ctx);
+		switch (BPF_SIZE(code)) {
+		case BPF_W:
+			emit(A64_LDR32(dst, src, tmp), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDRH(dst, src, tmp), ctx);
+			break;
+		case BPF_B:
+			emit(A64_LDRB(dst, src, tmp), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDR64(dst, src, tmp), ctx);
+			break;
+		}
+		break;
+
+	/* ST: *(size *)(dst + off) = imm */
+	case BPF_ST | BPF_MEM | BPF_W:
+	case BPF_ST | BPF_MEM | BPF_H:
+	case BPF_ST | BPF_MEM | BPF_B:
+	case BPF_ST | BPF_MEM | BPF_DW:
+		goto notyet;
+
+	/* STX: *(size *)(dst + off) = src */
+	case BPF_STX | BPF_MEM | BPF_W:
+	case BPF_STX | BPF_MEM | BPF_H:
+	case BPF_STX | BPF_MEM | BPF_B:
+	case BPF_STX | BPF_MEM | BPF_DW:
+		ctx->tmp_used = 1;
+		emit_a64_mov_i(1, tmp, off, ctx);
+		switch (BPF_SIZE(code)) {
+		case BPF_W:
+			emit(A64_STR32(src, dst, tmp), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STRH(src, dst, tmp), ctx);
+			break;
+		case BPF_B:
+			emit(A64_STRB(src, dst, tmp), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STR64(src, dst, tmp), ctx);
+			break;
+		}
+		break;
+	/* STX XADD: lock *(u32 *)(dst + off) += src */
+	case BPF_STX | BPF_XADD | BPF_W:
+	/* STX XADD: lock *(u64 *)(dst + off) += src */
+	case BPF_STX | BPF_XADD | BPF_DW:
+		goto notyet;
+
+	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+	case BPF_LD | BPF_ABS | BPF_W:
+	case BPF_LD | BPF_ABS | BPF_H:
+	case BPF_LD | BPF_ABS | BPF_B:
+	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
+	case BPF_LD | BPF_IND | BPF_W:
+	case BPF_LD | BPF_IND | BPF_H:
+	case BPF_LD | BPF_IND | BPF_B:
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
+		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
+		const u8 fp = bpf2a64[BPF_REG_FP];
+		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
+		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
+		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
+		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
+		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
+		int size;
+
+		emit(A64_MOV(1, r1, r6), ctx);
+		emit_a64_mov_i(0, r2, imm, ctx);
+		if (BPF_MODE(code) == BPF_IND)
+			emit(A64_ADD(0, r2, r2, src), ctx);
+		switch (BPF_SIZE(code)) {
+		case BPF_W:
+			size = 4;
+			break;
+		case BPF_H:
+			size = 2;
+			break;
+		case BPF_B:
+			size = 1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		emit_a64_mov_i64(r3, size, ctx);
+		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
+		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
+		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+		emit(A64_BLR(r5), ctx);
+		emit(A64_MOV(1, r0, A64_R(0)), ctx);
+		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
+
+		jmp_offset = epilogue_offset(ctx);
+		check_imm19(jmp_offset);
+		emit(A64_CBZ(1, r0, jmp_offset), ctx);
+		emit(A64_MOV(1, r5, r0), ctx);
+		switch (BPF_SIZE(code)) {
+		case BPF_W:
+			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+			emit(A64_REV32(0, r0, r0), ctx);
+#endif
+			break;
+		case BPF_H:
+			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
+#ifndef CONFIG_CPU_BIG_ENDIAN
+			emit(A64_REV16(0, r0, r0), ctx);
+#endif
+			break;
+		case BPF_B:
+			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
+			break;
+		}
+		break;
+	}
+notyet:
+		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+		return -EFAULT;
+
+	default:
+		pr_err_once("unknown opcode %02x\n", code);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+	const struct sk_filter *prog = ctx->prog;
+	int i;
+
+	for (i = 0; i < prog->len; i++) {
+		const struct sock_filter_int *insn = &prog->insnsi[i];
+		int ret;
+
+		if (ctx->image == NULL)
+			ctx->offset[i] = ctx->idx;
+
+		ret = build_insn(insn, ctx);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+	flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+void bpf_jit_compile(struct sk_filter *prog)
+{
+	/* Nothing to do here. We support Internal BPF. */
+}
+
+void bpf_int_jit_compile(struct sk_filter *prog)
+{
+	struct jit_ctx ctx;
+	int image_size;
+
+	if (!bpf_jit_enable)
+		return;
+
+	if (!prog || !prog->len)
+		return;
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.prog = prog;
+
+	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+	if (ctx.offset == NULL)
+		return;
+
+	/* 1. Initial fake pass to compute ctx->idx. */
+
+	/* Fake pass to fill in ctx->offset. */
+	if (build_body(&ctx))
+		goto out;
+
+	build_prologue(&ctx);
+
+	build_epilogue(&ctx);
+
+	/* Now we know the actual image size. */
+	image_size = sizeof(u32) * ctx.idx;
+	ctx.image = module_alloc(image_size);
+	if (unlikely(ctx.image == NULL))
+		goto out;
+
+	/* 2. Now, the actual pass. */
+
+	ctx.idx = 0;
+	build_prologue(&ctx);
+
+	ctx.body_offset = ctx.idx;
+	if (build_body(&ctx))
+		goto out;
+
+	build_epilogue(&ctx);
+
+	/* And we're done. */
+	if (bpf_jit_enable > 1)
+		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
+
+	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+	prog->bpf_func = (void *)ctx.image;
+	prog->jited = 1;
+
+out:
+	kfree(ctx.offset);
+}
+
+void bpf_jit_free(struct sk_filter *prog)
+{
+	if (prog->jited)
+		module_free(NULL, prog->bpf_func);
+
+	kfree(prog);
+}
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread
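
To make the register mapping concrete (a worked illustration, not text
from the thread): a single eBPF instruction BPF_ALU64 | BPF_ADD | BPF_X
with dst_reg == BPF_REG_0 and src_reg == BPF_REG_1 goes through the
BPF_ADD case above and, per the bpf2a64[] table, is emitted as:

	/* dst = dst + src, with BPF_REG_0 -> x7 and BPF_REG_1 -> x0 */
	emit(A64_ADD(1, A64_R(7), A64_R(7), A64_R(0)), ctx); /* add x7, x7, x0 */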

* Re: [PATCH RFCv3 00/14] arm64: eBPF JIT compiler
  2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
                   ` (13 preceding siblings ...)
  2014-07-15  6:25 ` [PATCH RFCv3 14/14] arm64: eBPF JIT compiler Zi Shen Lim
@ 2014-07-16 10:41 ` Will Deacon
  2014-07-16 16:21   ` Will Deacon
  14 siblings, 1 reply; 31+ messages in thread
From: Will Deacon @ 2014-07-16 10:41 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, Chema Gonzalez,
	linux-kernel, linux-arm-kernel, netdev

On Tue, Jul 15, 2014 at 07:24:58AM +0100, Zi Shen Lim wrote:
> This series implements eBPF JIT compiler for arm64.
> See [14/14] for change log.
> 
> Patches [1-13/14] implement code generation functions.

Nice work, I'll take a look. Thanks!

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-15  6:24 ` [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm() Zi Shen Lim
@ 2014-07-16 16:04   ` Will Deacon
  2014-07-16 21:19     ` Zi Shen Lim
  0 siblings, 1 reply; 31+ messages in thread
From: Will Deacon @ 2014-07-16 16:04 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
> Introduce function to generate compare & branch (immediate)
> instructions.
> 
> Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
> ---
>  arch/arm64/include/asm/insn.h | 57 ++++++++++++++++++++++++++++
>  arch/arm64/kernel/insn.c      | 86 ++++++++++++++++++++++++++++++++++++++++---
>  2 files changed, 138 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
> index dc1f73b..a98c495 100644
> --- a/arch/arm64/include/asm/insn.h
> +++ b/arch/arm64/include/asm/insn.h
> @@ -2,6 +2,8 @@
>   * Copyright (C) 2013 Huawei Ltd.
>   * Author: Jiang Liu <liuj97@gmail.com>
>   *
> + * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
> + *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License version 2 as
>   * published by the Free Software Foundation.
> @@ -67,9 +69,58 @@ enum aarch64_insn_imm_type {
>  	AARCH64_INSN_IMM_MAX
>  };
>  
> +enum aarch64_insn_register_type {
> +	AARCH64_INSN_REGTYPE_RT,
> +};
> +
> +enum aarch64_insn_register {
> +	AARCH64_INSN_REG_0  = 0,
> +	AARCH64_INSN_REG_1  = 1,
> +	AARCH64_INSN_REG_2  = 2,
> +	AARCH64_INSN_REG_3  = 3,
> +	AARCH64_INSN_REG_4  = 4,
> +	AARCH64_INSN_REG_5  = 5,
> +	AARCH64_INSN_REG_6  = 6,
> +	AARCH64_INSN_REG_7  = 7,
> +	AARCH64_INSN_REG_8  = 8,
> +	AARCH64_INSN_REG_9  = 9,
> +	AARCH64_INSN_REG_10 = 10,
> +	AARCH64_INSN_REG_11 = 11,
> +	AARCH64_INSN_REG_12 = 12,
> +	AARCH64_INSN_REG_13 = 13,
> +	AARCH64_INSN_REG_14 = 14,
> +	AARCH64_INSN_REG_15 = 15,
> +	AARCH64_INSN_REG_16 = 16,
> +	AARCH64_INSN_REG_17 = 17,
> +	AARCH64_INSN_REG_18 = 18,
> +	AARCH64_INSN_REG_19 = 19,
> +	AARCH64_INSN_REG_20 = 20,
> +	AARCH64_INSN_REG_21 = 21,
> +	AARCH64_INSN_REG_22 = 22,
> +	AARCH64_INSN_REG_23 = 23,
> +	AARCH64_INSN_REG_24 = 24,
> +	AARCH64_INSN_REG_25 = 25,
> +	AARCH64_INSN_REG_26 = 26,
> +	AARCH64_INSN_REG_27 = 27,
> +	AARCH64_INSN_REG_28 = 28,
> +	AARCH64_INSN_REG_29 = 29,
> +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> +	AARCH64_INSN_REG_30 = 30,
> +	AARCH64_INSN_REG_LR = 30, /* Link register */
> +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */

Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
values like ARM64_REG_LR which are defined as the appropriate numbers?

> +};
> +
> +enum aarch64_insn_variant {
> +	AARCH64_INSN_VARIANT_32BIT,
> +	AARCH64_INSN_VARIANT_64BIT
> +};
> +
>  enum aarch64_insn_branch_type {
>  	AARCH64_INSN_BRANCH_NOLINK,
>  	AARCH64_INSN_BRANCH_LINK,
> +	AARCH64_INSN_BRANCH_COMP_ZERO,
> +	AARCH64_INSN_BRANCH_COMP_NONZERO,
>  };
>  
>  #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
> @@ -80,6 +131,8 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
>  
>  __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
>  __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
> +__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
> +__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
>  __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
>  __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
>  __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
> @@ -97,6 +150,10 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  				  u32 insn, u64 imm);
>  u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  				enum aarch64_insn_branch_type type);
> +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
> +				     enum aarch64_insn_register reg,
> +				     enum aarch64_insn_variant variant,
> +				     enum aarch64_insn_branch_type type);
>  u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
>  u32 aarch64_insn_gen_nop(void);
>  
> diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
> index 92f3683..d01bb4e 100644
> --- a/arch/arm64/kernel/insn.c
> +++ b/arch/arm64/kernel/insn.c
> @@ -2,6 +2,8 @@
>   * Copyright (C) 2013 Huawei Ltd.
>   * Author: Jiang Liu <liuj97@gmail.com>
>   *
> + * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
> + *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License version 2 as
>   * published by the Free Software Foundation.
> @@ -264,10 +266,36 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  	return insn;
>  }
>  
> -u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
> -					  enum aarch64_insn_branch_type type)
> +static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
> +					u32 insn,
> +					enum aarch64_insn_register reg)
> +{
> +	int shift;
> +
> +	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
> +		pr_err("%s: unknown register encoding %d\n", __func__, reg);
> +		return 0;
> +	}
> +
> +	switch (type) {
> +	case AARCH64_INSN_REGTYPE_RT:
> +		shift = 0;
> +		break;
> +	default:
> +		pr_err("%s: unknown register type encoding %d\n", __func__,
> +		       type);
> +		return 0;
> +	}
> +
> +	insn &= ~(GENMASK(4, 0) << shift);
> +	insn |= reg << shift;
> +
> +	return insn;
> +}
> +
> +static inline long branch_imm_common(unsigned long pc, unsigned long addr,
> +				     long range)
>  {
> -	u32 insn;
>  	long offset;
>  
>  	/*
> @@ -276,13 +304,24 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  	 */
>  	BUG_ON((pc & 0x3) || (addr & 0x3));
>  
> +	offset = ((long)addr - (long)pc);
> +	BUG_ON(offset < -range || offset >= range);
> +
> +	return offset;
> +}
> +
> +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
> +					  enum aarch64_insn_branch_type type)
> +{
> +	u32 insn;
> +	long offset;
> +
>  	/*
>  	 * B/BL support [-128M, 128M) offset
>  	 * ARM64 virtual address arrangement guarantees all kernel and module
>  	 * texts are within +/-128M.
>  	 */
> -	offset = ((long)addr - (long)pc);
> -	BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
> +	offset = branch_imm_common(pc, addr, SZ_128M);
>  
>  	if (type == AARCH64_INSN_BRANCH_LINK)
>  		insn = aarch64_insn_get_bl_value();
> @@ -293,6 +332,43 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  					     offset >> 2);
>  }
>  
> +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
> +				     enum aarch64_insn_register reg,
> +				     enum aarch64_insn_variant variant,
> +				     enum aarch64_insn_branch_type type)
> +{
> +	u32 insn;
> +	long offset;
> +
> +	offset = branch_imm_common(pc, addr, SZ_1M);
> +
> +	switch (type) {
> +	case AARCH64_INSN_BRANCH_COMP_ZERO:
> +		insn = aarch64_insn_get_cbz_value();
> +		break;
> +	case AARCH64_INSN_BRANCH_COMP_NONZERO:
> +		insn = aarch64_insn_get_cbnz_value();
> +		break;
> +	default:
> +		BUG_ON(1);
> +	}
> +
> +	switch (variant) {
> +	case AARCH64_INSN_VARIANT_32BIT:
> +		break;
> +	case AARCH64_INSN_VARIANT_64BIT:
> +		insn |= BIT(31);

FWIW, that bit (31) is referred to as the `SF' bit in the instruction
encodings (for Sixty-Four). You could have a #define for that to help people
match up the bitfield, if you like.

> +		break;
> +	default:
> +		BUG_ON(1);

Is a BUG_ON justifiable here? Is there not a nicer way to fail?

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-15  6:25 ` [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide() Zi Shen Lim
@ 2014-07-16 16:17   ` Will Deacon
  2014-07-16 16:25     ` David Laight
  2014-07-16 22:04     ` Zi Shen Lim
  0 siblings, 2 replies; 31+ messages in thread
From: Will Deacon @ 2014-07-16 16:17 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> Introduce function to generate move wide (immediate) instructions.

[...]

> +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> +			      int imm, int shift,
> +			      enum aarch64_insn_variant variant,
> +			      enum aarch64_insn_movewide_type type)
> +{
> +	u32 insn;
> +
> +	switch (type) {
> +	case AARCH64_INSN_MOVEWIDE_ZERO:
> +		insn = aarch64_insn_get_movz_value();
> +		break;
> +	case AARCH64_INSN_MOVEWIDE_KEEP:
> +		insn = aarch64_insn_get_movk_value();
> +		break;
> +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> +		insn = aarch64_insn_get_movn_value();
> +		break;
> +	default:
> +		BUG_ON(1);
> +	}
> +
> +	BUG_ON(imm < 0 || imm > 65535);

Do this check with masking instead?

> +
> +	switch (variant) {
> +	case AARCH64_INSN_VARIANT_32BIT:
> +		BUG_ON(shift != 0 && shift != 16);
> +		break;
> +	case AARCH64_INSN_VARIANT_64BIT:
> +		insn |= BIT(31);
> +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> +		       shift != 48);

Would be neater as a nested switch, perhaps? If you reorder the
outer-switch, you could probably fall-through too and combine the shift
checks.

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 00/14] arm64: eBPF JIT compiler
  2014-07-16 10:41 ` [PATCH RFCv3 00/14] " Will Deacon
@ 2014-07-16 16:21   ` Will Deacon
  2014-07-16 22:18     ` Zi Shen Lim
  0 siblings, 1 reply; 31+ messages in thread
From: Will Deacon @ 2014-07-16 16:21 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, Chema Gonzalez,
	linux-kernel, linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 11:41:53AM +0100, Will Deacon wrote:
> On Tue, Jul 15, 2014 at 07:24:58AM +0100, Zi Shen Lim wrote:
> > This series implements eBPF JIT compiler for arm64.
> > See [14/14] for change log.
> > 
> > Patches [1-13/14] implement code generation functions.
> 
> Nice work, I'll take a look. Thanks!

... and it all looks pretty damn good to me. I gave you a few in-line
comments but, rather than repeat myself for each patch, my main two gripes
were:

  - The use of BUG_ON all over the place

  - Explicit limit checks which could be done with masks

If you address those, this stuff looks pretty much ready to go from my
perspective. Do you have a branch somewhere I can play with please?

Cheers,

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-16 16:17   ` Will Deacon
@ 2014-07-16 16:25     ` David Laight
  2014-07-16 22:04     ` Zi Shen Lim
  1 sibling, 0 replies; 31+ messages in thread
From: David Laight @ 2014-07-16 16:25 UTC (permalink / raw)
  To: 'Will Deacon', Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

From: Will Deacon
...
> > +	BUG_ON(imm < 0 || imm > 65535);
> 
> Do this check with masking instead?

The compiler will convert that to a single unsigned comparison.
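
That is, the signed range test already compiles down to the equivalent of
a single check:

	BUG_ON((unsigned int)imm > 65535);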

...
> > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > +		       shift != 48);

OTOH I don't think it will convert that to:
		BUG_ON(shift & ~48);
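		/* i.e. 48 == 0b110000, so this accepts exactly {0, 16, 32, 48} */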

	David

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-16 16:04   ` Will Deacon
@ 2014-07-16 21:19     ` Zi Shen Lim
  2014-07-17  9:19       ` Will Deacon
  0 siblings, 1 reply; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-16 21:19 UTC (permalink / raw)
  To: Will Deacon
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 05:04:50PM +0100, Will Deacon wrote:
> On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
[...]
> > +enum aarch64_insn_register {
> > +	AARCH64_INSN_REG_0  = 0,
> > +	AARCH64_INSN_REG_1  = 1,
> > +	AARCH64_INSN_REG_2  = 2,
> > +	AARCH64_INSN_REG_3  = 3,
> > +	AARCH64_INSN_REG_4  = 4,
> > +	AARCH64_INSN_REG_5  = 5,
> > +	AARCH64_INSN_REG_6  = 6,
> > +	AARCH64_INSN_REG_7  = 7,
> > +	AARCH64_INSN_REG_8  = 8,
> > +	AARCH64_INSN_REG_9  = 9,
> > +	AARCH64_INSN_REG_10 = 10,
> > +	AARCH64_INSN_REG_11 = 11,
> > +	AARCH64_INSN_REG_12 = 12,
> > +	AARCH64_INSN_REG_13 = 13,
> > +	AARCH64_INSN_REG_14 = 14,
> > +	AARCH64_INSN_REG_15 = 15,
> > +	AARCH64_INSN_REG_16 = 16,
> > +	AARCH64_INSN_REG_17 = 17,
> > +	AARCH64_INSN_REG_18 = 18,
> > +	AARCH64_INSN_REG_19 = 19,
> > +	AARCH64_INSN_REG_20 = 20,
> > +	AARCH64_INSN_REG_21 = 21,
> > +	AARCH64_INSN_REG_22 = 22,
> > +	AARCH64_INSN_REG_23 = 23,
> > +	AARCH64_INSN_REG_24 = 24,
> > +	AARCH64_INSN_REG_25 = 25,
> > +	AARCH64_INSN_REG_26 = 26,
> > +	AARCH64_INSN_REG_27 = 27,
> > +	AARCH64_INSN_REG_28 = 28,
> > +	AARCH64_INSN_REG_29 = 29,
> > +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> > +	AARCH64_INSN_REG_30 = 30,
> > +	AARCH64_INSN_REG_LR = 30, /* Link register */
> > +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> > +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
> 
> Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
> values like ARM64_REG_LR which are defined as the appropriate numbers?

I actually had something like what you mentioned in the beginning, but
decided to go with the above - thinking that it's clearer to present
the complete set of valid register definitions.

The #define can still be added for convenience, though I think it's also a
potential source of errors - it's much easier to typo something like
AARCH64_INSN_REG(32) and not get caught.
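
To make the failure mode concrete (an illustrative sketch, not code from
this series): with the enum, a typo like AARCH64_INSN_REG_32 simply fails
to compile, whereas a pass-through macro

	#define AARCH64_INSN_REG(x)	(x)

accepts AARCH64_INSN_REG(32) without complaint, and the bad value is only
caught at runtime by the range check in aarch64_insn_encode_register().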

[...]
> > +	switch (variant) {
> > +	case AARCH64_INSN_VARIANT_32BIT:
> > +		break;
> > +	case AARCH64_INSN_VARIANT_64BIT:
> > +		insn |= BIT(31);
> 
> FWIW, that bit (31) is referred to as the `SF' bit in the instruction
> encodings (for Sixty-Four). You could have a #define for that to help people
> match up the bitfield, if you like.

Something like this?

	#define AARCH64_INSN_SF_BIT  BIT(31)

	...

	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;

In the case of bitfield instruction, there's also an "N" bit.
So something like this?

	#define AARCH64_INSN_N_BIT  BIT(22)

	...

	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;

> 
> > +		break;
> > +	default:
> > +		BUG_ON(1);
> 
> Is a BUG_ON justifiable here? Is there not a nicer way to fail?

In general, it'd be nice if we returned something like -EINVAL and
had all callers handle failures. Today all code gen functions return
the u32 instruction and there's no error handling by callers.
I think following the precedent (aarch64_insn_gen_branch_imm())
of failing with BUG_ON is a reasonable tradeoff.
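
For concreteness, the error-returning alternative being weighed here might
look like the following sketch (an illustration with a made-up function
name, not code from this series):

	static int aarch64_insn_gen_example(enum aarch64_insn_variant variant,
					    u32 *insnp)
	{
		u32 insn = aarch64_insn_get_cbz_value();

		switch (variant) {
		case AARCH64_INSN_VARIANT_32BIT:
			break;
		case AARCH64_INSN_VARIANT_64BIT:
			insn |= BIT(31);
			break;
		default:
			return -EINVAL;	/* caller decides how to fail */
		}

		*insnp = insn;
		return 0;
	}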

In this case here, when we hit the default (failure) case, that means
there's a serious error of attempting to use an unsupported
variant. I think we're better off failing hard here than trying to
arbitrarily "fallback" on a default choice.

One potential option instead of switch (variant) is:

	if (variant == AARCH64_INSN_VARIANT_64BIT)
		/* do something */
	else
		/* do something else */

which would be quite reasonable to do as we only have VARIANT_{32,64}BIT
today.

However, consider the case where we add VARIANT_128BIT or other flavors
in the future. The if/else option (basically defaulting to VARIANT_32BIT)
would then make much less sense.

> 
> Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-16 16:17   ` Will Deacon
  2014-07-16 16:25     ` David Laight
@ 2014-07-16 22:04     ` Zi Shen Lim
  2014-07-17  9:41       ` Will Deacon
  1 sibling, 1 reply; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-16 22:04 UTC (permalink / raw)
  To: Will Deacon
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > Introduce function to generate move wide (immediate) instructions.
> 
> [...]
> 
> > +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> > +			      int imm, int shift,
> > +			      enum aarch64_insn_variant variant,
> > +			      enum aarch64_insn_movewide_type type)
> > +{
> > +	u32 insn;
> > +
> > +	switch (type) {
> > +	case AARCH64_INSN_MOVEWIDE_ZERO:
> > +		insn = aarch64_insn_get_movz_value();
> > +		break;
> > +	case AARCH64_INSN_MOVEWIDE_KEEP:
> > +		insn = aarch64_insn_get_movk_value();
> > +		break;
> > +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> > +		insn = aarch64_insn_get_movn_value();
> > +		break;
> > +	default:
> > +		BUG_ON(1);
> > +	}
> > +
> > +	BUG_ON(imm < 0 || imm > 65535);
> 
> Do this check with masking instead?

Ok, if you prefer, I can change it to:

	BUG_ON(imm & ~GENMASK(15, 0));

> 
> > +
> > +	switch (variant) {
> > +	case AARCH64_INSN_VARIANT_32BIT:
> > +		BUG_ON(shift != 0 && shift != 16);
> > +		break;
> > +	case AARCH64_INSN_VARIANT_64BIT:
> > +		insn |= BIT(31);
> > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > +		       shift != 48);
> 
> Would be neater as a nested switch, perhaps? If you reorder the
> outer-switch, you could probably fall-through too and combine the shift
> checks.

Not sure I can picture what you had in mind... I couldn't come up with a
neater version with the properties you described.

The alternative I had was using masks instead of integer values, but
one could argue that while neater, it could also be harder to read:

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~BIT(4));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= BIT(31);
		BUG_ON(shift & ~GENMASK(5, 4));
	...
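
The masks line up because the legal shift amounts are exactly the values
representable in bits 5..4:

	/* 16 == BIT(4), 32 == BIT(5), 48 == BIT(5) | BIT(4), so:
	 * shift & ~BIT(4)        == 0  allows only {0, 16}
	 * shift & ~GENMASK(5, 4) == 0  allows only {0, 16, 32, 48}
	 */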

> 
> Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 00/14] arm64: eBPF JIT compiler
  2014-07-16 16:21   ` Will Deacon
@ 2014-07-16 22:18     ` Zi Shen Lim
  0 siblings, 0 replies; 31+ messages in thread
From: Zi Shen Lim @ 2014-07-16 22:18 UTC (permalink / raw)
  To: Will Deacon
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, Chema Gonzalez,
	linux-kernel, linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 05:21:47PM +0100, Will Deacon wrote:
> On Wed, Jul 16, 2014 at 11:41:53AM +0100, Will Deacon wrote:
> > On Tue, Jul 15, 2014 at 07:24:58AM +0100, Zi Shen Lim wrote:
> > > This series implements eBPF JIT compiler for arm64.
> > > See [14/14] for change log.
> > > 
> > > Patches [1-13/14] implement code generation functions.
> > 
> > Nice work, I'll take a look. Thanks!
> 
> ... and it all looks pretty damn good to me. I gave you a few in-line

Thanks :)

> comments but, rather than repeat myself for each patch, my main two gripes
> were:
> 
>   - The use of BUG_ON all over the place

I responded to your comment about BUG_ON in [1/14].

> 
>   - Explicit limit checks which could be done with masks

I responded to your comment about this in [8/14]. I can go ahead and
make changes throughout if you prefer.

> 
> If you address those, this stuff looks pretty much ready to go from my
> perspective. Do you have a branch somewhere I can play with please?

Just pushed it to:

	https://github.com/zlim/linux.git arm64/bpf

which is the same as this RFCv3 series, i.e. I haven't rebased it on
the latest net-next.

Thanks so much for your time and attention!
Let me know how it goes for you.

I can post this series (dropping RFC) once we've finalized the above.

Cheers,
z


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-16 21:19     ` Zi Shen Lim
@ 2014-07-17  9:19       ` Will Deacon
  2014-07-17 15:59         ` Alexei Starovoitov
  0 siblings, 1 reply; 31+ messages in thread
From: Will Deacon @ 2014-07-17  9:19 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> On Wed, Jul 16, 2014 at 05:04:50PM +0100, Will Deacon wrote:
> > On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
> [...]
> > > +enum aarch64_insn_register {
> > > +	AARCH64_INSN_REG_0  = 0,
> > > +	AARCH64_INSN_REG_1  = 1,
> > > +	AARCH64_INSN_REG_2  = 2,
> > > +	AARCH64_INSN_REG_3  = 3,
> > > +	AARCH64_INSN_REG_4  = 4,
> > > +	AARCH64_INSN_REG_5  = 5,
> > > +	AARCH64_INSN_REG_6  = 6,
> > > +	AARCH64_INSN_REG_7  = 7,
> > > +	AARCH64_INSN_REG_8  = 8,
> > > +	AARCH64_INSN_REG_9  = 9,
> > > +	AARCH64_INSN_REG_10 = 10,
> > > +	AARCH64_INSN_REG_11 = 11,
> > > +	AARCH64_INSN_REG_12 = 12,
> > > +	AARCH64_INSN_REG_13 = 13,
> > > +	AARCH64_INSN_REG_14 = 14,
> > > +	AARCH64_INSN_REG_15 = 15,
> > > +	AARCH64_INSN_REG_16 = 16,
> > > +	AARCH64_INSN_REG_17 = 17,
> > > +	AARCH64_INSN_REG_18 = 18,
> > > +	AARCH64_INSN_REG_19 = 19,
> > > +	AARCH64_INSN_REG_20 = 20,
> > > +	AARCH64_INSN_REG_21 = 21,
> > > +	AARCH64_INSN_REG_22 = 22,
> > > +	AARCH64_INSN_REG_23 = 23,
> > > +	AARCH64_INSN_REG_24 = 24,
> > > +	AARCH64_INSN_REG_25 = 25,
> > > +	AARCH64_INSN_REG_26 = 26,
> > > +	AARCH64_INSN_REG_27 = 27,
> > > +	AARCH64_INSN_REG_28 = 28,
> > > +	AARCH64_INSN_REG_29 = 29,
> > > +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> > > +	AARCH64_INSN_REG_30 = 30,
> > > +	AARCH64_INSN_REG_LR = 30, /* Link register */
> > > +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> > > +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
> > 
> > Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
> > values like ARM64_REG_LR which are defined as the appropriate numbers?
> 
> I actually had something like what you mentioned in the beginning, but
> decided to go with the above - thinking that it's clearer to present
> the complete set of valid register definitions.
> 
> The #define can still be added for convenience, though I think it's also a
> potential source of errors - it's much easier to typo something like
> AARCH64_INSN_REG(32) and not get caught.

Fair enough, that's a good enough reason to leave it like it is.

> [...]
> > > +	switch (variant) {
> > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > +		break;
> > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > +		insn |= BIT(31);
> > 
> > FWIW, that bit (31) is referred to as the `SF' bit in the instruction
> > encodings (for Sixty-Four). You could have a #define for that to help people
> > match up the bitfield, if you like.
> 
> Something like this?
> 
> 	#define AARCH64_INSN_SF_BIT  BIT(31)
> 
> 	...
> 
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= AARCH64_INSN_SF_BIT;
> 
> In the case of bitfield instruction, there's also an "N" bit.
> So something like this?
> 
> 	#define AARCH64_INSN_N_BIT  BIT(22)
> 
> 	...
> 
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;

Looks good.

> > 
> > > +		break;
> > > +	default:
> > > +		BUG_ON(1);
> > 
> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> 
> In general, it'd be nice if we returned something like -EINVAL and
> had all callers handle failures. Today all code gen functions return
> the u32 instruction and there's no error handling by callers.
> I think following the precedent (aarch64_insn_gen_branch_imm())
> of failing with BUG_ON is a reasonable tradeoff.

Well, I don't necessarily agree with that BUG_ON, either :)
I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
could generate that and avoid having to propagate errors directly to the
caller.

> In this case here, when we hit the default (failure) case, that means
> there's a serious error of attempting to use an unsupported
> variant. I think we're better off failing hard here than trying to
> arbitrarily "fall back" on a default choice.

It might be a serious error for BPF, but a BUG_ON brings down the entire
machine, which I think is unfortunate.

> 
> One potential option instead of switch (variant) is:
> 
> 	if (variant == AARCH64_INSN_VARIANT_64BIT)
> 		/* do something */
> 	else
> 		/* do something else */
> 
> which would be quite reasonable to do as we only have VARIANT_{32,64}BIT
> today.
> 
> However, consider the case where we add VARIANT_128BIT or other flavors
> in the future. The if/else option (basically defaulting to VARIANT_32BIT)
> would then make much less sense.

I don't think we need to worry about hypothetical extensions to the
instruction set at this stage.

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-16 22:04     ` Zi Shen Lim
@ 2014-07-17  9:41       ` Will Deacon
  2014-07-17  9:51         ` David Laight
  2014-07-18  5:47         ` Z Lim
  0 siblings, 2 replies; 31+ messages in thread
From: Will Deacon @ 2014-07-17  9:41 UTC (permalink / raw)
  To: Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > Introduce function to generate move wide (immediate) instructions.
> > 
> > [...]
> > 
> > > +u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
> > > +			      int imm, int shift,
> > > +			      enum aarch64_insn_variant variant,
> > > +			      enum aarch64_insn_movewide_type type)
> > > +{
> > > +	u32 insn;
> > > +
> > > +	switch (type) {
> > > +	case AARCH64_INSN_MOVEWIDE_ZERO:
> > > +		insn = aarch64_insn_get_movz_value();
> > > +		break;
> > > +	case AARCH64_INSN_MOVEWIDE_KEEP:
> > > +		insn = aarch64_insn_get_movk_value();
> > > +		break;
> > > +	case AARCH64_INSN_MOVEWIDE_INVERSE:
> > > +		insn = aarch64_insn_get_movn_value();
> > > +		break;
> > > +	default:
> > > +		BUG_ON(1);
> > > +	}
> > > +
> > > +	BUG_ON(imm < 0 || imm > 65535);
> > 
> > Do this check with masking instead?
> 
> Ok, if you prefer, I can change it to:
> 
> 	BUG_ON(imm & ~GENMASK(15, 0));

Sure, that or use a named constant for the upper-bound (SZ_64K - 1).

> > > +	switch (variant) {
> > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > +		BUG_ON(shift != 0 && shift != 16);
> > > +		break;
> > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > +		insn |= BIT(31);
> > > +		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > +		       shift != 48);
> > 
> > Would be neater as a nested switch, perhaps? If you reorder the
> > outer-switch, you could probably fall-through too and combine the shift
> > checks.
> 
> Not sure I can picture what you had in mind... I couldn't come up with a
> neater version with the properties you described.
> 
> The alternative I had was using masks instead of integer values, but
> one could argue that while neater, it could also be harder to read:
> 
> 	switch (variant) {
> 	case AARCH64_INSN_VARIANT_32BIT:
> 		BUG_ON(shift & ~BIT(4));
> 		break;
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= BIT(31);
> 		BUG_ON(shift & ~GENMASK(5, 4));
> 	...

I was thinking of using nested switches, but that doesn't fall out like I
hoped. How about:

	switch (variant) {
	case AARCH64_INSN_VARIANT_64BIT:
		BUG_ON(shift != 32 && shift != 48);
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
	};

?

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-17  9:41       ` Will Deacon
@ 2014-07-17  9:51         ` David Laight
  2014-07-18  5:47         ` Z Lim
  1 sibling, 0 replies; 31+ messages in thread
From: David Laight @ 2014-07-17  9:51 UTC (permalink / raw)
  To: 'Will Deacon', Zi Shen Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

From: Will Deacon
...
> > > > +	BUG_ON(imm < 0 || imm > 65535);
> > >
> > > Do this check with masking instead?
> >
> > Ok, if you prefer, I can change it to:
> >
> > 	BUG_ON(imm & ~GENMASK(15, 0));

Gah - then anyone reading the code has to look up another define.
There isn't a prize for the most complicated method of defining
a constant that can never change.

> Sure, that or use a named constant for the upper-bound (SZ_64K - 1).

There is nothing wrong with the original code.
Maybe use 0xffff for those people (are there any) that don't know
their powers of two.

These are strong constants: they aren't going to be changed to any
other value, and it is unlikely that anyone will want to search for
their uses.

I presume that SZ_64K is defined somewhere as 0x10000u.
But IMHO using it (instead of the literal) doesn't make the code
any more readable.

	David




^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-17  9:19       ` Will Deacon
@ 2014-07-17 15:59         ` Alexei Starovoitov
  2014-07-17 17:25           ` Will Deacon
  0 siblings, 1 reply; 31+ messages in thread
From: Alexei Starovoitov @ 2014-07-17 15:59 UTC (permalink / raw)
  To: Will Deacon
  Cc: Zi Shen Lim, Catalin Marinas, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, linux-kernel, linux-arm-kernel,
	netdev

On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
>> >
>> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
>>
>> In general, it'd be nice if we returned something like -EINVAL and
>> had all callers handle failures. Today all code gen functions return
>> the u32 instruction and there's no error handling by callers.
>> I think following the precedent (aarch64_insn_gen_branch_imm())
>> of failing with BUG_ON is a reasonable tradeoff.
>
> Well, I don't necessarily agree with that BUG_ON, either :)
> I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> could generate that and avoid having to propagate errors directly to the
> caller.
>
>> In this case here, when we hit the default (failure) case, that means
>> there's a serious error of attempting to use an unsupported
>> variant. I think we're better off failing hard here than trying to
>> arbitrarily "fall back" on a default choice.
>
> It might be a serious error for BPF, but a BUG_ON brings down the entire
> machine, which I think is unfortunate.

There is some misunderstanding here. Here BUG_ON will trigger
only on an actual bug in the JIT implementation; it cannot be triggered
by the user. The eBPF program is verified before it reaches the JIT, so
all instructions are valid and the input to the JIT is proper. Two
instructions are not yet implemented in this JIT and they trigger
pr_.._once(). So I don't see any issue with this usage of BUG_ON;
imo living with silent bugs in the JIT is more dangerous.

For the same reason there is no 'trap' instruction in eBPF.
The static verifier checks that the program is valid. If there were a
'trap' insn, the program would be rejected, just like programs with
'div by zero' are rejected. There is a normal 'bpf_exit' insn to
return from the program.
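
To illustrate the convention, the unimplemented cases bail out of the JIT
along these lines - a sketch of the usual per-insn default case (names
assumed), not a quote from the patch:

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;	/* JIT gives up; interpreter runs the program */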

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-17 15:59         ` Alexei Starovoitov
@ 2014-07-17 17:25           ` Will Deacon
  2014-07-18  5:44             ` Z Lim
  0 siblings, 1 reply; 31+ messages in thread
From: Will Deacon @ 2014-07-17 17:25 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: Zi Shen Lim, Catalin Marinas, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, linux-kernel, linux-arm-kernel,
	netdev

On Thu, Jul 17, 2014 at 04:59:10PM +0100, Alexei Starovoitov wrote:
> On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> > On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> >> >
> >> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> >>
> >> In general, it'd be nice if we returned something like -EINVAL and
> >> had all callers handle failures. Today all code gen functions return
> >> the u32 instruction and there's no error handling by callers.
> >> I think following the precedent (aarch64_insn_gen_branch_imm())
> >> of failing with BUG_ON is a reasonable tradeoff.
> >
> > Well, I don't necessarily agree with that BUG_ON, either :)
> > I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> > could generate that and avoid having to propagate errors directly to the
> > caller.
> >
> >> In this case here, when we hit the default (failure) case, that means
> >> there's a serious error of attempting to use an unsupported
> >> variant. I think we're better off failing hard here than trying to
> >> arbitrarily "fall back" on a default choice.
> >
> > It might be a serious error for BPF, but a BUG_ON brings down the entire
> > machine, which I think is unfortunate.
> 
> There is some misunderstanding here. Here BUG_ON will trigger
> only on an actual bug in the JIT implementation; it cannot be triggered
> by the user. The eBPF program is verified before it reaches the JIT, so
> all instructions are valid and the input to the JIT is proper. Two
> instructions are not yet implemented in this JIT and they trigger
> pr_.._once(). So I don't see any issue with this usage of BUG_ON;
> imo living with silent bugs in the JIT is more dangerous.
> 
> For the same reason there is no 'trap' instruction in eBPF.
> The static verifier checks that the program is valid. If there were a
> 'trap' insn, the program would be rejected, just like programs with
> 'div by zero' are rejected. There is a normal 'bpf_exit' insn to
> return from the program.

Ok, so assuming that BPF doesn't have any issues, I take your point.
However, we could very easily re-use these functions for things like SMP
alternatives and kprobes, where simply failing the instruction generation
might be acceptable.

It just feels like a big hammer to me, when the machine is probably happily
scheduling user tasks, responding to interrupts, writing data to disk, etc.

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()
  2014-07-17 17:25           ` Will Deacon
@ 2014-07-18  5:44             ` Z Lim
  0 siblings, 0 replies; 31+ messages in thread
From: Z Lim @ 2014-07-18  5:44 UTC (permalink / raw)
  To: Will Deacon
  Cc: Alexei Starovoitov, Catalin Marinas, Jiang Liu, AKASHI Takahiro,
	David S. Miller, Daniel Borkmann, linux-kernel, linux-arm-kernel,
	netdev

(resending this email in case the first one got caught in your spam
filter. sorry.)

On Thu, Jul 17, 2014 at 06:25:26PM +0100, Will Deacon wrote:
> On Thu, Jul 17, 2014 at 04:59:10PM +0100, Alexei Starovoitov wrote:
> > On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> > > On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> > >> >
> > >> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> > >>
> > >> In general, it'd be nice if we returned something like -EINVAL and
> > >> had all callers handle failures. Today all code gen functions return
> > >> the u32 instruction and there's no error handling by callers.
> > >> I think following the precedent (aarch64_insn_gen_branch_imm())
> > >> of failing with BUG_ON is a reasonable tradeoff.
> > >
> > > Well, I don't necessarily agree with that BUG_ON, either :)
> > > I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> > > could generate that and avoid having to propagate errors directly to the
> > > caller.
> > >
> > >> In this case here, when we hit the default (failure) case, that means
> > >> there's a serious error of attempting to use an unsupported
> > >> variant. I think we're better off failing hard here than trying to
> > >> arbitrarily "fall back" on a default choice.
> > >
> > > It might be a serious error for BPF, but a BUG_ON brings down the entire
> > > machine, which I think is unfortunate.
> >
> > There is some misunderstanding here. Here BUG_ON will trigger
> > only on an actual bug in the JIT implementation; it cannot be triggered
> > by the user. The eBPF program is verified before it reaches the JIT, so
> > all instructions are valid and the input to the JIT is proper. Two
> > instructions are not yet implemented in this JIT and they trigger
> > pr_.._once(). So I don't see any issue with this usage of BUG_ON;
> > imo living with silent bugs in the JIT is more dangerous.
> >
> > For the same reason there is no 'trap' instruction in eBPF.
> > The static verifier checks that the program is valid. If there were a
> > 'trap' insn, the program would be rejected, just like programs with
> > 'div by zero' are rejected. There is a normal 'bpf_exit' insn to
> > return from the program.
>
> Ok, so assuming that BPF doesn't have any issues, I take your point.
> However, we could very easily re-use these functions for things like SMP
> alternatives and kprobes, where simply failing the instruction generation
> might be acceptable.
>
> It just feels like a big hammer to me, when the machine is probably happily
> scheduling user tasks, responding to interrupts, writing data to disk, etc.

Yes, I agree with you, Will - it'd be truly unfortunate if we inadvertently
allowed the entire system to be brought down.

Alexei accurately pointed out that if we ever hit such a case, it'd be a bug
in the BPF JIT implementation (or a bug in other in-kernel implementations).

Our BPF JIT implementation actually handles this, making sure that the input
to the codegen functions is valid, or gracefully failing by not JITing and
falling back on the core BPF interpreter. This way our JIT will not trigger
the BUG_ON.

IMO, other future users of these codegen functions should do the same.
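
A minimal sketch of that fail-safe shape, with names assumed from the usual
JIT layout rather than taken from the patch:

	static void jit_compile(struct sk_filter *prog)
	{
		struct jit_ctx ctx = { .prog = prog };

		/* first pass: if any insn is unsupported, build_body()
		 * fails and we return without touching prog->bpf_func,
		 * so the core interpreter keeps running the program
		 */
		if (build_body(&ctx))
			return;

		/* ...otherwise allocate the image, emit the code for
		 * real, and point prog->bpf_func at it
		 */
	}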

An alternative would be to throw away all the BUG_ONs and have callers
check for and handle error conditions. I think this is actually more
dangerous, as callers who don't handle the error conditions properly may
end up causing a system crash later with subtle (and quite possibly hard
to debug) bugs.

>
> Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-17  9:41       ` Will Deacon
  2014-07-17  9:51         ` David Laight
@ 2014-07-18  5:47         ` Z Lim
  2014-07-18  8:43           ` Will Deacon
  1 sibling, 1 reply; 31+ messages in thread
From: Z Lim @ 2014-07-18  5:47 UTC (permalink / raw)
  To: Will Deacon
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

(resending this email in case the first one got caught in your spam
filter. sorry.)

On Thu, Jul 17, 2014 at 10:41:02AM +0100, Will Deacon wrote:
> On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> > On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > > Introduce function to generate move wide (immediate) instructions.
[...]
> > > > +       switch (variant) {
> > > > +       case AARCH64_INSN_VARIANT_32BIT:
> > > > +               BUG_ON(shift != 0 && shift != 16);
> > > > +               break;
> > > > +       case AARCH64_INSN_VARIANT_64BIT:
> > > > +               insn |= BIT(31);
> > > > +               BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > > +                      shift != 48);
> > >
> > > Would be neater as a nested switch, perhaps? If you reorder the
> > > outer-switch, you could probably fall-through too and combine the shift
> > > checks.
> >
> > Not sure I can picture what you had in mind... I couldn't come up with a
> > neater version with the properties you described.
> >
> > The alternative I had was using masks instead of integer values, but
> > one could argue that while neater, it could also be harder to read:
> >
> >     switch (variant) {
> >     case AARCH64_INSN_VARIANT_32BIT:
> >             BUG_ON(shift & ~BIT(4));
> >             break;
> >     case AARCH64_INSN_VARIANT_64BIT:
> >             insn |= BIT(31);
> >             BUG_ON(shift & ~GENMASK(5, 4));
> >     ...
>
> I was thinking of using nested switches, but that doesn't fall out like I
> hoped. How about:
>
>       switch (variant) {
>       case AARCH64_INSN_VARIANT_64BIT:
>               BUG_ON(shift != 32 && shift != 48);

Sorry, this won't work. For example, in the valid case of shift==0,
we'll barf right here - the BUG_ON fires before we ever fall through.

Shall we just leave the code as is? :)
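
If we really wanted the fallthrough form, something along these lines should
work - an untested sketch, reusing the SF define discussed in [1/14]:

	switch (variant) {
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift == 32 || shift == 48)
			break;
		/* fall through: shifts 0 and 16 are shared with 32-bit */
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	default:
		BUG_ON(1);
	}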


>       case AARCH64_INSN_VARIANT_32BIT:
>               BUG_ON(shift != 0 && shift != 16);
>       };
>
> ?
>
> Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide()
  2014-07-18  5:47         ` Z Lim
@ 2014-07-18  8:43           ` Will Deacon
  0 siblings, 0 replies; 31+ messages in thread
From: Will Deacon @ 2014-07-18  8:43 UTC (permalink / raw)
  To: Z Lim
  Cc: Catalin Marinas, Jiang Liu, AKASHI Takahiro, David S. Miller,
	Daniel Borkmann, Alexei Starovoitov, linux-kernel,
	linux-arm-kernel, netdev

On Fri, Jul 18, 2014 at 06:47:22AM +0100, Z Lim wrote:
> (resending this email in case the first one got caught in your spam
> filter. sorry.)
> 
> On Thu, Jul 17, 2014 at 10:41:02AM +0100, Will Deacon wrote:
> > On Wed, Jul 16, 2014 at 11:04:22PM +0100, Zi Shen Lim wrote:
> > > On Wed, Jul 16, 2014 at 05:17:15PM +0100, Will Deacon wrote:
> > > > On Tue, Jul 15, 2014 at 07:25:06AM +0100, Zi Shen Lim wrote:
> > > > > Introduce function to generate move wide (immediate) instructions.
> [...]
> > > > > +       switch (variant) {
> > > > > +       case AARCH64_INSN_VARIANT_32BIT:
> > > > > +               BUG_ON(shift != 0 && shift != 16);
> > > > > +               break;
> > > > > +       case AARCH64_INSN_VARIANT_64BIT:
> > > > > +               insn |= BIT(31);
> > > > > +               BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
> > > > > +                      shift != 48);
> > > >
> > > > Would be neater as a nested switch, perhaps? If you reorder the
> > > > outer-switch, you could probably fall-through too and combine the shift
> > > > checks.
> > >
> > > Not sure I can picture what you had in mind... I couldn't come up with a
> > > neater version with the properties you described.
> > >
> > > The alternative I had was using masks instead of integer values, but
> > > one could argue that while neater, it could also be harder to read:
> > >
> > >     switch (variant) {
> > >     case AARCH64_INSN_VARIANT_32BIT:
> > >             BUG_ON(shift & ~BIT(4));
> > >             break;
> > >     case AARCH64_INSN_VARIANT_64BIT:
> > >             insn |= BIT(31);
> > >             BUG_ON(shift & ~GENMASK(5, 4));
> > >     ...
> >
> > I was thinking of using nested switches, but that doesn't fall out like I
> > hoped. How about:
> >
> >       switch (variant) {
> >       case AARCH64_INSN_VARIANT_64BIT:
> >               BUG_ON(shift != 32 && shift != 48);
> 
> Sorry, this won't work. For example, in the valid case of shift==0,
> we'll barf right here - the BUG_ON fires before we ever fall through.
> 
> Shall we just leave the code as is? :)

Yeah, I'm an idiot ;)

Cheers,

Will

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread, other threads:[~2014-07-18  8:44 UTC | newest]

Thread overview: 31+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-07-15  6:24 [PATCH RFCv3 00/14] arm64: eBPF JIT compiler Zi Shen Lim
2014-07-15  6:24 ` [PATCH RFCv3 01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm() Zi Shen Lim
2014-07-16 16:04   ` Will Deacon
2014-07-16 21:19     ` Zi Shen Lim
2014-07-17  9:19       ` Will Deacon
2014-07-17 15:59         ` Alexei Starovoitov
2014-07-17 17:25           ` Will Deacon
2014-07-18  5:44             ` Z Lim
2014-07-15  6:25 ` [PATCH RFCv3 02/14] arm64: introduce aarch64_insn_gen_branch_reg() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 03/14] arm64: introduce aarch64_insn_gen_cond_branch_imm() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 04/14] arm64: introduce aarch64_insn_gen_load_store_reg() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 05/14] arm64: introduce aarch64_insn_gen_load_store_pair() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 06/14] arm64: introduce aarch64_insn_gen_add_sub_imm() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 07/14] arm64: introduce aarch64_insn_gen_bitfield() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 08/14] arm64: introduce aarch64_insn_gen_movewide() Zi Shen Lim
2014-07-16 16:17   ` Will Deacon
2014-07-16 16:25     ` David Laight
2014-07-16 22:04     ` Zi Shen Lim
2014-07-17  9:41       ` Will Deacon
2014-07-17  9:51         ` David Laight
2014-07-18  5:47         ` Z Lim
2014-07-18  8:43           ` Will Deacon
2014-07-15  6:25 ` [PATCH RFCv3 09/14] arm64: introduce aarch64_insn_gen_add_sub_shifted_reg() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 10/14] arm64: introduce aarch64_insn_gen_data1() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 11/14] arm64: introduce aarch64_insn_gen_data2() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 12/14] arm64: introduce aarch64_insn_gen_data3() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 13/14] arm64: introduce aarch64_insn_gen_logical_shifted_reg() Zi Shen Lim
2014-07-15  6:25 ` [PATCH RFCv3 14/14] arm64: eBPF JIT compiler Zi Shen Lim
2014-07-16 10:41 ` [PATCH RFCv3 00/14] " Will Deacon
2014-07-16 16:21   ` Will Deacon
2014-07-16 22:18     ` Zi Shen Lim
