Subject: [sashal-stable:pending-4.9 19/19] arch/powerpc/net/bpf_jit_comp64.c:437:11: error: implicit declaration of function 'PPC_RAW_LI'; did you mean 'PPC_RLWIMI'?
From: kernel test robot <lkp@intel.com>
Date: 2021-10-12  0:30 UTC
To: kbuild-all


tree:   https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git pending-4.9
head:   3c1965454f26344149b15a3e418597b5d8b7b1fb
commit: 3c1965454f26344149b15a3e418597b5d8b7b1fb [19/19] powerpc/bpf: Fix BPF_MOD when imm == 1
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 7.5.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git/commit/?id=3c1965454f26344149b15a3e418597b5d8b7b1fb
        git remote add sashal-stable https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git
        git fetch --no-tags sashal-stable pending-4.9
        git checkout 3c1965454f26344149b15a3e418597b5d8b7b1fb
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-7.5.0 make.cross ARCH=powerpc 
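
For the "save the attached .config" step, one way to do it (assuming the attachment was saved as config.gz alongside the build tree) is:

        zcat config.gz > .config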

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from arch/powerpc/net/bpf_jit64.h:15:0,
                    from arch/powerpc/net/bpf_jit_comp64.c:22:
   arch/powerpc/net/bpf_jit_comp64.c: In function 'bpf_jit_build_body':
>> arch/powerpc/net/bpf_jit_comp64.c:437:11: error: implicit declaration of function 'PPC_RAW_LI'; did you mean 'PPC_RLWIMI'? [-Werror=implicit-function-declaration]
         EMIT(PPC_RAW_LI(dst_reg, 0));
              ^
   arch/powerpc/net/bpf_jit.h:36:27: note: in definition of macro 'PLANT_INSTR'
     do { if (d) { (d)[idx] = instr; } idx++; } while (0)
                              ^~~~~
   arch/powerpc/net/bpf_jit_comp64.c:437:6: note: in expansion of macro 'EMIT'
         EMIT(PPC_RAW_LI(dst_reg, 0));
         ^~~~
   cc1: some warnings being treated as errors
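
PPC_RAW_LI() appears to come from the ppc-opcode.h instruction-macro rework in newer kernels and is not defined anywhere in this 4.9 tree, which is why the backported EMIT(PPC_RAW_LI(...)) form fails to build. This file already loads immediates through the older PPC_LI() wrapper (see lines 403, 417 and 656 in the excerpt below), so a 4.9-compatible sketch of line 437 would presumably be just:

		PPC_LI(dst_reg, 0);

On this branch PPC_LI() should expand to an addi from register zero and already plant the instruction itself, so no EMIT() wrapper is needed.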


vim +437 arch/powerpc/net/bpf_jit_comp64.c

   305	
   306	/* Assemble the body code between the prologue & epilogue */
   307	static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
   308				      struct codegen_context *ctx,
   309				      u32 *addrs)
   310	{
   311		const struct bpf_insn *insn = fp->insnsi;
   312		int flen = fp->len;
   313		int i;
   314	
   315		/* Start of epilogue code - will only be valid 2nd pass onwards */
   316		u32 exit_addr = addrs[flen];
   317	
   318		for (i = 0; i < flen; i++) {
   319			u32 code = insn[i].code;
   320			u32 dst_reg = b2p[insn[i].dst_reg];
   321			u32 src_reg = b2p[insn[i].src_reg];
   322			s16 off = insn[i].off;
   323			s32 imm = insn[i].imm;
   324			u64 imm64;
   325			u8 *func;
   326			u32 true_cond;
   327			u32 tmp_idx;
   328	
   329			/*
   330			 * addrs[] maps a BPF bytecode address into a real offset from
   331			 * the start of the body code.
   332			 */
   333			addrs[i] = ctx->idx * 4;
   334	
   335			/*
   336			 * As an optimization, we note down which non-volatile registers
   337			 * are used so that we can only save/restore those in our
   338			 * prologue and epilogue. We do this here regardless of whether
   339			 * the actual BPF instruction uses src/dst registers or not
   340			 * (for instance, BPF_CALL does not use them). The expectation
   341			 * is that those instructions will have src_reg/dst_reg set to
   342			 * 0. Even otherwise, we just lose some prologue/epilogue
   343			 * optimization but everything else should work without
   344			 * any issues.
   345			 */
   346			if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
   347				bpf_set_seen_register(ctx, insn[i].dst_reg);
   348			if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
   349				bpf_set_seen_register(ctx, insn[i].src_reg);
   350	
   351			switch (code) {
   352			/*
   353			 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
   354			 */
   355			case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
   356			case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
   357				PPC_ADD(dst_reg, dst_reg, src_reg);
   358				goto bpf_alu32_trunc;
   359			case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
   360			case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
   361				PPC_SUB(dst_reg, dst_reg, src_reg);
   362				goto bpf_alu32_trunc;
   363			case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
   364			case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
   365			case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
   366			case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
   367				if (BPF_OP(code) == BPF_SUB)
   368					imm = -imm;
   369				if (imm) {
   370					if (imm >= -32768 && imm < 32768)
   371						PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
   372					else {
   373						PPC_LI32(b2p[TMP_REG_1], imm);
   374						PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
   375					}
   376				}
   377				goto bpf_alu32_trunc;
   378			case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
   379			case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
   380				if (BPF_CLASS(code) == BPF_ALU)
   381					PPC_MULW(dst_reg, dst_reg, src_reg);
   382				else
   383					PPC_MULD(dst_reg, dst_reg, src_reg);
   384				goto bpf_alu32_trunc;
   385			case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
   386			case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
   387				if (imm >= -32768 && imm < 32768)
   388					PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
   389				else {
   390					PPC_LI32(b2p[TMP_REG_1], imm);
   391					if (BPF_CLASS(code) == BPF_ALU)
   392						PPC_MULW(dst_reg, dst_reg,
   393								b2p[TMP_REG_1]);
   394					else
   395						PPC_MULD(dst_reg, dst_reg,
   396								b2p[TMP_REG_1]);
   397				}
   398				goto bpf_alu32_trunc;
   399			case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
   400			case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
   401				PPC_CMPWI(src_reg, 0);
   402				PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
   403				PPC_LI(b2p[BPF_REG_0], 0);
   404				PPC_JMP(exit_addr);
   405				if (BPF_OP(code) == BPF_MOD) {
   406					PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
   407					PPC_MULW(b2p[TMP_REG_1], src_reg,
   408							b2p[TMP_REG_1]);
   409					PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
   410				} else
   411					PPC_DIVWU(dst_reg, dst_reg, src_reg);
   412				goto bpf_alu32_trunc;
   413			case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
   414			case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
   415				PPC_CMPDI(src_reg, 0);
   416				PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
   417				PPC_LI(b2p[BPF_REG_0], 0);
   418				PPC_JMP(exit_addr);
   419				if (BPF_OP(code) == BPF_MOD) {
   420					PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
   421					PPC_MULD(b2p[TMP_REG_1], src_reg,
   422							b2p[TMP_REG_1]);
   423					PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
   424				} else
   425					PPC_DIVDU(dst_reg, dst_reg, src_reg);
   426				break;
   427			case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
   428			case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
   429			case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
   430			case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
   431				if (imm == 0)
   432					return -EINVAL;
   433				if (imm == 1) {
   434					if (BPF_OP(code) == BPF_DIV) {
   435						goto bpf_alu32_trunc;
   436					} else {
 > 437						EMIT(PPC_RAW_LI(dst_reg, 0));
   438						break;
   439					}
   440				}
   441	
   442				PPC_LI32(b2p[TMP_REG_1], imm);
   443				switch (BPF_CLASS(code)) {
   444				case BPF_ALU:
   445					if (BPF_OP(code) == BPF_MOD) {
   446						PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
   447								b2p[TMP_REG_1]);
   448						PPC_MULW(b2p[TMP_REG_1],
   449								b2p[TMP_REG_1],
   450								b2p[TMP_REG_2]);
   451						PPC_SUB(dst_reg, dst_reg,
   452								b2p[TMP_REG_1]);
   453					} else
   454						PPC_DIVWU(dst_reg, dst_reg,
   455								b2p[TMP_REG_1]);
   456					break;
   457				case BPF_ALU64:
   458					if (BPF_OP(code) == BPF_MOD) {
   459						PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
   460								b2p[TMP_REG_1]);
   461						PPC_MULD(b2p[TMP_REG_1],
   462								b2p[TMP_REG_1],
   463								b2p[TMP_REG_2]);
   464						PPC_SUB(dst_reg, dst_reg,
   465								b2p[TMP_REG_1]);
   466					} else
   467						PPC_DIVDU(dst_reg, dst_reg,
   468								b2p[TMP_REG_1]);
   469					break;
   470				}
   471				goto bpf_alu32_trunc;
   472			case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
   473			case BPF_ALU64 | BPF_NEG: /* dst = -dst */
   474				PPC_NEG(dst_reg, dst_reg);
   475				goto bpf_alu32_trunc;
   476	
   477			/*
   478			 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
   479			 */
   480			case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
   481			case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
   482				PPC_AND(dst_reg, dst_reg, src_reg);
   483				goto bpf_alu32_trunc;
   484			case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
   485			case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
   486				if (!IMM_H(imm))
   487					PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
   488				else {
   489					/* Sign-extended */
   490					PPC_LI32(b2p[TMP_REG_1], imm);
   491					PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
   492				}
   493				goto bpf_alu32_trunc;
   494			case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
   495			case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
   496				PPC_OR(dst_reg, dst_reg, src_reg);
   497				goto bpf_alu32_trunc;
   498			case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
   499			case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
   500				if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
   501					/* Sign-extended */
   502					PPC_LI32(b2p[TMP_REG_1], imm);
   503					PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
   504				} else {
   505					if (IMM_L(imm))
   506						PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
   507					if (IMM_H(imm))
   508						PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
   509				}
   510				goto bpf_alu32_trunc;
   511			case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
   512			case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
   513				PPC_XOR(dst_reg, dst_reg, src_reg);
   514				goto bpf_alu32_trunc;
   515			case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
   516			case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
   517				if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
   518					/* Sign-extended */
   519					PPC_LI32(b2p[TMP_REG_1], imm);
   520					PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
   521				} else {
   522					if (IMM_L(imm))
   523						PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
   524					if (IMM_H(imm))
   525						PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
   526				}
   527				goto bpf_alu32_trunc;
   528			case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
   529				/* slw clears top 32 bits */
   530				PPC_SLW(dst_reg, dst_reg, src_reg);
   531				break;
   532			case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
   533				PPC_SLD(dst_reg, dst_reg, src_reg);
   534				break;
   535		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
   536				/* with imm 0, we still need to clear top 32 bits */
   537				PPC_SLWI(dst_reg, dst_reg, imm);
   538				break;
   539		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
   540				if (imm != 0)
   541					PPC_SLDI(dst_reg, dst_reg, imm);
   542				break;
   543			case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
   544				PPC_SRW(dst_reg, dst_reg, src_reg);
   545				break;
   546			case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
   547				PPC_SRD(dst_reg, dst_reg, src_reg);
   548				break;
   549			case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
   550				PPC_SRWI(dst_reg, dst_reg, imm);
   551				break;
   552			case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
   553				if (imm != 0)
   554					PPC_SRDI(dst_reg, dst_reg, imm);
   555				break;
   556			case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
   557				PPC_SRAD(dst_reg, dst_reg, src_reg);
   558				break;
   559			case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
   560				if (imm != 0)
   561					PPC_SRADI(dst_reg, dst_reg, imm);
   562				break;
   563	
   564			/*
   565			 * MOV
   566			 */
   567			case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
   568			case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
   569				PPC_MR(dst_reg, src_reg);
   570				goto bpf_alu32_trunc;
   571			case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
   572			case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
   573				PPC_LI32(dst_reg, imm);
   574				if (imm < 0)
   575					goto bpf_alu32_trunc;
   576				break;
   577	
   578	bpf_alu32_trunc:
   579			/* Truncate to 32-bits */
   580			if (BPF_CLASS(code) == BPF_ALU)
   581				PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
   582			break;
   583	
   584			/*
   585			 * BPF_FROM_BE/LE
   586			 */
   587			case BPF_ALU | BPF_END | BPF_FROM_LE:
   588			case BPF_ALU | BPF_END | BPF_FROM_BE:
   589	#ifdef __BIG_ENDIAN__
   590				if (BPF_SRC(code) == BPF_FROM_BE)
   591					goto emit_clear;
   592	#else /* !__BIG_ENDIAN__ */
   593				if (BPF_SRC(code) == BPF_FROM_LE)
   594					goto emit_clear;
   595	#endif
   596				switch (imm) {
   597				case 16:
   598					/* Rotate 8 bits left & mask with 0x0000ff00 */
   599					PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
   600					/* Rotate 8 bits right & insert LSB to reg */
   601					PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
   602					/* Move result back to dst_reg */
   603					PPC_MR(dst_reg, b2p[TMP_REG_1]);
   604					break;
   605				case 32:
   606					/*
   607					 * Rotate word left by 8 bits:
   608					 * 2 bytes are already in their final position
   609					 * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
   610					 */
   611					PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
   612					/* Rotate 24 bits and insert byte 1 */
   613					PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
   614					/* Rotate 24 bits and insert byte 3 */
   615					PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
   616					PPC_MR(dst_reg, b2p[TMP_REG_1]);
   617					break;
   618				case 64:
   619					/*
   620					 * Way easier and faster(?) to store the value
   621					 * onto the stack and then use ldbrx
   622					 *
   623					 * ctx->seen will be reliable in pass2, but
   624					 * the instructions generated will remain the
   625					 * same across all passes
   626					 */
   627					PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
   628					PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
   629					PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
   630					break;
   631				}
   632				break;
   633	
   634	emit_clear:
   635				switch (imm) {
   636				case 16:
   637					/* zero-extend 16 bits into 64 bits */
   638					PPC_RLDICL(dst_reg, dst_reg, 0, 48);
   639					break;
   640				case 32:
   641					/* zero-extend 32 bits into 64 bits */
   642					PPC_RLDICL(dst_reg, dst_reg, 0, 32);
   643					break;
   644				case 64:
   645					/* nop */
   646					break;
   647				}
   648				break;
   649	
   650			/*
   651			 * BPF_ST(X)
   652			 */
   653			case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
   654			case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
   655				if (BPF_CLASS(code) == BPF_ST) {
   656					PPC_LI(b2p[TMP_REG_1], imm);
   657					src_reg = b2p[TMP_REG_1];
   658				}
   659				PPC_STB(src_reg, dst_reg, off);
   660				break;
   661		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
   662		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
   663				if (BPF_CLASS(code) == BPF_ST) {
   664					PPC_LI(b2p[TMP_REG_1], imm);
   665					src_reg = b2p[TMP_REG_1];
   666				}
   667				PPC_STH(src_reg, dst_reg, off);
   668				break;
   669			case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
   670			case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
   671				if (BPF_CLASS(code) == BPF_ST) {
   672					PPC_LI32(b2p[TMP_REG_1], imm);
   673					src_reg = b2p[TMP_REG_1];
   674				}
   675				PPC_STW(src_reg, dst_reg, off);
   676				break;
   677		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
   678			case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
   679				if (BPF_CLASS(code) == BPF_ST) {
   680					PPC_LI32(b2p[TMP_REG_1], imm);
   681					src_reg = b2p[TMP_REG_1];
   682				}
   683				PPC_BPF_STL(src_reg, dst_reg, off);
   684				break;
   685	
   686			/*
   687			 * BPF_STX XADD (atomic_add)
   688			 */
   689			/* *(u32 *)(dst + off) += src */
   690			case BPF_STX | BPF_XADD | BPF_W:
   691				/* Get EA into TMP_REG_1 */
   692				PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
   693				tmp_idx = ctx->idx * 4;
   694				/* load value from memory into TMP_REG_2 */
   695				PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
   696				/* add value from src_reg into this */
   697				PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
   698				/* store result back */
   699				PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
   700				/* we're done if this succeeded */
   701				PPC_BCC_SHORT(COND_NE, tmp_idx);
   702				break;
   703			/* *(u64 *)(dst + off) += src */
   704			case BPF_STX | BPF_XADD | BPF_DW:
   705				PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
   706				tmp_idx = ctx->idx * 4;
   707				PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
   708				PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
   709				PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
   710				PPC_BCC_SHORT(COND_NE, tmp_idx);
   711				break;
   712	
   713			/*
   714			 * BPF_LDX
   715			 */
   716			/* dst = *(u8 *)(ul) (src + off) */
   717			case BPF_LDX | BPF_MEM | BPF_B:
   718				PPC_LBZ(dst_reg, src_reg, off);
   719				break;
   720			/* dst = *(u16 *)(ul) (src + off) */
   721			case BPF_LDX | BPF_MEM | BPF_H:
   722				PPC_LHZ(dst_reg, src_reg, off);
   723				break;
   724			/* dst = *(u32 *)(ul) (src + off) */
   725			case BPF_LDX | BPF_MEM | BPF_W:
   726				PPC_LWZ(dst_reg, src_reg, off);
   727				break;
   728			/* dst = *(u64 *)(ul) (src + off) */
   729			case BPF_LDX | BPF_MEM | BPF_DW:
   730				PPC_BPF_LL(dst_reg, src_reg, off);
   731				break;
   732	
   733			/*
   734			 * Doubleword load
   735			 * 16 byte instruction that uses two 'struct bpf_insn'
   736			 */
   737			case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
   738				imm64 = ((u64)(u32) insn[i].imm) |
   739					    (((u64)(u32) insn[i+1].imm) << 32);
   740				/* Adjust for two bpf instructions */
   741				addrs[++i] = ctx->idx * 4;
   742				PPC_LI64(dst_reg, imm64);
   743				break;
   744	
   745			/*
   746			 * Return/Exit
   747			 */
   748			case BPF_JMP | BPF_EXIT:
   749				/*
   750				 * If this isn't the very last instruction, branch to
   751				 * the epilogue. If we _are_ the last instruction,
   752				 * we'll just fall through to the epilogue.
   753				 */
   754				if (i != flen - 1)
   755					PPC_JMP(exit_addr);
   756				/* else fall through to the epilogue */
   757				break;
   758	
   759			/*
   760			 * Call kernel helper
   761			 */
   762			case BPF_JMP | BPF_CALL:
   763				ctx->seen |= SEEN_FUNC;
   764				func = (u8 *) __bpf_call_base + imm;
   765	
   766				/* Save skb pointer if we need to re-cache skb data */
   767				if (bpf_helper_changes_skb_data(func))
   768					PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
   769	
   770				bpf_jit_emit_func_call(image, ctx, (u64)func);
   771	
   772				/* move return value from r3 to BPF_REG_0 */
   773				PPC_MR(b2p[BPF_REG_0], 3);
   774	
   775				/* refresh skb cache */
   776				if (bpf_helper_changes_skb_data(func)) {
   777					/* reload skb pointer to r3 */
   778					PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
   779					bpf_jit_emit_skb_loads(image, ctx);
   780				}
   781				break;
   782	
   783			/*
   784			 * Jumps and branches
   785			 */
   786			case BPF_JMP | BPF_JA:
   787				PPC_JMP(addrs[i + 1 + off]);
   788				break;
   789	
   790			case BPF_JMP | BPF_JGT | BPF_K:
   791			case BPF_JMP | BPF_JGT | BPF_X:
   792			case BPF_JMP | BPF_JSGT | BPF_K:
   793			case BPF_JMP | BPF_JSGT | BPF_X:
   794				true_cond = COND_GT;
   795				goto cond_branch;
   796			case BPF_JMP | BPF_JGE | BPF_K:
   797			case BPF_JMP | BPF_JGE | BPF_X:
   798			case BPF_JMP | BPF_JSGE | BPF_K:
   799			case BPF_JMP | BPF_JSGE | BPF_X:
   800				true_cond = COND_GE;
   801				goto cond_branch;
   802			case BPF_JMP | BPF_JEQ | BPF_K:
   803			case BPF_JMP | BPF_JEQ | BPF_X:
   804				true_cond = COND_EQ;
   805				goto cond_branch;
   806			case BPF_JMP | BPF_JNE | BPF_K:
   807			case BPF_JMP | BPF_JNE | BPF_X:
   808				true_cond = COND_NE;
   809				goto cond_branch;
   810			case BPF_JMP | BPF_JSET | BPF_K:
   811			case BPF_JMP | BPF_JSET | BPF_X:
   812				true_cond = COND_NE;
   813				/* Fall through */
   814	
   815	cond_branch:
   816				switch (code) {
   817				case BPF_JMP | BPF_JGT | BPF_X:
   818				case BPF_JMP | BPF_JGE | BPF_X:
   819				case BPF_JMP | BPF_JEQ | BPF_X:
   820				case BPF_JMP | BPF_JNE | BPF_X:
   821					/* unsigned comparison */
   822					PPC_CMPLD(dst_reg, src_reg);
   823					break;
   824				case BPF_JMP | BPF_JSGT | BPF_X:
   825				case BPF_JMP | BPF_JSGE | BPF_X:
   826					/* signed comparison */
   827					PPC_CMPD(dst_reg, src_reg);
   828					break;
   829				case BPF_JMP | BPF_JSET | BPF_X:
   830					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
   831					break;
   832				case BPF_JMP | BPF_JNE | BPF_K:
   833				case BPF_JMP | BPF_JEQ | BPF_K:
   834				case BPF_JMP | BPF_JGT | BPF_K:
   835				case BPF_JMP | BPF_JGE | BPF_K:
   836					/*
   837					 * Need sign-extended load, so only positive
   838					 * values can be used as imm in cmpldi
   839					 */
   840					if (imm >= 0 && imm < 32768)
   841						PPC_CMPLDI(dst_reg, imm);
   842					else {
   843						/* sign-extending load */
   844						PPC_LI32(b2p[TMP_REG_1], imm);
   845						/* ... but unsigned comparison */
   846						PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
   847					}
   848					break;
   849				case BPF_JMP | BPF_JSGT | BPF_K:
   850				case BPF_JMP | BPF_JSGE | BPF_K:
   851					/*
   852					 * signed comparison, so any 16-bit value
   853					 * can be used in cmpdi
   854					 */
   855					if (imm >= -32768 && imm < 32768)
   856						PPC_CMPDI(dst_reg, imm);
   857					else {
   858						PPC_LI32(b2p[TMP_REG_1], imm);
   859						PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
   860					}
   861					break;
   862				case BPF_JMP | BPF_JSET | BPF_K:
   863					/* andi does not sign-extend the immediate */
   864					if (imm >= 0 && imm < 32768)
   865						/* PPC_ANDI is _only/always_ dot-form */
   866						PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
   867					else {
   868						PPC_LI32(b2p[TMP_REG_1], imm);
   869						PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
   870							    b2p[TMP_REG_1]);
   871					}
   872					break;
   873				}
   874				PPC_BCC(true_cond, addrs[i + 1 + off]);
   875				break;
   876	
   877			/*
   878			 * Loads from packet header/data
   879			 * Assume 32-bit input value in imm and X (src_reg)
   880			 */
   881	
   882			/* Absolute loads */
   883			case BPF_LD | BPF_W | BPF_ABS:
   884				func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
   885				goto common_load_abs;
   886			case BPF_LD | BPF_H | BPF_ABS:
   887				func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
   888				goto common_load_abs;
   889			case BPF_LD | BPF_B | BPF_ABS:
   890				func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
   891	common_load_abs:
   892				/*
   893				 * Load from [imm]
   894				 * Load into r4, which can just be passed onto
   895				 *  skb load helpers as the second parameter
   896				 */
   897				PPC_LI32(4, imm);
   898				goto common_load;
   899	
   900			/* Indirect loads */
   901			case BPF_LD | BPF_W | BPF_IND:
   902				func = (u8 *)sk_load_word;
   903				goto common_load_ind;
   904			case BPF_LD | BPF_H | BPF_IND:
   905				func = (u8 *)sk_load_half;
   906				goto common_load_ind;
   907			case BPF_LD | BPF_B | BPF_IND:
   908				func = (u8 *)sk_load_byte;
   909	common_load_ind:
   910				/*
   911				 * Load from [src_reg + imm]
   912				 * Treat src_reg as a 32-bit value
   913				 */
   914				PPC_EXTSW(4, src_reg);
   915				if (imm) {
   916					if (imm >= -32768 && imm < 32768)
   917						PPC_ADDI(4, 4, IMM_L(imm));
   918					else {
   919						PPC_LI32(b2p[TMP_REG_1], imm);
   920						PPC_ADD(4, 4, b2p[TMP_REG_1]);
   921					}
   922				}
   923	
   924	common_load:
   925				ctx->seen |= SEEN_SKB;
   926				ctx->seen |= SEEN_FUNC;
   927				bpf_jit_emit_func_call(image, ctx, (u64)func);
   928	
   929				/*
   930				 * Helper returns 'lt' condition on error, and an
   931				 * appropriate return value in BPF_REG_0
   932				 */
   933				PPC_BCC(COND_LT, exit_addr);
   934				break;
   935	
   936			/*
   937			 * Tail call
   938			 */
   939			case BPF_JMP | BPF_CALL | BPF_X:
   940				ctx->seen |= SEEN_TAILCALL;
   941				bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
   942				break;
   943	
   944			default:
   945				/*
   946				 * The filter contains something cruel & unusual.
   947				 * We don't handle it, but also there shouldn't be
   948				 * anything missing from our list.
   949				 */
   950				pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
   951						code, i);
   952				return -ENOTSUPP;
   953			}
   954		}
   955	
   956		/* Set end-of-body-code address for exit. */
   957		addrs[i] = ctx->idx * 4;
   958	
   959		return 0;
   960	}
   961	
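
On the semantics being fixed here ("powerpc/bpf: Fix BPF_MOD when imm == 1"): dividing by an immediate of 1 leaves dst unchanged (dst / 1 == dst), so the BPF_DIV case at line 434 emits nothing and just falls through to bpf_alu32_trunc (which truncates only for the 32-bit BPF_ALU class), while modulo by 1 is always zero (dst % 1 == 0), so the BPF_MOD case at line 437 must emit a single load of 0 into dst_reg. A worked example with a hypothetical dst value of 7:

	/* BPF_ALU64 | BPF_DIV | BPF_K, imm = 1:  dst = 7 / 1 = 7  (no instruction emitted) */
	/* BPF_ALU64 | BPF_MOD | BPF_K, imm = 1:  dst = 7 % 1 = 0  (single li dst_reg, 0)   */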

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 50927 bytes --]
