* [bpf-next:master 1831/1834] kernel//bpf/verifier.c:9132:25: error: implicit declaration of function 'bpf_jit_blinding_enabled'
@ 2019-11-23 14:32 kbuild test robot
0 siblings, 0 replies; only message in thread
From: kbuild test robot @ 2019-11-23 14:32 UTC (permalink / raw)
To: kbuild-all
[-- Attachment #1: Type: text/plain, Size: 14056 bytes --]
tree: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
head: eb410450964195c745a3cd0785cf8a8c3fe144b2
commit: bad63c9ea55421c49775574d855d9c90d686654b [1831/1834] bpf: Constant map key tracking for prog array pokes
config: x86_64-randconfig-s1-20191123 (attached as .config)
compiler: gcc-6 (Debian 6.3.0-18+deb9u1) 6.3.0 20170516
reproduce:
git checkout bad63c9ea55421c49775574d855d9c90d686654b
# save the attached .config to linux build tree
make ARCH=x86_64
If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
kernel//bpf/verifier.c: In function 'fixup_bpf_calls':
>> kernel//bpf/verifier.c:9132:25: error: implicit declaration of function 'bpf_jit_blinding_enabled' [-Werror=implicit-function-declaration]
bool expect_blinding = bpf_jit_blinding_enabled(prog);
^~~~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
vim +/bpf_jit_blinding_enabled +9132 kernel//bpf/verifier.c
9123
9124 /* fixup insn->imm field of bpf_call instructions
9125 * and inline eligible helpers as explicit sequence of BPF instructions
9126 *
9127 * this function is called after eBPF program passed verification
9128 */
9129 static int fixup_bpf_calls(struct bpf_verifier_env *env)
9130 {
9131 struct bpf_prog *prog = env->prog;
/* NOTE(review): per the kbuild error quoted above, bpf_jit_blinding_enabled()
 * has no visible declaration in this randconfig — presumably it is only
 * declared when CONFIG_BPF_JIT=y; a !CONFIG_BPF_JIT stub (or relocating the
 * declaration) in linux/filter.h looks needed — TODO confirm.
 */
> 9132 bool expect_blinding = bpf_jit_blinding_enabled(prog);
9133 struct bpf_insn *insn = prog->insnsi;
9134 const struct bpf_func_proto *fn;
9135 const int insn_cnt = prog->len;
9136 const struct bpf_map_ops *ops;
9137 struct bpf_insn_aux_data *aux;
9138 struct bpf_insn insn_buf[16];
9139 struct bpf_prog *new_prog;
9140 struct bpf_map *map_ptr;
9141 int i, ret, cnt, delta = 0;
9142
9143 for (i = 0; i < insn_cnt; i++, insn++) {
/* Rewrite register-operand DIV/MOD into a patchlet that checks the
 * divisor at runtime (div by 0 -> 0, mod by 0 -> Rx, per the inline
 * comments below) instead of letting a zero divisor trap.
 */
9144 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9145 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9146 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
9147 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9148 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9149 struct bpf_insn mask_and_div[] = {
9150 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9151 /* Rx div 0 -> 0 */
9152 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9153 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9154 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9155 *insn,
9156 };
9157 struct bpf_insn mask_and_mod[] = {
9158 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9159 /* Rx mod 0 -> Rx */
9160 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9161 *insn,
9162 };
9163 struct bpf_insn *patchlet;
9164
/* 64-bit variants skip the leading MOV32 (it is only needed for
 * the 32-bit forms), hence the (is64 ? 1 : 0) adjustments.
 */
9165 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9166 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9167 patchlet = mask_and_div + (is64 ? 1 : 0);
9168 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9169 } else {
9170 patchlet = mask_and_mod + (is64 ? 1 : 0);
9171 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9172 }
9173
9174 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
9175 if (!new_prog)
9176 return -ENOMEM;
9177
/* Patching grew the program by cnt - 1 insns; i stays relative to
 * the original program, delta accumulates the growth, and insn is
 * re-derived from the new program image.
 */
9178 delta += cnt - 1;
9179 env->prog = prog = new_prog;
9180 insn = new_prog->insnsi + i + delta;
9181 continue;
9182 }
9183
/* Expand legacy LD_ABS/LD_IND packet loads via the prog-type
 * specific gen_ld_abs() callback.
 */
9184 if (BPF_CLASS(insn->code) == BPF_LD &&
9185 (BPF_MODE(insn->code) == BPF_ABS ||
9186 BPF_MODE(insn->code) == BPF_IND)) {
9187 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9188 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9189 verbose(env, "bpf verifier is misconfigured\n");
9190 return -EINVAL;
9191 }
9192
9193 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9194 if (!new_prog)
9195 return -ENOMEM;
9196
9197 delta += cnt - 1;
9198 env->prog = prog = new_prog;
9199 insn = new_prog->insnsi + i + delta;
9200 continue;
9201 }
9202
/* Sanitize pointer ALU flagged by the verifier in aux->alu_state:
 * the emitted sequence (limit-1, SUB, OR, NEG, ARSH 63) derives an
 * all-ones/all-zeros mask in BPF_REG_AX which is ANDed into the
 * offset register, clamping out-of-limit offsets to 0.
 */
9203 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9204 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9205 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9206 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9207 struct bpf_insn insn_buf[16];
9208 struct bpf_insn *patch = &insn_buf[0];
9209 bool issrc, isneg;
9210 u32 off_reg;
9211
9212 aux = &env->insn_aux_data[i + delta];
9213 if (!aux->alu_state ||
9214 aux->alu_state == BPF_ALU_NON_POINTER)
9215 continue;
9216
9217 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9218 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9219 BPF_ALU_SANITIZE_SRC;
9220
/* isneg: the offset was negated by the verifier; undo it around
 * the patched op by multiplying by -1 and flipping ADD<->SUB.
 */
9221 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9222 if (isneg)
9223 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9224 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9225 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9226 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9227 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9228 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9229 if (issrc) {
9230 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9231 off_reg);
9232 insn->src_reg = BPF_REG_AX;
9233 } else {
9234 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9235 BPF_REG_AX);
9236 }
9237 if (isneg)
9238 insn->code = insn->code == code_add ?
9239 code_sub : code_add;
9240 *patch++ = *insn;
9241 if (issrc && isneg)
9242 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9243 cnt = patch - insn_buf;
9244
9245 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9246 if (!new_prog)
9247 return -ENOMEM;
9248
9249 delta += cnt - 1;
9250 env->prog = prog = new_prog;
9251 insn = new_prog->insnsi + i + delta;
9252 continue;
9253 }
9254
/* From here on only helper calls (BPF_JMP | BPF_CALL with a helper
 * id in insn->imm) are rewritten; bpf-to-bpf pseudo calls are
 * skipped here.
 */
9255 if (insn->code != (BPF_JMP | BPF_CALL))
9256 continue;
9257 if (insn->src_reg == BPF_PSEUDO_CALL)
9258 continue;
9259
9260 if (insn->imm == BPF_FUNC_get_route_realm)
9261 prog->dst_needed = 1;
9262 if (insn->imm == BPF_FUNC_get_prandom_u32)
9263 bpf_user_rnd_init_once();
9264 if (insn->imm == BPF_FUNC_override_return)
9265 prog->kprobe_override = 1;
9266 if (insn->imm == BPF_FUNC_tail_call) {
9267 /* If we tail call into other programs, we
9268 * cannot make any assumptions since they can
9269 * be replaced dynamically during runtime in
9270 * the program array.
9271 */
9272 prog->cb_access = 1;
9273 env->prog->aux->stack_depth = MAX_BPF_STACK;
9274 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
9275
9276 /* mark bpf_tail_call as different opcode to avoid
9277 * conditional branch in the interpreter for every normal
9278 * call and to prevent accidental JITing by JIT compiler
9279 * that doesn't support bpf_tail_call yet
9280 */
9281 insn->imm = 0;
9282 insn->code = BPF_JMP | BPF_TAIL_CALL;
9283
9284 aux = &env->insn_aux_data[i + delta];
/* Direct tail-call patching: when the map pointer and key are
 * known constants and neither is poisoned/unpriv, record a poke
 * descriptor for the JIT; insn->imm then carries the descriptor
 * index + 1. Not done when blinding is expected (expect_blinding).
 */
9285 if (prog->jit_requested && !expect_blinding &&
9286 !bpf_map_key_poisoned(aux) &&
9287 !bpf_map_ptr_poisoned(aux) &&
9288 !bpf_map_ptr_unpriv(aux)) {
9289 struct bpf_jit_poke_descriptor desc = {
9290 .reason = BPF_POKE_REASON_TAIL_CALL,
9291 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9292 .tail_call.key = bpf_map_key_immediate(aux),
9293 };
9294
9295 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9296 if (ret < 0) {
9297 verbose(env, "adding tail call poke descriptor failed\n");
9298 return ret;
9299 }
9300
9301 insn->imm = ret + 1;
9302 continue;
9303 }
9304
9305 if (!bpf_map_ptr_unpriv(aux))
9306 continue;
9307
9308 /* instead of changing every JIT dealing with tail_call
9309 * emit two extra insns:
9310 * if (index >= max_entries) goto out;
9311 * index &= array->index_mask;
9312 * to avoid out-of-bounds cpu speculation
9313 */
9314 if (bpf_map_ptr_poisoned(aux)) {
9315 verbose(env, "tail_call abusing map_ptr\n");
9316 return -EINVAL;
9317 }
9318
9319 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
9320 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9321 map_ptr->max_entries, 2);
9322 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9323 container_of(map_ptr,
9324 struct bpf_array,
9325 map)->index_mask);
9326 insn_buf[2] = *insn;
9327 cnt = 3;
9328 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9329 if (!new_prog)
9330 return -ENOMEM;
9331
9332 delta += cnt - 1;
9333 env->prog = prog = new_prog;
9334 insn = new_prog->insnsi + i + delta;
9335 continue;
9336 }
9337
9338 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
9339 * and other inlining handlers are currently limited to 64 bit
9340 * only.
9341 */
9342 if (prog->jit_requested && BITS_PER_LONG == 64 &&
9343 (insn->imm == BPF_FUNC_map_lookup_elem ||
9344 insn->imm == BPF_FUNC_map_update_elem ||
9345 insn->imm == BPF_FUNC_map_delete_elem ||
9346 insn->imm == BPF_FUNC_map_push_elem ||
9347 insn->imm == BPF_FUNC_map_pop_elem ||
9348 insn->imm == BPF_FUNC_map_peek_elem)) {
9349 aux = &env->insn_aux_data[i + delta];
9350 if (bpf_map_ptr_poisoned(aux))
9351 goto patch_call_imm;
9352
9353 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
9354 ops = map_ptr->ops;
/* Prefer a fully inlined lookup sequence when the map type
 * provides map_gen_lookup(); otherwise fall through to the
 * direct calls into the map ops below.
 */
9355 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9356 ops->map_gen_lookup) {
9357 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9358 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9359 verbose(env, "bpf verifier is misconfigured\n");
9360 return -EINVAL;
9361 }
9362
9363 new_prog = bpf_patch_insn_data(env, i + delta,
9364 insn_buf, cnt);
9365 if (!new_prog)
9366 return -ENOMEM;
9367
9368 delta += cnt - 1;
9369 env->prog = prog = new_prog;
9370 insn = new_prog->insnsi + i + delta;
9371 continue;
9372 }
9373
/* Compile-time check that the map ops signatures still match the
 * calling convention assumed by the direct-call rewrite below.
 */
9374 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9375 (void *(*)(struct bpf_map *map, void *key))NULL));
9376 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9377 (int (*)(struct bpf_map *map, void *key))NULL));
9378 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9379 (int (*)(struct bpf_map *map, void *key, void *value,
9380 u64 flags))NULL));
9381 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9382 (int (*)(struct bpf_map *map, void *value,
9383 u64 flags))NULL));
9384 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9385 (int (*)(struct bpf_map *map, void *value))NULL));
9386 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9387 (int (*)(struct bpf_map *map, void *value))NULL));
9388
9389 switch (insn->imm) {
9390 case BPF_FUNC_map_lookup_elem:
9391 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9392 __bpf_call_base;
9393 continue;
9394 case BPF_FUNC_map_update_elem:
9395 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9396 __bpf_call_base;
9397 continue;
9398 case BPF_FUNC_map_delete_elem:
9399 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9400 __bpf_call_base;
9401 continue;
9402 case BPF_FUNC_map_push_elem:
9403 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9404 __bpf_call_base;
9405 continue;
9406 case BPF_FUNC_map_pop_elem:
9407 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9408 __bpf_call_base;
9409 continue;
9410 case BPF_FUNC_map_peek_elem:
9411 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9412 __bpf_call_base;
9413 continue;
9414 }
9415
9416 goto patch_call_imm;
9417 }
9418
9419 patch_call_imm:
9420 fn = env->ops->get_func_proto(insn->imm, env->prog);
9421 /* all functions that have prototype and verifier allowed
9422 * programs to call them, must be real in-kernel functions
9423 */
9424 if (!fn->func) {
9425 verbose(env,
9426 "kernel subsystem misconfigured func %s#%d\n",
9427 func_id_name(insn->imm), insn->imm);
9428 return -EFAULT;
9429 }
9430 insn->imm = fn->func - __bpf_call_base;
9431 }
9432
9433 /* Since poke tab is now finalized, publish aux to tracker. */
9434 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9435 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9436 if (!map_ptr->ops->map_poke_track ||
9437 !map_ptr->ops->map_poke_untrack ||
9438 !map_ptr->ops->map_poke_run) {
9439 verbose(env, "bpf verifier is misconfigured\n");
9440 return -EINVAL;
9441 }
9442
9443 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9444 if (ret < 0) {
9445 verbose(env, "tracking tail call prog failed\n");
9446 return ret;
9447 }
9448 }
9449
9450 return 0;
9451 }
9452
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org Intel Corporation
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 28910 bytes --]
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2019-11-23 14:32 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-23 14:32 [bpf-next:master 1831/1834] kernel//bpf/verifier.c:9132:25: error: implicit declaration of function 'bpf_jit_blinding_enabled' kbuild test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.