tree:   https://github.com/congwang/linux.git sch_bpf
head:   ac00415a225e3cbd2c3a7fd5f38dacdfadfcc1ac
commit: ac00415a225e3cbd2c3a7fd5f38dacdfadfcc1ac [2/2] net_sched: introduce eBPF based Qdisc
config: arc-allyesconfig (attached as .config)
compiler: arceb-elf-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/congwang/linux/commit/ac00415a225e3cbd2c3a7fd5f38dacdfadfcc1ac
        git remote add congwang https://github.com/congwang/linux.git
        git fetch --no-tags congwang sch_bpf
        git checkout ac00415a225e3cbd2c3a7fd5f38dacdfadfcc1ac
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=arc SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot

All errors (new ones prefixed by >>):

   net/sched/sch_bpf.c: In function 'sch_bpf_enqueue':
>> net/sched/sch_bpf.c:185:23: error: called object is not a function or function pointer
     185 |                 res = BPF_PROG_RUN(enqueue, &ctx);
         |                       ^~~~~~~~~~~~
   net/sched/sch_bpf.c: In function 'sch_bpf_dequeue':
   net/sched/sch_bpf.c:254:15: error: called object is not a function or function pointer
     254 |         res = BPF_PROG_RUN(dequeue, &ctx);
         |               ^~~~~~~~~~~~
>> net/sched/sch_bpf.c:256:27: error: returning 'struct __sk_buff *' from a function with incompatible return type 'struct sk_buff *' [-Werror=incompatible-pointer-types]
     256 |                 return ctx.skb;
         |                        ~~~^~~~
   net/sched/sch_bpf.c: In function 'sch_bpf_dump_class_stats':
   net/sched/sch_bpf.c:525:31: warning: unused variable 'q' [-Wunused-variable]
     525 |         struct sch_bpf_qdisc *q = qdisc_priv(sch);
         |                               ^
   cc1: some warnings being treated as errors


vim +185 net/sched/sch_bpf.c

   166	
   167	static int sch_bpf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
   168				   struct sk_buff **to_free)
   169	{
   170		struct sch_bpf_qdisc *q = qdisc_priv(sch);
   171		unsigned int len = qdisc_pkt_len(skb);
   172		struct sch_bpf_ctx ctx = {};
   173		struct sch_bpf_class *cl;
   174		int res = NET_XMIT_SUCCESS;
   175	
   176		cl = sch_bpf_classify(skb, sch, &res);
   177		if (!cl) {
   178			struct bpf_prog *enqueue;
   179	
   180			enqueue = rcu_dereference(q->enqueue_prog.prog);
   181			bpf_compute_data_pointers(skb);
   182	
   183			ctx.skb = (struct __sk_buff *)skb;
   184			ctx.nr_flows = q->clhash.hashelems;
 > 185			res = BPF_PROG_RUN(enqueue, &ctx);
   186			if (q->direct)
   187				return res;
   188			switch (res) {
   189			case SCH_BPF_DROP:
   190				__qdisc_drop(skb, to_free);
   191				return NET_XMIT_DROP;
   192			}
   193			cl = sch_bpf_find(sch, ctx.classid);
   194			if (!cl) {
   195				if (res & __NET_XMIT_BYPASS)
   196					qdisc_qstats_drop(sch);
   197				__qdisc_drop(skb, to_free);
   198				return res;
   199			}
   200		}
   201	
   202		if (cl->qdisc) {
   203			res = qdisc_enqueue(skb, cl->qdisc, to_free);
   204			if (res != NET_XMIT_SUCCESS) {
   205				if (net_xmit_drop_count(res)) {
   206					qdisc_qstats_drop(sch);
   207					cl->drops++;
   208				}
   209				return res;
   210			}
   211		} else {
   212			sch_bpf_skb_cb(skb)->rank = ctx.rank;
   213			pq_push(&cl->pq, &skb->pqnode);
   214		}
   215	
   216		sch->qstats.backlog += len;
   217		sch->q.qlen++;
   218		return res;
   219	}
   220	
   221	static struct sk_buff *sch_bpf_dequeue(struct Qdisc *sch)
   222	{
   223		struct sch_bpf_qdisc *q = qdisc_priv(sch);
   224		struct sk_buff *skb, *ret = NULL;
   225		struct sch_bpf_ctx ctx = {};
   226		struct bpf_prog *dequeue;
   227		struct sch_bpf_class *cl;
   228		struct pq_node *flow;
   229		s64 now;
   230		int res;
   231	
   232	requeue:
   233		flow = pq_pop(&q->flows);
   234		if (!flow)
   235			return NULL;
   236	
   237		cl = container_of(flow, struct sch_bpf_class, node);
   238		if (cl->qdisc) {
   239			skb = cl->qdisc->dequeue(cl->qdisc);
   240			ctx.classid = cl->common.classid;
   241		} else {
   242			struct pq_node *p = pq_pop(&cl->pq);
   243	
   244			if (!p)
   245				return NULL;
   246			skb = container_of(p, struct sk_buff, pqnode);
   247			ctx.classid = cl->rank;
   248		}
   249		ctx.skb = (struct __sk_buff *) skb;
   250		ctx.nr_flows = q->clhash.hashelems;
   251	
   252		dequeue = rcu_dereference(q->dequeue_prog.prog);
   253		bpf_compute_data_pointers(skb);
   254		res = BPF_PROG_RUN(dequeue, &ctx);
   255		if (q->direct)
 > 256			return ctx.skb;
   257		switch (res) {
   258		case SCH_BPF_OK:
   259			ret = skb;
   260			break;
   261		case SCH_BPF_REQUEUE:
   262			sch_bpf_skb_cb(skb)->rank = ctx.rank;
   263			cl->rank = ctx.classid;
   264			pq_push(&cl->pq, &skb->pqnode);
   265			bstats_update(&cl->bstats, skb);
   266			pq_push(&q->flows, &cl->node);
   267			goto requeue;
   268		case SCH_BPF_THROTTLE:
   269			now = ktime_get_ns();
   270			qdisc_watchdog_schedule_ns(&q->watchdog, now + ctx.delay);
   271			qdisc_qstats_overlimit(sch);
   272			cl->overlimits++;
   273			return NULL;
   274		default:
   275			kfree_skb(skb);
   276			ret = NULL;
   277		}
   278	
   279		if (pq_top(&cl->pq))
   280			pq_push(&q->flows, &cl->node);
   281		return ret;
   282	}
   283	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
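
[Editor's note on the two errors above, not part of the robot's report.] Recent kernels
dropped the in-kernel BPF_PROG_RUN() wrapper macro in favour of the bpf_prog_run()
helper, and the only BPF_PROG_RUN identifier still visible is the uapi bpf_cmd alias of
BPF_PROG_TEST_RUN, an integer constant, which would match the "called object is not a
function or function pointer" diagnostic. Assuming this tree is based on such a kernel,
a minimal, untested sketch of one way to address the errors in the listing above might
look like the following; it is an illustration only, not the author's intended fix:

	/* sch_bpf_enqueue(), around line 185 of the listing: invoke the
	 * program through the bpf_prog_run() helper instead of the removed
	 * BPF_PROG_RUN() macro.
	 */
	res = bpf_prog_run(enqueue, &ctx);

	/* sch_bpf_dequeue(), around lines 254-256: same change, plus cast
	 * ctx.skb (declared as struct __sk_buff *) back to struct sk_buff *
	 * so it matches the function's return type.  ctx.skb was filled from
	 * the real skb by the cast at line 249, so this only undoes that
	 * conversion; whether returning ctx.skb directly is safe after the
	 * program has run is a separate review question.
	 */
	res = bpf_prog_run(dequeue, &ctx);
	if (q->direct)
		return (struct sk_buff *)ctx.skb;

The remaining -Wunused-variable warning in sch_bpf_dump_class_stats() would go away by
dropping the unused 'q' local or by actually using it when filling in the class stats.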