From: kernel test robot <lkp@intel.com>
To: Xuan Zhuo <xuanzhuo@linux.alibaba.com>, netdev@vger.kernel.org
Cc: "Petr Machata" <petrm@nvidia.com>,
"Menglong Dong" <imagedong@tencent.com>,
"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
"Daniel Borkmann" <daniel@iogearbox.net>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Jonathan Lemon" <jonathan.lemon@gmail.com>,
"John Fastabend" <john.fastabend@gmail.com>,
"Björn Töpel" <bjorn@kernel.org>,
"Alexei Starovoitov" <ast@kernel.org>,
"Eric Dumazet" <edumazet@google.com>,
"Kuniyuki Iwashima" <kuniyu@amazon.com>,
oe-kbuild-all@lists.linux.dev, "Jakub Kicinski" <kuba@kernel.org>,
bpf@vger.kernel.org, "Paolo Abeni" <pabeni@redhat.com>,
virtualization@lists.linux-foundation.org,
"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
"Magnus Karlsson" <magnus.karlsson@intel.com>
Subject: Re: [PATCH 10/33] xsk: support virtio DMA map
Date: Mon, 6 Feb 2023 06:04:34 +0800 [thread overview]
Message-ID: <202302060542.IxBGSiKh-lkp@intel.com> (raw)
In-Reply-To: <20230202110058.130695-11-xuanzhuo@linux.alibaba.com>
Hi Xuan,
Thank you for the patch! There is still something to improve:
[auto build test ERROR on net-next/master]
[also build test ERROR on mst-vhost/linux-next linus/master v6.2-rc6 next-20230203]
[cannot apply to net/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Xuan-Zhuo/virtio_ring-virtqueue_add-support-premapped/20230202-190707
patch link: https://lore.kernel.org/r/20230202110058.130695-11-xuanzhuo%40linux.alibaba.com
patch subject: [PATCH 10/33] xsk: support virtio DMA map
config: i386-debian-10.3-kvm (https://download.01.org/0day-ci/archive/20230206/202302060542.IxBGSiKh-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/370aefebcea755f7c4c14e16f8dcb5540769fd26
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Xuan-Zhuo/virtio_ring-virtqueue_add-support-premapped/20230202-190707
git checkout 370aefebcea755f7c4c14e16f8dcb5540769fd26
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=i386 olddefconfig
make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
ld: net/xdp/xsk_buff_pool.o: in function `xp_alloc':
>> net/xdp/xsk_buff_pool.c:575: undefined reference to `is_virtio_device'
>> ld: net/xdp/xsk_buff_pool.c:576: undefined reference to `virtio_dma_sync_signle_range_for_device'
ld: net/xdp/xsk_buff_pool.o: in function `__xp_dma_unmap':
net/xdp/xsk_buff_pool.c:338: undefined reference to `is_virtio_device'
>> ld: net/xdp/xsk_buff_pool.c:339: undefined reference to `virtio_dma_unmap'
ld: net/xdp/xsk_buff_pool.o: in function `xp_dma_map':
net/xdp/xsk_buff_pool.c:443: undefined reference to `is_virtio_device'
ld: net/xdp/xsk_buff_pool.c:443: undefined reference to `virtio_dma_sync_signle_range_for_device'
>> ld: net/xdp/xsk_buff_pool.c:443: undefined reference to `virtio_dma_sync_signle_range_for_cpu'
>> ld: net/xdp/xsk_buff_pool.c:458: undefined reference to `virtio_dma_map_page'
>> ld: net/xdp/xsk_buff_pool.c:461: undefined reference to `virtio_dma_mapping_error'
>> ld: net/xdp/xsk_buff_pool.c:464: undefined reference to `virtio_dma_need_sync'
>> ld: net/xdp/xsk_buff_pool.c:457: undefined reference to `is_virtio_device'
vim +575 net/xdp/xsk_buff_pool.c
424
425 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
426 unsigned long attrs, struct page **pages, u32 nr_pages)
427 {
428 struct xsk_dma_map *dma_map;
429 dma_addr_t dma;
430 int err;
431 u32 i;
432
433 dma_map = xp_find_dma_map(pool);
434 if (dma_map) {
435 err = xp_init_dma_info(pool, dma_map);
436 if (err)
437 return err;
438
439 refcount_inc(&dma_map->users);
440 return 0;
441 }
442
> 443 if (is_virtio_device(dev)) {
444 pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
445 pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;
446
447 } else {
448 pool->dma_sync_for_cpu = dma_sync_for_cpu;
449 pool->dma_sync_for_device = dma_sync_for_device;
450 }
451
452 dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
453 if (!dma_map)
454 return -ENOMEM;
455
456 for (i = 0; i < dma_map->dma_pages_cnt; i++) {
> 457 if (is_virtio_device(dev)) {
> 458 dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
459 DMA_BIDIRECTIONAL);
460
> 461 if (virtio_dma_mapping_error(dev, dma))
462 goto err;
463
> 464 if (virtio_dma_need_sync(dev, dma))
465 dma_map->dma_need_sync = true;
466
467 } else {
468 dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
469 DMA_BIDIRECTIONAL, attrs);
470
471 if (dma_mapping_error(dev, dma))
472 goto err;
473
474 if (dma_need_sync(dev, dma))
475 dma_map->dma_need_sync = true;
476 }
477 dma_map->dma_pages[i] = dma;
478 }
479
480 if (pool->unaligned)
481 xp_check_dma_contiguity(dma_map);
482
483 err = xp_init_dma_info(pool, dma_map);
484 if (err) {
485 __xp_dma_unmap(dma_map, attrs);
486 return err;
487 }
488
489 return 0;
490 err:
491 __xp_dma_unmap(dma_map, attrs);
492 return -ENOMEM;
493 }
494 EXPORT_SYMBOL(xp_dma_map);
495
496 static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
497 u64 addr)
498 {
499 return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
500 }
501
502 static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
503 {
504 *addr = xp_unaligned_extract_addr(*addr);
505 if (*addr >= pool->addrs_cnt ||
506 *addr + pool->chunk_size > pool->addrs_cnt ||
507 xp_addr_crosses_non_contig_pg(pool, *addr))
508 return false;
509 return true;
510 }
511
512 static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
513 {
514 *addr = xp_aligned_extract_addr(pool, *addr);
515 return *addr < pool->addrs_cnt;
516 }
517
518 static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
519 {
520 struct xdp_buff_xsk *xskb;
521 u64 addr;
522 bool ok;
523
524 if (pool->free_heads_cnt == 0)
525 return NULL;
526
527 for (;;) {
528 if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
529 pool->fq->queue_empty_descs++;
530 return NULL;
531 }
532
533 ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
534 xp_check_aligned(pool, &addr);
535 if (!ok) {
536 pool->fq->invalid_descs++;
537 xskq_cons_release(pool->fq);
538 continue;
539 }
540 break;
541 }
542
543 if (pool->unaligned) {
544 xskb = pool->free_heads[--pool->free_heads_cnt];
545 xp_init_xskb_addr(xskb, pool, addr);
546 if (pool->dma_pages_cnt)
547 xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
548 } else {
549 xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
550 }
551
552 xskq_cons_release(pool->fq);
553 return xskb;
554 }
555
556 struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
557 {
558 struct xdp_buff_xsk *xskb;
559
560 if (!pool->free_list_cnt) {
561 xskb = __xp_alloc(pool);
562 if (!xskb)
563 return NULL;
564 } else {
565 pool->free_list_cnt--;
566 xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
567 free_list_node);
568 list_del_init(&xskb->free_list_node);
569 }
570
571 xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
572 xskb->xdp.data_meta = xskb->xdp.data;
573
574 if (pool->dma_need_sync) {
> 575 if (is_virtio_device(pool->dev))
> 576 virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
577 pool->frame_len,
578 DMA_BIDIRECTIONAL);
579 else
580 dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
581 pool->frame_len,
582 DMA_BIDIRECTIONAL);
583 }
584 return &xskb->xdp;
585 }
586 EXPORT_SYMBOL(xp_alloc);
587
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
next prev parent reply other threads:[~2023-02-05 22:04 UTC|newest]
Thread overview: 76+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-02 11:00 [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:00 ` [PATCH 01/33] virtio_ring: virtqueue_add() support premapped Xuan Zhuo
2023-02-02 11:00 ` [PATCH 02/33] virtio_ring: split: virtqueue_add_split() " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 03/33] virtio_ring: packed: virtqueue_add_packed() " Xuan Zhuo
2023-02-03 9:16 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 04/33] virtio_ring: introduce virtqueue_add_outbuf_premapped() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 05/33] virtio_ring: introduce virtqueue_add_inbuf_premapped() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 06/33] virtio_ring: introduce virtqueue_reset() Xuan Zhuo
2023-02-03 9:05 ` Michael S. Tsirkin
2023-02-03 9:09 ` Xuan Zhuo
2023-02-13 12:15 ` Michael S. Tsirkin
2023-02-14 1:53 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 07/33] virtio_ring: add api virtio_dma_map() for advance dma Xuan Zhuo
2023-02-03 9:07 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 08/33] virtio_ring: introduce dma sync api for virtio Xuan Zhuo
2023-02-03 9:24 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 09/33] xsk: xsk_buff_pool add callback for dma_sync Xuan Zhuo
[not found] ` <CAJ8uoz2+4+wUFYF1GjF51DFBV8ZsBRtTEVWpu_2fBmFUEQzOLQ@mail.gmail.com>
2023-02-03 7:01 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 10/33] xsk: support virtio DMA map Xuan Zhuo
2023-02-05 22:04 ` kernel test robot [this message]
2023-02-02 11:00 ` [PATCH 11/33] virtio_net: rename free_old_xmit_skbs to free_old_xmit Xuan Zhuo
2023-02-02 11:00 ` [PATCH 12/33] virtio_net: unify the code for recycling the xmit ptr Xuan Zhuo
2023-02-02 11:00 ` [PATCH 13/33] virtio_net: virtnet_poll_tx support rescheduled Xuan Zhuo
2023-02-02 11:00 ` [PATCH 14/33] virtio_net: independent directory Xuan Zhuo
2023-02-02 11:00 ` [PATCH 15/33] virtio_net: move to virtio_net.h Xuan Zhuo
2023-02-03 8:53 ` Michael S. Tsirkin
2023-02-03 9:04 ` Xuan Zhuo
2023-02-03 9:26 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 16/33] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp Xuan Zhuo
2023-02-03 8:55 ` Michael S. Tsirkin
2023-02-03 9:01 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 17/33] virtio_net: receive_small() use virtnet_xdp_handler() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 18/33] virtio_net: receive_merageable() " Xuan Zhuo
2023-02-02 17:16 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 19/33] virtio_net: introduce virtnet_tx_reset() Xuan Zhuo
2023-02-02 17:23 ` Michael S. Tsirkin
2023-02-03 4:35 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 20/33] virtio_net: xsk: introduce virtnet_rq_bind_xsk_pool() Xuan Zhuo
2023-02-03 8:48 ` Michael S. Tsirkin
2023-02-03 8:52 ` Xuan Zhuo
2023-02-03 9:28 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 21/33] virtio_net: xsk: introduce virtnet_xsk_pool_enable() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 22/33] virtio_net: xsk: introduce xsk disable Xuan Zhuo
2023-02-02 23:02 ` kernel test robot
2023-02-12 7:56 ` kernel test robot
2023-02-02 11:00 ` [PATCH 23/33] virtio_net: xsk: support xsk setup Xuan Zhuo
2023-02-02 11:00 ` [PATCH 24/33] virtio_net: xsk: stop disable tx napi Xuan Zhuo
2023-02-02 17:25 ` Michael S. Tsirkin
2023-02-03 3:24 ` Xuan Zhuo
2023-02-03 8:33 ` Michael S. Tsirkin
2023-02-03 8:49 ` Xuan Zhuo
2023-02-03 9:29 ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 25/33] virtio_net: xsk: __free_old_xmit distinguishes xsk buffer Xuan Zhuo
2023-02-02 11:00 ` [PATCH 26/33] virtio_net: virtnet_sq_free_unused_buf() check " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 27/33] virtio_net: virtnet_rq_free_unused_buf() " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 28/33] net: introduce napi_tx_raise() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 29/33] virtio_net: xsk: tx: support tx Xuan Zhuo
[not found] ` <Y9zIPdKmTvXqyuYS@boxer>
2023-02-03 8:55 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 30/33] virtio_net: xsk: tx: support wakeup Xuan Zhuo
2023-02-02 11:00 ` [PATCH 31/33] virtio_net: xsk: tx: auto wakeup when free old xmit Xuan Zhuo
2023-02-02 11:00 ` [PATCH 32/33] virtio_net: xsk: rx: introduce add_recvbuf_xsk() Xuan Zhuo
[not found] ` <Y9zJS+ugeY9qEMt9@boxer>
2023-02-03 8:56 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 33/33] virtio_net: xsk: rx: introduce receive_xsk() to recv xsk buffer Xuan Zhuo
2023-02-02 11:08 ` [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:08 ` Michael S. Tsirkin
2023-02-02 11:11 ` Xuan Zhuo
2023-02-02 11:44 ` Xuan Zhuo
2023-02-03 9:08 ` Michael S. Tsirkin
2023-02-03 9:09 ` Xuan Zhuo
2023-02-02 14:41 ` Paolo Abeni
2023-02-03 3:33 ` Xuan Zhuo
2023-02-03 8:37 ` Michael S. Tsirkin
[not found] ` <Y9zJ9j0GthvRSFHL@boxer>
2023-02-03 9:09 ` Michael S. Tsirkin
2023-02-03 9:17 ` Michael S. Tsirkin
2023-02-06 2:41 ` Xuan Zhuo
2023-02-13 12:14 ` Michael S. Tsirkin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=202302060542.IxBGSiKh-lkp@intel.com \
--to=lkp@intel.com \
--cc=ast@kernel.org \
--cc=bigeasy@linutronix.de \
--cc=bjorn@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=edumazet@google.com \
--cc=hawk@kernel.org \
--cc=imagedong@tencent.com \
--cc=john.fastabend@gmail.com \
--cc=jonathan.lemon@gmail.com \
--cc=kuba@kernel.org \
--cc=kuniyu@amazon.com \
--cc=maciej.fijalkowski@intel.com \
--cc=magnus.karlsson@intel.com \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=oe-kbuild-all@lists.linux.dev \
--cc=pabeni@redhat.com \
--cc=petrm@nvidia.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=xuanzhuo@linux.alibaba.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).