* [drm-drm-misc:for-linux-next 2/5] drivers/infiniband/core/umem_dmabuf.c:69:10: error: implicit declaration of function 'dma_resv_get_excl'
@ 2021-06-07 4:18 kernel test robot
0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2021-06-07 4:18 UTC (permalink / raw)
To: kbuild-all
[-- Attachment #1: Type: text/plain, Size: 6992 bytes --]
tree: git://anongit.freedesktop.org/drm/drm-misc for-linux-next
head: d3fae3b3daac09961ab871a25093b0ae404282d5
commit: 6edbd6abb783d54f6ac4c3ed5cd9e50cff6c15e9 [2/5] dma-buf: rename and cleanup dma_resv_get_excl v3
config: x86_64-randconfig-a016-20210606 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 551a697c5cf33275b66add4fc467fcf59084cffb)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
git remote add drm-drm-misc git://anongit.freedesktop.org/drm/drm-misc
git fetch --no-tags drm-drm-misc for-linux-next
git checkout 6edbd6abb783d54f6ac4c3ed5cd9e50cff6c15e9
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
>> drivers/infiniband/core/umem_dmabuf.c:69:10: error: implicit declaration of function 'dma_resv_get_excl' [-Werror,-Wimplicit-function-declaration]
fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
^
drivers/infiniband/core/umem_dmabuf.c:69:8: warning: incompatible integer to pointer conversion assigning to 'struct dma_fence *' from 'int' [-Wint-conversion]
fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 warning and 1 error generated.
vim +/dma_resv_get_excl +69 drivers/infiniband/core/umem_dmabuf.c
368c0159d492d7 Jianxin Xiong 2020-12-15 11
368c0159d492d7 Jianxin Xiong 2020-12-15 12 int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
368c0159d492d7 Jianxin Xiong 2020-12-15 13 {
368c0159d492d7 Jianxin Xiong 2020-12-15 14 struct sg_table *sgt;
368c0159d492d7 Jianxin Xiong 2020-12-15 15 struct scatterlist *sg;
368c0159d492d7 Jianxin Xiong 2020-12-15 16 struct dma_fence *fence;
368c0159d492d7 Jianxin Xiong 2020-12-15 17 unsigned long start, end, cur = 0;
368c0159d492d7 Jianxin Xiong 2020-12-15 18 unsigned int nmap = 0;
368c0159d492d7 Jianxin Xiong 2020-12-15 19 int i;
368c0159d492d7 Jianxin Xiong 2020-12-15 20
368c0159d492d7 Jianxin Xiong 2020-12-15 21 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
368c0159d492d7 Jianxin Xiong 2020-12-15 22
368c0159d492d7 Jianxin Xiong 2020-12-15 23 if (umem_dmabuf->sgt)
368c0159d492d7 Jianxin Xiong 2020-12-15 24 goto wait_fence;
368c0159d492d7 Jianxin Xiong 2020-12-15 25
368c0159d492d7 Jianxin Xiong 2020-12-15 26 sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
368c0159d492d7 Jianxin Xiong 2020-12-15 27 if (IS_ERR(sgt))
368c0159d492d7 Jianxin Xiong 2020-12-15 28 return PTR_ERR(sgt);
368c0159d492d7 Jianxin Xiong 2020-12-15 29
368c0159d492d7 Jianxin Xiong 2020-12-15 30 /* modify the sg list in-place to match umem address and length */
368c0159d492d7 Jianxin Xiong 2020-12-15 31
368c0159d492d7 Jianxin Xiong 2020-12-15 32 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
368c0159d492d7 Jianxin Xiong 2020-12-15 33 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
368c0159d492d7 Jianxin Xiong 2020-12-15 34 PAGE_SIZE);
368c0159d492d7 Jianxin Xiong 2020-12-15 35 for_each_sgtable_dma_sg(sgt, sg, i) {
368c0159d492d7 Jianxin Xiong 2020-12-15 36 if (start < cur + sg_dma_len(sg) && cur < end)
368c0159d492d7 Jianxin Xiong 2020-12-15 37 nmap++;
368c0159d492d7 Jianxin Xiong 2020-12-15 38 if (cur <= start && start < cur + sg_dma_len(sg)) {
368c0159d492d7 Jianxin Xiong 2020-12-15 39 unsigned long offset = start - cur;
368c0159d492d7 Jianxin Xiong 2020-12-15 40
368c0159d492d7 Jianxin Xiong 2020-12-15 41 umem_dmabuf->first_sg = sg;
368c0159d492d7 Jianxin Xiong 2020-12-15 42 umem_dmabuf->first_sg_offset = offset;
368c0159d492d7 Jianxin Xiong 2020-12-15 43 sg_dma_address(sg) += offset;
368c0159d492d7 Jianxin Xiong 2020-12-15 44 sg_dma_len(sg) -= offset;
368c0159d492d7 Jianxin Xiong 2020-12-15 45 cur += offset;
368c0159d492d7 Jianxin Xiong 2020-12-15 46 }
368c0159d492d7 Jianxin Xiong 2020-12-15 47 if (cur < end && end <= cur + sg_dma_len(sg)) {
368c0159d492d7 Jianxin Xiong 2020-12-15 48 unsigned long trim = cur + sg_dma_len(sg) - end;
368c0159d492d7 Jianxin Xiong 2020-12-15 49
368c0159d492d7 Jianxin Xiong 2020-12-15 50 umem_dmabuf->last_sg = sg;
368c0159d492d7 Jianxin Xiong 2020-12-15 51 umem_dmabuf->last_sg_trim = trim;
368c0159d492d7 Jianxin Xiong 2020-12-15 52 sg_dma_len(sg) -= trim;
368c0159d492d7 Jianxin Xiong 2020-12-15 53 break;
368c0159d492d7 Jianxin Xiong 2020-12-15 54 }
368c0159d492d7 Jianxin Xiong 2020-12-15 55 cur += sg_dma_len(sg);
368c0159d492d7 Jianxin Xiong 2020-12-15 56 }
368c0159d492d7 Jianxin Xiong 2020-12-15 57
368c0159d492d7 Jianxin Xiong 2020-12-15 58 umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
368c0159d492d7 Jianxin Xiong 2020-12-15 59 umem_dmabuf->umem.sg_head.nents = nmap;
368c0159d492d7 Jianxin Xiong 2020-12-15 60 umem_dmabuf->umem.nmap = nmap;
368c0159d492d7 Jianxin Xiong 2020-12-15 61 umem_dmabuf->sgt = sgt;
368c0159d492d7 Jianxin Xiong 2020-12-15 62
368c0159d492d7 Jianxin Xiong 2020-12-15 63 wait_fence:
368c0159d492d7 Jianxin Xiong 2020-12-15 64 /*
368c0159d492d7 Jianxin Xiong 2020-12-15 65 * Although the sg list is valid now, the content of the pages
368c0159d492d7 Jianxin Xiong 2020-12-15 66 * may be not up-to-date. Wait for the exporter to finish
368c0159d492d7 Jianxin Xiong 2020-12-15 67 * the migration.
368c0159d492d7 Jianxin Xiong 2020-12-15 68 */
368c0159d492d7 Jianxin Xiong 2020-12-15 @69 fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
368c0159d492d7 Jianxin Xiong 2020-12-15 70 if (fence)
368c0159d492d7 Jianxin Xiong 2020-12-15 71 return dma_fence_wait(fence, false);
368c0159d492d7 Jianxin Xiong 2020-12-15 72
368c0159d492d7 Jianxin Xiong 2020-12-15 73 return 0;
368c0159d492d7 Jianxin Xiong 2020-12-15 74 }
368c0159d492d7 Jianxin Xiong 2020-12-15 75 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
368c0159d492d7 Jianxin Xiong 2020-12-15 76
:::::: The code at line 69 was first introduced by commit
:::::: 368c0159d492d7fbdb5791b40c9263ec4e97a10f RDMA/umem: Support importing dma-buf as user memory region
:::::: TO: Jianxin Xiong <jianxin.xiong@intel.com>
:::::: CC: Jason Gunthorpe <jgg@nvidia.com>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 35719 bytes --]
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2021-06-07 4:18 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-07 4:18 [drm-drm-misc:for-linux-next 2/5] drivers/infiniband/core/umem_dmabuf.c:69:10: error: implicit declaration of function 'dma_resv_get_excl' kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.