* [android-goldfish:android-5.4 1/1] drivers/gpu/drm/ttm/ttm_bo_vm.c:276: undefined reference to `vmf_insert_mixed'
@ 2020-08-18  5:33 kernel test robot
  0 siblings, 0 replies; 2+ messages in thread
From: kernel test robot @ 2020-08-18  5:33 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 16277 bytes --]

tree:   https://android.googlesource.com/kernel/goldfish android-5.4
head:   7af30e526c2120f1f6ba4b5b19d683697e2c2833
commit: 7af30e526c2120f1f6ba4b5b19d683697e2c2833 [1/1] ANDROID: GKI: Add DRM_TTM config to GKI
config: arm-randconfig-r033-20200818 (attached as .config)
compiler: arm-linux-gnueabi-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 7af30e526c2120f1f6ba4b5b19d683697e2c2833
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm 

If you fix the issue, kindly add the following tag, as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arm-linux-gnueabi-ld: drivers/gpu/drm/ttm/ttm_bo_vm.o: in function `ttm_bo_vm_fault':
>> drivers/gpu/drm/ttm/ttm_bo_vm.c:276: undefined reference to `vmf_insert_mixed'
>> arm-linux-gnueabi-ld: drivers/gpu/drm/ttm/ttm_bo_vm.c:279: undefined reference to `vmf_insert_pfn'
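
Both missing symbols are MMU fault helpers: they are declared in
include/linux/mm.h, but their definitions live in mm/memory.c, which is
compiled only when CONFIG_MMU=y. A randconfig that ends up with
CONFIG_MMU=n while still enabling DRM_TTM therefore builds the TTM fault
handler but cannot resolve these calls at link time. For reference, the
prototypes involved, paraphrased from the call sites in the listing
below (treat the exact header wording as an assumption):

        /* Fault-insertion helpers used by ttm_bo_vm_fault(); defined in
         * mm/memory.c, i.e. available only on CONFIG_MMU=y kernels. */
        vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
                                  unsigned long addr, unsigned long pfn);
        vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
                                    unsigned long addr, pfn_t pfn);

The natural direction for a fix, though it is an assumption rather than
anything confirmed in this report, would be a Kconfig-level dependency
that keeps DRM_TTM from being enabled on MMU-less configurations.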

git remote add android-goldfish https://android.googlesource.com/kernel/goldfish
git fetch --no-tags android-goldfish android-5.4
git checkout 7af30e526c2120f1f6ba4b5b19d683697e2c2833
vim +276 drivers/gpu/drm/ttm/ttm_bo_vm.c

c67fa6edc8b11a Tan Xiaojun       2017-12-25  108  
4daa4fba3a3899 Souptick Joarder  2018-06-02  109  static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  110  {
11bac80004499e Dave Jiang        2017-02-24  111  	struct vm_area_struct *vma = vmf->vma;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  112  	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  113  	    vma->vm_private_data;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  114  	struct ttm_bo_device *bdev = bo->bdev;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  115  	unsigned long page_offset;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  116  	unsigned long page_last;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  117  	unsigned long pfn;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  118  	struct ttm_tt *ttm = NULL;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  119  	struct page *page;
4daa4fba3a3899 Souptick Joarder  2018-06-02  120  	int err;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  121  	int i;
4daa4fba3a3899 Souptick Joarder  2018-06-02  122  	vm_fault_t ret = VM_FAULT_NOPAGE;
1a29d85eb0f19b Jan Kara          2016-12-14  123  	unsigned long address = vmf->address;
eba67093f53532 Thomas Hellstrom  2010-11-11  124  	struct ttm_mem_type_manager *man =
eba67093f53532 Thomas Hellstrom  2010-11-11  125  		&bdev->man[bo->mem.mem_type];
3943875e7b73fd Thomas Hellstrom  2013-11-06  126  	struct vm_area_struct cvma;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  127  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  128  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  129  	 * Work around locking order reversal in fault / nopfn
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  130  	 * between mmap_sem and bo_reserve: Perform a trylock operation
c58f009e01c918 Thomas Hellstrom  2013-11-14  131  	 * for reserve, and if it fails, retry the fault after waiting
c58f009e01c918 Thomas Hellstrom  2013-11-14  132  	 * for the buffer to become unreserved.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  133  	 */
52791eeec1d9f4 Christian König   2019-08-11  134  	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
c58f009e01c918 Thomas Hellstrom  2013-11-14  135  		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
c58f009e01c918 Thomas Hellstrom  2013-11-14  136  			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
8129fdad387ae3 Thomas Zimmermann 2018-06-21  137  				ttm_bo_get(bo);
11bac80004499e Dave Jiang        2017-02-24  138  				up_read(&vmf->vma->vm_mm->mmap_sem);
c58f009e01c918 Thomas Hellstrom  2013-11-14  139  				(void) ttm_bo_wait_unreserved(bo);
f44907593d746d Thomas Zimmermann 2018-06-21  140  				ttm_bo_put(bo);
c58f009e01c918 Thomas Hellstrom  2013-11-14  141  			}
c58f009e01c918 Thomas Hellstrom  2013-11-14  142  
c58f009e01c918 Thomas Hellstrom  2013-11-14  143  			return VM_FAULT_RETRY;
c58f009e01c918 Thomas Hellstrom  2013-11-14  144  		}
c58f009e01c918 Thomas Hellstrom  2013-11-14  145  
c58f009e01c918 Thomas Hellstrom  2013-11-14  146  		/*
c58f009e01c918 Thomas Hellstrom  2013-11-14  147  		 * If we'd want to change locking order to
c58f009e01c918 Thomas Hellstrom  2013-11-14  148  		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
c58f009e01c918 Thomas Hellstrom  2013-11-14  149  		 * instead of retrying the fault...
c58f009e01c918 Thomas Hellstrom  2013-11-14  150  		 */
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  151  		return VM_FAULT_NOPAGE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  152  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  153  
667a50db0477d4 Thomas Hellstrom  2014-01-03  154  	/*
667a50db0477d4 Thomas Hellstrom  2014-01-03  155  	 * Refuse to fault imported pages. This should be handled
667a50db0477d4 Thomas Hellstrom  2014-01-03  156  	 * (if at all) by redirecting mmap to the exporter.
667a50db0477d4 Thomas Hellstrom  2014-01-03  157  	 */
667a50db0477d4 Thomas Hellstrom  2014-01-03  158  	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  159  		ret = VM_FAULT_SIGBUS;
667a50db0477d4 Thomas Hellstrom  2014-01-03  160  		goto out_unlock;
667a50db0477d4 Thomas Hellstrom  2014-01-03  161  	}
667a50db0477d4 Thomas Hellstrom  2014-01-03  162  
82c5da6bf8b55a Jerome Glisse     2010-04-09  163  	if (bdev->driver->fault_reserve_notify) {
5d50fcbda7b0ac Christian König   2019-01-11  164  		struct dma_fence *moving = dma_fence_get(bo->moving);
5d50fcbda7b0ac Christian König   2019-01-11  165  
4daa4fba3a3899 Souptick Joarder  2018-06-02  166  		err = bdev->driver->fault_reserve_notify(bo);
4daa4fba3a3899 Souptick Joarder  2018-06-02  167  		switch (err) {
82c5da6bf8b55a Jerome Glisse     2010-04-09  168  		case 0:
82c5da6bf8b55a Jerome Glisse     2010-04-09  169  			break;
82c5da6bf8b55a Jerome Glisse     2010-04-09  170  		case -EBUSY:
82c5da6bf8b55a Jerome Glisse     2010-04-09  171  		case -ERESTARTSYS:
de8dfb8e3449c7 Tom St Denis      2018-01-26  172  			ret = VM_FAULT_NOPAGE;
82c5da6bf8b55a Jerome Glisse     2010-04-09  173  			goto out_unlock;
82c5da6bf8b55a Jerome Glisse     2010-04-09  174  		default:
de8dfb8e3449c7 Tom St Denis      2018-01-26  175  			ret = VM_FAULT_SIGBUS;
82c5da6bf8b55a Jerome Glisse     2010-04-09  176  			goto out_unlock;
82c5da6bf8b55a Jerome Glisse     2010-04-09  177  		}
5d50fcbda7b0ac Christian König   2019-01-11  178  
5d50fcbda7b0ac Christian König   2019-01-11  179  		if (bo->moving != moving) {
5d50fcbda7b0ac Christian König   2019-01-11  180  			spin_lock(&bdev->glob->lru_lock);
5d50fcbda7b0ac Christian König   2019-01-11  181  			ttm_bo_move_to_lru_tail(bo, NULL);
5d50fcbda7b0ac Christian König   2019-01-11  182  			spin_unlock(&bdev->glob->lru_lock);
5d50fcbda7b0ac Christian König   2019-01-11  183  		}
5d50fcbda7b0ac Christian König   2019-01-11  184  		dma_fence_put(moving);
82c5da6bf8b55a Jerome Glisse     2010-04-09  185  	}
e024e11070a0a0 Dave Airlie       2009-06-24  186  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  187  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  188  	 * Wait for buffer data in transit, due to a pipelined
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  189  	 * move.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  190  	 */
11bac80004499e Dave Jiang        2017-02-24  191  	ret = ttm_bo_vm_fault_idle(bo, vmf);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  192  	if (unlikely(ret != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  193  		if (ret == VM_FAULT_RETRY &&
3089c1df10e293 Nicolai Hähnle    2017-02-18  194  		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
3089c1df10e293 Nicolai Hähnle    2017-02-18  195  			/* The BO has already been unreserved. */
de8dfb8e3449c7 Tom St Denis      2018-01-26  196  			return ret;
3089c1df10e293 Nicolai Hähnle    2017-02-18  197  		}
3089c1df10e293 Nicolai Hähnle    2017-02-18  198  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  199  		goto out_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  200  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  201  
4daa4fba3a3899 Souptick Joarder  2018-06-02  202  	err = ttm_mem_io_lock(man, true);
4daa4fba3a3899 Souptick Joarder  2018-06-02  203  	if (unlikely(err != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  204  		ret = VM_FAULT_NOPAGE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  205  		goto out_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  206  	}
4daa4fba3a3899 Souptick Joarder  2018-06-02  207  	err = ttm_mem_io_reserve_vm(bo);
4daa4fba3a3899 Souptick Joarder  2018-06-02  208  	if (unlikely(err != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  209  		ret = VM_FAULT_SIGBUS;
eba67093f53532 Thomas Hellstrom  2010-11-11  210  		goto out_io_unlock;
eba67093f53532 Thomas Hellstrom  2010-11-11  211  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  212  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  213  	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  214  		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
d386735588c3e2 Thomas Hellstrom  2013-12-08  215  	page_last = vma_pages(vma) + vma->vm_pgoff -
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  216  		drm_vma_node_start(&bo->base.vma_node);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  217  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  218  	if (unlikely(page_offset >= bo->num_pages)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  219  		ret = VM_FAULT_SIGBUS;
eba67093f53532 Thomas Hellstrom  2010-11-11  220  		goto out_io_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  221  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  222  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  223  	/*
3943875e7b73fd Thomas Hellstrom  2013-11-06  224  	 * Make a local vma copy to modify the page_prot member
3943875e7b73fd Thomas Hellstrom  2013-11-06  225  	 * and vm_flags if necessary. The vma parameter is protected
3943875e7b73fd Thomas Hellstrom  2013-11-06  226  	 * by mmap_sem in write mode.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  227  	 */
3943875e7b73fd Thomas Hellstrom  2013-11-06  228  	cvma = *vma;
3943875e7b73fd Thomas Hellstrom  2013-11-06  229  	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
3943875e7b73fd Thomas Hellstrom  2013-11-06  230  
82c5da6bf8b55a Jerome Glisse     2010-04-09  231  	if (bo->mem.bus.is_iomem) {
3943875e7b73fd Thomas Hellstrom  2013-11-06  232  		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
3943875e7b73fd Thomas Hellstrom  2013-11-06  233  						cvma.vm_page_prot);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  234  	} else {
d0cef9fa4411eb Roger He          2017-12-21  235  		struct ttm_operation_ctx ctx = {
d0cef9fa4411eb Roger He          2017-12-21  236  			.interruptible = false,
aa7662b67bf6f5 Roger He          2018-01-17  237  			.no_wait_gpu = false,
aa7662b67bf6f5 Roger He          2018-01-17  238  			.flags = TTM_OPT_FLAG_FORCE_ALLOC
aa7662b67bf6f5 Roger He          2018-01-17  239  
d0cef9fa4411eb Roger He          2017-12-21  240  		};
d0cef9fa4411eb Roger He          2017-12-21  241  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  242  		ttm = bo->ttm;
3943875e7b73fd Thomas Hellstrom  2013-11-06  243  		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
3943875e7b73fd Thomas Hellstrom  2013-11-06  244  						cvma.vm_page_prot);
b1e5f172325547 Jerome Glisse     2011-11-02  245  
b1e5f172325547 Jerome Glisse     2011-11-02  246  		/* Allocate all page at once, most common usage */
25893a14c938d5 Christian König   2018-02-01  247  		if (ttm_tt_populate(ttm, &ctx)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  248  			ret = VM_FAULT_OOM;
b1e5f172325547 Jerome Glisse     2011-11-02  249  			goto out_io_unlock;
b1e5f172325547 Jerome Glisse     2011-11-02  250  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  251  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  252  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  253  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  254  	 * Speculatively prefault a number of pages. Only error on
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  255  	 * first page.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  256  	 */
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  257  	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
95cf9264d5f36c Tom Lendacky      2017-07-17  258  		if (bo->mem.bus.is_iomem) {
95cf9264d5f36c Tom Lendacky      2017-07-17  259  			/* Iomem should not be marked encrypted */
95cf9264d5f36c Tom Lendacky      2017-07-17  260  			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
c67fa6edc8b11a Tan Xiaojun       2017-12-25  261  			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
95cf9264d5f36c Tom Lendacky      2017-07-17  262  		} else {
b1e5f172325547 Jerome Glisse     2011-11-02  263  			page = ttm->pages[page_offset];
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  264  			if (unlikely(!page && i == 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  265  				ret = VM_FAULT_OOM;
eba67093f53532 Thomas Hellstrom  2010-11-11  266  				goto out_io_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  267  			} else if (unlikely(!page)) {
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  268  				break;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  269  			}
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  270  			page->index = drm_vma_node_start(&bo->base.vma_node) +
58aa6622d32af7 Thomas Hellstrom  2014-01-03  271  				page_offset;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  272  			pfn = page_to_pfn(page);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  273  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  274  
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  275  		if (vma->vm_flags & VM_MIXEDMAP)
4daa4fba3a3899 Souptick Joarder  2018-06-02 @276  			ret = vmf_insert_mixed(&cvma, address,
01c8f1c44b83a0 Dan Williams      2016-01-15  277  					__pfn_to_pfn_t(pfn, PFN_DEV));
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  278  		else
4daa4fba3a3899 Souptick Joarder  2018-06-02 @279  			ret = vmf_insert_pfn(&cvma, address, pfn);
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  280  
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  281  		/* Never error on prefaulted PTEs */
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  282  		if (unlikely((ret & VM_FAULT_ERROR))) {
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  283  			if (i == 0)
eba67093f53532 Thomas Hellstrom  2010-11-11  284  				goto out_io_unlock;
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  285  			else
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  286  				break;
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  287  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  288  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  289  		address += PAGE_SIZE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  290  		if (unlikely(++page_offset >= page_last))
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  291  			break;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  292  	}
de8dfb8e3449c7 Tom St Denis      2018-01-26  293  	ret = VM_FAULT_NOPAGE;
eba67093f53532 Thomas Hellstrom  2010-11-11  294  out_io_unlock:
eba67093f53532 Thomas Hellstrom  2010-11-11  295  	ttm_mem_io_unlock(man);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  296  out_unlock:
52791eeec1d9f4 Christian König   2019-08-11  297  	dma_resv_unlock(bo->base.resv);
de8dfb8e3449c7 Tom St Denis      2018-01-26  298  	return ret;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  299  }
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  300  

:::::: The code at line 276 was first introduced by commit
:::::: 4daa4fba3a3899a3eefff26e38cf680661a931e4 gpu: drm: ttm: Adding new return type vm_fault_t

:::::: TO: Souptick Joarder <jrdr.linux@gmail.com>
:::::: CC: Alex Deucher <alexander.deucher@amd.com>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 24416 bytes --]


* [android-goldfish:android-5.4 1/1] drivers/gpu/drm/ttm/ttm_bo_vm.c:276: undefined reference to `vmf_insert_mixed'
@ 2020-07-09 13:39 kernel test robot
  0 siblings, 0 replies; 2+ messages in thread
From: kernel test robot @ 2020-07-09 13:39 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 16225 bytes --]

tree:   https://android.googlesource.com/kernel/goldfish android-5.4
head:   7af30e526c2120f1f6ba4b5b19d683697e2c2833
commit: 7af30e526c2120f1f6ba4b5b19d683697e2c2833 [1/1] ANDROID: GKI: Add DRM_TTM config to GKI
config: h8300-randconfig-r002-20200709 (attached as .config)
compiler: h8300-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git checkout 7af30e526c2120f1f6ba4b5b19d683697e2c2833
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=h8300 

If you fix the issue, kindly add the following tag, as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   h8300-linux-ld: arch/h8300/kernel/entry.o: in function `resume_kernel':
   arch/h8300/kernel/entry.S:324: undefined reference to `TI_PRE_COUNT'
   h8300-linux-ld: drivers/gpu/drm/ttm/ttm_bo_vm.o: in function `ttm_bo_vm_fault':
>> drivers/gpu/drm/ttm/ttm_bo_vm.c:276: undefined reference to `vmf_insert_mixed'
>> h8300-linux-ld: drivers/gpu/drm/ttm/ttm_bo_vm.c:279: undefined reference to `vmf_insert_pfn'
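
The h8300 report makes the cause clearer: h8300 is a no-MMU
architecture, so mm/memory.c (which provides vmf_insert_mixed() and
vmf_insert_pfn()) is never built, and any configuration that still
selects DRM_TTM hits the same link failure as the arm report above.
Purely as an illustration of where the unresolved calls sit, and not the
actual change taken in any tree, the call site in ttm_bo_vm_fault()
could in principle be gated on CONFIG_MMU:

        #ifdef CONFIG_MMU
                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vmf_insert_mixed(&cvma, address,
                                               __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);
        #else
                /* Hypothetical no-MMU fallback: TTM cannot map buffer
                 * object pages into userspace without page tables. */
                ret = VM_FAULT_SIGBUS;
        #endif

In practice a Kconfig dependency on MMU looks like the more plausible
resolution, since the rest of the TTM mmap path assumes an MMU as well.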

vim +276 drivers/gpu/drm/ttm/ttm_bo_vm.c

c67fa6edc8b11a Tan Xiaojun       2017-12-25  108  
4daa4fba3a3899 Souptick Joarder  2018-06-02  109  static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  110  {
11bac80004499e Dave Jiang        2017-02-24  111  	struct vm_area_struct *vma = vmf->vma;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  112  	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  113  	    vma->vm_private_data;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  114  	struct ttm_bo_device *bdev = bo->bdev;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  115  	unsigned long page_offset;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  116  	unsigned long page_last;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  117  	unsigned long pfn;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  118  	struct ttm_tt *ttm = NULL;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  119  	struct page *page;
4daa4fba3a3899 Souptick Joarder  2018-06-02  120  	int err;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  121  	int i;
4daa4fba3a3899 Souptick Joarder  2018-06-02  122  	vm_fault_t ret = VM_FAULT_NOPAGE;
1a29d85eb0f19b Jan Kara          2016-12-14  123  	unsigned long address = vmf->address;
eba67093f53532 Thomas Hellstrom  2010-11-11  124  	struct ttm_mem_type_manager *man =
eba67093f53532 Thomas Hellstrom  2010-11-11  125  		&bdev->man[bo->mem.mem_type];
3943875e7b73fd Thomas Hellstrom  2013-11-06  126  	struct vm_area_struct cvma;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  127  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  128  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  129  	 * Work around locking order reversal in fault / nopfn
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  130  	 * between mmap_sem and bo_reserve: Perform a trylock operation
c58f009e01c918 Thomas Hellstrom  2013-11-14  131  	 * for reserve, and if it fails, retry the fault after waiting
c58f009e01c918 Thomas Hellstrom  2013-11-14  132  	 * for the buffer to become unreserved.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  133  	 */
52791eeec1d9f4 Christian König   2019-08-11  134  	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
c58f009e01c918 Thomas Hellstrom  2013-11-14  135  		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
c58f009e01c918 Thomas Hellstrom  2013-11-14  136  			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
8129fdad387ae3 Thomas Zimmermann 2018-06-21  137  				ttm_bo_get(bo);
11bac80004499e Dave Jiang        2017-02-24  138  				up_read(&vmf->vma->vm_mm->mmap_sem);
c58f009e01c918 Thomas Hellstrom  2013-11-14  139  				(void) ttm_bo_wait_unreserved(bo);
f44907593d746d Thomas Zimmermann 2018-06-21  140  				ttm_bo_put(bo);
c58f009e01c918 Thomas Hellstrom  2013-11-14  141  			}
c58f009e01c918 Thomas Hellstrom  2013-11-14  142  
c58f009e01c918 Thomas Hellstrom  2013-11-14  143  			return VM_FAULT_RETRY;
c58f009e01c918 Thomas Hellstrom  2013-11-14  144  		}
c58f009e01c918 Thomas Hellstrom  2013-11-14  145  
c58f009e01c918 Thomas Hellstrom  2013-11-14  146  		/*
c58f009e01c918 Thomas Hellstrom  2013-11-14  147  		 * If we'd want to change locking order to
c58f009e01c918 Thomas Hellstrom  2013-11-14  148  		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
c58f009e01c918 Thomas Hellstrom  2013-11-14  149  		 * instead of retrying the fault...
c58f009e01c918 Thomas Hellstrom  2013-11-14  150  		 */
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  151  		return VM_FAULT_NOPAGE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  152  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  153  
667a50db0477d4 Thomas Hellstrom  2014-01-03  154  	/*
667a50db0477d4 Thomas Hellstrom  2014-01-03  155  	 * Refuse to fault imported pages. This should be handled
667a50db0477d4 Thomas Hellstrom  2014-01-03  156  	 * (if at all) by redirecting mmap to the exporter.
667a50db0477d4 Thomas Hellstrom  2014-01-03  157  	 */
667a50db0477d4 Thomas Hellstrom  2014-01-03  158  	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  159  		ret = VM_FAULT_SIGBUS;
667a50db0477d4 Thomas Hellstrom  2014-01-03  160  		goto out_unlock;
667a50db0477d4 Thomas Hellstrom  2014-01-03  161  	}
667a50db0477d4 Thomas Hellstrom  2014-01-03  162  
82c5da6bf8b55a Jerome Glisse     2010-04-09  163  	if (bdev->driver->fault_reserve_notify) {
5d50fcbda7b0ac Christian König   2019-01-11  164  		struct dma_fence *moving = dma_fence_get(bo->moving);
5d50fcbda7b0ac Christian König   2019-01-11  165  
4daa4fba3a3899 Souptick Joarder  2018-06-02  166  		err = bdev->driver->fault_reserve_notify(bo);
4daa4fba3a3899 Souptick Joarder  2018-06-02  167  		switch (err) {
82c5da6bf8b55a Jerome Glisse     2010-04-09  168  		case 0:
82c5da6bf8b55a Jerome Glisse     2010-04-09  169  			break;
82c5da6bf8b55a Jerome Glisse     2010-04-09  170  		case -EBUSY:
82c5da6bf8b55a Jerome Glisse     2010-04-09  171  		case -ERESTARTSYS:
de8dfb8e3449c7 Tom St Denis      2018-01-26  172  			ret = VM_FAULT_NOPAGE;
82c5da6bf8b55a Jerome Glisse     2010-04-09  173  			goto out_unlock;
82c5da6bf8b55a Jerome Glisse     2010-04-09  174  		default:
de8dfb8e3449c7 Tom St Denis      2018-01-26  175  			ret = VM_FAULT_SIGBUS;
82c5da6bf8b55a Jerome Glisse     2010-04-09  176  			goto out_unlock;
82c5da6bf8b55a Jerome Glisse     2010-04-09  177  		}
5d50fcbda7b0ac Christian König   2019-01-11  178  
5d50fcbda7b0ac Christian König   2019-01-11  179  		if (bo->moving != moving) {
5d50fcbda7b0ac Christian König   2019-01-11  180  			spin_lock(&bdev->glob->lru_lock);
5d50fcbda7b0ac Christian König   2019-01-11  181  			ttm_bo_move_to_lru_tail(bo, NULL);
5d50fcbda7b0ac Christian König   2019-01-11  182  			spin_unlock(&bdev->glob->lru_lock);
5d50fcbda7b0ac Christian König   2019-01-11  183  		}
5d50fcbda7b0ac Christian König   2019-01-11  184  		dma_fence_put(moving);
82c5da6bf8b55a Jerome Glisse     2010-04-09  185  	}
e024e11070a0a0 Dave Airlie       2009-06-24  186  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  187  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  188  	 * Wait for buffer data in transit, due to a pipelined
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  189  	 * move.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  190  	 */
11bac80004499e Dave Jiang        2017-02-24  191  	ret = ttm_bo_vm_fault_idle(bo, vmf);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  192  	if (unlikely(ret != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  193  		if (ret == VM_FAULT_RETRY &&
3089c1df10e293 Nicolai Hähnle    2017-02-18  194  		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
3089c1df10e293 Nicolai Hähnle    2017-02-18  195  			/* The BO has already been unreserved. */
de8dfb8e3449c7 Tom St Denis      2018-01-26  196  			return ret;
3089c1df10e293 Nicolai Hähnle    2017-02-18  197  		}
3089c1df10e293 Nicolai Hähnle    2017-02-18  198  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  199  		goto out_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  200  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  201  
4daa4fba3a3899 Souptick Joarder  2018-06-02  202  	err = ttm_mem_io_lock(man, true);
4daa4fba3a3899 Souptick Joarder  2018-06-02  203  	if (unlikely(err != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  204  		ret = VM_FAULT_NOPAGE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  205  		goto out_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  206  	}
4daa4fba3a3899 Souptick Joarder  2018-06-02  207  	err = ttm_mem_io_reserve_vm(bo);
4daa4fba3a3899 Souptick Joarder  2018-06-02  208  	if (unlikely(err != 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  209  		ret = VM_FAULT_SIGBUS;
eba67093f53532 Thomas Hellstrom  2010-11-11  210  		goto out_io_unlock;
eba67093f53532 Thomas Hellstrom  2010-11-11  211  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  212  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  213  	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  214  		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
d386735588c3e2 Thomas Hellstrom  2013-12-08  215  	page_last = vma_pages(vma) + vma->vm_pgoff -
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  216  		drm_vma_node_start(&bo->base.vma_node);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  217  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  218  	if (unlikely(page_offset >= bo->num_pages)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  219  		ret = VM_FAULT_SIGBUS;
eba67093f53532 Thomas Hellstrom  2010-11-11  220  		goto out_io_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  221  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  222  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  223  	/*
3943875e7b73fd Thomas Hellstrom  2013-11-06  224  	 * Make a local vma copy to modify the page_prot member
3943875e7b73fd Thomas Hellstrom  2013-11-06  225  	 * and vm_flags if necessary. The vma parameter is protected
3943875e7b73fd Thomas Hellstrom  2013-11-06  226  	 * by mmap_sem in write mode.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  227  	 */
3943875e7b73fd Thomas Hellstrom  2013-11-06  228  	cvma = *vma;
3943875e7b73fd Thomas Hellstrom  2013-11-06  229  	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
3943875e7b73fd Thomas Hellstrom  2013-11-06  230  
82c5da6bf8b55a Jerome Glisse     2010-04-09  231  	if (bo->mem.bus.is_iomem) {
3943875e7b73fd Thomas Hellstrom  2013-11-06  232  		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
3943875e7b73fd Thomas Hellstrom  2013-11-06  233  						cvma.vm_page_prot);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  234  	} else {
d0cef9fa4411eb Roger He          2017-12-21  235  		struct ttm_operation_ctx ctx = {
d0cef9fa4411eb Roger He          2017-12-21  236  			.interruptible = false,
aa7662b67bf6f5 Roger He          2018-01-17  237  			.no_wait_gpu = false,
aa7662b67bf6f5 Roger He          2018-01-17  238  			.flags = TTM_OPT_FLAG_FORCE_ALLOC
aa7662b67bf6f5 Roger He          2018-01-17  239  
d0cef9fa4411eb Roger He          2017-12-21  240  		};
d0cef9fa4411eb Roger He          2017-12-21  241  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  242  		ttm = bo->ttm;
3943875e7b73fd Thomas Hellstrom  2013-11-06  243  		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
3943875e7b73fd Thomas Hellstrom  2013-11-06  244  						cvma.vm_page_prot);
b1e5f172325547 Jerome Glisse     2011-11-02  245  
b1e5f172325547 Jerome Glisse     2011-11-02  246  		/* Allocate all page at once, most common usage */
25893a14c938d5 Christian König   2018-02-01  247  		if (ttm_tt_populate(ttm, &ctx)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  248  			ret = VM_FAULT_OOM;
b1e5f172325547 Jerome Glisse     2011-11-02  249  			goto out_io_unlock;
b1e5f172325547 Jerome Glisse     2011-11-02  250  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  251  	}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  252  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  253  	/*
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  254  	 * Speculatively prefault a number of pages. Only error on
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  255  	 * first page.
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  256  	 */
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  257  	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
95cf9264d5f36c Tom Lendacky      2017-07-17  258  		if (bo->mem.bus.is_iomem) {
95cf9264d5f36c Tom Lendacky      2017-07-17  259  			/* Iomem should not be marked encrypted */
95cf9264d5f36c Tom Lendacky      2017-07-17  260  			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
c67fa6edc8b11a Tan Xiaojun       2017-12-25  261  			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
95cf9264d5f36c Tom Lendacky      2017-07-17  262  		} else {
b1e5f172325547 Jerome Glisse     2011-11-02  263  			page = ttm->pages[page_offset];
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  264  			if (unlikely(!page && i == 0)) {
de8dfb8e3449c7 Tom St Denis      2018-01-26  265  				ret = VM_FAULT_OOM;
eba67093f53532 Thomas Hellstrom  2010-11-11  266  				goto out_io_unlock;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  267  			} else if (unlikely(!page)) {
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  268  				break;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  269  			}
b96f3e7c8069b7 Gerd Hoffmann     2019-08-05  270  			page->index = drm_vma_node_start(&bo->base.vma_node) +
58aa6622d32af7 Thomas Hellstrom  2014-01-03  271  				page_offset;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  272  			pfn = page_to_pfn(page);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  273  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  274  
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  275  		if (vma->vm_flags & VM_MIXEDMAP)
4daa4fba3a3899 Souptick Joarder  2018-06-02 @276  			ret = vmf_insert_mixed(&cvma, address,
01c8f1c44b83a0 Dan Williams      2016-01-15  277  					__pfn_to_pfn_t(pfn, PFN_DEV));
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  278  		else
4daa4fba3a3899 Souptick Joarder  2018-06-02 @279  			ret = vmf_insert_pfn(&cvma, address, pfn);
7dfe8b6187f43d Thomas Hellstrom  2014-01-03  280  
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  281  		/* Never error on prefaulted PTEs */
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  282  		if (unlikely((ret & VM_FAULT_ERROR))) {
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  283  			if (i == 0)
eba67093f53532 Thomas Hellstrom  2010-11-11  284  				goto out_io_unlock;
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  285  			else
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  286  				break;
941f2f72dbbe0c Thomas Hellstrom  2019-09-12  287  		}
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  288  
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  289  		address += PAGE_SIZE;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  290  		if (unlikely(++page_offset >= page_last))
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  291  			break;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  292  	}
de8dfb8e3449c7 Tom St Denis      2018-01-26  293  	ret = VM_FAULT_NOPAGE;
eba67093f53532 Thomas Hellstrom  2010-11-11  294  out_io_unlock:
eba67093f53532 Thomas Hellstrom  2010-11-11  295  	ttm_mem_io_unlock(man);
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  296  out_unlock:
52791eeec1d9f4 Christian König   2019-08-11  297  	dma_resv_unlock(bo->base.resv);
de8dfb8e3449c7 Tom St Denis      2018-01-26  298  	return ret;
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  299  }
ba4e7d973dd09b Thomas Hellstrom  2009-06-10  300  

:::::: The code at line 276 was first introduced by commit
:::::: 4daa4fba3a3899a3eefff26e38cf680661a931e4 gpu: drm: ttm: Adding new return type vm_fault_t

:::::: TO: Souptick Joarder <jrdr.linux@gmail.com>
:::::: CC: Alex Deucher <alexander.deucher@amd.com>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 23942 bytes --]

