* [PATCH] nouveau/hmm: fix migrate zero page to GPU
@ 2020-05-20 18:36 Ralph Campbell
  2020-05-20 19:20 ` Jason Gunthorpe
  0 siblings, 1 reply; 4+ messages in thread
From: Ralph Campbell @ 2020-05-20 18:36 UTC
  To: nouveau, linux-rdma, linux-kernel
  Cc: Jerome Glisse, John Hubbard, Christoph Hellwig, Jason Gunthorpe,
	Ben Skeggs, Ralph Campbell

When calling OpenCL clEnqueueSVMMigrateMem() on a region of memory that
is backed by pte_none() or zero pages, migrate_vma_setup() will fill the
source PFN array with an entry indicating the source page is zero.
Use this to optimize migration to device private memory by allocating
GPU memory and zero filling it instead of failing to migrate the page.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
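
For context, here is a minimal sketch (not part of the patch) of the
migrate_vma convention this change relies on: for a pte_none() or
zero-page mapping, migrate_vma_setup() sets MIGRATE_PFN_MIGRATE in the
source entry but records no source page, so migrate_pfn_to_page()
returns NULL and the destination can be zero-filled instead of copied:

	unsigned long src = args->src[i];
	struct page *spage = migrate_pfn_to_page(src);

	if (!(src & MIGRATE_PFN_MIGRATE))
		return 0;		/* entry is not being migrated */
	if (spage) {
		/* normal page: dma_map_page() it and copy to VRAM */
	} else {
		/* hole/zero page: allocate VRAM and zero-fill it */
	}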

This patch applies cleanly to Jason Gunthorpe's hmm tree plus two
patches I posted earlier. The first is queued in Ben Skeggs' nouveau
tree and the second is still pending review/not queued.
[1] ("nouveau/hmm: map pages after migration")
https://lore.kernel.org/linux-mm/20200304001339.8248-5-rcampbell@nvidia.com/
[2] ("nouveau/hmm: fix nouveau_dmem_chunk allocations")
https://lore.kernel.org/lkml/20200421231107.30958-1-rcampbell@nvidia.com/
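
Also a note for reviewers on the zero-fill arithmetic in
nvc0b5_migrate_clear() below. This is a sketch of my reading of the
REMAP fields (names taken from the comments in the function), not an
authoritative description of the copy-engine class:

	/*
	 * Each element written is two 4-byte destination components,
	 * both taken from constants A and B, which methods
	 * 0x0700/0x0704 program to zero:
	 *
	 *   bytes_per_element = 4 (COMPONENT_SIZE_FOUR) *
	 *			 2 (NUM_DST_COMPONENTS_TWO) = 8
	 *   element_count     = length >> 3
	 *
	 * length is page-sized here, so it is always a multiple of 8.
	 */

This is also why the last hunk keys the nr_dma accounting off
dma_mapping_error() rather than off a non-zero dst entry: the clear
path produces a valid destination page but sets *dma_addr to
DMA_MAPPING_ERROR, so there is no mapping to unmap later.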

 drivers/gpu/drm/nouveau/nouveau_dmem.c | 75 ++++++++++++++++++++++----
 1 file changed, 66 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index cbc71567f9a5..e5c230d9ae24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -56,6 +56,8 @@ enum nouveau_aper {
 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
 				      enum nouveau_aper, u64 dst_addr,
 				      enum nouveau_aper, u64 src_addr);
+typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
+				      enum nouveau_aper, u64 dst_addr);
 
 struct nouveau_dmem_chunk {
 	struct list_head list;
@@ -67,6 +69,7 @@ struct nouveau_dmem_chunk {
 
 struct nouveau_dmem_migrate {
 	nouveau_migrate_copy_t copy_func;
+	nouveau_clear_page_t clear_func;
 	struct nouveau_channel *chan;
 };
 
@@ -436,6 +439,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
 	return 0;
 }
 
+static int
+nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
+		     enum nouveau_aper dst_aper, u64 dst_addr)
+{
+	struct nouveau_channel *chan = drm->dmem->migrate.chan;
+	u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
+			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+	u32 remap = (4 <<  0) /* DST_X_CONST_A */ |
+		    (5 <<  4) /* DST_Y_CONST_B */ |
+		    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
+		    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	switch (dst_aper) {
+	case NOUVEAU_APER_VRAM:
+		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+		break;
+	case NOUVEAU_APER_HOST:
+		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+		break;
+	default:
+		return -EINVAL;
+	}
+	launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+	BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, remap);
+	BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
+	OUT_RING(chan, upper_32_bits(dst_addr));
+	OUT_RING(chan, lower_32_bits(dst_addr));
+	BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
+	OUT_RING(chan, length >> 3);
+	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+	OUT_RING(chan, launch_dma);
+	return 0;
+}
+
 static int
 nouveau_dmem_migrate_init(struct nouveau_drm *drm)
 {
@@ -445,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
 	case  VOLTA_DMA_COPY_A:
 	case TURING_DMA_COPY_A:
 		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
 		drm->dmem->migrate.chan = drm->ttm.chan;
 		return 0;
 	default:
@@ -487,21 +537,28 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 	unsigned long paddr;
 
 	spage = migrate_pfn_to_page(src);
-	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+	if (!(src & MIGRATE_PFN_MIGRATE))
 		goto out;
 
 	dpage = nouveau_dmem_page_alloc_locked(drm);
 	if (!dpage)
 		goto out;
 
-	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dev, *dma_addr))
-		goto out_free_page;
-
 	paddr = nouveau_dmem_page_addr(dpage);
-	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-			paddr, NOUVEAU_APER_HOST, *dma_addr))
-		goto out_dma_unmap;
+	if (spage) {
+		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+					 DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, *dma_addr))
+			goto out_free_page;
+		if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+			goto out_dma_unmap;
+	} else {
+		*dma_addr = DMA_MAPPING_ERROR;
+		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+			NOUVEAU_APER_VRAM, paddr))
+			goto out_free_page;
+	}
 
 	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
@@ -528,7 +585,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
 	for (i = 0; addr < args->end; i++) {
 		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
 				dma_addrs + nr_dma, pfns + i);
-		if (args->dst[i])
+		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
 			nr_dma++;
 		addr += PAGE_SIZE;
 	}
-- 
2.20.1



* Re: [PATCH] nouveau/hmm: fix migrate zero page to GPU
  2020-05-20 18:36 [PATCH] nouveau/hmm: fix migrate zero page to GPU Ralph Campbell
@ 2020-05-20 19:20 ` Jason Gunthorpe
  2020-05-20 21:05   ` Ralph Campbell
  0 siblings, 1 reply; 4+ messages in thread
From: Jason Gunthorpe @ 2020-05-20 19:20 UTC
  To: Ralph Campbell
  Cc: nouveau, linux-rdma, linux-kernel, Jerome Glisse, John Hubbard,
	Christoph Hellwig, Ben Skeggs

On Wed, May 20, 2020 at 11:36:52AM -0700, Ralph Campbell wrote:
> When calling OpenCL clEnqueueSVMMigrateMem() on a region of memory that
> is backed by pte_none() or zero pages, migrate_vma_setup() will fill the
> source PFN array with an entry indicating the source page is zero.
> Use this to optimize migration to device private memory by allocating
> GPU memory and zero filling it instead of failing to migrate the page.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> 
> This patch applies cleanly to Jason Gunthorpe's hmm tree plus two
> patches I posted earlier. The first is queued in Ben Skeggs' nouveau
> tree and the second is still pending review/not queued.
> [1] ("nouveau/hmm: map pages after migration")
> https://lore.kernel.org/linux-mm/20200304001339.8248-5-rcampbell@nvidia.com/
> [2] ("nouveau/hmm: fix nouveau_dmem_chunk allocations")
> https://lore.kernel.org/lkml/20200421231107.30958-1-rcampbell@nvidia.com/

It would be best if this goes through Ben's tree, provided it doesn't
conflict with the hunks I have in the hmm tree. Is that the case?

Jason


* Re: [PATCH] nouveau/hmm: fix migrate zero page to GPU
  2020-05-20 19:20 ` Jason Gunthorpe
@ 2020-05-20 21:05   ` Ralph Campbell
  2020-05-22  0:47     ` [Nouveau] " Ben Skeggs
  0 siblings, 1 reply; 4+ messages in thread
From: Ralph Campbell @ 2020-05-20 21:05 UTC
  To: Jason Gunthorpe
  Cc: nouveau, linux-rdma, linux-kernel, Jerome Glisse, John Hubbard,
	Christoph Hellwig, Ben Skeggs


On 5/20/20 12:20 PM, Jason Gunthorpe wrote:
> On Wed, May 20, 2020 at 11:36:52AM -0700, Ralph Campbell wrote:
>> When calling OpenCL clEnqueueSVMMigrateMem() on a region of memory that
>> is backed by pte_none() or zero pages, migrate_vma_setup() will fill the
>> source PFN array with an entry indicating the source page is zero.
>> Use this to optimize migration to device private memory by allocating
>> GPU memory and zero filling it instead of failing to migrate the page.
>>
>> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
>>
>> This patch applies cleanly to Jason Gunthorpe's hmm tree plus two
>> patches I posted earlier. The first is queued in Ben Skeggs' nouveau
>> tree and the second is still pending review/not queued.
>> [1] ("nouveau/hmm: map pages after migration")
>> https://lore.kernel.org/linux-mm/20200304001339.8248-5-rcampbell@nvidia.com/
>> [2] ("nouveau/hmm: fix nouveau_dmem_chunk allocations")
>> https://lore.kernel.org/lkml/20200421231107.30958-1-rcampbell@nvidia.com/
> 
> It would be best if this goes through Ben's tree, provided it doesn't
> conflict with the hunks I have in the hmm tree. Is that the case?
> 
> Jason

I think there might be some merge conflicts even though it is semantically
independent of the other changes. Since we are at 5.7-rc6 and not far
from the merge window, I can rebase after 5.8-rc1 and resend. I posted
this mostly to get some review and as a "heads up" about the issue.


* Re: [Nouveau] [PATCH] nouveau/hmm: fix migrate zero page to GPU
  2020-05-20 21:05   ` Ralph Campbell
@ 2020-05-22  0:47     ` Ben Skeggs
  0 siblings, 0 replies; 4+ messages in thread
From: Ben Skeggs @ 2020-05-22  0:47 UTC
  To: Ralph Campbell
  Cc: Jason Gunthorpe, linux-rdma, ML nouveau, LKML, Ben Skeggs,
	Christoph Hellwig

On Thu, 21 May 2020 at 07:05, Ralph Campbell <rcampbell@nvidia.com> wrote:
>
>
> On 5/20/20 12:20 PM, Jason Gunthorpe wrote:
> > On Wed, May 20, 2020 at 11:36:52AM -0700, Ralph Campbell wrote:
> >> When calling OpenCL clEnqueueSVMMigrateMem() on a region of memory that
> >> is backed by pte_none() or zero pages, migrate_vma_setup() will fill the
> >> source PFN array with an entry indicating the source page is zero.
> >> Use this to optimize migration to device private memory by allocating
> >> GPU memory and zero filling it instead of failing to migrate the page.
> >>
> >> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> >>
> >> This patch applies cleanly to Jason Gunthorpe's hmm tree plus two
> >> patches I posted earlier. The first is queued in Ben Skeggs' nouveau
> >> tree and the second is still pending review/not queued.
> >> [1] ("nouveau/hmm: map pages after migration")
> >> https://lore.kernel.org/linux-mm/20200304001339.8248-5-rcampbell@nvidia.com/
> >> [2] ("nouveau/hmm: fix nouveau_dmem_chunk allocations")
> >> https://lore.kernel.org/lkml/20200421231107.30958-1-rcampbell@nvidia.com/
> >
> > It would be best if this goes through Ben's tree, provided it doesn't
> > conflict with the hunks I have in the hmm tree. Is that the case?
> >
> > Jason
>
> I think there might be some merge conflicts even though it is semantically
> independent of the other changes. Since we are at 5.7-rc6 and not far
> from the merge window, I can rebase after 5.8-rc1 and resend. I posted
> this mostly to get some review and as a "heads up" about the issue.
Both look alright to me, and apply cleanly on top of my tree already,
so I've got them locally.

Ben.
