All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-07 15:02 ` Ralph Campbell
  0 siblings, 0 replies; 15+ messages in thread
From: Ralph Campbell @ 2019-08-07 15:02 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, amd-gfx, dri-devel, nouveau, Ralph Campbell,
	Christoph Hellwig, Jason Gunthorpe, Jérôme Glisse,
	Ben Skeggs

When memory is migrated to the GPU it is likely to be accessed by GPU
code soon afterwards. Instead of waiting for a GPU fault, map the
migrated memory into the GPU page tables with the same access permissions
as the source CPU page table entries. This preserves copy on write
semantics.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
---

This patch is based on top of Christoph Hellwig's 9 patch series
https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
"turn the hmm migrate_vma upside down" but without patch 9
"mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.


 drivers/gpu/drm/nouveau/nouveau_dmem.c | 45 +++++++++-----
 drivers/gpu/drm/nouveau/nouveau_svm.c  | 86 ++++++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_svm.h  | 19 ++++++
 3 files changed, 133 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ef9de82b0744..c83e6f118817 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -25,11 +25,13 @@
 #include "nouveau_dma.h"
 #include "nouveau_mem.h"
 #include "nouveau_bo.h"
+#include "nouveau_svm.h"
 
 #include <nvif/class.h>
 #include <nvif/object.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
+#include <nvif/if000c.h>
 
 #include <linux/sched/mm.h>
 #include <linux/hmm.h>
@@ -560,11 +562,12 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 }
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
-		struct vm_area_struct *vma, unsigned long addr,
-		unsigned long src, dma_addr_t *dma_addr)
+		struct vm_area_struct *vma, unsigned long src,
+		dma_addr_t *dma_addr, u64 *pfn)
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
+	unsigned long paddr;
 
 	spage = migrate_pfn_to_page(src);
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
@@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 
 	dpage = nouveau_dmem_page_alloc_locked(drm);
 	if (!dpage)
-		return 0;
+		goto out;
 
 	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, *dma_addr))
 		goto out_free_page;
 
+	paddr = nouveau_dmem_page_addr(dpage);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
-			*dma_addr))
+			paddr, NOUVEAU_APER_HOST, *dma_addr))
 		goto out_dma_unmap;
 
+	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+	if (src & MIGRATE_PFN_WRITE)
+		*pfn |= NVIF_VMM_PFNMAP_V0_W;
 	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 
 out_dma_unmap:
@@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 out_free_page:
 	nouveau_dmem_page_free_locked(drm, dpage);
 out:
+	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
 	return 0;
 }
 
 static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
-		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
+		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
 {
 	struct nouveau_fence *fence;
 	unsigned long addr = args->start, nr_dma = 0, i;
 
 	for (i = 0; addr < args->end; i++) {
 		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
-				addr, args->src[i], &dma_addrs[nr_dma]);
+				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
 		if (args->dst[i])
 			nr_dma++;
 		addr += PAGE_SIZE;
@@ -615,10 +623,6 @@ static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
 		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
 				DMA_BIDIRECTIONAL);
 	}
-	/*
-	 * FIXME optimization: update GPU page table to point to newly migrated
-	 * memory.
-	 */
 	migrate_vma_finalize(args);
 }
 
@@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	unsigned long npages = (end - start) >> PAGE_SHIFT;
 	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
 	dma_addr_t *dma_addrs;
+	u64 *pfns;
 	struct migrate_vma args = {
 		.vma		= vma,
 		.start		= start,
 	};
-	unsigned long c, i;
+	unsigned long i;
 	int ret = -ENOMEM;
 
 	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
@@ -649,19 +654,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	if (!dma_addrs)
 		goto out_free_dst;
 
-	for (i = 0; i < npages; i += c) {
-		c = min(SG_MAX_SINGLE_ALLOC, npages);
-		args.end = start + (c << PAGE_SHIFT);
+	pfns = nouveau_pfns_alloc(max);
+	if (!pfns)
+		goto out_free_dma;
+
+	for (i = 0; i < npages; i += max) {
+		args.end = start + (max << PAGE_SHIFT);
 		ret = migrate_vma_setup(&args);
 		if (ret)
-			goto out_free_dma;
+			goto out_free_pfns;
 
 		if (args.cpages)
-			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs);
+			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs,
+						   pfns);
 		args.start = args.end;
 	}
 
 	ret = 0;
+out_free_pfns:
+	nouveau_pfns_free(pfns);
 out_free_dma:
 	kfree(dma_addrs);
 out_free_dst:
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a74530b5a523..3e6d7f226576 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
 
+struct nouveau_pfnmap_args {
+	struct nvif_ioctl_v0 i;
+	struct nvif_ioctl_mthd_v0 m;
+	struct nvif_vmm_pfnmap_v0 p;
+};
+
 struct nouveau_ivmm {
 	struct nouveau_svmm *svmm;
 	u64 inst;
@@ -734,6 +740,86 @@ nouveau_svm_fault(struct nvif_notify *notify)
 	return NVIF_NOTIFY_KEEP;
 }
 
+static inline struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+	struct nvif_vmm_pfnmap_v0 *p =
+		container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
+
+	return container_of(p, struct nouveau_pfnmap_args, p);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+	struct nouveau_pfnmap_args *args;
+
+	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
+			GFP_KERNEL);
+	if (!args)
+		return NULL;
+
+	args->i.type = NVIF_IOCTL_V0_MTHD;
+	args->m.method = NVIF_VMM_V0_PFNMAP;
+	args->p.page = PAGE_SHIFT;
+
+	return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+	kfree(args);
+}
+
+static struct nouveau_svmm *
+nouveau_find_svmm(struct nouveau_svm *svm, struct mm_struct *mm)
+{
+	struct nouveau_ivmm *ivmm;
+
+	list_for_each_entry(ivmm, &svm->inst, head) {
+		if (ivmm->svmm->mm == mm)
+			return ivmm->svmm;
+	}
+	return NULL;
+}
+
+void
+nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		 unsigned long addr, u64 *pfns, unsigned long npages)
+{
+	struct nouveau_svm *svm = drm->svm;
+	struct nouveau_svmm *svmm;
+	struct nouveau_pfnmap_args *args;
+	int ret;
+
+	if (!svm)
+		return;
+
+	mutex_lock(&svm->mutex);
+	svmm = nouveau_find_svmm(svm, mm);
+	if (!svmm) {
+		mutex_unlock(&svm->mutex);
+		return;
+	}
+	mutex_unlock(&svm->mutex);
+
+	args = nouveau_pfns_to_args(pfns);
+	args->p.addr = addr;
+	args->p.size = npages << PAGE_SHIFT;
+
+	mutex_lock(&svmm->mutex);
+
+	svmm->vmm->vmm.object.client->super = true;
+	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+				npages * sizeof(args->p.phys[0]), NULL);
+	svmm->vmm->vmm.object.client->super = false;
+
+	mutex_unlock(&svmm->mutex);
+}
+
 static void
 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e839d8189461..c00c177e51ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
 int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
 void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
 int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		      unsigned long addr, u64 *pfns, unsigned long npages);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
 static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
@@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
 {
 	return -ENOSYS;
 }
+
+u64 *nouveau_pfns_alloc(unsigned long npages)
+{
+	return NULL;
+}
+
+void nouveau_pfns_free(u64 *pfns)
+{
+}
+
+void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		      unsigned long addr, u64 *pfns, unsigned long npages)
+{
+}
 #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 #endif
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-07 15:02 ` Ralph Campbell
  0 siblings, 0 replies; 15+ messages in thread
From: Ralph Campbell @ 2019-08-07 15:02 UTC (permalink / raw)
  To: linux-mm
  Cc: Ralph Campbell, nouveau, linux-kernel, dri-devel,
	Jérôme Glisse, Jason Gunthorpe, amd-gfx,
	Christoph Hellwig, Ben Skeggs

When memory is migrated to the GPU it is likely to be accessed by GPU
code soon afterwards. Instead of waiting for a GPU fault, map the
migrated memory into the GPU page tables with the same access permissions
as the source CPU page table entries. This preserves copy on write
semantics.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
---

This patch is based on top of Christoph Hellwig's 9 patch series
https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
"turn the hmm migrate_vma upside down" but without patch 9
"mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.


 drivers/gpu/drm/nouveau/nouveau_dmem.c | 45 +++++++++-----
 drivers/gpu/drm/nouveau/nouveau_svm.c  | 86 ++++++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nouveau_svm.h  | 19 ++++++
 3 files changed, 133 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ef9de82b0744..c83e6f118817 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -25,11 +25,13 @@
 #include "nouveau_dma.h"
 #include "nouveau_mem.h"
 #include "nouveau_bo.h"
+#include "nouveau_svm.h"
 
 #include <nvif/class.h>
 #include <nvif/object.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
+#include <nvif/if000c.h>
 
 #include <linux/sched/mm.h>
 #include <linux/hmm.h>
@@ -560,11 +562,12 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 }
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
-		struct vm_area_struct *vma, unsigned long addr,
-		unsigned long src, dma_addr_t *dma_addr)
+		struct vm_area_struct *vma, unsigned long src,
+		dma_addr_t *dma_addr, u64 *pfn)
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
+	unsigned long paddr;
 
 	spage = migrate_pfn_to_page(src);
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
@@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 
 	dpage = nouveau_dmem_page_alloc_locked(drm);
 	if (!dpage)
-		return 0;
+		goto out;
 
 	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, *dma_addr))
 		goto out_free_page;
 
+	paddr = nouveau_dmem_page_addr(dpage);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
-			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
-			*dma_addr))
+			paddr, NOUVEAU_APER_HOST, *dma_addr))
 		goto out_dma_unmap;
 
+	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+	if (src & MIGRATE_PFN_WRITE)
+		*pfn |= NVIF_VMM_PFNMAP_V0_W;
 	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 
 out_dma_unmap:
@@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 out_free_page:
 	nouveau_dmem_page_free_locked(drm, dpage);
 out:
+	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
 	return 0;
 }
 
 static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
-		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
+		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
 {
 	struct nouveau_fence *fence;
 	unsigned long addr = args->start, nr_dma = 0, i;
 
 	for (i = 0; addr < args->end; i++) {
 		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
-				addr, args->src[i], &dma_addrs[nr_dma]);
+				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
 		if (args->dst[i])
 			nr_dma++;
 		addr += PAGE_SIZE;
@@ -615,10 +623,6 @@ static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
 		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
 				DMA_BIDIRECTIONAL);
 	}
-	/*
-	 * FIXME optimization: update GPU page table to point to newly migrated
-	 * memory.
-	 */
 	migrate_vma_finalize(args);
 }
 
@@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	unsigned long npages = (end - start) >> PAGE_SHIFT;
 	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
 	dma_addr_t *dma_addrs;
+	u64 *pfns;
 	struct migrate_vma args = {
 		.vma		= vma,
 		.start		= start,
 	};
-	unsigned long c, i;
+	unsigned long i;
 	int ret = -ENOMEM;
 
 	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
@@ -649,19 +654,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	if (!dma_addrs)
 		goto out_free_dst;
 
-	for (i = 0; i < npages; i += c) {
-		c = min(SG_MAX_SINGLE_ALLOC, npages);
-		args.end = start + (c << PAGE_SHIFT);
+	pfns = nouveau_pfns_alloc(max);
+	if (!pfns)
+		goto out_free_dma;
+
+	for (i = 0; i < npages; i += max) {
+		args.end = start + (max << PAGE_SHIFT);
 		ret = migrate_vma_setup(&args);
 		if (ret)
-			goto out_free_dma;
+			goto out_free_pfns;
 
 		if (args.cpages)
-			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs);
+			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs,
+						   pfns);
 		args.start = args.end;
 	}
 
 	ret = 0;
+out_free_pfns:
+	nouveau_pfns_free(pfns);
 out_free_dma:
 	kfree(dma_addrs);
 out_free_dst:
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a74530b5a523..3e6d7f226576 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
 
+struct nouveau_pfnmap_args {
+	struct nvif_ioctl_v0 i;
+	struct nvif_ioctl_mthd_v0 m;
+	struct nvif_vmm_pfnmap_v0 p;
+};
+
 struct nouveau_ivmm {
 	struct nouveau_svmm *svmm;
 	u64 inst;
@@ -734,6 +740,86 @@ nouveau_svm_fault(struct nvif_notify *notify)
 	return NVIF_NOTIFY_KEEP;
 }
 
+static inline struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+	struct nvif_vmm_pfnmap_v0 *p =
+		container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
+
+	return container_of(p, struct nouveau_pfnmap_args, p);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+	struct nouveau_pfnmap_args *args;
+
+	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
+			GFP_KERNEL);
+	if (!args)
+		return NULL;
+
+	args->i.type = NVIF_IOCTL_V0_MTHD;
+	args->m.method = NVIF_VMM_V0_PFNMAP;
+	args->p.page = PAGE_SHIFT;
+
+	return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+	kfree(args);
+}
+
+static struct nouveau_svmm *
+nouveau_find_svmm(struct nouveau_svm *svm, struct mm_struct *mm)
+{
+	struct nouveau_ivmm *ivmm;
+
+	list_for_each_entry(ivmm, &svm->inst, head) {
+		if (ivmm->svmm->mm == mm)
+			return ivmm->svmm;
+	}
+	return NULL;
+}
+
+void
+nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		 unsigned long addr, u64 *pfns, unsigned long npages)
+{
+	struct nouveau_svm *svm = drm->svm;
+	struct nouveau_svmm *svmm;
+	struct nouveau_pfnmap_args *args;
+	int ret;
+
+	if (!svm)
+		return;
+
+	mutex_lock(&svm->mutex);
+	svmm = nouveau_find_svmm(svm, mm);
+	if (!svmm) {
+		mutex_unlock(&svm->mutex);
+		return;
+	}
+	mutex_unlock(&svm->mutex);
+
+	args = nouveau_pfns_to_args(pfns);
+	args->p.addr = addr;
+	args->p.size = npages << PAGE_SHIFT;
+
+	mutex_lock(&svmm->mutex);
+
+	svmm->vmm->vmm.object.client->super = true;
+	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+				npages * sizeof(args->p.phys[0]), NULL);
+	svmm->vmm->vmm.object.client->super = false;
+
+	mutex_unlock(&svmm->mutex);
+}
+
 static void
 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e839d8189461..c00c177e51ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
 int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
 void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
 int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		      unsigned long addr, u64 *pfns, unsigned long npages);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
 static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
@@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
 {
 	return -ENOSYS;
 }
+
+u64 *nouveau_pfns_alloc(unsigned long npages)
+{
+	return NULL;
+}
+
+void nouveau_pfns_free(u64 *pfns)
+{
+}
+
+void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
+		      unsigned long addr, u64 *pfns, unsigned long npages)
+{
+}
 #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 #endif
-- 
2.20.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-08  7:07   ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-08  7:07 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: linux-mm, linux-kernel, amd-gfx, dri-devel, nouveau,
	Christoph Hellwig, Jason Gunthorpe, Jérôme Glisse,
	Ben Skeggs

On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
> When memory is migrated to the GPU it is likely to be accessed by GPU
> code soon afterwards. Instead of waiting for a GPU fault, map the
> migrated memory into the GPU page tables with the same access permissions
> as the source CPU page table entries. This preserves copy on write
> semantics.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Jason Gunthorpe <jgg@mellanox.com>
> Cc: "Jérôme Glisse" <jglisse@redhat.com>
> Cc: Ben Skeggs <bskeggs@redhat.com>
> ---
> 
> This patch is based on top of Christoph Hellwig's 9 patch series
> https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
> "turn the hmm migrate_vma upside down" but without patch 9
> "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.

This looks useful.  I've already dropped that patch for the pending
resend.

>  static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> -		struct vm_area_struct *vma, unsigned long addr,
> -		unsigned long src, dma_addr_t *dma_addr)
> +		struct vm_area_struct *vma, unsigned long src,
> +		dma_addr_t *dma_addr, u64 *pfn)

I'll pick up the removal of the not needed addr argument for the patch
introducing nouveau_dmem_migrate_copy_one, thanks,

>  static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>  {
>  	struct nouveau_fence *fence;
>  	unsigned long addr = args->start, nr_dma = 0, i;
>  
>  	for (i = 0; addr < args->end; i++) {
>  		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
> -				addr, args->src[i], &dma_addrs[nr_dma]);
> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);

Nit: I find the &pfns[i] way to pass the argument a little weird to read.
Why not "pfns + i"?

> +u64 *
> +nouveau_pfns_alloc(unsigned long npages)
> +{
> +	struct nouveau_pfnmap_args *args;
> +
> +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),

Can we use struct_size here?

> +	int ret;
> +
> +	if (!svm)
> +		return;
> +
> +	mutex_lock(&svm->mutex);
> +	svmm = nouveau_find_svmm(svm, mm);
> +	if (!svmm) {
> +		mutex_unlock(&svm->mutex);
> +		return;
> +	}
> +	mutex_unlock(&svm->mutex);

Given that nouveau_find_svmm doesn't take any kind of reference, what
gurantees [sic: guarantees] svmm doesn't go away after dropping the lock?

> @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
>  {
>  	return -ENOSYS;
>  }
> +
> +u64 *nouveau_pfns_alloc(unsigned long npages)
> +{
> +	return NULL;
> +}
> +
> +void nouveau_pfns_free(u64 *pfns)
> +{
> +}
> +
> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> +		      unsigned long addr, u64 *pfns, unsigned long npages)
> +{
> +}
>  #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */

nouveau_dmem.c and nouveau_svm.c are both built conditional on
CONFIG_DRM_NOUVEAU_SVM, so there is no need for stubs here.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-08  7:07   ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-08  7:07 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg, Jason Gunthorpe,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Christoph Hellwig,
	Ben Skeggs

On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
> When memory is migrated to the GPU it is likely to be accessed by GPU
> code soon afterwards. Instead of waiting for a GPU fault, map the
> migrated memory into the GPU page tables with the same access permissions
> as the source CPU page table entries. This preserves copy on write
> semantics.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Jason Gunthorpe <jgg@mellanox.com>
> Cc: "Jérôme Glisse" <jglisse@redhat.com>
> Cc: Ben Skeggs <bskeggs@redhat.com>
> ---
> 
> This patch is based on top of Christoph Hellwig's 9 patch series
> https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
> "turn the hmm migrate_vma upside down" but without patch 9
> "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.

This looks useful.  I've already dropped that patch for the pending
resend.

>  static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> -		struct vm_area_struct *vma, unsigned long addr,
> -		unsigned long src, dma_addr_t *dma_addr)
> +		struct vm_area_struct *vma, unsigned long src,
> +		dma_addr_t *dma_addr, u64 *pfn)

I'll pick up the removal of the not needed addr argument for the patch
introducing nouveau_dmem_migrate_copy_one, thanks,

>  static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>  {
>  	struct nouveau_fence *fence;
>  	unsigned long addr = args->start, nr_dma = 0, i;
>  
>  	for (i = 0; addr < args->end; i++) {
>  		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
> -				addr, args->src[i], &dma_addrs[nr_dma]);
> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);

Nit: I find the &pfns[i] way to pass the argument a little weird to read.
Why not "pfns + i"?

> +u64 *
> +nouveau_pfns_alloc(unsigned long npages)
> +{
> +	struct nouveau_pfnmap_args *args;
> +
> +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),

Can we use struct_size here?

> +	int ret;
> +
> +	if (!svm)
> +		return;
> +
> +	mutex_lock(&svm->mutex);
> +	svmm = nouveau_find_svmm(svm, mm);
> +	if (!svmm) {
> +		mutex_unlock(&svm->mutex);
> +		return;
> +	}
> +	mutex_unlock(&svm->mutex);

Given that nouveau_find_svmm doesn't take any kind of reference, what
gurantees [sic: guarantees] svmm doesn't go away after dropping the lock?

> @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
>  {
>  	return -ENOSYS;
>  }
> +
> +u64 *nouveau_pfns_alloc(unsigned long npages)
> +{
> +	return NULL;
> +}
> +
> +void nouveau_pfns_free(u64 *pfns)
> +{
> +}
> +
> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> +		      unsigned long addr, u64 *pfns, unsigned long npages)
> +{
> +}
>  #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */

nouveau_dmem.c and nouveau_svm.c are both built conditional on
CONFIG_DRM_NOUVEAU_SVM, so there is no need for stubs here.
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-08 21:29     ` Ralph Campbell
  0 siblings, 0 replies; 15+ messages in thread
From: Ralph Campbell @ 2019-08-08 21:29 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: linux-mm, linux-kernel, amd-gfx, dri-devel, nouveau,
	Jason Gunthorpe, Jérôme Glisse, Ben Skeggs


On 8/8/19 12:07 AM, Christoph Hellwig wrote:
> On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
>> When memory is migrated to the GPU it is likely to be accessed by GPU
>> code soon afterwards. Instead of waiting for a GPU fault, map the
>> migrated memory into the GPU page tables with the same access permissions
>> as the source CPU page table entries. This preserves copy on write
>> semantics.
>>
>> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
>> Cc: Christoph Hellwig <hch@lst.de>
>> Cc: Jason Gunthorpe <jgg@mellanox.com>
>> Cc: "Jérôme Glisse" <jglisse@redhat.com>
>> Cc: Ben Skeggs <bskeggs@redhat.com>
>> ---
>>
>> This patch is based on top of Christoph Hellwig's 9 patch series
>> https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
>> "turn the hmm migrate_vma upside down" but without patch 9
>> "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.
> 
> This looks useful.  I've already dropped that patch for the pending
> resend.

Thanks.

> 
>>   static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
>> -		struct vm_area_struct *vma, unsigned long addr,
>> -		unsigned long src, dma_addr_t *dma_addr)
>> +		struct vm_area_struct *vma, unsigned long src,
>> +		dma_addr_t *dma_addr, u64 *pfn)
> 
> I'll pick up the removal of the not needed addr argument for the patch
> introducing nouveau_dmem_migrate_copy_one, thanks,
> 
>>   static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
>> -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
>> +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>>   {
>>   	struct nouveau_fence *fence;
>>   	unsigned long addr = args->start, nr_dma = 0, i;
>>   
>>   	for (i = 0; addr < args->end; i++) {
>>   		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
>> -				addr, args->src[i], &dma_addrs[nr_dma]);
>> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
> 
> Nit: I find the &pfns[i] way to pass the argument a little weird to read.
> Why not "pfns + i"?

OK, will do in v2.
Should I convert to "dma_addrs + nr_dma" too?

>> +u64 *
>> +nouveau_pfns_alloc(unsigned long npages)
>> +{
>> +	struct nouveau_pfnmap_args *args;
>> +
>> +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
> 
> Can we use struct_size here?

Yes, good suggestion.

> 
>> +	int ret;
>> +
>> +	if (!svm)
>> +		return;
>> +
>> +	mutex_lock(&svm->mutex);
>> +	svmm = nouveau_find_svmm(svm, mm);
>> +	if (!svmm) {
>> +		mutex_unlock(&svm->mutex);
>> +		return;
>> +	}
>> +	mutex_unlock(&svm->mutex);
> 
> Given that nouveau_find_svmm doesn't take any kind of reference, what
> gurantees [sic: guarantees] svmm doesn't go away after dropping the lock?

I asked Ben and Jerome about this too.
I'm still looking into it.

> 
>> @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
>>   {
>>   	return -ENOSYS;
>>   }
>> +
>> +u64 *nouveau_pfns_alloc(unsigned long npages)
>> +{
>> +	return NULL;
>> +}
>> +
>> +void nouveau_pfns_free(u64 *pfns)
>> +{
>> +}
>> +
>> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
>> +		      unsigned long addr, u64 *pfns, unsigned long npages)
>> +{
>> +}
>>   #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> 
> nouveau_dmem.c and nouveau_svm.c are both built conditional on
> CONFIG_DRM_NOUVEAU_SVM, so there is no need for stubs here.
> 

Good point. I'll remove them in v2.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-08 21:29     ` Ralph Campbell
  0 siblings, 0 replies; 15+ messages in thread
From: Ralph Campbell @ 2019-08-08 21:29 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg, Jason Gunthorpe,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Ben Skeggs


On 8/8/19 12:07 AM, Christoph Hellwig wrote:
> On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
>> When memory is migrated to the GPU it is likely to be accessed by GPU
>> code soon afterwards. Instead of waiting for a GPU fault, map the
>> migrated memory into the GPU page tables with the same access permissions
>> as the source CPU page table entries. This preserves copy on write
>> semantics.
>>
>> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
>> Cc: Christoph Hellwig <hch@lst.de>
>> Cc: Jason Gunthorpe <jgg@mellanox.com>
>> Cc: "Jérôme Glisse" <jglisse@redhat.com>
>> Cc: Ben Skeggs <bskeggs@redhat.com>
>> ---
>>
>> This patch is based on top of Christoph Hellwig's 9 patch series
>> https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
>> "turn the hmm migrate_vma upside down" but without patch 9
>> "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.
> 
> This looks useful.  I've already dropped that patch for the pending
> resend.

Thanks.

> 
>>   static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
>> -		struct vm_area_struct *vma, unsigned long addr,
>> -		unsigned long src, dma_addr_t *dma_addr)
>> +		struct vm_area_struct *vma, unsigned long src,
>> +		dma_addr_t *dma_addr, u64 *pfn)
> 
> I'll pick up the removal of the not needed addr argument for the patch
> introducing nouveau_dmem_migrate_copy_one, thanks,
> 
>>   static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
>> -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
>> +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>>   {
>>   	struct nouveau_fence *fence;
>>   	unsigned long addr = args->start, nr_dma = 0, i;
>>   
>>   	for (i = 0; addr < args->end; i++) {
>>   		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
>> -				addr, args->src[i], &dma_addrs[nr_dma]);
>> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
> 
> Nit: I find the &pfns[i] way to pass the argument a little weird to read.
> Why not "pfns + i"?

OK, will do in v2.
Should I convert to "dma_addrs + nr_dma" too?

>> +u64 *
>> +nouveau_pfns_alloc(unsigned long npages)
>> +{
>> +	struct nouveau_pfnmap_args *args;
>> +
>> +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
> 
> Can we use struct_size here?

Yes, good suggestion.

> 
>> +	int ret;
>> +
>> +	if (!svm)
>> +		return;
>> +
>> +	mutex_lock(&svm->mutex);
>> +	svmm = nouveau_find_svmm(svm, mm);
>> +	if (!svmm) {
>> +		mutex_unlock(&svm->mutex);
>> +		return;
>> +	}
>> +	mutex_unlock(&svm->mutex);
> 
> Given that nouveau_find_svmm doesn't take any kind of reference, what
> guarantees svmm doesn't go away after dropping the lock?

I asked Ben and Jerome about this too.
I'm still looking into it.

> 
>> @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
>>   {
>>   	return -ENOSYS;
>>   }
>> +
>> +u64 *nouveau_pfns_alloc(unsigned long npages)
>> +{
>> +	return NULL;
>> +}
>> +
>> +void nouveau_pfns_free(u64 *pfns)
>> +{
>> +}
>> +
>> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
>> +		      unsigned long addr, u64 *pfns, unsigned long npages)
>> +{
>> +}
>>   #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> 
> nouveau_dmem.c and nouveau_svm.c are both built conditional on
> CONFIG_DRM_NOUVEAU_SVM, so there is no need for stubs here.
> 

Good point. I'll remove them in v2.
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-10 11:13       ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-10 11:13 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: Christoph Hellwig, linux-mm, linux-kernel, amd-gfx, dri-devel,
	nouveau, Jason Gunthorpe, Jérôme Glisse, Ben Skeggs

On Thu, Aug 08, 2019 at 02:29:34PM -0700, Ralph Campbell wrote:
>>>   {
>>>   	struct nouveau_fence *fence;
>>>   	unsigned long addr = args->start, nr_dma = 0, i;
>>>     	for (i = 0; addr < args->end; i++) {
>>>   		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
>>> -				addr, args->src[i], &dma_addrs[nr_dma]);
>>> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
>>
>> Nit: I find the &pfns[i] way to pass the argument a little weird to read.
>> Why not "pfns + i"?
>
> OK, will do in v2.
> Should I convert to "dma_addrs + nr_dma" too?

I'll fix it up for v3 of the migrate_vma series.  This is a leftover
from passing an args structure.

On something vaguely related to this patch:

You use the NVIF_VMM_PFNMAP_V0_V* defines from nvif/if000c.h, which are
a little odd as we only ever set these bits, but they also don't
appear to be values that are directly fed to the hardware.

On the other hand mmu/vmm.h defines a set of NVIF_VMM_PFNMAP_V0_*
constants with similar names and identical values, and those are used
in mmu/vmmgp100.c and what appears to finally do the low-level dma
mapping and talking to the hardware.  Are these two sets of constants
supposed to be the same?  Are they actual hardware values or just a
driver-internal interface?

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-10 11:13       ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-10 11:13 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg, Jason Gunthorpe,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Christoph Hellwig,
	Ben Skeggs

On Thu, Aug 08, 2019 at 02:29:34PM -0700, Ralph Campbell wrote:
>>>   {
>>>   	struct nouveau_fence *fence;
>>>   	unsigned long addr = args->start, nr_dma = 0, i;
>>>     	for (i = 0; addr < args->end; i++) {
>>>   		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
>>> -				addr, args->src[i], &dma_addrs[nr_dma]);
>>> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
>>
>> Nit: I find the &pfns[i] way to pass the argument a little weird to read.
>> Why not "pfns + i"?
>
> OK, will do in v2.
> Should I convert to "dma_addrs + nr_dma" too?

I'll fix it up for v3 of the migrate_vma series.  This is a leftover
from passing an args structure.

On something vaguely related to this patch:

You use the NVIF_VMM_PFNMAP_V0_V* defines from nvif/if000c.h, which are
a little odd as we only ever set these bits, but they also don't seem
to appear to be in values that are directly fed to the hardware.

On the other hand mmu/vmm.h defines a set of NVIF_VMM_PFNMAP_V0_*
constants with similar names and identical values, and those are used
in mmu/vmmgp100.c and what appears to finally do the low-level dma
mapping and talking to the hardware.  Are these two sets of constants
supposed to be the same?  Are the actual hardware values or just a
driver internal interface?
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
  2019-08-10 11:13       ` Christoph Hellwig
  (?)
@ 2019-08-12 19:42       ` Ralph Campbell
       [not found]         ` <1a84e6b6-31e6-6955-509f-9883f4a7a322-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
  -1 siblings, 1 reply; 15+ messages in thread
From: Ralph Campbell @ 2019-08-12 19:42 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: linux-mm, linux-kernel, amd-gfx, dri-devel, nouveau,
	Jason Gunthorpe, Jérôme Glisse, Ben Skeggs


On 8/10/19 4:13 AM, Christoph Hellwig wrote:
> On something vaguely related to this patch:
> 
> You use the NVIF_VMM_PFNMAP_V0_V* defines from nvif/if000c.h, which are
> a little odd as we only ever set these bits, but they also don't seem
> to appear to be in values that are directly fed to the hardware.
> 
> On the other hand mmu/vmm.h defines a set of NVIF_VMM_PFNMAP_V0_*

Yes, I see NVKM_VMM_PFN_*

> constants with similar names and identical values, and those are used
> in mmu/vmmgp100.c and what appears to finally do the low-level dma
> mapping and talking to the hardware.  Are these two sets of constants
> supposed to be the same?  Are the actual hardware values or just a
> driver internal interface?

It looks a bit odd to me too.
I don't really know the structure/history of nouveau.
Perhaps Ben Skeggs can shed more light on your question.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
  2019-08-07 15:02 ` Ralph Campbell
  (?)
  (?)
@ 2019-08-13 21:58 ` Jerome Glisse
  2019-08-15 17:43     ` Jerome Glisse
  -1 siblings, 1 reply; 15+ messages in thread
From: Jerome Glisse @ 2019-08-13 21:58 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: linux-mm, linux-kernel, amd-gfx, dri-devel, nouveau,
	Christoph Hellwig, Jason Gunthorpe, Ben Skeggs

On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
> When memory is migrated to the GPU it is likely to be accessed by GPU
> code soon afterwards. Instead of waiting for a GPU fault, map the
> migrated memory into the GPU page tables with the same access permissions
> as the source CPU page table entries. This preserves copy on write
> semantics.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Jason Gunthorpe <jgg@mellanox.com>
> Cc: "Jérôme Glisse" <jglisse@redhat.com>
> Cc: Ben Skeggs <bskeggs@redhat.com>

Sorry for the delay, I am swamped. A couple of issues:
    - nouveau_pfns_map() is never called; it should be called after
      the DMA copy is done (IIRC it is lacking proper fencing,
      so that would need to be implemented first)

    - the migrate ioctl is disconnected from the svm part and
      thus we would need first to implement svm reference counting
      and take a reference at the beginning of the migration process and
      release it at the end ie struct nouveau_svmm needs refcounting
      of some sort. I let Ben decides what he likes best for that.

    - i rather not have an extra pfns array, i am pretty sure we
      can directly feed what we get from the dma array to the svm
      code to update the GPU page table

Observations that can be delayed to later patches:
    - i do not think we want to map directly if the dma engine
      is queue up and thus if the fence will take time to signal

      This is why i did not implement this in the first place.
      Maybe using a workqueue to schedule a pre-fill of the GPU
      page table and wakeup the workqueue with the fence notify
      event.


> ---
> 
> This patch is based on top of Christoph Hellwig's 9 patch series
> https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
> "turn the hmm migrate_vma upside down" but without patch 9
> "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.
> 
> 
>  drivers/gpu/drm/nouveau/nouveau_dmem.c | 45 +++++++++-----
>  drivers/gpu/drm/nouveau/nouveau_svm.c  | 86 ++++++++++++++++++++++++++
>  drivers/gpu/drm/nouveau/nouveau_svm.h  | 19 ++++++
>  3 files changed, 133 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index ef9de82b0744..c83e6f118817 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -25,11 +25,13 @@
>  #include "nouveau_dma.h"
>  #include "nouveau_mem.h"
>  #include "nouveau_bo.h"
> +#include "nouveau_svm.h"
>  
>  #include <nvif/class.h>
>  #include <nvif/object.h>
>  #include <nvif/if500b.h>
>  #include <nvif/if900b.h>
> +#include <nvif/if000c.h>
>  
>  #include <linux/sched/mm.h>
>  #include <linux/hmm.h>
> @@ -560,11 +562,12 @@ nouveau_dmem_init(struct nouveau_drm *drm)
>  }
>  
>  static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> -		struct vm_area_struct *vma, unsigned long addr,
> -		unsigned long src, dma_addr_t *dma_addr)
> +		struct vm_area_struct *vma, unsigned long src,
> +		dma_addr_t *dma_addr, u64 *pfn)
>  {
>  	struct device *dev = drm->dev->dev;
>  	struct page *dpage, *spage;
> +	unsigned long paddr;
>  
>  	spage = migrate_pfn_to_page(src);
>  	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
> @@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
>  
>  	dpage = nouveau_dmem_page_alloc_locked(drm);
>  	if (!dpage)
> -		return 0;
> +		goto out;
>  
>  	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
>  	if (dma_mapping_error(dev, *dma_addr))
>  		goto out_free_page;
>  
> +	paddr = nouveau_dmem_page_addr(dpage);
>  	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
> -			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
> -			*dma_addr))
> +			paddr, NOUVEAU_APER_HOST, *dma_addr))
>  		goto out_dma_unmap;
>  
> +	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
> +		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
> +	if (src & MIGRATE_PFN_WRITE)
> +		*pfn |= NVIF_VMM_PFNMAP_V0_W;
>  	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
>  
>  out_dma_unmap:
> @@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
>  out_free_page:
>  	nouveau_dmem_page_free_locked(drm, dpage);
>  out:
> +	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
>  	return 0;
>  }
>  
>  static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
>  {
>  	struct nouveau_fence *fence;
>  	unsigned long addr = args->start, nr_dma = 0, i;
>  
>  	for (i = 0; addr < args->end; i++) {
>  		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
> -				addr, args->src[i], &dma_addrs[nr_dma]);
> +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
>  		if (args->dst[i])
>  			nr_dma++;
>  		addr += PAGE_SIZE;
> @@ -615,10 +623,6 @@ static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
>  		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
>  				DMA_BIDIRECTIONAL);
>  	}
> -	/*
> -	 * FIXME optimization: update GPU page table to point to newly migrated
> -	 * memory.
> -	 */
>  	migrate_vma_finalize(args);
>  }
>  
> @@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
>  	unsigned long npages = (end - start) >> PAGE_SHIFT;
>  	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
>  	dma_addr_t *dma_addrs;
> +	u64 *pfns;
>  	struct migrate_vma args = {
>  		.vma		= vma,
>  		.start		= start,
>  	};
> -	unsigned long c, i;
> +	unsigned long i;
>  	int ret = -ENOMEM;
>  
>  	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
> @@ -649,19 +654,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
>  	if (!dma_addrs)
>  		goto out_free_dst;
>  
> -	for (i = 0; i < npages; i += c) {
> -		c = min(SG_MAX_SINGLE_ALLOC, npages);
> -		args.end = start + (c << PAGE_SHIFT);
> +	pfns = nouveau_pfns_alloc(max);
> +	if (!pfns)
> +		goto out_free_dma;
> +
> +	for (i = 0; i < npages; i += max) {
> +		args.end = start + (max << PAGE_SHIFT);
>  		ret = migrate_vma_setup(&args);
>  		if (ret)
> -			goto out_free_dma;
> +			goto out_free_pfns;
>  
>  		if (args.cpages)
> -			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs);
> +			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs,
> +						   pfns);
>  		args.start = args.end;
>  	}
>  
>  	ret = 0;
> +out_free_pfns:
> +	nouveau_pfns_free(pfns);
>  out_free_dma:
>  	kfree(dma_addrs);
>  out_free_dst:
> diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
> index a74530b5a523..3e6d7f226576 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> @@ -70,6 +70,12 @@ struct nouveau_svm {
>  #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
>  #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
>  
> +struct nouveau_pfnmap_args {
> +	struct nvif_ioctl_v0 i;
> +	struct nvif_ioctl_mthd_v0 m;
> +	struct nvif_vmm_pfnmap_v0 p;
> +};
> +
>  struct nouveau_ivmm {
>  	struct nouveau_svmm *svmm;
>  	u64 inst;
> @@ -734,6 +740,86 @@ nouveau_svm_fault(struct nvif_notify *notify)
>  	return NVIF_NOTIFY_KEEP;
>  }
>  
> +static inline struct nouveau_pfnmap_args *
> +nouveau_pfns_to_args(void *pfns)
> +{
> +	struct nvif_vmm_pfnmap_v0 *p =
> +		container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
> +
> +	return container_of(p, struct nouveau_pfnmap_args, p);
> +}
> +
> +u64 *
> +nouveau_pfns_alloc(unsigned long npages)
> +{
> +	struct nouveau_pfnmap_args *args;
> +
> +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
> +			GFP_KERNEL);
> +	if (!args)
> +		return NULL;
> +
> +	args->i.type = NVIF_IOCTL_V0_MTHD;
> +	args->m.method = NVIF_VMM_V0_PFNMAP;
> +	args->p.page = PAGE_SHIFT;
> +
> +	return args->p.phys;
> +}
> +
> +void
> +nouveau_pfns_free(u64 *pfns)
> +{
> +	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
> +
> +	kfree(args);
> +}
> +
> +static struct nouveau_svmm *
> +nouveau_find_svmm(struct nouveau_svm *svm, struct mm_struct *mm)
> +{
> +	struct nouveau_ivmm *ivmm;
> +
> +	list_for_each_entry(ivmm, &svm->inst, head) {
> +		if (ivmm->svmm->mm == mm)
> +			return ivmm->svmm;
> +	}
> +	return NULL;
> +}
> +
> +void
> +nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> +		 unsigned long addr, u64 *pfns, unsigned long npages)
> +{
> +	struct nouveau_svm *svm = drm->svm;
> +	struct nouveau_svmm *svmm;
> +	struct nouveau_pfnmap_args *args;
> +	int ret;
> +
> +	if (!svm)
> +		return;
> +
> +	mutex_lock(&svm->mutex);
> +	svmm = nouveau_find_svmm(svm, mm);
> +	if (!svmm) {
> +		mutex_unlock(&svm->mutex);
> +		return;
> +	}
> +	mutex_unlock(&svm->mutex);
> +
> +	args = nouveau_pfns_to_args(pfns);
> +	args->p.addr = addr;
> +	args->p.size = npages << PAGE_SHIFT;
> +
> +	mutex_lock(&svmm->mutex);
> +
> +	svmm->vmm->vmm.object.client->super = true;
> +	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
> +				npages * sizeof(args->p.phys[0]), NULL);
> +	svmm->vmm->vmm.object.client->super = false;
> +
> +	mutex_unlock(&svmm->mutex);
> +}
> +
>  static void
>  nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
>  {
> diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
> index e839d8189461..c00c177e51ed 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_svm.h
> +++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
> @@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
>  int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
>  void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
>  int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
> +
> +u64 *nouveau_pfns_alloc(unsigned long npages);
> +void nouveau_pfns_free(u64 *pfns);
> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> +		      unsigned long addr, u64 *pfns, unsigned long npages);
>  #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
>  static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
>  static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
> @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
>  {
>  	return -ENOSYS;
>  }
> +
> +u64 *nouveau_pfns_alloc(unsigned long npages)
> +{
> +	return NULL;
> +}
> +
> +void nouveau_pfns_free(u64 *pfns)
> +{
> +}
> +
> +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> +		      unsigned long addr, u64 *pfns, unsigned long npages)
> +{
> +}
>  #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
>  #endif
> -- 
> 2.20.1
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Nouveau] [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-15 17:43     ` Jerome Glisse
  0 siblings, 0 replies; 15+ messages in thread
From: Jerome Glisse @ 2019-08-15 17:43 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: nouveau, linux-kernel, dri-devel, linux-mm, Jason Gunthorpe,
	amd-gfx, Christoph Hellwig, Ben Skeggs

On Tue, Aug 13, 2019 at 05:58:52PM -0400, Jerome Glisse wrote:
> On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
> > When memory is migrated to the GPU it is likely to be accessed by GPU
> > code soon afterwards. Instead of waiting for a GPU fault, map the
> > migrated memory into the GPU page tables with the same access permissions
> > as the source CPU page table entries. This preserves copy on write
> > semantics.
> > 
> > Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> > Cc: Christoph Hellwig <hch@lst.de>
> > Cc: Jason Gunthorpe <jgg@mellanox.com>
> > Cc: "Jérôme Glisse" <jglisse@redhat.com>
> > Cc: Ben Skeggs <bskeggs@redhat.com>
> 
> Sorry for delay i am swamp, couple issues:
>     - nouveau_pfns_map() is never call, it should be call after
>       the dma copy is done (iirc it is lacking proper fencing
>       so that would need to be implemented first)
> 
>     - the migrate ioctl is disconnected from the svm part and
>       thus we would need first to implement svm reference counting
>       and take a reference at begining of migration process and
>       release it at the end ie struct nouveau_svmm needs refcounting
>       of some sort. I let Ben decides what he likes best for that.

Thinking more about it, the svm lifetime is bound to the file
descriptor of the device driver file held by the process. So
when you call the migrate ioctl, the svm should not go away because
you are in an ioctl against that file descriptor. I need to
double-check all that with respect to processes that open the
device file multiple times with different file descriptors (or
fork and the like).


>     - i rather not have an extra pfns array, i am pretty sure we
>       can directly feed what we get from the dma array to the svm
>       code to update the GPU page table
> 
> Observation that can be delayed to latter patches:
>     - i do not think we want to map directly if the dma engine
>       is queue up and thus if the fence will take time to signal
> 
>       This is why i did not implement this in the first place.
>       Maybe using a workqueue to schedule a pre-fill of the GPU
>       page table and wakeup the workqueue with the fence notify
>       event.
> 
> 
> > ---
> > 
> > This patch is based on top of Christoph Hellwig's 9 patch series
> > https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
> > "turn the hmm migrate_vma upside down" but without patch 9
> > "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.
> > 
> > 
> >  drivers/gpu/drm/nouveau/nouveau_dmem.c | 45 +++++++++-----
> >  drivers/gpu/drm/nouveau/nouveau_svm.c  | 86 ++++++++++++++++++++++++++
> >  drivers/gpu/drm/nouveau/nouveau_svm.h  | 19 ++++++
> >  3 files changed, 133 insertions(+), 17 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > index ef9de82b0744..c83e6f118817 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > @@ -25,11 +25,13 @@
> >  #include "nouveau_dma.h"
> >  #include "nouveau_mem.h"
> >  #include "nouveau_bo.h"
> > +#include "nouveau_svm.h"
> >  
> >  #include <nvif/class.h>
> >  #include <nvif/object.h>
> >  #include <nvif/if500b.h>
> >  #include <nvif/if900b.h>
> > +#include <nvif/if000c.h>
> >  
> >  #include <linux/sched/mm.h>
> >  #include <linux/hmm.h>
> > @@ -560,11 +562,12 @@ nouveau_dmem_init(struct nouveau_drm *drm)
> >  }
> >  
> >  static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> > -		struct vm_area_struct *vma, unsigned long addr,
> > -		unsigned long src, dma_addr_t *dma_addr)
> > +		struct vm_area_struct *vma, unsigned long src,
> > +		dma_addr_t *dma_addr, u64 *pfn)
> >  {
> >  	struct device *dev = drm->dev->dev;
> >  	struct page *dpage, *spage;
> > +	unsigned long paddr;
> >  
> >  	spage = migrate_pfn_to_page(src);
> >  	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
> > @@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> >  
> >  	dpage = nouveau_dmem_page_alloc_locked(drm);
> >  	if (!dpage)
> > -		return 0;
> > +		goto out;
> >  
> >  	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
> >  	if (dma_mapping_error(dev, *dma_addr))
> >  		goto out_free_page;
> >  
> > +	paddr = nouveau_dmem_page_addr(dpage);
> >  	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
> > -			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
> > -			*dma_addr))
> > +			paddr, NOUVEAU_APER_HOST, *dma_addr))
> >  		goto out_dma_unmap;
> >  
> > +	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
> > +		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
> > +	if (src & MIGRATE_PFN_WRITE)
> > +		*pfn |= NVIF_VMM_PFNMAP_V0_W;
> >  	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
> >  
> >  out_dma_unmap:
> > @@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> >  out_free_page:
> >  	nouveau_dmem_page_free_locked(drm, dpage);
> >  out:
> > +	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
> >  	return 0;
> >  }
> >  
> >  static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> > -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> > +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
> >  {
> >  	struct nouveau_fence *fence;
> >  	unsigned long addr = args->start, nr_dma = 0, i;
> >  
> >  	for (i = 0; addr < args->end; i++) {
> >  		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
> > -				addr, args->src[i], &dma_addrs[nr_dma]);
> > +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
> >  		if (args->dst[i])
> >  			nr_dma++;
> >  		addr += PAGE_SIZE;
> > @@ -615,10 +623,6 @@ static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> >  		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
> >  				DMA_BIDIRECTIONAL);
> >  	}
> > -	/*
> > -	 * FIXME optimization: update GPU page table to point to newly migrated
> > -	 * memory.
> > -	 */
> >  	migrate_vma_finalize(args);
> >  }
> >  
> > @@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> >  	unsigned long npages = (end - start) >> PAGE_SHIFT;
> >  	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
> >  	dma_addr_t *dma_addrs;
> > +	u64 *pfns;
> >  	struct migrate_vma args = {
> >  		.vma		= vma,
> >  		.start		= start,
> >  	};
> > -	unsigned long c, i;
> > +	unsigned long i;
> >  	int ret = -ENOMEM;
> >  
> >  	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
> > @@ -649,19 +654,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> >  	if (!dma_addrs)
> >  		goto out_free_dst;
> >  
> > -	for (i = 0; i < npages; i += c) {
> > -		c = min(SG_MAX_SINGLE_ALLOC, npages);
> > -		args.end = start + (c << PAGE_SHIFT);
> > +	pfns = nouveau_pfns_alloc(max);
> > +	if (!pfns)
> > +		goto out_free_dma;
> > +
> > +	for (i = 0; i < npages; i += max) {
> > +		args.end = start + (max << PAGE_SHIFT);
> >  		ret = migrate_vma_setup(&args);
> >  		if (ret)
> > -			goto out_free_dma;
> > +			goto out_free_pfns;
> >  
> >  		if (args.cpages)
> > -			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs);
> > +			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs,
> > +						   pfns);
> >  		args.start = args.end;
> >  	}
> >  
> >  	ret = 0;
> > +out_free_pfns:
> > +	nouveau_pfns_free(pfns);
> >  out_free_dma:
> >  	kfree(dma_addrs);
> >  out_free_dst:
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
> > index a74530b5a523..3e6d7f226576 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> > @@ -70,6 +70,12 @@ struct nouveau_svm {
> >  #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
> >  #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
> >  
> > +struct nouveau_pfnmap_args {
> > +	struct nvif_ioctl_v0 i;
> > +	struct nvif_ioctl_mthd_v0 m;
> > +	struct nvif_vmm_pfnmap_v0 p;
> > +};
> > +
> >  struct nouveau_ivmm {
> >  	struct nouveau_svmm *svmm;
> >  	u64 inst;
> > @@ -734,6 +740,86 @@ nouveau_svm_fault(struct nvif_notify *notify)
> >  	return NVIF_NOTIFY_KEEP;
> >  }
> >  
> > +static inline struct nouveau_pfnmap_args *
> > +nouveau_pfns_to_args(void *pfns)
> > +{
> > +	struct nvif_vmm_pfnmap_v0 *p =
> > +		container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
> > +
> > +	return container_of(p, struct nouveau_pfnmap_args, p);
> > +}
> > +
> > +u64 *
> > +nouveau_pfns_alloc(unsigned long npages)
> > +{
> > +	struct nouveau_pfnmap_args *args;
> > +
> > +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
> > +			GFP_KERNEL);
> > +	if (!args)
> > +		return NULL;
> > +
> > +	args->i.type = NVIF_IOCTL_V0_MTHD;
> > +	args->m.method = NVIF_VMM_V0_PFNMAP;
> > +	args->p.page = PAGE_SHIFT;
> > +
> > +	return args->p.phys;
> > +}
> > +
> > +void
> > +nouveau_pfns_free(u64 *pfns)
> > +{
> > +	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
> > +
> > +	kfree(args);
> > +}
> > +
> > +static struct nouveau_svmm *
> > +nouveau_find_svmm(struct nouveau_svm *svm, struct mm_struct *mm)
> > +{
> > +	struct nouveau_ivmm *ivmm;
> > +
> > +	list_for_each_entry(ivmm, &svm->inst, head) {
> > +		if (ivmm->svmm->mm == mm)
> > +			return ivmm->svmm;
> > +	}
> > +	return NULL;
> > +}
> > +
> > +void
> > +nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		 unsigned long addr, u64 *pfns, unsigned long npages)
> > +{
> > +	struct nouveau_svm *svm = drm->svm;
> > +	struct nouveau_svmm *svmm;
> > +	struct nouveau_pfnmap_args *args;
> > +	int ret;
> > +
> > +	if (!svm)
> > +		return;
> > +
> > +	mutex_lock(&svm->mutex);
> > +	svmm = nouveau_find_svmm(svm, mm);
> > +	if (!svmm) {
> > +		mutex_unlock(&svm->mutex);
> > +		return;
> > +	}
> > +	mutex_unlock(&svm->mutex);
> > +
> > +	args = nouveau_pfns_to_args(pfns);
> > +	args->p.addr = addr;
> > +	args->p.size = npages << PAGE_SHIFT;
> > +
> > +	mutex_lock(&svmm->mutex);
> > +
> > +	svmm->vmm->vmm.object.client->super = true;
> > +	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
> > +				npages * sizeof(args->p.phys[0]), NULL);
> > +	svmm->vmm->vmm.object.client->super = false;
> > +
> > +	mutex_unlock(&svmm->mutex);
> > +}
> > +
> >  static void
> >  nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
> >  {
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
> > index e839d8189461..c00c177e51ed 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_svm.h
> > +++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
> > @@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
> >  int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
> >  void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
> >  int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
> > +
> > +u64 *nouveau_pfns_alloc(unsigned long npages);
> > +void nouveau_pfns_free(u64 *pfns);
> > +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		      unsigned long addr, u64 *pfns, unsigned long npages);
> >  #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> >  static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
> >  static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
> > @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
> >  {
> >  	return -ENOSYS;
> >  }
> > +
> > +u64 *nouveau_pfns_alloc(unsigned long npages)
> > +{
> > +	return NULL;
> > +}
> > +
> > +void nouveau_pfns_free(u64 *pfns)
> > +{
> > +}
> > +
> > +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		      unsigned long addr, u64 *pfns, unsigned long npages)
> > +{
> > +}
> >  #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> >  #endif
> > -- 
> > 2.20.1
> > 
> _______________________________________________
> Nouveau mailing list
> Nouveau@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Nouveau] [PATCH] nouveau/hmm: map pages after migration
@ 2019-08-15 17:43     ` Jerome Glisse
  0 siblings, 0 replies; 15+ messages in thread
From: Jerome Glisse @ 2019-08-15 17:43 UTC (permalink / raw)
  To: Ralph Campbell
  Cc: nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-kernel-u79uwXL29TY76Z2rM5mHXA,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-mm-Bw31MaZKKs3YtjvyW6yDsg, Jason Gunthorpe,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Christoph Hellwig,
	Ben Skeggs

On Tue, Aug 13, 2019 at 05:58:52PM -0400, Jerome Glisse wrote:
> On Wed, Aug 07, 2019 at 08:02:14AM -0700, Ralph Campbell wrote:
> > When memory is migrated to the GPU it is likely to be accessed by GPU
> > code soon afterwards. Instead of waiting for a GPU fault, map the
> > migrated memory into the GPU page tables with the same access permissions
> > as the source CPU page table entries. This preserves copy on write
> > semantics.
> > 
> > Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> > Cc: Christoph Hellwig <hch@lst.de>
> > Cc: Jason Gunthorpe <jgg@mellanox.com>
> > Cc: "Jérôme Glisse" <jglisse@redhat.com>
> > Cc: Ben Skeggs <bskeggs@redhat.com>
> 
> Sorry for the delay, I am swamped; a couple of issues:
>     - nouveau_pfns_map() is never called; it should be called after
>       the dma copy is done (iirc it is lacking proper fencing,
>       so that would need to be implemented first)
> 
>     - the migrate ioctl is disconnected from the svm part, and
>       thus we would first need to implement svm reference counting
>       and take a reference at the beginning of the migration process
>       and release it at the end, i.e. struct nouveau_svmm needs
>       refcounting of some sort. I'll let Ben decide what he likes best for that.

Thinking more about that, the svm lifetime is bound to the file
descriptor on the device driver file held by the process. So
when you call the migrate ioctl, the svm should not go away because
you are in an ioctl against the file descriptor. I need to double
check all of that with respect to processes that open the device file
multiple times with different file descriptors (or that fork, and
so on).


>     - I'd rather not have an extra pfns array; I am pretty sure we
>       can directly feed what we get from the dma array to the svm
>       code to update the GPU page table
> 
> Observation that can be delayed to latter patches:
>     - I do not think we want to map directly if the dma engine
>       is queued up and thus if the fence will take time to signal
> 
>       This is why I did not implement this in the first place.
>       Maybe use a workqueue to schedule a pre-fill of the GPU
>       page table, and wake up the workqueue with the fence notify
>       event.
> 
> 
> > ---
> > 
> > This patch is based on top of Christoph Hellwig's 9 patch series
> > https://lore.kernel.org/linux-mm/20190729234611.GC7171@redhat.com/T/#u
> > "turn the hmm migrate_vma upside down" but without patch 9
> > "mm: remove the unused MIGRATE_PFN_WRITE" and adds a use for the flag.
> > 
> > 
> >  drivers/gpu/drm/nouveau/nouveau_dmem.c | 45 +++++++++-----
> >  drivers/gpu/drm/nouveau/nouveau_svm.c  | 86 ++++++++++++++++++++++++++
> >  drivers/gpu/drm/nouveau/nouveau_svm.h  | 19 ++++++
> >  3 files changed, 133 insertions(+), 17 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > index ef9de82b0744..c83e6f118817 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> > @@ -25,11 +25,13 @@
> >  #include "nouveau_dma.h"
> >  #include "nouveau_mem.h"
> >  #include "nouveau_bo.h"
> > +#include "nouveau_svm.h"
> >  
> >  #include <nvif/class.h>
> >  #include <nvif/object.h>
> >  #include <nvif/if500b.h>
> >  #include <nvif/if900b.h>
> > +#include <nvif/if000c.h>
> >  
> >  #include <linux/sched/mm.h>
> >  #include <linux/hmm.h>
> > @@ -560,11 +562,12 @@ nouveau_dmem_init(struct nouveau_drm *drm)
> >  }
> >  
> >  static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> > -		struct vm_area_struct *vma, unsigned long addr,
> > -		unsigned long src, dma_addr_t *dma_addr)
> > +		struct vm_area_struct *vma, unsigned long src,
> > +		dma_addr_t *dma_addr, u64 *pfn)
> >  {
> >  	struct device *dev = drm->dev->dev;
> >  	struct page *dpage, *spage;
> > +	unsigned long paddr;
> >  
> >  	spage = migrate_pfn_to_page(src);
> >  	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
> > @@ -572,17 +575,21 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> >  
> >  	dpage = nouveau_dmem_page_alloc_locked(drm);
> >  	if (!dpage)
> > -		return 0;
> > +		goto out;
> >  
> >  	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
> >  	if (dma_mapping_error(dev, *dma_addr))
> >  		goto out_free_page;
> >  
> > +	paddr = nouveau_dmem_page_addr(dpage);
> >  	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
> > -			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
> > -			*dma_addr))
> > +			paddr, NOUVEAU_APER_HOST, *dma_addr))
> >  		goto out_dma_unmap;
> >  
> > +	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
> > +		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
> > +	if (src & MIGRATE_PFN_WRITE)
> > +		*pfn |= NVIF_VMM_PFNMAP_V0_W;
> >  	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
> >  
> >  out_dma_unmap:
> > @@ -590,18 +597,19 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> >  out_free_page:
> >  	nouveau_dmem_page_free_locked(drm, dpage);
> >  out:
> > +	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
> >  	return 0;
> >  }
> >  
> >  static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> > -		struct nouveau_drm *drm, dma_addr_t *dma_addrs)
> > +		struct nouveau_drm *drm, dma_addr_t *dma_addrs, u64 *pfns)
> >  {
> >  	struct nouveau_fence *fence;
> >  	unsigned long addr = args->start, nr_dma = 0, i;
> >  
> >  	for (i = 0; addr < args->end; i++) {
> >  		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->vma,
> > -				addr, args->src[i], &dma_addrs[nr_dma]);
> > +				args->src[i], &dma_addrs[nr_dma], &pfns[i]);
> >  		if (args->dst[i])
> >  			nr_dma++;
> >  		addr += PAGE_SIZE;
> > @@ -615,10 +623,6 @@ static void nouveau_dmem_migrate_chunk(struct migrate_vma *args,
> >  		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
> >  				DMA_BIDIRECTIONAL);
> >  	}
> > -	/*
> > -	 * FIXME optimization: update GPU page table to point to newly migrated
> > -	 * memory.
> > -	 */
> >  	migrate_vma_finalize(args);
> >  }
> >  
> > @@ -631,11 +635,12 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> >  	unsigned long npages = (end - start) >> PAGE_SHIFT;
> >  	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
> >  	dma_addr_t *dma_addrs;
> > +	u64 *pfns;
> >  	struct migrate_vma args = {
> >  		.vma		= vma,
> >  		.start		= start,
> >  	};
> > -	unsigned long c, i;
> > +	unsigned long i;
> >  	int ret = -ENOMEM;
> >  
> >  	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
> > @@ -649,19 +654,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
> >  	if (!dma_addrs)
> >  		goto out_free_dst;
> >  
> > -	for (i = 0; i < npages; i += c) {
> > -		c = min(SG_MAX_SINGLE_ALLOC, npages);
> > -		args.end = start + (c << PAGE_SHIFT);
> > +	pfns = nouveau_pfns_alloc(max);
> > +	if (!pfns)
> > +		goto out_free_dma;
> > +
> > +	for (i = 0; i < npages; i += max) {
> > +		args.end = start + (max << PAGE_SHIFT);
> >  		ret = migrate_vma_setup(&args);
> >  		if (ret)
> > -			goto out_free_dma;
> > +			goto out_free_pfns;
> >  
> >  		if (args.cpages)
> > -			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs);
> > +			nouveau_dmem_migrate_chunk(&args, drm, dma_addrs,
> > +						   pfns);
> >  		args.start = args.end;
> >  	}
> >  
> >  	ret = 0;
> > +out_free_pfns:
> > +	nouveau_pfns_free(pfns);
> >  out_free_dma:
> >  	kfree(dma_addrs);
> >  out_free_dst:
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
> > index a74530b5a523..3e6d7f226576 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_svm.c
> > +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
> > @@ -70,6 +70,12 @@ struct nouveau_svm {
> >  #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
> >  #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
> >  
> > +struct nouveau_pfnmap_args {
> > +	struct nvif_ioctl_v0 i;
> > +	struct nvif_ioctl_mthd_v0 m;
> > +	struct nvif_vmm_pfnmap_v0 p;
> > +};
> > +
> >  struct nouveau_ivmm {
> >  	struct nouveau_svmm *svmm;
> >  	u64 inst;
> > @@ -734,6 +740,86 @@ nouveau_svm_fault(struct nvif_notify *notify)
> >  	return NVIF_NOTIFY_KEEP;
> >  }
> >  
> > +static inline struct nouveau_pfnmap_args *
> > +nouveau_pfns_to_args(void *pfns)
> > +{
> > +	struct nvif_vmm_pfnmap_v0 *p =
> > +		container_of(pfns, struct nvif_vmm_pfnmap_v0, phys);
> > +
> > +	return container_of(p, struct nouveau_pfnmap_args, p);
> > +}
> > +
> > +u64 *
> > +nouveau_pfns_alloc(unsigned long npages)
> > +{
> > +	struct nouveau_pfnmap_args *args;
> > +
> > +	args = kzalloc(sizeof(*args) + npages * sizeof(args->p.phys[0]),
> > +			GFP_KERNEL);
> > +	if (!args)
> > +		return NULL;
> > +
> > +	args->i.type = NVIF_IOCTL_V0_MTHD;
> > +	args->m.method = NVIF_VMM_V0_PFNMAP;
> > +	args->p.page = PAGE_SHIFT;
> > +
> > +	return args->p.phys;
> > +}
> > +
> > +void
> > +nouveau_pfns_free(u64 *pfns)
> > +{
> > +	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
> > +
> > +	kfree(args);
> > +}
> > +
> > +static struct nouveau_svmm *
> > +nouveau_find_svmm(struct nouveau_svm *svm, struct mm_struct *mm)
> > +{
> > +	struct nouveau_ivmm *ivmm;
> > +
> > +	list_for_each_entry(ivmm, &svm->inst, head) {
> > +		if (ivmm->svmm->mm == mm)
> > +			return ivmm->svmm;
> > +	}
> > +	return NULL;
> > +}
> > +
> > +void
> > +nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		 unsigned long addr, u64 *pfns, unsigned long npages)
> > +{
> > +	struct nouveau_svm *svm = drm->svm;
> > +	struct nouveau_svmm *svmm;
> > +	struct nouveau_pfnmap_args *args;
> > +	int ret;
> > +
> > +	if (!svm)
> > +		return;
> > +
> > +	mutex_lock(&svm->mutex);
> > +	svmm = nouveau_find_svmm(svm, mm);
> > +	if (!svmm) {
> > +		mutex_unlock(&svm->mutex);
> > +		return;
> > +	}
> > +	mutex_unlock(&svm->mutex);
> > +
> > +	args = nouveau_pfns_to_args(pfns);
> > +	args->p.addr = addr;
> > +	args->p.size = npages << PAGE_SHIFT;
> > +
> > +	mutex_lock(&svmm->mutex);
> > +
> > +	svmm->vmm->vmm.object.client->super = true;
> > +	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
> > +				npages * sizeof(args->p.phys[0]), NULL);
> > +	svmm->vmm->vmm.object.client->super = false;
> > +
> > +	mutex_unlock(&svmm->mutex);
> > +}
> > +
> >  static void
> >  nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
> >  {
> > diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
> > index e839d8189461..c00c177e51ed 100644
> > --- a/drivers/gpu/drm/nouveau/nouveau_svm.h
> > +++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
> > @@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
> >  int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
> >  void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
> >  int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
> > +
> > +u64 *nouveau_pfns_alloc(unsigned long npages);
> > +void nouveau_pfns_free(u64 *pfns);
> > +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		      unsigned long addr, u64 *pfns, unsigned long npages);
> >  #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> >  static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
> >  static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
> > @@ -44,5 +49,19 @@ static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
> >  {
> >  	return -ENOSYS;
> >  }
> > +
> > +u64 *nouveau_pfns_alloc(unsigned long npages)
> > +{
> > +	return NULL;
> > +}
> > +
> > +void nouveau_pfns_free(u64 *pfns)
> > +{
> > +}
> > +
> > +void nouveau_pfns_map(struct nouveau_drm *drm, struct mm_struct *mm,
> > +		      unsigned long addr, u64 *pfns, unsigned long npages)
> > +{
> > +}
> >  #endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
> >  #endif
> > -- 
> > 2.20.1
> > 
> _______________________________________________
> Nouveau mailing list
> Nouveau@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/nouveau
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
       [not found]         ` <1a84e6b6-31e6-6955-509f-9883f4a7a322-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
@ 2019-08-16  7:11           ` Christoph Hellwig
       [not found]             ` <20190816071132.GA7513-jcswGhMUV9g@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-16  7:11 UTC (permalink / raw)
  To: Ben Skeggs
  Cc: nouveau-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Mon, Aug 12, 2019 at 12:42:30PM -0700, Ralph Campbell wrote:
>
> On 8/10/19 4:13 AM, Christoph Hellwig wrote:
>> On something vaguely related to this patch:
>>
>> You use the NVIF_VMM_PFNMAP_V0_V* defines from nvif/if000c.h, which are
>> a little odd as we only ever set these bits, but they also don't seem
>> to appear to be in values that are directly fed to the hardware.
>>
>> On the other hand mmu/vmm.h defines a set of NVIF_VMM_PFNMAP_V0_*
>
> Yes, I see NVKM_VMM_PFN_*
>
>> constants with similar names and identical values, and those are used
>> in mmu/vmmgp100.c and what appears to finally do the low-level dma
>> mapping and talking to the hardware.  Are these two sets of constants
>> supposed to be the same?  Are the actual hardware values or just a
>> driver internal interface?
>
> It looks a bit odd to me too.
> I don't really know the structure/history of nouveau.
> Perhaps Ben Skeggs can shed more light on your question.

Ben, do you have any insights on these constants?
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
       [not found]             ` <20190816071132.GA7513-jcswGhMUV9g@public.gmane.org>
@ 2019-08-17  1:05               ` Ben Skeggs
       [not found]                 ` <CABDvA=n4Y0QticHZowEWFOqiEB3p99nV71GvHAySYdkcgw-Aow-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Ben Skeggs @ 2019-08-17  1:05 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: nouveau, dri-devel

On Fri, Aug 16, 2019 at 5:11 PM Christoph Hellwig <hch@lst.de> wrote:
>
> On Mon, Aug 12, 2019 at 12:42:30PM -0700, Ralph Campbell wrote:
> >
> > On 8/10/19 4:13 AM, Christoph Hellwig wrote:
> >> On something vaguely related to this patch:
> >>
> >> You use the NVIF_VMM_PFNMAP_V0_V* defines from nvif/if000c.h, which are
> >> a little odd as we only ever set these bits, but they also don't seem
> >> to appear to be in values that are directly fed to the hardware.
> >>
> >> On the other hand mmu/vmm.h defines a set of NVIF_VMM_PFNMAP_V0_*
> >
> > Yes, I see NVKM_VMM_PFN_*
> >
> >> constants with similar names and identical values, and those are used
> >> in mmu/vmmgp100.c and what appears to finally do the low-level dma
> >> mapping and talking to the hardware.  Are these two sets of constants
> >> supposed to be the same?  Are the actual hardware values or just a
> >> driver internal interface?
> >
> > It looks a bit odd to me too.
> > I don't really know the structure/history of nouveau.
> > Perhaps Ben Skeggs can shed more light on your question.
>
> Ben, do you have any insights on these constants?
Those sets of constants are (currently) supposed to be the same value.
They don't necessarily map to the HW directly at this stage, and
something different will likely be done in the future as HW changes.

Ben.
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH] nouveau/hmm: map pages after migration
       [not found]                 ` <CABDvA=n4Y0QticHZowEWFOqiEB3p99nV71GvHAySYdkcgw-Aow-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-08-17  6:11                   ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2019-08-17  6:11 UTC (permalink / raw)
  To: Ben Skeggs; +Cc: nouveau, Christoph Hellwig, dri-devel

On Sat, Aug 17, 2019 at 11:05:36AM +1000, Ben Skeggs wrote:
> > >> constants with similar names and identical values, and those are used
> > >> in mmu/vmmgp100.c and what appears to finally do the low-level dma
> > >> mapping and talking to the hardware.  Are these two sets of constants
> > >> supposed to be the same?  Are the actual hardware values or just a
> > >> driver internal interface?
> > >
> > > It looks a bit odd to me too.
> > > I don't really know the structure/history of nouveau.
> > > Perhaps Ben Skeggs can shed more light on your question.
> >
> > Ben, do you have any insights on these constants?
> Those sets of constants are (currently) supposed to be the same value.
> They don't necessarily map to the HW directly at this stage, and
> something different will likely be done in the future as HW changes.

So why do we have two sets of constants instead of one set for these
values?  That makes trying to follow the code pretty hard.
_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2019-08-17  6:11 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-07 15:02 [PATCH] nouveau/hmm: map pages after migration Ralph Campbell
2019-08-07 15:02 ` Ralph Campbell
2019-08-08  7:07 ` Christoph Hellwig
2019-08-08  7:07   ` Christoph Hellwig
2019-08-08 21:29   ` Ralph Campbell
2019-08-08 21:29     ` Ralph Campbell
2019-08-10 11:13     ` Christoph Hellwig
2019-08-10 11:13       ` Christoph Hellwig
2019-08-12 19:42       ` Ralph Campbell
     [not found]         ` <1a84e6b6-31e6-6955-509f-9883f4a7a322-DDmLM1+adcrQT0dZR+AlfA@public.gmane.org>
2019-08-16  7:11           ` Christoph Hellwig
     [not found]             ` <20190816071132.GA7513-jcswGhMUV9g@public.gmane.org>
2019-08-17  1:05               ` Ben Skeggs
     [not found]                 ` <CABDvA=n4Y0QticHZowEWFOqiEB3p99nV71GvHAySYdkcgw-Aow-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-08-17  6:11                   ` Christoph Hellwig
2019-08-13 21:58 ` Jerome Glisse
2019-08-15 17:43   ` [Nouveau] " Jerome Glisse
2019-08-15 17:43     ` Jerome Glisse

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.