From: Christoph Hellwig <hch@lst.de>
To: "Dan Williams" <dan.j.williams@intel.com>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Jason Gunthorpe" <jgg@mellanox.com>,
	"Ben Skeggs" <bskeggs@redhat.com>
Cc: linux-nvdimm@lists.01.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org
Subject: [PATCH 15/22] nouveau: use devm_memremap_pages directly
Date: Thu, 13 Jun 2019 11:43:18 +0200	[thread overview]
Message-ID: <20190613094326.24093-16-hch@lst.de> (raw)
In-Reply-To: <20190613094326.24093-1-hch@lst.de>

Just use devm_memremap_pages instead of hmm_devmem_add to allow killing
that wrapper, which doesn't provide a whole lot of benefits.
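For reference, the conversion boils down to the following (a condensed
sketch of the hunks below; error handling and the chunk bookkeeping are
elided).  The per-page callbacks now recover the driver state via
container_of() on page->pgmap instead of the old devmem pointer:

  static inline struct nouveau_dmem *page_to_dmem(struct page *page)
  {
          return container_of(page->pgmap, struct nouveau_dmem, pagemap);
  }

  static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
          .page_free      = nouveau_dmem_page_free,
          .migrate        = nouveau_dmem_devmem_migrate,
  };

  /* in nouveau_dmem_init() */
  res = devm_request_free_mem_region(device, &iomem_resource, size);
  drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
  drm->dmem->pagemap.res = *res;
  drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
  if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
          goto out_free;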

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 80 ++++++++++++--------------
 1 file changed, 38 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index a50f6fd2fe24..9e32bc8ecbc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -72,7 +72,8 @@ struct nouveau_dmem_migrate {
 };
 
 struct nouveau_dmem {
-	struct hmm_devmem *devmem;
+	struct nouveau_drm *drm;
+	struct dev_pagemap pagemap;
 	struct nouveau_dmem_migrate migrate;
 	struct list_head chunk_free;
 	struct list_head chunk_full;
@@ -80,6 +81,11 @@ struct nouveau_dmem {
 	struct mutex mutex;
 };
 
+static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+{
+	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+}
+
 struct nouveau_dmem_fault {
 	struct nouveau_drm *drm;
 	struct nouveau_fence *fence;
@@ -96,8 +102,7 @@ struct nouveau_migrate {
 	unsigned long dma_nr;
 };
 
-static void
-nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+static void nouveau_dmem_page_free(struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk;
 	unsigned long idx;
@@ -260,29 +265,21 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
 	.finalize_and_map	= nouveau_dmem_fault_finalize_and_map,
 };
 
-static vm_fault_t
-nouveau_dmem_fault(struct hmm_devmem *devmem,
-		   struct vm_area_struct *vma,
-		   unsigned long addr,
-		   const struct page *page,
-		   unsigned int flags,
-		   pmd_t *pmdp)
+static vm_fault_t nouveau_dmem_devmem_migrate(struct vm_fault *vmf)
 {
-	struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
 	unsigned long src[1] = {0}, dst[1] = {0};
-	struct nouveau_dmem_fault fault = {0};
+	struct nouveau_dmem_fault fault = { .drm = dmem->drm };
 	int ret;
 
-
-
 	/*
 	 * FIXME what we really want is to find some heuristic to migrate more
 	 * than just one page on CPU fault. When such fault happens it is very
 	 * likely that more surrounding page will CPU fault too.
 	 */
-	fault.drm = nouveau_drm(drm_dev);
-	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
-			  addr + PAGE_SIZE, src, dst, &fault);
+	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
+			vmf->address, vmf->address + PAGE_SIZE,
+			src, dst, &fault);
 	if (ret)
 		return VM_FAULT_SIGBUS;
 
@@ -292,10 +289,9 @@ nouveau_dmem_fault(struct hmm_devmem *devmem,
 	return 0;
 }
 
-static const struct hmm_devmem_ops
-nouveau_dmem_devmem_ops = {
-	.free = nouveau_dmem_free,
-	.fault = nouveau_dmem_fault,
+static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
+	.page_free		= nouveau_dmem_page_free,
+	.migrate		= nouveau_dmem_devmem_migrate,
 };
 
 static int
@@ -581,7 +577,8 @@ void
 nouveau_dmem_init(struct nouveau_drm *drm)
 {
 	struct device *device = drm->dev->dev;
-	unsigned long i, size;
+	struct resource *res;
+	unsigned long i, size, pfn_first;
 	int ret;
 
 	/* This only make sense on PASCAL or newer */
@@ -591,6 +588,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
 		return;
 
+	drm->dmem->drm = drm;
 	mutex_init(&drm->dmem->mutex);
 	INIT_LIST_HEAD(&drm->dmem->chunk_free);
 	INIT_LIST_HEAD(&drm->dmem->chunk_full);
@@ -600,11 +598,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
 	/* Initialize migration dma helpers before registering memory */
 	ret = nouveau_dmem_migrate_init(drm);
-	if (ret) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
+	if (ret)
+		goto out_free;
 
 	/*
 	 * FIXME we need some kind of policy to decide how much VRAM we
@@ -612,14 +607,16 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	 * and latter if we want to do thing like over commit then we
 	 * could revisit this.
 	 */
-	drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
-					   device, size);
-	if (IS_ERR(drm->dmem->devmem)) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
-
+	res = devm_request_free_mem_region(device, &iomem_resource, size);
+	if (IS_ERR(res))
+		goto out_free;
+	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	drm->dmem->pagemap.res = *res;
+	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
+		goto out_free;
+
+	pfn_first = res->start >> PAGE_SHIFT;
 	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
 		struct nouveau_dmem_chunk *chunk;
 		struct page *page;
@@ -632,8 +629,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 		}
 
 		chunk->drm = drm;
-		chunk->pfn_first = drm->dmem->devmem->pfn_first;
-		chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
 		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
 
 		page = pfn_to_page(chunk->pfn_first);
@@ -643,6 +639,10 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	}
 
 	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+	return;
+out_free:
+	kfree(drm->dmem);
+	drm->dmem = NULL;
 }
 
 static void
@@ -835,11 +835,7 @@ nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
 {
 	if (!is_device_private_page(page))
 		return false;
-
-	if (drm->dmem->devmem != page->pgmap->data)
-		return false;
-
-	return true;
+	return drm->dmem == page_to_dmem(page);
 }
 
 void
-- 
2.20.1

