From: Alistair Popple <apopple@nvidia.com>
To: <linux-mm@kvack.org>, <nouveau@lists.freedesktop.org>,
	<bskeggs@redhat.com>, <akpm@linux-foundation.org>
Cc: <linux-doc@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<kvm-ppc@vger.kernel.org>, <dri-devel@lists.freedesktop.org>,
	<jhubbard@nvidia.com>, <rcampbell@nvidia.com>,
	<jglisse@redhat.com>, "Alistair Popple" <apopple@nvidia.com>
Subject: [PATCH 8/9] nouveau/dmem: Add support for multiple page types
Date: Tue, 9 Feb 2021 12:07:21 +1100	[thread overview]
Message-ID: <20210209010722.13839-9-apopple@nvidia.com> (raw)
In-Reply-To: <20210209010722.13839-1-apopple@nvidia.com>

Device private pages are used to track a per-page migrate_to_ram()
callback, which is called when the CPU attempts to access a GPU page.
Currently the same callback is used for all GPU pages tracked by
Nouveau. However, a future patch requires support for calling a
different callback when accessing some GPU pages.

This patch extends the existing Nouveau device private page allocator to
make it easier to allocate device private pages with different callbacks.
It should not introduce any functional changes.
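
For illustration, here is a minimal user-space sketch of the per-type
free list scheme this change introduces. The names (dmem_type,
page_alloc, page_free) are hypothetical stand-ins that only model the
kernel code; they are not the Nouveau API:

/*
 * Sketch: one singly linked free list per page type, so a later patch
 * can add a second type with a different migrate_to_ram() callback
 * without changing the allocator itself.
 */
#include <stdio.h>

enum dmem_type {
	DMEM_DEFAULT,		/* mirrors NOUVEAU_DMEM */
	DMEM_NTYPES,		/* number of types, must be last */
};

struct dmem_page {
	struct dmem_page *next;	/* stands in for page->zone_device_data */
	enum dmem_type type;
};

static struct dmem_page *free_pages[DMEM_NTYPES];

/* Push a page onto the free list for its type (cf. nouveau_dmem_page_free()). */
static void page_free(struct dmem_page *page)
{
	page->next = free_pages[page->type];
	free_pages[page->type] = page;
}

/* Pop a page of the requested type (cf. nouveau_dmem_page_alloc_locked()). */
static struct dmem_page *page_alloc(enum dmem_type type)
{
	struct dmem_page *page = free_pages[type];

	if (page)
		free_pages[type] = page->next;
	return page;
}

int main(void)
{
	struct dmem_page p = { .type = DMEM_DEFAULT };

	page_free(&p);
	printf("allocated type %d page: %p\n",
	       DMEM_DEFAULT, (void *)page_alloc(DMEM_DEFAULT));
	return 0;
}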

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 27 ++++++++++++++------------
 drivers/gpu/drm/nouveau/nouveau_dmem.h |  5 +++++
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 9579bd001f11..8fb4949f3778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -67,6 +67,7 @@ struct nouveau_dmem_chunk {
 	struct nouveau_bo *bo;
 	struct nouveau_drm *drm;
 	unsigned long callocated;
+	enum nouveau_dmem_type type;
 	struct dev_pagemap pagemap;
 };
 
@@ -81,7 +82,7 @@ struct nouveau_dmem {
 	struct nouveau_dmem_migrate migrate;
 	struct list_head chunks;
 	struct mutex mutex;
-	struct page *free_pages;
+	struct page *free_pages[NOUVEAU_DMEM_NTYPES];
 	spinlock_t lock;
 };
 
@@ -112,8 +113,8 @@ static void nouveau_dmem_page_free(struct page *page)
 	struct nouveau_dmem *dmem = chunk->drm->dmem;
 
 	spin_lock(&dmem->lock);
-	page->zone_device_data = dmem->free_pages;
-	dmem->free_pages = page;
+	page->zone_device_data = dmem->free_pages[chunk->type];
+	dmem->free_pages[chunk->type] = page;
 
 	WARN_ON(!chunk->callocated);
 	chunk->callocated--;
@@ -224,7 +225,8 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
 };
 
 static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage,
+	enum nouveau_dmem_type type)
 {
 	struct nouveau_dmem_chunk *chunk;
 	struct resource *res;
@@ -248,6 +250,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 	}
 
 	chunk->drm = drm;
+	chunk->type = type;
 	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	chunk->pagemap.range.start = res->start;
 	chunk->pagemap.range.end = res->end;
@@ -279,8 +282,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 	page = pfn_to_page(pfn_first);
 	spin_lock(&drm->dmem->lock);
 	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
-		page->zone_device_data = drm->dmem->free_pages;
-		drm->dmem->free_pages = page;
+		page->zone_device_data = drm->dmem->free_pages[type];
+		drm->dmem->free_pages[type] = page;
 	}
 	*ppage = page;
 	chunk->callocated++;
@@ -304,22 +307,22 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 }
 
 static struct page *
-nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, enum nouveau_dmem_type type)
 {
 	struct nouveau_dmem_chunk *chunk;
 	struct page *page = NULL;
 	int ret;
 
 	spin_lock(&drm->dmem->lock);
-	if (drm->dmem->free_pages) {
-		page = drm->dmem->free_pages;
-		drm->dmem->free_pages = page->zone_device_data;
+	if (drm->dmem->free_pages[type]) {
+		page = drm->dmem->free_pages[type];
+		drm->dmem->free_pages[type] = page->zone_device_data;
 		chunk = nouveau_page_to_chunk(page);
 		chunk->callocated++;
 		spin_unlock(&drm->dmem->lock);
 	} else {
 		spin_unlock(&drm->dmem->lock);
-		ret = nouveau_dmem_chunk_alloc(drm, &page);
+		ret = nouveau_dmem_chunk_alloc(drm, &page, type);
 		if (ret)
 			return NULL;
 	}
@@ -577,7 +580,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 	if (!(src & MIGRATE_PFN_MIGRATE))
 		goto out;
 
-	dpage = nouveau_dmem_page_alloc_locked(drm);
+	dpage = nouveau_dmem_page_alloc_locked(drm, NOUVEAU_DMEM);
 	if (!dpage)
 		goto out;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 64da5d3635c8..02e261c4acf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -28,6 +28,11 @@ struct nouveau_drm;
 struct nouveau_svmm;
 struct hmm_range;
 
+enum nouveau_dmem_type {
+	NOUVEAU_DMEM,
+	NOUVEAU_DMEM_NTYPES, /* Number of types, must be last */
+};
+
 #if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
 void nouveau_dmem_init(struct nouveau_drm *);
 void nouveau_dmem_fini(struct nouveau_drm *);
-- 
2.20.1

