From: Christoph Hellwig <hch@lst.de>
To: "Jérôme Glisse" <jglisse@redhat.com>,
	"Jason Gunthorpe" <jgg@mellanox.com>,
	"Ben Skeggs" <bskeggs@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>,
	Bharata B Rao <bharata@linux.ibm.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org,
	dri-devel@lists.freedesktop.org, linux-kernel@vger.kernel.org
Subject: [PATCH 04/10] nouveau: factor out dmem fence completion
Date: Wed, 14 Aug 2019 09:59:22 +0200	[thread overview]
Message-ID: <20190814075928.23766-5-hch@lst.de> (raw)
In-Reply-To: <20190814075928.23766-1-hch@lst.de>

Factor out the fence completion logic at the end of the two migration routines.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 33 ++++++++++++--------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index d469bc334438..21052a4aaf69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -133,6 +133,19 @@ static void nouveau_dmem_page_free(struct page *page)
 	spin_unlock(&chunk->lock);
 }
 
+static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
+{
+	if (fence) {
+		nouveau_fence_wait(*fence, true, false);
+		nouveau_fence_unref(fence);
+	} else {
+		/*
+	 * FIXME wait for the channel to be IDLE before finalizing
+	 * the hmem object.
+		 */
+	}
+}
+
 static void
 nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 				  const unsigned long *src_pfns,
@@ -236,15 +249,7 @@ nouveau_dmem_fault_finalize_and_map(struct nouveau_dmem_fault *fault)
 {
 	struct nouveau_drm *drm = fault->drm;
 
-	if (fault->fence) {
-		nouveau_fence_wait(fault->fence, true, false);
-		nouveau_fence_unref(&fault->fence);
-	} else {
-		/*
-		 * FIXME wait for channel to be IDLE before calling finalizing
-		 * the hmem object below (nouveau_migrate_hmem_fini()).
-		 */
-	}
+	nouveau_dmem_fence_done(&fault->fence);
 
 	while (fault->npages--) {
 		dma_unmap_page(drm->dev->dev, fault->dma[fault->npages],
@@ -748,15 +753,7 @@ nouveau_dmem_migrate_finalize_and_map(struct nouveau_migrate *migrate)
 {
 	struct nouveau_drm *drm = migrate->drm;
 
-	if (migrate->fence) {
-		nouveau_fence_wait(migrate->fence, true, false);
-		nouveau_fence_unref(&migrate->fence);
-	} else {
-		/*
-		 * FIXME wait for channel to be IDLE before finalizing
-		 * the hmem object below (nouveau_migrate_hmem_fini()) ?
-		 */
-	}
+	nouveau_dmem_fence_done(&migrate->fence);
 
 	while (migrate->dma_nr--) {
 		dma_unmap_page(drm->dev->dev, migrate->dma[migrate->dma_nr],
-- 
2.20.1

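To make the refactoring easier to see at a glance, here is a minimal standalone C
sketch of the same pattern. This is not nouveau code: every name in it (toy_fence,
toy_fence_wait, toy_fence_done, ...) is an invented stand-in. The duplicated
wait-then-unref cleanup from the two finalize paths moves into a single helper that
takes a pointer-to-pointer, mirroring how the patch passes &fault->fence and
&migrate->fence so the helper can also clear the caller's fence field. Unlike the
patch, the toy helper checks *fence rather than fence, so the no-fence case is
harmless when the sketch is run on its own.

#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int seqno;
};

/* Stand-in for a fence wait such as nouveau_fence_wait(fence, true, false). */
static void toy_fence_wait(struct toy_fence *fence)
{
	printf("waiting for fence %d\n", fence->seqno);
}

/* Stand-in for nouveau_fence_unref(): drops the fence and clears the caller's pointer. */
static void toy_fence_unref(struct toy_fence **fence)
{
	free(*fence);
	*fence = NULL;
}

/*
 * The factored-out completion helper.  Both finalize paths previously
 * open-coded this wait-then-unref sequence; taking a pointer-to-pointer
 * lets one helper both read the fence and clear the caller's field.
 */
static void toy_fence_done(struct toy_fence **fence)
{
	if (*fence) {
		toy_fence_wait(*fence);
		toy_fence_unref(fence);
	}
}

struct toy_fault { struct toy_fence *fence; };
struct toy_migrate { struct toy_fence *fence; };

int main(void)
{
	struct toy_fault fault = { .fence = malloc(sizeof(struct toy_fence)) };
	struct toy_migrate migrate = { .fence = NULL };

	if (!fault.fence)
		return 1;
	fault.fence->seqno = 1;

	/* Both callers now share the same completion helper. */
	toy_fence_done(&fault.fence);
	toy_fence_done(&migrate.fence);
	return 0;
}

The double-pointer convention is the same one the real nouveau_fence_unref() uses,
which is why the patch can hand the helper the address of the fence field and have
it left NULL afterwards.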
