All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jason Gunthorpe <jgg@nvidia.com>
To: Christoph Hellwig <hch@infradead.org>, Maor Gottlieb <maorg@nvidia.com>
Cc: Leon Romanovsky <leon@kernel.org>,
	Doug Ledford <dledford@redhat.com>,
	Daniel Vetter <daniel@ffwll.ch>, David Airlie <airlied@linux.ie>,
	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>,
	dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	linux-kernel@vger.kernel.org, linux-rdma@vger.kernel.org,
	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
	Maxime Ripard <mripard@kernel.org>,
	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Roland Scheidegger <sroland@vmware.com>,
	Thomas Zimmermann <tzimmermann@suse.de>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Yishai Hadas <yishaih@nvidia.com>, Zack Rusin <zackr@vmware.com>,
	Zhu Yanjun <zyjzyj2000@gmail.com>
Subject: Re: [PATCH rdma-next v2 1/2] lib/scatterlist: Fix wrong update of orig_nents
Date: Thu, 22 Jul 2021 10:39:00 -0300	[thread overview]
Message-ID: <20210722133900.GJ1117491@nvidia.com> (raw)
In-Reply-To: <YPltp39n9URglTXT@infradead.org>

On Thu, Jul 22, 2021 at 02:07:51PM +0100, Christoph Hellwig wrote:
> On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote:
> > this is better:
> > 
> >    struct sg_append_table state;
> > 
> >    sg_append_init(&state, sgt, gfp_mask);
> > 
> >    while (..)
> >      ret = sg_append_pages(&state, pages, n_pages, ..)
> >      if (ret)
> > 	 sg_append_abort(&state); // Frees the sgt and sets it to NULL
> >    sg_append_complete(&state)
> > 
> > Which allows sg_alloc_table_from_pages() to be written as
> > 
> >    struct sg_append_table state;
> >    sg_append_init(&state, sgt, gfp_mask);
> >    ret = sg_append_pages(&state, pages, n_pages, offset, size, UINT_MAX)
> >    if (ret) {
> >       sg_append_abort(&state);
> >       return ret;
> >    }
> >    sg_append_complete(&state);
> >    return 0;
> > 
> > And then the API can manage all of this in some sane and
> > understandable way.
> 
> That would be a lot easier to use for sure.  Not sure how invasive the
> changes would be, though.

Looks pretty good, Maor can you try it?

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1739d10a2c556e..6c8e761ab389e8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -806,27 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 				       struct page **pages, unsigned int nr_pages)
 {
-	struct sg_table *sg;
-	struct scatterlist *sge;
+	struct sg_table *sgt;
 	size_t max_segment = 0;
+	int ret;
 
-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg)
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt)
 		return ERR_PTR(-ENOMEM);
 
 	if (dev)
 		max_segment = dma_max_mapping_size(dev->dev);
 	if (max_segment == 0)
 		max_segment = UINT_MAX;
-	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					  nr_pages << PAGE_SHIFT,
-					  max_segment,
-					  NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sge)) {
-		kfree(sg);
-		sg = ERR_CAST(sge);
+
+	ret = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
+						nr_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return ERR_PTR(ret);
 	}
-	return sg;
+	return sgt;
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c341d3e3386ccb..a2c5e1b30f425f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,6 +131,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	unsigned int max_segment = i915_sg_segment_size();
+	struct sg_append_table state;
 	struct sg_table *st;
 	unsigned int sg_page_sizes;
 	struct scatterlist *sg;
@@ -153,13 +154,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
-	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-					 num_pages << PAGE_SHIFT, max_segment,
-					 NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
+	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+						num_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret)
 		goto err;
-	}
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2ad889272b0a15..56214bcc6c5280 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -386,15 +386,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-				vsgt->num_pages, 0,
-				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
-				dma_get_max_seg_size(dev_priv->drm.dev),
-				NULL, 0, GFP_KERNEL, NULL);
-		if (IS_ERR(sg)) {
-			ret = PTR_ERR(sg);
+		ret = sg_alloc_table_from_pages_segment(
+			vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+		if (ret)
 			goto out_sg_alloc_fail;
-		}
 
 		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
 			uint64_t over_alloc =
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5cc41ae962ec1d..53de1ec77326be 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -556,6 +556,25 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
 }
 EXPORT_SYMBOL(__sg_alloc_table_from_pages);
 
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+				      unsigned int n_pages, unsigned int offset,
+				      unsigned long size,
+				      unsigned int max_segment, gfp_t gfp_mask)
+{
+	struct sg_append_table state;
+	int ret;
+
+	sg_append_init(&state, sgt, max_segment, gfp_mask);
+	ret = sg_append_pages(&state, pages, n_pages, offset, size);
+	if (ret) {
+		sg_append_abort(&state);
+		return ret;
+	}
+	sg_append_complete(&state);
+	return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
+
 /**
  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
  *			       an array of pages
@@ -580,8 +599,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 			      unsigned int n_pages, unsigned int offset,
 			      unsigned long size, gfp_t gfp_mask)
 {
-	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
-			offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL));
+	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+						 size, UINT_MAX, gfp_mask);
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 

WARNING: multiple messages have this Message-ID (diff)
From: Jason Gunthorpe <jgg@nvidia.com>
To: Christoph Hellwig <hch@infradead.org>, Maor Gottlieb <maorg@nvidia.com>
Cc: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>,
	Thomas Zimmermann <tzimmermann@suse.de>,
	Leon Romanovsky <leon@kernel.org>,
	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>,
	David Airlie <airlied@linux.ie>,
	Roland Scheidegger <sroland@vmware.com>,
	intel-gfx@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	dri-devel@lists.freedesktop.org, linux-rdma@vger.kernel.org,
	Doug Ledford <dledford@redhat.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Zhu Yanjun <zyjzyj2000@gmail.com>
Subject: Re: [PATCH rdma-next v2 1/2] lib/scatterlist: Fix wrong update of orig_nents
Date: Thu, 22 Jul 2021 10:39:00 -0300	[thread overview]
Message-ID: <20210722133900.GJ1117491@nvidia.com> (raw)
In-Reply-To: <YPltp39n9URglTXT@infradead.org>

On Thu, Jul 22, 2021 at 02:07:51PM +0100, Christoph Hellwig wrote:
> On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote:
> > this is better:
> > 
> >    struct sg_append_table state;
> > 
> >    sg_append_init(&state, sgt, gfp_mask);
> > 
> >    while (..)
> >      ret = sg_append_pages(&state, pages, n_pages, ..)
> >      if (ret)
> > 	 sg_append_abort(&state); // Frees the sgt and sets it to NULL
> >    sg_append_complete(&state)
> > 
> > Which allows sg_alloc_table_from_pages() to be written as
> > 
> >    struct sg_append_table state;
> >    sg_append_init(&state, sgt, gfp_mask);
> >    ret = sg_append_pages(&state, pages, n_pages, offset, size, UINT_MAX)
> >    if (ret) {
> >       sg_append_abort(&state);
> >       return ret;
> >    }
> >    sg_append_complete(&state);
> >    return 0;
> > 
> > And then the API can manage all of this in some sane and
> > understandable way.
> 
> That would be a lot easier to use for sure.  Not sure how invasive the
> changes would be, though.

Looks pretty good, Maor can you try it?

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1739d10a2c556e..6c8e761ab389e8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -806,27 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 				       struct page **pages, unsigned int nr_pages)
 {
-	struct sg_table *sg;
-	struct scatterlist *sge;
+	struct sg_table *sgt;
 	size_t max_segment = 0;
+	int ret;
 
-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg)
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt)
 		return ERR_PTR(-ENOMEM);
 
 	if (dev)
 		max_segment = dma_max_mapping_size(dev->dev);
 	if (max_segment == 0)
 		max_segment = UINT_MAX;
-	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					  nr_pages << PAGE_SHIFT,
-					  max_segment,
-					  NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sge)) {
-		kfree(sg);
-		sg = ERR_CAST(sge);
+
+	ret = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
+						nr_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return ERR_PTR(ret);
 	}
-	return sg;
+	return sgt;
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c341d3e3386ccb..a2c5e1b30f425f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,6 +131,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	unsigned int max_segment = i915_sg_segment_size();
+	struct sg_append_table state;
 	struct sg_table *st;
 	unsigned int sg_page_sizes;
 	struct scatterlist *sg;
@@ -153,13 +154,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
-	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-					 num_pages << PAGE_SHIFT, max_segment,
-					 NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
+	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+						num_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret)
 		goto err;
-	}
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2ad889272b0a15..56214bcc6c5280 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -386,15 +386,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-				vsgt->num_pages, 0,
-				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
-				dma_get_max_seg_size(dev_priv->drm.dev),
-				NULL, 0, GFP_KERNEL, NULL);
-		if (IS_ERR(sg)) {
-			ret = PTR_ERR(sg);
+		ret = sg_alloc_table_from_pages_segment(
+			vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+		if (ret)
 			goto out_sg_alloc_fail;
-		}
 
 		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
 			uint64_t over_alloc =
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5cc41ae962ec1d..53de1ec77326be 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -556,6 +556,25 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
 }
 EXPORT_SYMBOL(__sg_alloc_table_from_pages);
 
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+				      unsigned int n_pages, unsigned int offset,
+				      unsigned long size,
+				      unsigned int max_segment, gfp_t gfp_mask)
+{
+	struct sg_append_table state;
+	int ret;
+
+	sg_append_init(&state, sgt, max_segment, gfp_mask);
+	ret = sg_append_pages(&state, pages, n_pages, offset, size);
+	if (ret) {
+		sg_append_abort(&state);
+		return ret;
+	}
+	sg_append_complete(&state);
+	return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
+
 /**
  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
  *			       an array of pages
@@ -580,8 +599,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 			      unsigned int n_pages, unsigned int offset,
 			      unsigned long size, gfp_t gfp_mask)
 {
-	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
-			offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL));
+	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+						 size, UINT_MAX, gfp_mask);
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 

WARNING: multiple messages have this Message-ID (diff)
From: Jason Gunthorpe <jgg@nvidia.com>
To: Christoph Hellwig <hch@infradead.org>, Maor Gottlieb <maorg@nvidia.com>
Cc: Maxime Ripard <mripard@kernel.org>,
	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>,
	Thomas Zimmermann <tzimmermann@suse.de>,
	Leon Romanovsky <leon@kernel.org>,
	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>,
	David Airlie <airlied@linux.ie>,
	Roland Scheidegger <sroland@vmware.com>,
	intel-gfx@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	dri-devel@lists.freedesktop.org, linux-rdma@vger.kernel.org,
	Doug Ledford <dledford@redhat.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	VMware Graphics <linux-graphics-maintainer@vmware.com>,
	Zhu Yanjun <zyjzyj2000@gmail.com>, Zack Rusin <zackr@vmware.com>
Subject: Re: [Intel-gfx] [PATCH rdma-next v2 1/2] lib/scatterlist: Fix wrong update of orig_nents
Date: Thu, 22 Jul 2021 10:39:00 -0300	[thread overview]
Message-ID: <20210722133900.GJ1117491@nvidia.com> (raw)
In-Reply-To: <YPltp39n9URglTXT@infradead.org>

On Thu, Jul 22, 2021 at 02:07:51PM +0100, Christoph Hellwig wrote:
> On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote:
> > this is better:
> > 
> >    struct sg_append_table state;
> > 
> >    sg_append_init(&state, sgt, gfp_mask);
> > 
> >    while (..)
> >      ret = sg_append_pages(&state, pages, n_pages, ..)
> >      if (ret)
> > 	 sg_append_abort(&state); // Frees the sgt and sets it to NULL
> >    sg_append_complete(&state)
> > 
> > Which allows sg_alloc_table_from_pages() to be written as
> > 
> >    struct sg_append_table state;
> >    sg_append_init(&state, sgt, gfp_mask);
> >    ret = sg_append_pages(&state, pages, n_pages, offset, size, UINT_MAX)
> >    if (ret) {
> >       sg_append_abort(&state);
> >       return ret;
> >    }
> >    sg_append_complete(&state);
> >    return 0;
> > 
> > And then the API can manage all of this in some sane and
> > understandable way.
> 
> That would be a lot easier to use for sure.  Not sure how invasive the
> changes would be, though.

Looks pretty good, Maor can you try it?

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1739d10a2c556e..6c8e761ab389e8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -806,27 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 				       struct page **pages, unsigned int nr_pages)
 {
-	struct sg_table *sg;
-	struct scatterlist *sge;
+	struct sg_table *sgt;
 	size_t max_segment = 0;
+	int ret;
 
-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sg)
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt)
 		return ERR_PTR(-ENOMEM);
 
 	if (dev)
 		max_segment = dma_max_mapping_size(dev->dev);
 	if (max_segment == 0)
 		max_segment = UINT_MAX;
-	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					  nr_pages << PAGE_SHIFT,
-					  max_segment,
-					  NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sge)) {
-		kfree(sg);
-		sg = ERR_CAST(sge);
+
+	ret = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
+						nr_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return ERR_PTR(ret);
 	}
-	return sg;
+	return sgt;
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c341d3e3386ccb..a2c5e1b30f425f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,6 +131,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	unsigned int max_segment = i915_sg_segment_size();
+	struct sg_append_table state;
 	struct sg_table *st;
 	unsigned int sg_page_sizes;
 	struct scatterlist *sg;
@@ -153,13 +154,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
-	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-					 num_pages << PAGE_SHIFT, max_segment,
-					 NULL, 0, GFP_KERNEL, NULL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
+	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+						num_pages << PAGE_SHIFT,
+						max_segment, GFP_KERNEL);
+	if (ret)
 		goto err;
-	}
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2ad889272b0a15..56214bcc6c5280 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -386,15 +386,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-				vsgt->num_pages, 0,
-				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
-				dma_get_max_seg_size(dev_priv->drm.dev),
-				NULL, 0, GFP_KERNEL, NULL);
-		if (IS_ERR(sg)) {
-			ret = PTR_ERR(sg);
+		ret = sg_alloc_table_from_pages_segment(
+			vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
+			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+		if (ret)
 			goto out_sg_alloc_fail;
-		}
 
 		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
 			uint64_t over_alloc =
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5cc41ae962ec1d..53de1ec77326be 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -556,6 +556,25 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
 }
 EXPORT_SYMBOL(__sg_alloc_table_from_pages);
 
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+				      unsigned int n_pages, unsigned int offset,
+				      unsigned long size,
+				      unsigned int max_segment, gfp_t gfp_mask)
+{
+	struct sg_append_table state;
+	int ret;
+
+	sg_append_init(&state, sgt, max_segment, gfp_mask);
+	ret = sg_append_pages(&state, pages, n_pages, offset, size);
+	if (ret) {
+		sg_append_abort(&state);
+		return ret;
+	}
+	sg_append_complete(&state);
+	return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
+
 /**
  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
  *			       an array of pages
@@ -580,8 +599,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 			      unsigned int n_pages, unsigned int offset,
 			      unsigned long size, gfp_t gfp_mask)
 {
-	return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
-			offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL));
+	return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+						 size, UINT_MAX, gfp_mask);
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2021-07-22 13:39 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-07-18 11:09 [PATCH rdma-next v2 0/2] SG fix together with update to RDMA umem Leon Romanovsky
2021-07-18 11:09 ` [Intel-gfx] " Leon Romanovsky
2021-07-18 11:09 ` Leon Romanovsky
2021-07-18 11:09 ` [PATCH rdma-next v2 1/2] lib/scatterlist: Fix wrong update of orig_nents Leon Romanovsky
2021-07-18 11:09   ` [Intel-gfx] " Leon Romanovsky
2021-07-18 11:09   ` Leon Romanovsky
2021-07-21 16:13   ` Christoph Hellwig
2021-07-21 16:13     ` [Intel-gfx] " Christoph Hellwig
2021-07-22 13:00   ` Jason Gunthorpe
2021-07-22 13:00     ` [Intel-gfx] " Jason Gunthorpe
2021-07-22 13:00     ` Jason Gunthorpe
2021-07-22 13:07     ` Christoph Hellwig
2021-07-22 13:07       ` [Intel-gfx] " Christoph Hellwig
2021-07-22 13:39       ` Jason Gunthorpe [this message]
2021-07-22 13:39         ` Jason Gunthorpe
2021-07-22 13:39         ` Jason Gunthorpe
2021-07-18 11:09 ` [PATCH rdma-next v2 2/2] RDMA: Use dma_map_sgtable for map umem pages Leon Romanovsky
2021-07-18 11:09   ` [Intel-gfx] " Leon Romanovsky
2021-07-18 11:09   ` Leon Romanovsky
2021-07-21 16:16   ` Christoph Hellwig
2021-07-21 16:16     ` [Intel-gfx] " Christoph Hellwig
2021-07-18 11:14 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for SG fix together with update to RDMA umem Patchwork
2021-07-22 14:31 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for SG fix together with update to RDMA umem (rev2) Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210722133900.GJ1117491@nvidia.com \
    --to=jgg@nvidia.com \
    --cc=airlied@linux.ie \
    --cc=daniel@ffwll.ch \
    --cc=dennis.dalessandro@cornelisnetworks.com \
    --cc=dledford@redhat.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=hch@infradead.org \
    --cc=intel-gfx@lists.freedesktop.org \
    --cc=jani.nikula@linux.intel.com \
    --cc=joonas.lahtinen@linux.intel.com \
    --cc=leon@kernel.org \
    --cc=linux-graphics-maintainer@vmware.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=maarten.lankhorst@linux.intel.com \
    --cc=maorg@nvidia.com \
    --cc=mike.marciniszyn@cornelisnetworks.com \
    --cc=mripard@kernel.org \
    --cc=rodrigo.vivi@intel.com \
    --cc=sroland@vmware.com \
    --cc=tzimmermann@suse.de \
    --cc=yishaih@nvidia.com \
    --cc=zackr@vmware.com \
    --cc=zyjzyj2000@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.