* [PATCH 1/4] udmabuf: use cache_sgt_mapping option
@ 2019-12-03  1:36 Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 2/4] udmabuf: add a pointer to the miscdevice in dma-buf private data Gurchetan Singh
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Gurchetan Singh @ 2019-12-03  1:36 UTC (permalink / raw)
  To: dri-devel; +Cc: Gurchetan Singh, kraxel, hch

The GEM prime helpers cache the sg_table mapping, so udmabuf should do
the same. It's also possible to make this optional later.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 9de539c1def4..be15eb6b0586 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -94,10 +94,11 @@ static void release_udmabuf(struct dma_buf *buf)
 }
 
 static const struct dma_buf_ops udmabuf_ops = {
-	.map_dma_buf	  = map_udmabuf,
-	.unmap_dma_buf	  = unmap_udmabuf,
-	.release	  = release_udmabuf,
-	.mmap		  = mmap_udmabuf,
+	.cache_sgt_mapping = true,
+	.map_dma_buf	   = map_udmabuf,
+	.unmap_dma_buf	   = unmap_udmabuf,
+	.release	   = release_udmabuf,
+	.mmap		   = mmap_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
-- 
2.24.0.393.g34dc348eaf-goog
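For context: cache_sgt_mapping asks the dma-buf core to cache the sg_table
returned by the exporter's map_dma_buf() on the attachment, so repeated
map/unmap cycles by an importer reuse a single mapping that is only torn
down at detach time. A condensed sketch of that behaviour (illustrative
only, simplified from a reading of the dma-buf core at the time, not the
actual dma-buf.c code):

/* Illustrative sketch: reuse a cached mapping when the exporter opted in
 * via cache_sgt_mapping; otherwise map and (maybe) cache it. */
static struct sg_table *
map_attachment_sketch(struct dma_buf_attachment *attach,
		      enum dma_data_direction dir)
{
	struct sg_table *sgt;

	if (attach->sgt)	/* cached by an earlier map call */
		return attach->sgt;

	sgt = attach->dmabuf->ops->map_dma_buf(attach, dir);
	if (!IS_ERR(sgt) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sgt;	/* kept until detach */
		attach->dir = dir;
	}
	return sgt;
}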


* [PATCH 2/4] udmabuf: add a pointer to the miscdevice in dma-buf private data
  2019-12-03  1:36 [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gurchetan Singh
@ 2019-12-03  1:36 ` Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 3/4] udmabuf: separate out creating/destroying scatter-table Gurchetan Singh
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Gurchetan Singh @ 2019-12-03  1:36 UTC (permalink / raw)
  To: dri-devel; +Cc: Gurchetan Singh, kraxel, hch

The miscdevice pointer will be used in a later patch to obtain a
struct device for DMA API calls.

v2: rename 'udmabuf_misc' to 'device' (kraxel)

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index be15eb6b0586..f0bf3ba7441e 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct miscdevice *device;
 };
 
 static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -104,8 +105,9 @@ static const struct dma_buf_ops udmabuf_ops = {
 #define SEALS_WANTED (F_SEAL_SHRINK)
 #define SEALS_DENIED (F_SEAL_WRITE)
 
-static long udmabuf_create(const struct udmabuf_create_list *head,
-			   const struct udmabuf_create_item *list)
+static long udmabuf_create(struct miscdevice *device,
+			   struct udmabuf_create_list *head,
+			   struct udmabuf_create_item *list)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct file *memfd = NULL;
@@ -172,6 +174,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR;
 
+	ubuf->device = device;
 	buf = dma_buf_export(&exp_info);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
@@ -209,7 +212,7 @@ static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
 	list.offset = create.offset;
 	list.size   = create.size;
 
-	return udmabuf_create(&head, &list);
+	return udmabuf_create(filp->private_data, &head, &list);
 }
 
 static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
@@ -228,7 +231,7 @@ static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
 	if (IS_ERR(list))
 		return PTR_ERR(list);
 
-	ret = udmabuf_create(&head, list);
+	ret = udmabuf_create(filp->private_data, &head, list);
 	kfree(list);
 	return ret;
 }
-- 
2.24.0.393.g34dc348eaf-goog
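For readers unfamiliar with the ioctl path touched here, a minimal
user-space sketch of creating a udmabuf from a sealed memfd follows
(assumes a kernel built with CONFIG_UDMABUF and a libc that provides
memfd_create(); most error handling trimmed):

/* Build: gcc -D_GNU_SOURCE -o udmabuf-demo udmabuf-demo.c */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/udmabuf.h>

int main(void)
{
	const size_t size = 2 * 1024 * 1024;	/* must be page aligned */
	struct udmabuf_create create;
	int memfd, devfd, buffd;

	/* Backing pages come from a sealable memfd. */
	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);

	/* The driver wants F_SEAL_SHRINK and rejects F_SEAL_WRITE
	 * (SEALS_WANTED / SEALS_DENIED in the code above). */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;

	/* On success this returns a dma-buf fd exported by the misc device. */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0) {
		perror("UDMABUF_CREATE");
		return 1;
	}
	printf("dma-buf fd: %d\n", buffd);
	return 0;
}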


* [PATCH 3/4] udmabuf: separate out creating/destroying scatter-table
  2019-12-03  1:36 [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 2/4] udmabuf: add a pointer to the miscdevice in dma-buf private data Gurchetan Singh
@ 2019-12-03  1:36 ` Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks Gurchetan Singh
  2019-12-05  7:59 ` [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gerd Hoffmann
  3 siblings, 0 replies; 7+ messages in thread
From: Gurchetan Singh @ 2019-12-03  1:36 UTC (permalink / raw)
  To: dri-devel; +Cc: Gurchetan Singh, kraxel, hch

Separate out the scatter-table creation/destruction into helper
functions so they can be re-used in a later patch.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index f0bf3ba7441e..0a610e09ae23 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -47,10 +47,10 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
-				    enum dma_data_direction direction)
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+				     enum dma_data_direction direction)
 {
-	struct udmabuf *ubuf = at->dmabuf->priv;
+	struct udmabuf *ubuf = buf->priv;
 	struct sg_table *sg;
 	int ret;
 
@@ -62,7 +62,7 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
 					GFP_KERNEL);
 	if (ret < 0)
 		goto err;
-	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+	if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -74,13 +74,25 @@ static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
 	return ERR_PTR(ret);
 }
 
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+			 enum dma_data_direction direction)
+{
+	dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+				    enum dma_data_direction direction)
+{
+	return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
 static void unmap_udmabuf(struct dma_buf_attachment *at,
 			  struct sg_table *sg,
 			  enum dma_data_direction direction)
 {
-	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
-	sg_free_table(sg);
-	kfree(sg);
+	return put_sg_table(at->dev, sg, direction);
 }
 
 static void release_udmabuf(struct dma_buf *buf)
-- 
2.24.0.393.g34dc348eaf-goog


* [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks
  2019-12-03  1:36 [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 2/4] udmabuf: add a pointer to the miscdevice in dma-buf private data Gurchetan Singh
  2019-12-03  1:36 ` [PATCH 3/4] udmabuf: separate out creating/destroying scatter-table Gurchetan Singh
@ 2019-12-03  1:36 ` Gurchetan Singh
  2019-12-09 22:44   ` Chia-I Wu
  2019-12-05  7:59 ` [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gerd Hoffmann
  3 siblings, 1 reply; 7+ messages in thread
From: Gurchetan Singh @ 2019-12-03  1:36 UTC (permalink / raw)
  To: dri-devel; +Cc: Gurchetan Singh, kraxel, hch

With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops.

This can allow us to have WC mappings in the guest after
synchronization.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
---
 drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 0a610e09ae23..61b0a2cff874 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *device;
 };
 
@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
 	kfree(ubuf);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+				       ubuf->sg->nents,
+				       direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf	   = map_udmabuf,
 	.unmap_dma_buf	   = unmap_udmabuf,
 	.release	   = release_udmabuf,
 	.mmap		   = mmap_udmabuf,
+	.begin_cpu_access  = begin_cpu_udmabuf,
+	.end_cpu_access    = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
-- 
2.24.0.393.g34dc348eaf-goog
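The new hooks back the generic DMA_BUF_IOCTL_SYNC uAPI, so user space that
brackets CPU access to the exported fd ends up in begin_cpu_udmabuf() /
end_cpu_udmabuf(). A minimal sketch (buffd and size are assumed to come
from a prior UDMABUF_CREATE, as in the earlier example; error handling
trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static void cpu_fill(int buffd, size_t size, unsigned char value)
{
	struct dma_buf_sync sync = { 0 };
	void *ptr;

	/* udmabuf implements the dma-buf mmap op, so the fd can be mapped. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);

	/* Ends up in the exporter's begin_cpu_access hook. */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(ptr, value, size);

	/* Ends up in the exporter's end_cpu_access hook. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(ptr, size);
}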


* Re: [PATCH 1/4] udmabuf: use cache_sgt_mapping option
  2019-12-03  1:36 [PATCH 1/4] udmabuf: use cache_sgt_mapping option Gurchetan Singh
                   ` (2 preceding siblings ...)
  2019-12-03  1:36 ` [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks Gurchetan Singh
@ 2019-12-05  7:59 ` Gerd Hoffmann
  3 siblings, 0 replies; 7+ messages in thread
From: Gerd Hoffmann @ 2019-12-05  7:59 UTC (permalink / raw)
  To: Gurchetan Singh; +Cc: hch, dri-devel

On Mon, Dec 02, 2019 at 05:36:24PM -0800, Gurchetan Singh wrote:
> The GEM prime helpers cache the sg_table mapping, so udmabuf should do
> the same. It's also possible to make this optional later.

All 4 pushed to drm-misc-next.

thanks,
  Gerd


* Re: [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks
  2019-12-03  1:36 ` [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks Gurchetan Singh
@ 2019-12-09 22:44   ` Chia-I Wu
  2019-12-13  1:09     ` Chia-I Wu
  0 siblings, 1 reply; 7+ messages in thread
From: Chia-I Wu @ 2019-12-09 22:44 UTC (permalink / raw)
  To: Gurchetan Singh; +Cc: Gerd Hoffmann, ML dri-devel, hch

On Mon, Dec 2, 2019 at 5:36 PM Gurchetan Singh
<gurchetansingh@chromium.org> wrote:
>
> With the misc device, we should end up using the result of
> get_arch_dma_ops(..) or dma-direct ops.
>
> This can allow us to have WC mappings in the guest after
> synchronization.
>
> Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
> ---
>  drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
>
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index 0a610e09ae23..61b0a2cff874 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
>  struct udmabuf {
>         pgoff_t pagecount;
>         struct page **pages;
> +       struct sg_table *sg;
>         struct miscdevice *device;
>  };
>
> @@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
>  static void release_udmabuf(struct dma_buf *buf)
>  {
>         struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
>         pgoff_t pg;
>
> +       if (ubuf->sg)
> +               put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
> +
>         for (pg = 0; pg < ubuf->pagecount; pg++)
>                 put_page(ubuf->pages[pg]);
>         kfree(ubuf->pages);
>         kfree(ubuf);
>  }
>
> +static int begin_cpu_udmabuf(struct dma_buf *buf,
> +                            enum dma_data_direction direction)
> +{
> +       struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
> +
> +       if (!ubuf->sg) {
> +               ubuf->sg = get_sg_table(dev, buf, direction);
> +               if (IS_ERR(ubuf->sg))
> +                       return PTR_ERR(ubuf->sg);
> +       } else {
> +               dma_sync_sg_for_device(dev, ubuf->sg->sgl,
> +                                      ubuf->sg->nents,
> +                                      direction);
I know this solves the issue (flush the CPU cache before WC access),
but it looks like an abuse?  It is counter-intuitive that the buffer
is synced for device when one wants CPU access.

> +       }
> +
> +       return 0;
> +}
> +
> +static int end_cpu_udmabuf(struct dma_buf *buf,
> +                          enum dma_data_direction direction)
> +{
> +       struct udmabuf *ubuf = buf->priv;
> +       struct device *dev = ubuf->device->this_device;
> +
> +       if (!ubuf->sg)
> +               return -EINVAL;
> +
> +       dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
> +       return 0;
> +}
> +
>  static const struct dma_buf_ops udmabuf_ops = {
>         .cache_sgt_mapping = true,
>         .map_dma_buf       = map_udmabuf,
>         .unmap_dma_buf     = unmap_udmabuf,
>         .release           = release_udmabuf,
>         .mmap              = mmap_udmabuf,
> +       .begin_cpu_access  = begin_cpu_udmabuf,
> +       .end_cpu_access    = end_cpu_udmabuf,
>  };
>
>  #define SEALS_WANTED (F_SEAL_SHRINK)
> --
> 2.24.0.393.g34dc348eaf-goog
>

* Re: [PATCH 4/4] udmabuf: implement begin_cpu_access/end_cpu_access hooks
  2019-12-09 22:44   ` Chia-I Wu
@ 2019-12-13  1:09     ` Chia-I Wu
  0 siblings, 0 replies; 7+ messages in thread
From: Chia-I Wu @ 2019-12-13  1:09 UTC (permalink / raw)
  To: Gurchetan Singh; +Cc: Gerd Hoffmann, ML dri-devel, hch

Hi,

On Mon, Dec 9, 2019 at 2:44 PM Chia-I Wu <olvaffe@gmail.com> wrote:
>
> On Mon, Dec 2, 2019 at 5:36 PM Gurchetan Singh
> <gurchetansingh@chromium.org> wrote:
> >
> > With the misc device, we should end up using the result of
> > get_arch_dma_ops(..) or dma-direct ops.
> >
> > This can allow us to have WC mappings in the guest after
> > synchronization.
> >
> > Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
> > ---
> >  drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 39 insertions(+)
> >
> > diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> > index 0a610e09ae23..61b0a2cff874 100644
> > --- a/drivers/dma-buf/udmabuf.c
> > +++ b/drivers/dma-buf/udmabuf.c
> > @@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
> >  struct udmabuf {
> >         pgoff_t pagecount;
> >         struct page **pages;
> > +       struct sg_table *sg;
> >         struct miscdevice *device;
> >  };
> >
> > @@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
> >  static void release_udmabuf(struct dma_buf *buf)
> >  {
> >         struct udmabuf *ubuf = buf->priv;
> > +       struct device *dev = ubuf->device->this_device;
> >         pgoff_t pg;
> >
> > +       if (ubuf->sg)
> > +               put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
> > +
> >         for (pg = 0; pg < ubuf->pagecount; pg++)
> >                 put_page(ubuf->pages[pg]);
> >         kfree(ubuf->pages);
> >         kfree(ubuf);
> >  }
> >
> > +static int begin_cpu_udmabuf(struct dma_buf *buf,
> > +                            enum dma_data_direction direction)
> > +{
> > +       struct udmabuf *ubuf = buf->priv;
> > +       struct device *dev = ubuf->device->this_device;
> > +
> > +       if (!ubuf->sg) {
> > +               ubuf->sg = get_sg_table(dev, buf, direction);
> > +               if (IS_ERR(ubuf->sg))
> > +                       return PTR_ERR(ubuf->sg);
> > +       } else {
> > +               dma_sync_sg_for_device(dev, ubuf->sg->sgl,
> > +                                      ubuf->sg->nents,
> > +                                      direction);
> I know this solves the issue (flush the CPU cache before WC access),
> but it looks like an abuse?  It is counter-intuitive that the buffer
> is synced for device when one wants CPU access.
I am skeptical about this change.

(1) Semantically, a dma-buf is in DMA domain.  CPU access from the
importer must be surrounded by {begin,end}_cpu_access.  This gives the
exporter a chance to move the buffer to the CPU domain temporarily.

(2) When the exporter itself has other means to do CPU access, it is
only reasonable for the exporter to move the buffer to the CPU domain
before access, and to the DMA domain after access.  The exporter can
potentially reuse {begin,end}_cpu_access for that purpose.

Because of (1), udmabuf does need to implement the
{begin,end}_cpu_access hooks.  But "begin" should mean
dma_sync_sg_for_cpu and "end" should mean dma_sync_sg_for_device.

Because of (2), if userspace wants to continue accessing the buffer through the
memfd mapping, it should call udmabuf's {begin,end}_cpu_access to
avoid cache issues.
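An untested sketch of those semantics, reusing the get_sg_table() /
put_sg_table() helpers from patch 3 (illustrative only, not a patch):

static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg))
			return PTR_ERR(ubuf->sg);
	}

	/* "begin" hands the buffer to the CPU domain. */
	dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	/* "end" hands it back to the DMA domain. */
	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}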




>
> > +       }
> > +
> > +       return 0;
> > +}
> > +
> > +static int end_cpu_udmabuf(struct dma_buf *buf,
> > +                          enum dma_data_direction direction)
> > +{
> > +       struct udmabuf *ubuf = buf->priv;
> > +       struct device *dev = ubuf->device->this_device;
> > +
> > +       if (!ubuf->sg)
> > +               return -EINVAL;
> > +
> > +       dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
> > +       return 0;
> > +}
> > +
> >  static const struct dma_buf_ops udmabuf_ops = {
> >         .cache_sgt_mapping = true,
> >         .map_dma_buf       = map_udmabuf,
> >         .unmap_dma_buf     = unmap_udmabuf,
> >         .release           = release_udmabuf,
> >         .mmap              = mmap_udmabuf,
> > +       .begin_cpu_access  = begin_cpu_udmabuf,
> > +       .end_cpu_access    = end_cpu_udmabuf,
> >  };
> >
> >  #define SEALS_WANTED (F_SEAL_SHRINK)
> > --
> > 2.24.0.393.g34dc348eaf-goog
> >
