* [PATCH] drm/vgem: Pin our pages for dmabuf exports
@ 2017-06-22 13:30 Chris Wilson
2017-06-22 13:34 ` Chris Wilson
` (3 more replies)
0 siblings, 4 replies; 9+ messages in thread
From: Chris Wilson @ 2017-06-22 13:30 UTC (permalink / raw)
To: dri-devel
Cc: intel-gfx, Chris Wilson, Tomi Sarvela, Laura Abbott, Sean Paul,
Matthew Auld, Daniel Vetter, stable
When the caller maps their dmabuf and we return an sg_table, the caller
doesn't expect the pages beneath that sg_table to vanish on a whim (i.e.
under mempressure). The contract is that the pages are pinned for the
duration of the mapping (from dma_buf_map_attachment() to
dma_buf_unmap_attachment). To comply, we need to introduce our own
vgem_object.pages_pin_count and elevate it across the mapping. However,
the drm_prime interface we use calls drv->prime_pin on dma_buf_attach
and drv->prime_unpin on dma_buf_detach, which while that does cover the
mapping is much broader than is desired -- but it will do for now.
Reported-by: Tomi Sarvela <tomi.p.sarvela@intel.com>
Testcase: igt/gem_concurrent_blit/*swap*vgem*
Fixes: 5ba6c9ff961a ("drm/vgem: Fix mmaping")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tomi Sarvela <tomi.p.sarvela@intel.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: <stable@vger.kernel.org>
---
drivers/gpu/drm/vgem/vgem_drv.c | 77 +++++++++++++++++++++++++++++++----------
drivers/gpu/drm/vgem/vgem_drv.h | 4 +++
2 files changed, 63 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 18f401b442c2..2a4aa2284248 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
kvfree(vgem_obj->pages);
+ mutex_destroy(&vgem_obj->pages_lock);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
vmf->page = obj->pages[page_offset];
ret = 0;
- } else {
+ }
+ mutex_unlock(&obj->pages_lock);
+ if (ret) {
struct page *page;
page = shmem_read_mapping_page(
@@ -108,6 +113,7 @@ static int vgem_gem_fault(struct vm_fault *vmf)
}
}
+
return ret;
}
@@ -161,6 +167,7 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
return ERR_PTR(ret);
}
+ mutex_init(&obj->pages_lock);
return obj;
}
@@ -274,37 +281,66 @@ static const struct file_operations vgem_driver_fops = {
.release = drm_release,
};
+static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (bo->pages_pin_count++ == 0) {
+ struct page **pages;
+
+ pages = drm_gem_get_pages(&bo->base);
+ if (IS_ERR(pages)) {
+ bo->pages_pin_count--;
+ mutex_unlock(&bo->pages_lock);
+ return pages;
+ }
+
+ bo->pages = pages;
+ }
+ mutex_unlock(&bo->pages_lock);
+
+ return bo->pages;
+}
+
+static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (--bo->pages_pin_count == 0) {
+ drm_gem_put_pages(&bo->base, bo->pages, true, true);
+ bo->pages = NULL;
+ }
+ mutex_unlock(&bo->pages_lock);
+}
+
static int vgem_prime_pin(struct drm_gem_object *obj)
{
- long n_pages = obj->size >> PAGE_SHIFT;
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+ long n_pages = bo->base.size >> PAGE_SHIFT;
struct page **pages;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
- */
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
drm_clflush_pages(pages, n_pages);
- drm_gem_put_pages(obj, pages, true, false);
return 0;
}
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+static void vgem_prime_unpin(struct drm_gem_object *obj)
{
- struct sg_table *st;
- struct page **pages;
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- pages = drm_gem_get_pages(obj);
- if (IS_ERR(pages))
- return ERR_CAST(pages);
+ vgem_unpin_pages(bo);
+}
- st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
- drm_gem_put_pages(obj, pages, false, false);
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- return st;
+ return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
@@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
}
+
+ obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
npages);
return &obj->base;
@@ -340,16 +378,18 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
void *addr;
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return NULL;
addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- drm_gem_put_pages(obj, pages, false, false);
+
+ vgem_unpin_pages(bo);
return addr;
}
@@ -409,6 +449,7 @@ static struct drm_driver vgem_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_pin = vgem_prime_pin,
+ .gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 1aae01419112..5c8f6d619ff3 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,7 +43,11 @@ struct vgem_file {
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
+
struct page **pages;
+ unsigned int pages_pin_count;
+ struct mutex pages_lock;
+
struct sg_table *table;
};
--
2.11.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH] drm/vgem: Pin our pages for dmabuf exports
2017-06-22 13:30 [PATCH] drm/vgem: Pin our pages for dmabuf exports Chris Wilson
@ 2017-06-22 13:34 ` Chris Wilson
2017-06-22 13:46 ` [PATCH v2] " Chris Wilson
` (2 subsequent siblings)
3 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2017-06-22 13:34 UTC (permalink / raw)
To: dri-devel
Cc: intel-gfx, Tomi Sarvela, Laura Abbott, Sean Paul, Matthew Auld,
Daniel Vetter, stable
Quoting Chris Wilson (2017-06-22 14:30:08)
> static void *vgem_prime_vmap(struct drm_gem_object *obj)
> {
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> long n_pages = obj->size >> PAGE_SHIFT;
> struct page **pages;
> void *addr;
>
> - pages = drm_gem_get_pages(obj);
> + pages = vgem_pin_pages(bo);
> if (IS_ERR(pages))
> return NULL;
>
> addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
> - drm_gem_put_pages(obj, pages, false, false);
> +
> + vgem_unpin_pages(bo);
Ho hum, memory says vmap() itself doesn't pin the pages, so we need to
drop the pin on prime_vunmap.
-Chris
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2] drm/vgem: Pin our pages for dmabuf exports
2017-06-22 13:30 [PATCH] drm/vgem: Pin our pages for dmabuf exports Chris Wilson
2017-06-22 13:34 ` Chris Wilson
@ 2017-06-22 13:46 ` Chris Wilson
2017-06-23 11:02 ` Daniel Vetter
2017-06-22 14:01 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-06-22 14:16 ` ✓ Fi.CI.BAT: success for drm/vgem: Pin our pages for dmabuf exports (rev2) Patchwork
3 siblings, 1 reply; 9+ messages in thread
From: Chris Wilson @ 2017-06-22 13:46 UTC (permalink / raw)
To: dri-devel
Cc: intel-gfx, Chris Wilson, Tomi Sarvela, Laura Abbott, Sean Paul,
Matthew Auld, Daniel Vetter, stable
When the caller maps their dmabuf and we return an sg_table, the caller
doesn't expect the pages beneath that sg_table to vanish on a whim (i.e.
under mempressure). The contract is that the pages are pinned for the
duration of the mapping (from dma_buf_map_attachment() to
dma_buf_unmap_attachment). To comply, we need to introduce our own
vgem_object.pages_pin_count and elevate it across the mapping. However,
the drm_prime interface we use calls drv->prime_pin on dma_buf_attach
and drv->prime_unpin on dma_buf_detach, which while that does cover the
mapping is much broader than is desired -- but it will do for now.
v2: also hold the pin across prime_vmap/vunmap
Reported-by: Tomi Sarvela <tomi.p.sarvela@intel.com>
Testcase: igt/gem_concurrent_blit/*swap*vgem*
Fixes: 5ba6c9ff961a ("drm/vgem: Fix mmaping")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tomi Sarvela <tomi.p.sarvela@intel.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: <stable@vger.kernel.org>
---
drivers/gpu/drm/vgem/vgem_drv.c | 81 ++++++++++++++++++++++++++++++-----------
drivers/gpu/drm/vgem/vgem_drv.h | 4 ++
2 files changed, 64 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 18f401b442c2..c938af8c40cf 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
kvfree(vgem_obj->pages);
+ mutex_destroy(&vgem_obj->pages_lock);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
vmf->page = obj->pages[page_offset];
ret = 0;
- } else {
+ }
+ mutex_unlock(&obj->pages_lock);
+ if (ret) {
struct page *page;
page = shmem_read_mapping_page(
@@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
return ERR_PTR(ret);
}
+ mutex_init(&obj->pages_lock);
+
return obj;
}
@@ -274,37 +281,66 @@ static const struct file_operations vgem_driver_fops = {
.release = drm_release,
};
+static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (bo->pages_pin_count++ == 0) {
+ struct page **pages;
+
+ pages = drm_gem_get_pages(&bo->base);
+ if (IS_ERR(pages)) {
+ bo->pages_pin_count--;
+ mutex_unlock(&bo->pages_lock);
+ return pages;
+ }
+
+ bo->pages = pages;
+ }
+ mutex_unlock(&bo->pages_lock);
+
+ return bo->pages;
+}
+
+static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (--bo->pages_pin_count == 0) {
+ drm_gem_put_pages(&bo->base, bo->pages, true, true);
+ bo->pages = NULL;
+ }
+ mutex_unlock(&bo->pages_lock);
+}
+
static int vgem_prime_pin(struct drm_gem_object *obj)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
- */
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
drm_clflush_pages(pages, n_pages);
- drm_gem_put_pages(obj, pages, true, false);
return 0;
}
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+static void vgem_prime_unpin(struct drm_gem_object *obj)
{
- struct sg_table *st;
- struct page **pages;
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- pages = drm_gem_get_pages(obj);
- if (IS_ERR(pages))
- return ERR_CAST(pages);
+ vgem_unpin_pages(bo);
+}
- st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
- drm_gem_put_pages(obj, pages, false, false);
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- return st;
+ return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
@@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
}
+
+ obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
npages);
return &obj->base;
@@ -340,23 +378,23 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
- void *addr;
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return NULL;
- addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- drm_gem_put_pages(obj, pages, false, false);
-
- return addr;
+ return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+
vunmap(vaddr);
+ vgem_unpin_pages(bo);
}
static int vgem_prime_mmap(struct drm_gem_object *obj,
@@ -409,6 +447,7 @@ static struct drm_driver vgem_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_pin = vgem_prime_pin,
+ .gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 1aae01419112..5c8f6d619ff3 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,7 +43,11 @@ struct vgem_file {
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
+
struct page **pages;
+ unsigned int pages_pin_count;
+ struct mutex pages_lock;
+
struct sg_table *table;
};
--
2.11.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for drm/vgem: Pin our pages for dmabuf exports
2017-06-22 13:30 [PATCH] drm/vgem: Pin our pages for dmabuf exports Chris Wilson
2017-06-22 13:34 ` Chris Wilson
2017-06-22 13:46 ` [PATCH v2] " Chris Wilson
@ 2017-06-22 14:01 ` Patchwork
2017-06-22 14:16 ` ✓ Fi.CI.BAT: success for drm/vgem: Pin our pages for dmabuf exports (rev2) Patchwork
3 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-06-22 14:01 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/vgem: Pin our pages for dmabuf exports
URL : https://patchwork.freedesktop.org/series/26222/
State : success
== Summary ==
Series 26222v1 drm/vgem: Pin our pages for dmabuf exports
https://patchwork.freedesktop.org/api/1.0/series/26222/revisions/1/mbox/
Test gem_exec_suspend:
Subgroup basic-s4-devices:
pass -> DMESG-WARN (fi-kbl-7560u) fdo#100125
Test prime_busy:
Subgroup basic-wait-after-default:
dmesg-warn -> PASS (fi-skl-6700hq) fdo#101515 +3
fdo#100125 https://bugs.freedesktop.org/show_bug.cgi?id=100125
fdo#101515 https://bugs.freedesktop.org/show_bug.cgi?id=101515
fi-bdw-5557u total:279 pass:268 dwarn:0 dfail:0 fail:0 skip:11 time:439s
fi-bdw-gvtdvm total:279 pass:257 dwarn:8 dfail:0 fail:0 skip:14 time:428s
fi-bsw-n3050 total:279 pass:242 dwarn:1 dfail:0 fail:0 skip:36 time:523s
fi-bxt-j4205 total:279 pass:260 dwarn:0 dfail:0 fail:0 skip:19 time:513s
fi-byt-j1900 total:279 pass:253 dwarn:2 dfail:0 fail:0 skip:24 time:490s
fi-byt-n2820 total:279 pass:249 dwarn:2 dfail:0 fail:0 skip:28 time:480s
fi-glk-2a total:279 pass:260 dwarn:0 dfail:0 fail:0 skip:19 time:593s
fi-hsw-4770 total:279 pass:263 dwarn:0 dfail:0 fail:0 skip:16 time:433s
fi-hsw-4770r total:279 pass:263 dwarn:0 dfail:0 fail:0 skip:16 time:413s
fi-ilk-650 total:279 pass:229 dwarn:0 dfail:0 fail:0 skip:50 time:418s
fi-ivb-3520m total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:493s
fi-ivb-3770 total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:477s
fi-kbl-7500u total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:467s
fi-kbl-7560u total:279 pass:268 dwarn:1 dfail:0 fail:0 skip:10 time:575s
fi-kbl-r total:279 pass:260 dwarn:1 dfail:0 fail:0 skip:18 time:576s
fi-skl-6260u total:279 pass:269 dwarn:0 dfail:0 fail:0 skip:10 time:456s
fi-skl-6700hq total:279 pass:221 dwarn:3 dfail:0 fail:30 skip:24 time:342s
fi-skl-6700k total:279 pass:257 dwarn:4 dfail:0 fail:0 skip:18 time:467s
fi-skl-6770hq total:279 pass:269 dwarn:0 dfail:0 fail:0 skip:10 time:475s
fi-skl-gvtdvm total:279 pass:266 dwarn:0 dfail:0 fail:0 skip:13 time:432s
fi-snb-2520m total:279 pass:251 dwarn:0 dfail:0 fail:0 skip:28 time:544s
fi-snb-2600 total:279 pass:250 dwarn:0 dfail:0 fail:0 skip:29 time:404s
781051b4174a922661593ecf853618bcbd4ecf6c drm-tip: 2017y-06m-22d-13h-14m-47s UTC integration manifest
039a608 drm/vgem: Pin our pages for dmabuf exports
== Logs ==
For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_5026/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for drm/vgem: Pin our pages for dmabuf exports (rev2)
2017-06-22 13:30 [PATCH] drm/vgem: Pin our pages for dmabuf exports Chris Wilson
` (2 preceding siblings ...)
2017-06-22 14:01 ` ✓ Fi.CI.BAT: success for " Patchwork
@ 2017-06-22 14:16 ` Patchwork
3 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-06-22 14:16 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/vgem: Pin our pages for dmabuf exports (rev2)
URL : https://patchwork.freedesktop.org/series/26222/
State : success
== Summary ==
Series 26222v2 drm/vgem: Pin our pages for dmabuf exports
https://patchwork.freedesktop.org/api/1.0/series/26222/revisions/2/mbox/
Test gem_exec_flush:
Subgroup basic-batch-kernel-default-uc:
pass -> FAIL (fi-snb-2600) fdo#100007
Test gem_exec_suspend:
Subgroup basic-s4-devices:
pass -> DMESG-WARN (fi-kbl-7560u) fdo#100125
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-a:
dmesg-warn -> PASS (fi-byt-j1900) fdo#101517
fdo#100007 https://bugs.freedesktop.org/show_bug.cgi?id=100007
fdo#100125 https://bugs.freedesktop.org/show_bug.cgi?id=100125
fdo#101517 https://bugs.freedesktop.org/show_bug.cgi?id=101517
fi-bdw-5557u total:279 pass:268 dwarn:0 dfail:0 fail:0 skip:11 time:445s
fi-bdw-gvtdvm total:279 pass:257 dwarn:8 dfail:0 fail:0 skip:14 time:434s
fi-bsw-n3050 total:279 pass:242 dwarn:1 dfail:0 fail:0 skip:36 time:529s
fi-bxt-j4205 total:279 pass:260 dwarn:0 dfail:0 fail:0 skip:19 time:517s
fi-byt-j1900 total:279 pass:254 dwarn:1 dfail:0 fail:0 skip:24 time:487s
fi-byt-n2820 total:279 pass:249 dwarn:2 dfail:0 fail:0 skip:28 time:485s
fi-glk-2a total:279 pass:260 dwarn:0 dfail:0 fail:0 skip:19 time:587s
fi-hsw-4770 total:279 pass:263 dwarn:0 dfail:0 fail:0 skip:16 time:439s
fi-hsw-4770r total:279 pass:263 dwarn:0 dfail:0 fail:0 skip:16 time:415s
fi-ilk-650 total:279 pass:229 dwarn:0 dfail:0 fail:0 skip:50 time:421s
fi-ivb-3520m total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:491s
fi-ivb-3770 total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:480s
fi-kbl-7500u total:279 pass:261 dwarn:0 dfail:0 fail:0 skip:18 time:465s
fi-kbl-7560u total:279 pass:268 dwarn:1 dfail:0 fail:0 skip:10 time:580s
fi-kbl-r total:279 pass:260 dwarn:1 dfail:0 fail:0 skip:18 time:590s
fi-skl-6260u total:279 pass:269 dwarn:0 dfail:0 fail:0 skip:10 time:460s
fi-skl-6700hq total:279 pass:221 dwarn:3 dfail:0 fail:30 skip:24 time:340s
fi-skl-6700k total:279 pass:257 dwarn:4 dfail:0 fail:0 skip:18 time:474s
fi-skl-6770hq total:279 pass:269 dwarn:0 dfail:0 fail:0 skip:10 time:474s
fi-skl-gvtdvm total:279 pass:266 dwarn:0 dfail:0 fail:0 skip:13 time:432s
fi-snb-2520m total:279 pass:251 dwarn:0 dfail:0 fail:0 skip:28 time:545s
fi-snb-2600 total:279 pass:249 dwarn:0 dfail:0 fail:1 skip:29 time:402s
781051b4174a922661593ecf853618bcbd4ecf6c drm-tip: 2017y-06m-22d-13h-14m-47s UTC integration manifest
8c89b4f drm/vgem: Pin our pages for dmabuf exports
== Logs ==
For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_5027/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2] drm/vgem: Pin our pages for dmabuf exports
2017-06-22 13:46 ` [PATCH v2] " Chris Wilson
@ 2017-06-23 11:02 ` Daniel Vetter
2017-06-23 11:06 ` Daniel Vetter
2017-06-23 11:17 ` Chris Wilson
0 siblings, 2 replies; 9+ messages in thread
From: Daniel Vetter @ 2017-06-23 11:02 UTC (permalink / raw)
To: Chris Wilson
Cc: dri-devel, intel-gfx, Tomi Sarvela, Laura Abbott, Sean Paul,
Matthew Auld, Daniel Vetter, stable
On Thu, Jun 22, 2017 at 02:46:17PM +0100, Chris Wilson wrote:
> When the caller maps their dmabuf and we return an sg_table, the caller
> doesn't expect the pages beneath that sg_table to vanish on a whim (i.e.
> under mempressure). The contract is that the pages are pinned for the
> duration of the mapping (from dma_buf_map_attachment() to
> dma_buf_unmap_attachment). To comply, we need to introduce our own
> vgem_object.pages_pin_count and elevate it across the mapping. However,
> the drm_prime interface we use calls drv->prime_pin on dma_buf_attach
> and drv->prime_unpin on dma_buf_detach, which while that does cover the
> mapping is much broader than is desired -- but it will do for now.
We could/should probably fix that ... Most drivers hold onto the mapping
forever anyway.
> v2: also hold the pin across prime_vmap/vunmap
>
> Reported-by: Tomi Sarvela <tomi.p.sarvela@intel.com>
> Testcase: igt/gem_concurrent_blit/*swap*vgem*
> Fixes: 5ba6c9ff961a ("drm/vgem: Fix mmaping")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tomi Sarvela <tomi.p.sarvela@intel.com>
> Cc: Laura Abbott <labbott@redhat.com>
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: <stable@vger.kernel.org>
> ---
> drivers/gpu/drm/vgem/vgem_drv.c | 81 ++++++++++++++++++++++++++++++-----------
> drivers/gpu/drm/vgem/vgem_drv.h | 4 ++
> 2 files changed, 64 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index 18f401b442c2..c938af8c40cf 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
> struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
>
> kvfree(vgem_obj->pages);
> + mutex_destroy(&vgem_obj->pages_lock);
>
> if (obj->import_attach)
> drm_prime_gem_destroy(obj, vgem_obj->table);
> @@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf)
> if (page_offset > num_pages)
> return VM_FAULT_SIGBUS;
>
> + ret = -ENOENT;
> + mutex_lock(&obj->pages_lock);
> if (obj->pages) {
> get_page(obj->pages[page_offset]);
> vmf->page = obj->pages[page_offset];
> ret = 0;
> - } else {
> + }
> + mutex_unlock(&obj->pages_lock);
> + if (ret) {
> struct page *page;
>
> page = shmem_read_mapping_page(
> @@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
> return ERR_PTR(ret);
> }
>
> + mutex_init(&obj->pages_lock);
> +
> return obj;
> }
>
> @@ -274,37 +281,66 @@ static const struct file_operations vgem_driver_fops = {
> .release = drm_release,
> };
>
> +static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
> +{
> + mutex_lock(&bo->pages_lock);
> + if (bo->pages_pin_count++ == 0) {
> + struct page **pages;
> +
> + pages = drm_gem_get_pages(&bo->base);
> + if (IS_ERR(pages)) {
> + bo->pages_pin_count--;
> + mutex_unlock(&bo->pages_lock);
> + return pages;
> + }
> +
> + bo->pages = pages;
> + }
> + mutex_unlock(&bo->pages_lock);
> +
> + return bo->pages;
> +}
> +
> +static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
> +{
> + mutex_lock(&bo->pages_lock);
> + if (--bo->pages_pin_count == 0) {
> + drm_gem_put_pages(&bo->base, bo->pages, true, true);
> + bo->pages = NULL;
> + }
> + mutex_unlock(&bo->pages_lock);
> +}
> +
> static int vgem_prime_pin(struct drm_gem_object *obj)
> {
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> long n_pages = obj->size >> PAGE_SHIFT;
> struct page **pages;
>
> - /* Flush the object from the CPU cache so that importers can rely
> - * on coherent indirect access via the exported dma-address.
> - */
> - pages = drm_gem_get_pages(obj);
> + pages = vgem_pin_pages(bo);
> if (IS_ERR(pages))
> return PTR_ERR(pages);
>
> + /* Flush the object from the CPU cache so that importers can rely
> + * on coherent indirect access via the exported dma-address.
> + */
> drm_clflush_pages(pages, n_pages);
Just spotted this, but at least on x86 dma is supposed to be coherent.
We're the exception. But then this is all ill-defined with dma_buf, so
meh.
> - drm_gem_put_pages(obj, pages, true, false);
>
> return 0;
> }
>
> -static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +static void vgem_prime_unpin(struct drm_gem_object *obj)
> {
> - struct sg_table *st;
> - struct page **pages;
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
>
> - pages = drm_gem_get_pages(obj);
> - if (IS_ERR(pages))
> - return ERR_CAST(pages);
> + vgem_unpin_pages(bo);
> +}
>
> - st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
> - drm_gem_put_pages(obj, pages, false, false);
> +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
>
> - return st;
> + return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
> }
>
> static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
> @@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
> __vgem_gem_destroy(obj);
> return ERR_PTR(-ENOMEM);
> }
> +
> + obj->pages_pin_count++; /* perma-pinned */
> drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
> npages);
> return &obj->base;
> @@ -340,23 +378,23 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
>
> static void *vgem_prime_vmap(struct drm_gem_object *obj)
> {
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> long n_pages = obj->size >> PAGE_SHIFT;
> struct page **pages;
> - void *addr;
>
> - pages = drm_gem_get_pages(obj);
> + pages = vgem_pin_pages(bo);
> if (IS_ERR(pages))
> return NULL;
>
> - addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
> - drm_gem_put_pages(obj, pages, false, false);
> -
> - return addr;
> + return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
> }
>
> static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> {
> + struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> +
> vunmap(vaddr);
> + vgem_unpin_pages(bo);
> }
>
> static int vgem_prime_mmap(struct drm_gem_object *obj,
> @@ -409,6 +447,7 @@ static struct drm_driver vgem_driver = {
> .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> .gem_prime_pin = vgem_prime_pin,
> + .gem_prime_unpin = vgem_prime_unpin,
> .gem_prime_import = vgem_prime_import,
> .gem_prime_export = drm_gem_prime_export,
> .gem_prime_import_sg_table = vgem_prime_import_sg_table,
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> index 1aae01419112..5c8f6d619ff3 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.h
> +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> @@ -43,7 +43,11 @@ struct vgem_file {
> #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
> struct drm_vgem_gem_object {
> struct drm_gem_object base;
> +
> struct page **pages;
> + unsigned int pages_pin_count;
> + struct mutex pages_lock;
> +
> struct sg_table *table;
> };
>
> --
> 2.11.0
Anyway looks all good, will push to drm-misc-fixes.
Thanks, Daniel
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2] drm/vgem: Pin our pages for dmabuf exports
2017-06-23 11:02 ` Daniel Vetter
@ 2017-06-23 11:06 ` Daniel Vetter
2017-06-23 11:17 ` Chris Wilson
1 sibling, 0 replies; 9+ messages in thread
From: Daniel Vetter @ 2017-06-23 11:06 UTC (permalink / raw)
To: Chris Wilson
Cc: dri-devel, intel-gfx, Tomi Sarvela, Laura Abbott, Sean Paul,
Matthew Auld, Daniel Vetter, stable
On Fri, Jun 23, 2017 at 01:02:53PM +0200, Daniel Vetter wrote:
> Anyway looks all good, will push to drm-misc-fixes.
Correction, pushed to -misc-next because it conflicts with the dma-buf
import stuff from Laura and other bits. If you want it in -fixes I need a
backport. I left the cc: stable in just to annoy Greg a bit :-)
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2] drm/vgem: Pin our pages for dmabuf exports
@ 2017-06-23 11:06 ` Daniel Vetter
0 siblings, 0 replies; 9+ messages in thread
From: Daniel Vetter @ 2017-06-23 11:06 UTC (permalink / raw)
To: Chris Wilson
Cc: Tomi Sarvela, Daniel Vetter, intel-gfx, dri-devel, Matthew Auld,
stable, Laura Abbott
On Fri, Jun 23, 2017 at 01:02:53PM +0200, Daniel Vetter wrote:
> Anyway looks all good, will push to drm-misc-fixes.
Correction, pushed to -misc-next because it conflicts with the dma-buf
import stuff from Laura and other bits. If you want it in -fixes I need a
backport. I left the cc: stable in just to annoy Greg a bit :-)
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2] drm/vgem: Pin our pages for dmabuf exports
2017-06-23 11:02 ` Daniel Vetter
2017-06-23 11:06 ` Daniel Vetter
@ 2017-06-23 11:17 ` Chris Wilson
1 sibling, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2017-06-23 11:17 UTC (permalink / raw)
To: Daniel Vetter
Cc: Tomi Sarvela, Daniel Vetter, intel-gfx, dri-devel, Matthew Auld,
stable, Laura Abbott
Quoting Daniel Vetter (2017-06-23 12:02:53)
> On Thu, Jun 22, 2017 at 02:46:17PM +0100, Chris Wilson wrote:
> > + /* Flush the object from the CPU cache so that importers can rely
> > + * on coherent indirect access via the exported dma-address.
> > + */
> > drm_clflush_pages(pages, n_pages);
>
> Just spotted this, but at least on x86 dma is supposed to be coherent.
> We're the exception. But then this is all ill-defined with dma_buf, so
> meh.
It's been a running debate on whether these are meant to be WC mapped or
not. dmabuf does have its sync ioctls that do allow us to mix WB for
clients and UC for device, which for us is very important. But there are
a few edge cases like device importing pages from a dmabuf as above.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2017-06-23 11:17 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-22 13:30 [PATCH] drm/vgem: Pin our pages for dmabuf exports Chris Wilson
2017-06-22 13:34 ` Chris Wilson
2017-06-22 13:46 ` [PATCH v2] " Chris Wilson
2017-06-23 11:02 ` Daniel Vetter
2017-06-23 11:06 ` Daniel Vetter
2017-06-23 11:06 ` Daniel Vetter
2017-06-23 11:17 ` Chris Wilson
2017-06-22 14:01 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-06-22 14:16 ` ✓ Fi.CI.BAT: success for drm/vgem: Pin our pages for dmabuf exports (rev2) Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.