* [PATCH 2/7] drm/ttm: remove the backing store if no placement is given
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-19 12:59 ` [PATCH 3/7] drm/amdgpu: use allowed_domains for exported DMA-bufs Christian König
` (5 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
Pipeline removal of the BO's backing store when no placement is given
during validation.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/ttm/ttm_bo.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 151edfd8de77..6d1e91be9c78 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1196,6 +1196,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
+
+ /*
+ * Remove the backing store if no placement is given.
+ */
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
/*
* Check whether we need to move buffer.
*/
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 3/7] drm/amdgpu: use allowed_domains for exported DMA-bufs
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
2020-02-19 12:59 ` [PATCH 2/7] drm/ttm: remove the backing store if no placement is given Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-19 12:59 ` [PATCH 4/7] drm/amdgpu: add amdgpu_dma_buf_pin/unpin v2 Christian König
` (4 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
Avoid ping/ponging the buffers when we stop pinning DMA-buf
exports by using the allowed domains for exported buffers.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..41bd2dad842c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 4/7] drm/amdgpu: add amdgpu_dma_buf_pin/unpin v2
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
2020-02-19 12:59 ` [PATCH 2/7] drm/ttm: remove the backing store if no placement is given Christian König
2020-02-19 12:59 ` [PATCH 3/7] drm/amdgpu: use allowed_domains for exported DMA-bufs Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-19 12:59 ` [PATCH 5/7] drm/amdgpu: implement amdgpu_gem_prime_move_notify v2 Christian König
` (3 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
This implements the exporter side of unpinned DMA-buf handling.
v2: fix minor coding style issues
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 53 ++++++++++++++++++---
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++
2 files changed, 51 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 7cafc65fd76a..86000c75b133 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -222,6 +222,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
bo->prime_shared_count--;
}
+/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ /* pin buffer into GTT */
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return ERR_PTR(r);
+ if (!bo->pin_count) {
+ /* move buffer into GTT */
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ AMDGPU_GEM_DOMAIN_GTT)) {
+ return ERR_PTR(-EBUSY);
+ }
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
- amdgpu_bo_unpin(bo);
}
/**
@@ -330,6 +367,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b49e970..9de8374bbbab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
@@ -1274,6 +1275,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 5/7] drm/amdgpu: implement amdgpu_gem_prime_move_notify v2
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
` (2 preceding siblings ...)
2020-02-19 12:59 ` [PATCH 4/7] drm/amdgpu: add amdgpu_dma_buf_pin/unpin v2 Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-19 12:59 ` [PATCH 6/7] dma-buf: drop dynamic_mapping flag Christian König
` (2 subsequent siblings)
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
Implement the importer side of unpinned DMA-buf handling.
v2: update page tables immediately
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 66 ++++++++++++++++++++-
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 ++
2 files changed, 71 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 86000c75b133..1a040ccf61bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -451,7 +451,71 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
return ERR_PTR(ret);
}
+/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ return;
+
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+ * else is holding the VM lock and updating page tables
+ * So we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic and we actually need
+ * to allow page tables updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm);
+
+ if (r && r != -EBUSY)
+ DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .move_notify = amdgpu_dma_buf_move_notify
};
/**
@@ -487,7 +551,7 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
return obj;
attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
- &amdgpu_dma_buf_attach_ops, NULL);
+ &amdgpu_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9de8374bbbab..5fa8f59c4ccf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -926,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1009,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
amdgpu_bo_subtract_pin_size(bo);
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 6/7] dma-buf: drop dynamic_mapping flag
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
` (3 preceding siblings ...)
2020-02-19 12:59 ` [PATCH 5/7] drm/amdgpu: implement amdgpu_gem_prime_move_notify v2 Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-19 12:59 ` [PATCH 7/7] dma-buf: make move_notify mandatory if importer_ops are provided Christian König
2020-02-26 10:09 ` [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Daniel Vetter
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
Instead use the pin() callback to detect dynamic DMA-buf handling.
Since amdgpu is now migrated it doesn't make much sense to keep
the extra flag.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/dma-buf/dma-buf.c | 5 ++---
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 1 -
include/linux/dma-buf.h | 21 +++++----------------
3 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5f10d1929476..6d0a82d1b23d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -524,11 +524,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- exp_info->ops->dynamic_mapping))
+ (exp_info->ops->pin || exp_info->ops->unpin)))
return ERR_PTR(-EINVAL);
- if (WARN_ON(!exp_info->ops->dynamic_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 1a040ccf61bf..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -364,7 +364,6 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
.pin = amdgpu_dma_buf_pin,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index b38cea240b67..1ade486fc2bb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -42,18 +42,6 @@ struct dma_buf_ops {
*/
bool cache_sgt_mapping;
- /**
- * @dynamic_mapping:
- *
- * If true the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called with the dma_resv object locked.
- *
- * If false the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called without the dma_resv object locked.
- * Mutual exclusive with @cache_sgt_mapping.
- */
- bool dynamic_mapping;
-
/**
* @attach:
*
@@ -99,7 +87,8 @@ struct dma_buf_ops {
* This is called by dma_buf_pin and lets the exporter know that the
* DMA-buf can't be moved any more.
*
- * This is called with the dmabuf->resv object locked.
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
*
* This callback is optional and should only be used in limited use
* cases like scanout and not for temporary pin operations.
@@ -116,7 +105,8 @@ struct dma_buf_ops {
* This is called by dma_buf_unpin and lets the exporter know that the
* DMA-buf can be moved again.
*
- * This is called with the dmabuf->resv object locked.
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
*
* This callback is optional.
*/
@@ -455,8 +445,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
*/
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
- /* TODO: switch to using pin/unpin functions as indicator. */
- return dmabuf->ops->dynamic_mapping;
+ return !!dmabuf->ops->pin;
}
/**
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 7/7] dma-buf: make move_notify mandatory if importer_ops are provided
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
` (4 preceding siblings ...)
2020-02-19 12:59 ` [PATCH 6/7] dma-buf: drop dynamic_mapping flag Christian König
@ 2020-02-19 12:59 ` Christian König
2020-02-26 10:09 ` [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Daniel Vetter
6 siblings, 0 replies; 9+ messages in thread
From: Christian König @ 2020-02-19 12:59 UTC (permalink / raw)
To: dri-devel, linaro-mm-sig, linux-media, intel-gfx, daniel
This makes the move_notify callback mandatory when the importer_ops are
provided. Since amdgpu is now migrated it doesn't make much sense
anymore to allow this.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/dma-buf/dma-buf.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6d0a82d1b23d..f4ace9af2191 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -677,10 +677,12 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
struct dma_buf_attachment *attach;
int ret;
- /* TODO: make move_notify mandatory if importer_ops are provided. */
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return ERR_PTR(-ENOMEM);
@@ -877,8 +879,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
- if (!attach->importer_ops->move_notify ||
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
r = dma_buf_pin(attach);
if (r)
return ERR_PTR(r);
@@ -890,8 +891,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
sg_table = ERR_PTR(-ENOMEM);
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- (!attach->importer_ops->move_notify ||
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
@@ -934,8 +934,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
- (!attach->importer_ops->move_notify ||
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -955,7 +954,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
dma_resv_assert_held(dmabuf->resv);
list_for_each_entry(attach, &dmabuf->attachments, node)
- if (attach->importer_ops && attach->importer_ops->move_notify)
+ if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);
--
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15
2020-02-19 12:59 [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Christian König
` (5 preceding siblings ...)
2020-02-19 12:59 ` [PATCH 7/7] dma-buf: make move_notify mandatory if importer_ops are provided Christian König
@ 2020-02-26 10:09 ` Daniel Vetter
2020-03-23 13:10 ` Daniel Vetter
6 siblings, 1 reply; 9+ messages in thread
From: Daniel Vetter @ 2020-02-26 10:09 UTC (permalink / raw)
To: Christian König; +Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media
On Wed, Feb 19, 2020 at 01:59:04PM +0100, Christian König wrote:
> On the exporter side we add optional explicit pinning callbacks. Which are
> called when the importer doesn't implement dynamic handling, move notification
> or need the DMA-buf locked in place for its use case.
>
> On the importer side we add an optional move_notify callback. This callback is
> used by the exporter to inform the importers that their mappings should be
> destroyed as soon as possible.
>
> This allows the exporter to provide the mappings without the need to pin
> the backing store.
>
> v2: don't try to invalidate mappings when the callback is NULL,
> lock the reservation obj while using the attachments,
> add helper to set the callback
> v3: move flag for invalidation support into the DMA-buf,
> use new attach_info structure to set the callback
> v4: use importer_priv field instead of mangling exporter priv.
> v5: drop invalidation_supported flag
> v6: squash together with pin/unpin changes
> v7: pin/unpin takes an attachment now
> v8: nuke dma_buf_attachment_(map|unmap)_locked,
> everything is now handled backward compatible
> v9: always cache when export/importer don't agree on dynamic handling
> v10: minimal style cleanup
> v11: drop automatically re-entry avoidance
> v12: rename callback to move_notify
> v13: add might_lock in appropriate places
> v14: rebase on separated locking change
> v15: add EXPERIMENTAL flag, some more code comments
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
intel-gfx-ci seems now happy too after some prodding, and I think this is
a solid step in roughly the right direction. More important, and think we
now have a fairly good shared understanding of many of the additional pain
points we still need to solve. And some ideas for how to do that. I think
that was the really important thing to achieve, and over seemingly endless
discussions we've got there.
On the dma-buf patches:
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
For the ttm/amdgpu stuff:
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cheers, Daniel
> ---
> drivers/dma-buf/Kconfig | 10 ++
> drivers/dma-buf/dma-buf.c | 110 ++++++++++++++++++--
> drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 6 +-
> include/linux/dma-buf.h | 82 +++++++++++++--
> 4 files changed, 188 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> index e7d820ce0724..ef73b678419c 100644
> --- a/drivers/dma-buf/Kconfig
> +++ b/drivers/dma-buf/Kconfig
> @@ -39,6 +39,16 @@ config UDMABUF
> A driver to let userspace turn memfd regions into dma-bufs.
> Qemu can use this to create host dmabufs for guest framebuffers.
>
> +config DMABUF_MOVE_NOTIFY
> + bool "Move notify between drivers (EXPERIMENTAL)"
> + default n
> + help
> + Don't pin buffers if the dynamic DMA-buf interface is available on both the
> + exporter as well as the importer. This fixes a security problem where
> + userspace is able to pin unrestricted amounts of memory through DMA-buf.
> + But marked experimental because we don't yet have a consistent execution
> + context and memory management between drivers.
> +
> config DMABUF_SELFTESTS
> tristate "Selftests for the dma-buf interfaces"
> default n
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index d4097856c86b..5f10d1929476 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -527,6 +527,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
> exp_info->ops->dynamic_mapping))
> return ERR_PTR(-EINVAL);
>
> + if (WARN_ON(!exp_info->ops->dynamic_mapping &&
> + (exp_info->ops->pin || exp_info->ops->unpin)))
> + return ERR_PTR(-EINVAL);
> +
> if (!try_module_get(exp_info->owner))
> return ERR_PTR(-ENOENT);
>
> @@ -651,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
> * calls attach() of dma_buf_ops to allow device-specific attach functionality
> * @dmabuf: [in] buffer to attach device to.
> * @dev: [in] device to be attached.
> - * @dynamic_mapping: [in] calling convention for map/unmap
> + * @importer_ops [in] importer operations for the attachment
> + * @importer_priv [in] importer private pointer for the attachment
> *
> * Returns struct dma_buf_attachment pointer for this attachment. Attachments
> * must be cleaned up by calling dma_buf_detach().
> @@ -667,11 +672,13 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
> */
> struct dma_buf_attachment *
> dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> - bool dynamic_mapping)
> + const struct dma_buf_attach_ops *importer_ops,
> + void *importer_priv)
> {
> struct dma_buf_attachment *attach;
> int ret;
>
> + /* TODO: make move_notify mandatory if importer_ops are provided. */
> if (WARN_ON(!dmabuf || !dev))
> return ERR_PTR(-EINVAL);
>
> @@ -681,7 +688,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>
> attach->dev = dev;
> attach->dmabuf = dmabuf;
> - attach->dynamic_mapping = dynamic_mapping;
> + attach->importer_ops = importer_ops;
> + attach->importer_priv = importer_priv;
>
> if (dmabuf->ops->attach) {
> ret = dmabuf->ops->attach(dmabuf, attach);
> @@ -700,15 +708,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> dma_buf_is_dynamic(dmabuf)) {
> struct sg_table *sgt;
>
> - if (dma_buf_is_dynamic(attach->dmabuf))
> + if (dma_buf_is_dynamic(attach->dmabuf)) {
> dma_resv_lock(attach->dmabuf->resv, NULL);
> + ret = dma_buf_pin(attach);
> + if (ret)
> + goto err_unlock;
> + }
>
> sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> if (!sgt)
> sgt = ERR_PTR(-ENOMEM);
> if (IS_ERR(sgt)) {
> ret = PTR_ERR(sgt);
> - goto err_unlock;
> + goto err_unpin;
> }
> if (dma_buf_is_dynamic(attach->dmabuf))
> dma_resv_unlock(attach->dmabuf->resv);
> @@ -722,6 +734,10 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> kfree(attach);
> return ERR_PTR(ret);
>
> +err_unpin:
> + if (dma_buf_is_dynamic(attach->dmabuf))
> + dma_buf_unpin(attach);
> +
> err_unlock:
> if (dma_buf_is_dynamic(attach->dmabuf))
> dma_resv_unlock(attach->dmabuf->resv);
> @@ -742,7 +758,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
> struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> struct device *dev)
> {
> - return dma_buf_dynamic_attach(dmabuf, dev, false);
> + return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
> }
> EXPORT_SYMBOL_GPL(dma_buf_attach);
>
> @@ -765,8 +781,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>
> dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
>
> - if (dma_buf_is_dynamic(attach->dmabuf))
> + if (dma_buf_is_dynamic(attach->dmabuf)) {
> + dma_buf_unpin(attach);
> dma_resv_unlock(attach->dmabuf->resv);
> + }
> }
>
> dma_resv_lock(dmabuf->resv, NULL);
> @@ -779,6 +797,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
> }
> EXPORT_SYMBOL_GPL(dma_buf_detach);
>
> +/**
> + * dma_buf_pin - Lock down the DMA-buf
> + *
> + * @attach: [in] attachment which should be pinned
> + *
> + * Returns:
> + * 0 on success, negative error code on failure.
> + */
> +int dma_buf_pin(struct dma_buf_attachment *attach)
> +{
> + struct dma_buf *dmabuf = attach->dmabuf;
> + int ret = 0;
> +
> + dma_resv_assert_held(dmabuf->resv);
> +
> + if (dmabuf->ops->pin)
> + ret = dmabuf->ops->pin(attach);
> +
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(dma_buf_pin);
> +
> +/**
> + * dma_buf_unpin - Remove lock from DMA-buf
> + *
> + * @attach: [in] attachment which should be unpinned
> + */
> +void dma_buf_unpin(struct dma_buf_attachment *attach)
> +{
> + struct dma_buf *dmabuf = attach->dmabuf;
> +
> + dma_resv_assert_held(dmabuf->resv);
> +
> + if (dmabuf->ops->unpin)
> + dmabuf->ops->unpin(attach);
> +}
> +EXPORT_SYMBOL_GPL(dma_buf_unpin);
> +
> /**
> * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
> * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
> @@ -798,6 +854,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> enum dma_data_direction direction)
> {
> struct sg_table *sg_table;
> + int r;
>
> might_sleep();
>
> @@ -819,13 +876,25 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> return attach->sgt;
> }
>
> - if (dma_buf_is_dynamic(attach->dmabuf))
> + if (dma_buf_is_dynamic(attach->dmabuf)) {
> dma_resv_assert_held(attach->dmabuf->resv);
> + if (!attach->importer_ops->move_notify ||
> + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
> + r = dma_buf_pin(attach);
> + if (r)
> + return ERR_PTR(r);
> + }
> + }
>
> sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> if (!sg_table)
> sg_table = ERR_PTR(-ENOMEM);
>
> + if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
> + (!attach->importer_ops->move_notify ||
> + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
> + dma_buf_unpin(attach);
> +
> if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
> attach->sgt = sg_table;
> attach->dir = direction;
> @@ -864,9 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
> dma_resv_assert_held(attach->dmabuf->resv);
>
> attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +
> + if (dma_buf_is_dynamic(attach->dmabuf) &&
> + (!attach->importer_ops->move_notify ||
> + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
> + dma_buf_unpin(attach);
> }
> EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
>
> +/**
> + * dma_buf_move_notify - notify attachments that DMA-buf is moving
> + *
> + * @dmabuf: [in] buffer which is moving
> + *
> + * Informs all attachments that they need to destroy and recreate all their
> + * mappings.
> + */
> +void dma_buf_move_notify(struct dma_buf *dmabuf)
> +{
> + struct dma_buf_attachment *attach;
> +
> + dma_resv_assert_held(dmabuf->resv);
> +
> + list_for_each_entry(attach, &dmabuf->attachments, node)
> + if (attach->importer_ops && attach->importer_ops->move_notify)
> + attach->importer_ops->move_notify(attach);
> +}
> +EXPORT_SYMBOL_GPL(dma_buf_move_notify);
> +
> /**
> * DOC: cpu access
> *
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index a59cd47aa6c1..7cafc65fd76a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -412,6 +412,9 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
> return ERR_PTR(ret);
> }
>
> +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
> +};
> +
> /**
> * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
> * @dev: DRM device
> @@ -444,7 +447,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
> if (IS_ERR(obj))
> return obj;
>
> - attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
> + attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
> + &amdgpu_dma_buf_attach_ops, NULL);
> if (IS_ERR(attach)) {
> drm_gem_object_put(obj);
> return ERR_CAST(attach);
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index abf5459a5b9d..b38cea240b67 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -93,14 +93,41 @@ struct dma_buf_ops {
> */
> void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
>
> + /**
> + * @pin:
> + *
> + * This is called by dma_buf_pin and lets the exporter know that the
> + * DMA-buf can't be moved any more.
> + *
> + * This is called with the dmabuf->resv object locked.
> + *
> + * This callback is optional and should only be used in limited use
> + * cases like scanout and not for temporary pin operations.
> + *
> + * Returns:
> + *
> + * 0 on success, negative error code on failure.
> + */
> + int (*pin)(struct dma_buf_attachment *attach);
> +
> + /**
> + * @unpin:
> + *
> + * This is called by dma_buf_unpin and lets the exporter know that the
> + * DMA-buf can be moved again.
> + *
> + * This is called with the dmabuf->resv object locked.
> + *
> + * This callback is optional.
> + */
> + void (*unpin)(struct dma_buf_attachment *attach);
> +
> /**
> * @map_dma_buf:
> *
> * This is called by dma_buf_map_attachment() and is used to map a
> * shared &dma_buf into device address space, and it is mandatory. It
> - * can only be called if @attach has been called successfully. This
> - * essentially pins the DMA buffer into place, and it cannot be moved
> - * any more
> + * can only be called if @attach has been called successfully.
> *
> * This call may sleep, e.g. when the backing storage first needs to be
> * allocated, or moved to a location suitable for all currently attached
> @@ -141,9 +168,8 @@ struct dma_buf_ops {
> *
> * This is called by dma_buf_unmap_attachment() and should unmap and
> * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
> - * It should also unpin the backing storage if this is the last mapping
> - * of the DMA buffer, it the exporter supports backing storage
> - * migration.
> + * For static dma_buf handling this might also unpin the backing
> + * storage if this is the last mapping of the DMA buffer.
> */
> void (*unmap_dma_buf)(struct dma_buf_attachment *,
> struct sg_table *,
> @@ -311,6 +337,34 @@ struct dma_buf {
> } cb_excl, cb_shared;
> };
>
> +/**
> + * struct dma_buf_attach_ops - importer operations for an attachment
> + * @move_notify: [optional] notification that the DMA-buf is moving
> + *
> + * Attachment operations implemented by the importer.
> + */
> +struct dma_buf_attach_ops {
> + /**
> + * @move_notify
> + *
> + * If this callback is provided the framework can avoid pinning the
> + * backing store while mappings exist.
> + *
> + * This callback is called with the lock of the reservation object
> + * associated with the dma_buf held and the mapping function must be
> + * called with this lock held as well. This makes sure that no mapping
> + * is created concurrently with an ongoing move operation.
> + *
> + * Mappings stay valid and are not directly affected by this callback.
> + * But the DMA-buf can now be in a different physical location, so all
> + * mappings should be destroyed and re-created as soon as possible.
> + *
> + * New mappings can be created after this callback returns, and will
> + * point to the new location of the DMA-buf.
> + */
> + void (*move_notify)(struct dma_buf_attachment *attach);
> +};
> +
> /**
> * struct dma_buf_attachment - holds device-buffer attachment data
> * @dmabuf: buffer for this attachment.
> @@ -319,8 +373,9 @@ struct dma_buf {
> * @sgt: cached mapping.
> * @dir: direction of cached mapping.
> * @priv: exporter specific attachment data.
> - * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
> - * dma_resv lock held.
> + * @importer_ops: importer operations for this attachment, if provided
> + * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
> + * @importer_priv: importer specific attachment data.
> *
> * This structure holds the attachment information between the dma_buf buffer
> * and its user device(s). The list contains one attachment struct per device
> @@ -337,7 +392,8 @@ struct dma_buf_attachment {
> struct list_head node;
> struct sg_table *sgt;
> enum dma_data_direction dir;
> - bool dynamic_mapping;
> + const struct dma_buf_attach_ops *importer_ops;
> + void *importer_priv;
> void *priv;
> };
>
> @@ -399,6 +455,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
> */
> static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
> {
> + /* TODO: switch to using pin/unpin functions as indicator. */
> return dmabuf->ops->dynamic_mapping;
> }
>
> @@ -413,16 +470,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
> static inline bool
> dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
> {
> - return attach->dynamic_mapping;
> + return !!attach->importer_ops;
> }
>
> struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> struct device *dev);
> struct dma_buf_attachment *
> dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> - bool dynamic_mapping);
> + const struct dma_buf_attach_ops *importer_ops,
> + void *importer_priv);
> void dma_buf_detach(struct dma_buf *dmabuf,
> struct dma_buf_attachment *attach);
> +int dma_buf_pin(struct dma_buf_attachment *attach);
> +void dma_buf_unpin(struct dma_buf_attachment *attach);
>
> struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
>
> --
> 2.17.1
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15
2020-02-26 10:09 ` [PATCH 1/7] dma-buf: add dynamic DMA-buf handling v15 Daniel Vetter
@ 2020-03-23 13:10 ` Daniel Vetter
0 siblings, 0 replies; 9+ messages in thread
From: Daniel Vetter @ 2020-03-23 13:10 UTC (permalink / raw)
To: Christian König; +Cc: linaro-mm-sig, intel-gfx, dri-devel, linux-media
On Wed, Feb 26, 2020 at 11:09:59AM +0100, Daniel Vetter wrote:
> On Wed, Feb 19, 2020 at 01:59:04PM +0100, Christian König wrote:
> > On the exporter side we add optional explicit pinning callbacks. Which are
> > called when the importer doesn't implement dynamic handling, move notification
> > or needs the DMA-buf locked in place for its use case.
> >
> > On the importer side we add an optional move_notify callback. This callback is
> > used by the exporter to inform the importers that their mappings should be
> > destroyed as soon as possible.
> >
> > This allows the exporter to provide the mappings without the need to pin
> > the backing store.
> >
> > v2: don't try to invalidate mappings when the callback is NULL,
> > lock the reservation obj while using the attachments,
> > add helper to set the callback
> > v3: move flag for invalidation support into the DMA-buf,
> > use new attach_info structure to set the callback
> > v4: use importer_priv field instead of mangling exporter priv.
> > v5: drop invalidation_supported flag
> > v6: squash together with pin/unpin changes
> > v7: pin/unpin takes an attachment now
> > v8: nuke dma_buf_attachment_(map|unmap)_locked,
> > everything is now handled backward compatible
> > v9: always cache when export/importer don't agree on dynamic handling
> > v10: minimal style cleanup
> > v11: drop automatically re-entry avoidance
> > v12: rename callback to move_notify
> > v13: add might_lock in appropriate places
> > v14: rebase on separated locking change
> > v15: add EXPERIMENTAL flag, some more code comments
> >
> > Signed-off-by: Christian König <christian.koenig@amd.com>
>
> intel-gfx-ci seems now happy too after some prodding, and I think this is
> a solid step in roughly the right direction. More important, and think we
> now have a fairly good shared understanding of many of the additional pain
> points we still need to solve. And some ideas for how to do that. I think
> that was the really important thing to achieve, and over seemingly endless
> discussions we've got there.
>
> On the dma-buf patches:
>
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
> For the ttm/amdgpu stuff:
>
> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
./drivers/dma-buf/dma-buf.c:678: warning: Function parameter or member 'importer_ops' not described in 'dma_buf_dynamic_attach'
./drivers/dma-buf/dma-buf.c:678: warning: Function parameter or member 'importer_priv' not described in 'dma_buf_dynamic_attach'
./include/linux/dma-buf.h:339: warning: Incorrect use of kernel-doc format: * @move_notify
Can you pls fix?
Thanks, Daniel
>
> Cheers, Daniel
>
> > ---
> > drivers/dma-buf/Kconfig | 10 ++
> > drivers/dma-buf/dma-buf.c | 110 ++++++++++++++++++--
> > drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 6 +-
> > include/linux/dma-buf.h | 82 +++++++++++++--
> > 4 files changed, 188 insertions(+), 20 deletions(-)
> >
> > diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> > index e7d820ce0724..ef73b678419c 100644
> > --- a/drivers/dma-buf/Kconfig
> > +++ b/drivers/dma-buf/Kconfig
> > @@ -39,6 +39,16 @@ config UDMABUF
> > A driver to let userspace turn memfd regions into dma-bufs.
> > Qemu can use this to create host dmabufs for guest framebuffers.
> >
> > +config DMABUF_MOVE_NOTIFY
> > + bool "Move notify between drivers (EXPERIMENTAL)"
> > + default n
> > + help
> > + Don't pin buffers if the dynamic DMA-buf interface is available on both the
> > + exporter as well as the importer. This fixes a security problem where
> > + userspace is able to pin unrestricted amounts of memory through DMA-buf.
> > + But marked experimental because we don't yet have a consistent execution
> > + context and memory management between drivers.
> > +
> > config DMABUF_SELFTESTS
> > tristate "Selftests for the dma-buf interfaces"
> > default n
> > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > index d4097856c86b..5f10d1929476 100644
> > --- a/drivers/dma-buf/dma-buf.c
> > +++ b/drivers/dma-buf/dma-buf.c
> > @@ -527,6 +527,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
> > exp_info->ops->dynamic_mapping))
> > return ERR_PTR(-EINVAL);
> >
> > + if (WARN_ON(!exp_info->ops->dynamic_mapping &&
> > + (exp_info->ops->pin || exp_info->ops->unpin)))
> > + return ERR_PTR(-EINVAL);
> > +
> > if (!try_module_get(exp_info->owner))
> > return ERR_PTR(-ENOENT);
> >
> > @@ -651,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
> > * calls attach() of dma_buf_ops to allow device-specific attach functionality
> > * @dmabuf: [in] buffer to attach device to.
> > * @dev: [in] device to be attached.
> > - * @dynamic_mapping: [in] calling convention for map/unmap
> > + * @importer_ops [in] importer operations for the attachment
> > + * @importer_priv [in] importer private pointer for the attachment
> > *
> > * Returns struct dma_buf_attachment pointer for this attachment. Attachments
> > * must be cleaned up by calling dma_buf_detach().
> > @@ -667,11 +672,13 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
> > */
> > struct dma_buf_attachment *
> > dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> > - bool dynamic_mapping)
> > + const struct dma_buf_attach_ops *importer_ops,
> > + void *importer_priv)
> > {
> > struct dma_buf_attachment *attach;
> > int ret;
> >
> > + /* TODO: make move_notify mandatory if importer_ops are provided. */
> > if (WARN_ON(!dmabuf || !dev))
> > return ERR_PTR(-EINVAL);
> >
> > @@ -681,7 +688,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> >
> > attach->dev = dev;
> > attach->dmabuf = dmabuf;
> > - attach->dynamic_mapping = dynamic_mapping;
> > + attach->importer_ops = importer_ops;
> > + attach->importer_priv = importer_priv;
> >
> > if (dmabuf->ops->attach) {
> > ret = dmabuf->ops->attach(dmabuf, attach);
> > @@ -700,15 +708,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> > dma_buf_is_dynamic(dmabuf)) {
> > struct sg_table *sgt;
> >
> > - if (dma_buf_is_dynamic(attach->dmabuf))
> > + if (dma_buf_is_dynamic(attach->dmabuf)) {
> > dma_resv_lock(attach->dmabuf->resv, NULL);
> > + ret = dma_buf_pin(attach);
> > + if (ret)
> > + goto err_unlock;
> > + }
> >
> > sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> > if (!sgt)
> > sgt = ERR_PTR(-ENOMEM);
> > if (IS_ERR(sgt)) {
> > ret = PTR_ERR(sgt);
> > - goto err_unlock;
> > + goto err_unpin;
> > }
> > if (dma_buf_is_dynamic(attach->dmabuf))
> > dma_resv_unlock(attach->dmabuf->resv);
> > @@ -722,6 +734,10 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> > kfree(attach);
> > return ERR_PTR(ret);
> >
> > +err_unpin:
> > + if (dma_buf_is_dynamic(attach->dmabuf))
> > + dma_buf_unpin(attach);
> > +
> > err_unlock:
> > if (dma_buf_is_dynamic(attach->dmabuf))
> > dma_resv_unlock(attach->dmabuf->resv);
> > @@ -742,7 +758,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
> > struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> > struct device *dev)
> > {
> > - return dma_buf_dynamic_attach(dmabuf, dev, false);
> > + return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
> > }
> > EXPORT_SYMBOL_GPL(dma_buf_attach);
> >
> > @@ -765,8 +781,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
> >
> > dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> >
> > - if (dma_buf_is_dynamic(attach->dmabuf))
> > + if (dma_buf_is_dynamic(attach->dmabuf)) {
> > + dma_buf_unpin(attach);
> > dma_resv_unlock(attach->dmabuf->resv);
> > + }
> > }
> >
> > dma_resv_lock(dmabuf->resv, NULL);
> > @@ -779,6 +797,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
> > }
> > EXPORT_SYMBOL_GPL(dma_buf_detach);
> >
> > +/**
> > + * dma_buf_pin - Lock down the DMA-buf
> > + *
> > + * @attach: [in] attachment which should be pinned
> > + *
> > + * Returns:
> > + * 0 on success, negative error code on failure.
> > + */
> > +int dma_buf_pin(struct dma_buf_attachment *attach)
> > +{
> > + struct dma_buf *dmabuf = attach->dmabuf;
> > + int ret = 0;
> > +
> > + dma_resv_assert_held(dmabuf->resv);
> > +
> > + if (dmabuf->ops->pin)
> > + ret = dmabuf->ops->pin(attach);
> > +
> > + return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(dma_buf_pin);
> > +
> > +/**
> > + * dma_buf_unpin - Remove lock from DMA-buf
> > + *
> > + * @attach: [in] attachment which should be unpinned
> > + */
> > +void dma_buf_unpin(struct dma_buf_attachment *attach)
> > +{
> > + struct dma_buf *dmabuf = attach->dmabuf;
> > +
> > + dma_resv_assert_held(dmabuf->resv);
> > +
> > + if (dmabuf->ops->unpin)
> > + dmabuf->ops->unpin(attach);
> > +}
> > +EXPORT_SYMBOL_GPL(dma_buf_unpin);
> > +
> > /**
> > * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
> > * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
> > @@ -798,6 +854,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> > enum dma_data_direction direction)
> > {
> > struct sg_table *sg_table;
> > + int r;
> >
> > might_sleep();
> >
> > @@ -819,13 +876,25 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> > return attach->sgt;
> > }
> >
> > - if (dma_buf_is_dynamic(attach->dmabuf))
> > + if (dma_buf_is_dynamic(attach->dmabuf)) {
> > dma_resv_assert_held(attach->dmabuf->resv);
> > + if (!attach->importer_ops->move_notify ||
> > + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
> > + r = dma_buf_pin(attach);
> > + if (r)
> > + return ERR_PTR(r);
> > + }
> > + }
> >
> > sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> > if (!sg_table)
> > sg_table = ERR_PTR(-ENOMEM);
> >
> > + if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
> > + (!attach->importer_ops->move_notify ||
> > + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
> > + dma_buf_unpin(attach);
> > +
> > if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
> > attach->sgt = sg_table;
> > attach->dir = direction;
> > @@ -864,9 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
> > dma_resv_assert_held(attach->dmabuf->resv);
> >
> > attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> > +
> > + if (dma_buf_is_dynamic(attach->dmabuf) &&
> > + (!attach->importer_ops->move_notify ||
> > + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)))
> > + dma_buf_unpin(attach);
> > }
> > EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
> >
> > +/**
> > + * dma_buf_move_notify - notify attachments that DMA-buf is moving
> > + *
> > + * @dmabuf: [in] buffer which is moving
> > + *
> > > + * Informs all attachments that they need to destroy and recreate all their
> > > + * mappings.
> > + */
> > +void dma_buf_move_notify(struct dma_buf *dmabuf)
> > +{
> > + struct dma_buf_attachment *attach;
> > +
> > + dma_resv_assert_held(dmabuf->resv);
> > +
> > + list_for_each_entry(attach, &dmabuf->attachments, node)
> > + if (attach->importer_ops && attach->importer_ops->move_notify)
> > + attach->importer_ops->move_notify(attach);
> > +}
> > +EXPORT_SYMBOL_GPL(dma_buf_move_notify);
> > +
> > /**
> > * DOC: cpu access
> > *
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> > index a59cd47aa6c1..7cafc65fd76a 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> > @@ -412,6 +412,9 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
> > return ERR_PTR(ret);
> > }
> >
> > +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
> > +};
> > +
> > /**
> > * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
> > * @dev: DRM device
> > @@ -444,7 +447,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
> > if (IS_ERR(obj))
> > return obj;
> >
> > - attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
> > + attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
> > + &amdgpu_dma_buf_attach_ops, NULL);
> > if (IS_ERR(attach)) {
> > drm_gem_object_put(obj);
> > return ERR_CAST(attach);
> > diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> > index abf5459a5b9d..b38cea240b67 100644
> > --- a/include/linux/dma-buf.h
> > +++ b/include/linux/dma-buf.h
> > @@ -93,14 +93,41 @@ struct dma_buf_ops {
> > */
> > void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
> >
> > + /**
> > + * @pin:
> > + *
> > + * This is called by dma_buf_pin and lets the exporter know that the
> > + * DMA-buf can't be moved any more.
> > + *
> > + * This is called with the dmabuf->resv object locked.
> > + *
> > + * This callback is optional and should only be used in limited use
> > + * cases like scanout and not for temporary pin operations.
> > + *
> > + * Returns:
> > + *
> > + * 0 on success, negative error code on failure.
> > + */
> > + int (*pin)(struct dma_buf_attachment *attach);
> > +
> > + /**
> > + * @unpin:
> > + *
> > + * This is called by dma_buf_unpin and lets the exporter know that the
> > + * DMA-buf can be moved again.
> > + *
> > + * This is called with the dmabuf->resv object locked.
> > + *
> > + * This callback is optional.
> > + */
> > + void (*unpin)(struct dma_buf_attachment *attach);
> > +
> > /**
> > * @map_dma_buf:
> > *
> > * This is called by dma_buf_map_attachment() and is used to map a
> > * shared &dma_buf into device address space, and it is mandatory. It
> > - * can only be called if @attach has been called successfully. This
> > - * essentially pins the DMA buffer into place, and it cannot be moved
> > - * any more
> > + * can only be called if @attach has been called successfully.
> > *
> > * This call may sleep, e.g. when the backing storage first needs to be
> > * allocated, or moved to a location suitable for all currently attached
> > @@ -141,9 +168,8 @@ struct dma_buf_ops {
> > *
> > * This is called by dma_buf_unmap_attachment() and should unmap and
> > * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
> > - * It should also unpin the backing storage if this is the last mapping
> > - * of the DMA buffer, it the exporter supports backing storage
> > - * migration.
> > > + * For static dma_buf handling this might also unpin the backing
> > > + * storage if this is the last mapping of the DMA buffer.
> > */
> > void (*unmap_dma_buf)(struct dma_buf_attachment *,
> > struct sg_table *,
> > @@ -311,6 +337,34 @@ struct dma_buf {
> > } cb_excl, cb_shared;
> > };
> >
> > +/**
> > + * struct dma_buf_attach_ops - importer operations for an attachment
> > + * @move_notify: [optional] notification that the DMA-buf is moving
> > + *
> > + * Attachment operations implemented by the importer.
> > + */
> > +struct dma_buf_attach_ops {
> > + /**
> > + * @move_notify
> > + *
> > + * If this callback is provided the framework can avoid pinning the
> > > + * backing store while mappings exist.
> > + *
> > + * This callback is called with the lock of the reservation object
> > + * associated with the dma_buf held and the mapping function must be
> > + * called with this lock held as well. This makes sure that no mapping
> > + * is created concurrently with an ongoing move operation.
> > + *
> > + * Mappings stay valid and are not directly affected by this callback.
> > + * But the DMA-buf can now be in a different physical location, so all
> > + * mappings should be destroyed and re-created as soon as possible.
> > + *
> > + * New mappings can be created after this callback returns, and will
> > + * point to the new location of the DMA-buf.
> > + */
> > + void (*move_notify)(struct dma_buf_attachment *attach);
> > +};
> > +
> > /**
> > * struct dma_buf_attachment - holds device-buffer attachment data
> > * @dmabuf: buffer for this attachment.
> > @@ -319,8 +373,9 @@ struct dma_buf {
> > * @sgt: cached mapping.
> > * @dir: direction of cached mapping.
> > * @priv: exporter specific attachment data.
> > - * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
> > - * dma_resv lock held.
> > + * @importer_ops: importer operations for this attachment, if provided
> > + * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
> > + * @importer_priv: importer specific attachment data.
> > *
> > * This structure holds the attachment information between the dma_buf buffer
> > * and its user device(s). The list contains one attachment struct per device
> > @@ -337,7 +392,8 @@ struct dma_buf_attachment {
> > struct list_head node;
> > struct sg_table *sgt;
> > enum dma_data_direction dir;
> > - bool dynamic_mapping;
> > + const struct dma_buf_attach_ops *importer_ops;
> > + void *importer_priv;
> > void *priv;
> > };
> >
> > @@ -399,6 +455,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
> > */
> > static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
> > {
> > + /* TODO: switch to using pin/unpin functions as indicator. */
> > return dmabuf->ops->dynamic_mapping;
> > }
> >
> > @@ -413,16 +470,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
> > static inline bool
> > dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
> > {
> > - return attach->dynamic_mapping;
> > + return !!attach->importer_ops;
> > }
> >
> > struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> > struct device *dev);
> > struct dma_buf_attachment *
> > dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> > - bool dynamic_mapping);
> > + const struct dma_buf_attach_ops *importer_ops,
> > + void *importer_priv);
> > void dma_buf_detach(struct dma_buf *dmabuf,
> > struct dma_buf_attachment *attach);
> > +int dma_buf_pin(struct dma_buf_attachment *attach);
> > +void dma_buf_unpin(struct dma_buf_attachment *attach);
> >
> > struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
> >
> > --
> > 2.17.1
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 9+ messages in thread