From: Daniel Vetter
Subject: [PATCH 17/20] drm/prime: proper locking+refcounting for obj->dma_buf link
Date: Thu, 15 Aug 2013 00:02:46 +0200
Message-ID: <1376517769-7171-18-git-send-email-daniel.vetter@ffwll.ch>
References: <1376517769-7171-1-git-send-email-daniel.vetter@ffwll.ch>
In-Reply-To: <1376517769-7171-1-git-send-email-daniel.vetter@ffwll.ch>
To: Intel Graphics Development, DRI Development
Cc: Daniel Vetter

The export dma-buf cache is semantically similar to a flink name. So it
makes sense to treat it the same way and remove the name (i.e. the dma_buf
pointer) and its references when the last gem handle disappears.

Again we need to be careful, but doubly so: not only could someone race an
export against a gem close ioctl (so we need to recheck obj->handle_count
when assigning the new name), but multiple exports can also race against
each other. This is prevented by holding dev->object_name_lock across the
entire section which touches obj->dma_buf.

With the new scheme we also need to reinstate the obj->dma_buf link at
import time (in case the only reference userspace held in between was
through the dma-buf fd and not through any native gem handle). For
simplicity we don't check whether it's a native object but unconditionally
set up that link - with the new scheme of removing the obj->dma_buf
reference when the last handle disappears we can do that.

To make it clear that this is not just for exported buffers anymore, also
rename it from export_dma_buf to dma_buf. To make sure that no one can race
a fd_to_handle or handle_to_fd against gem_close we use the same trick as
flink and extend the dev->object_name_lock critical section.

With this change we finally have a guaranteed 1:1 relationship (at least
for native objects) between gem objects and dma-bufs, even accounting for
races (which can happen since the dma-buf itself holds a reference while
in flight).

This prevents igt/prime_self_import/export-vs-gem_close-race from Oopsing
the kernel. There is still a leak though, since the per-file-priv
dma-buf/handle cache handling is racy. That will be fixed in a later patch.

v2: Remove the bogus dma_buf_put from the export_and_register_object
failure path that is taken when we've raced with the handle count dropping
to 0.
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/drm_fops.c  |  1 +
 drivers/gpu/drm/drm_gem.c   | 24 ++++++++++++++--
 drivers/gpu/drm/drm_prime.c | 70 +++++++++++++++++++++++++++++++++++----------
 include/drm/drmP.h          | 12 ++++++--
 4 files changed, 87 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 72acae9..faf93ff3 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -545,6 +545,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, file_priv);
+
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_prime_destroy_file_private(&file_priv->prime);
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0e9a9c7..6b2b54e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 		drm_prime_remove_buf_handle(&filp->prime,
 				obj->import_attach->dmabuf);
 	}
-	if (obj->export_dma_buf) {
+
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	if (obj->dma_buf) {
 		drm_prime_remove_buf_handle(&filp->prime,
-				obj->export_dma_buf);
+				obj->dma_buf);
 	}
 }
 
@@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
 	}
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
+
 static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
@@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	*/
 
 	mutex_lock(&obj->dev->object_name_lock);
-	if (--obj->handle_count == 0)
+	if (--obj->handle_count == 0) {
 		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
+	}
 	mutex_unlock(&obj->dev->object_name_lock);
 
 	drm_gem_object_unreference_unlocked(obj);
@@ -589,6 +605,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+	WARN_ON(obj->dma_buf);
+
 	if (obj->filp)
 		fput(obj->filp);
 }
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 3d57601..5e543e9 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -193,11 +193,8 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
 	struct drm_gem_object *obj = dma_buf->priv;
 
-	if (obj->export_dma_buf == dma_buf) {
-		/* drop the reference on the export fd holds */
-		obj->export_dma_buf = NULL;
-		drm_gem_object_unreference_unlocked(obj);
-	}
+	/* drop the reference on the export fd holds */
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
@@ -298,6 +295,37 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+						   struct drm_gem_object *obj,
+						   uint32_t flags)
+{
+	struct dma_buf *dmabuf;
+
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		dmabuf = ERR_PTR(-ENOENT);
+		return dmabuf;
+	}
+
+	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	if (IS_ERR(dmabuf)) {
+		/* normally the created dma-buf takes ownership of the ref,
+		 * but if that fails then drop the ref
+		 */
+		return dmabuf;
+	}
+
+	/*
+	 * Note that callers do not need to clean up the export cache
+	 * since the check for obj->handle_count guarantees that someone
+	 * will clean it up.
+	 */
+	obj->dma_buf = dmabuf;
+	get_dma_buf(obj->dma_buf);
+
+	return dmabuf;
+}
+
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 		int *prime_fd)
@@ -313,15 +341,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 	/* re-export the original imported object */
 	if (obj->import_attach) {
 		dmabuf = obj->import_attach->dmabuf;
+		get_dma_buf(dmabuf);
 		goto out_have_obj;
 	}
 
-	if (obj->export_dma_buf) {
-		dmabuf = obj->export_dma_buf;
+	mutex_lock(&dev->object_name_lock);
+	if (obj->dma_buf) {
+		get_dma_buf(obj->dma_buf);
+		dmabuf = obj->dma_buf;
+		mutex_unlock(&dev->object_name_lock);
 		goto out_have_obj;
 	}
 
-	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+	dmabuf = export_and_register_object(dev, obj, flags);
+	mutex_unlock(&dev->object_name_lock);
 	if (IS_ERR(dmabuf)) {
 		/* normally the created dma-buf takes ownership of the ref,
 		 * but if that fails then drop the ref
@@ -329,14 +362,13 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		ret = PTR_ERR(dmabuf);
 		goto out;
 	}
-	obj->export_dma_buf = dmabuf;
 
 	mutex_lock(&file_priv->prime.lock);
 	/* if we've exported this buffer the cheat and add it to the import list
 	 * so we get the correct handle back
 	 */
 	ret = drm_prime_add_buf_handle(&file_priv->prime,
-			obj->export_dma_buf, handle);
+			dmabuf, handle);
 	if (ret)
 		goto fail_put_dmabuf;
 
@@ -349,7 +381,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 	return 0;
 
 out_have_obj:
-	get_dma_buf(dmabuf);
 	ret = dma_buf_fd(dmabuf, flags);
 	if (ret < 0) {
 		dma_buf_put(dmabuf);
@@ -365,8 +396,6 @@ fail_rm_handle:
 			dmabuf);
 	mutex_unlock(&file_priv->prime.lock);
 fail_put_dmabuf:
-	/* clear NOT to be checked when releasing dma_buf */
-	obj->export_dma_buf = NULL;
 	dma_buf_put(dmabuf);
 out:
 	drm_gem_object_unreference_unlocked(obj);
@@ -448,13 +477,22 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 		goto out_put;
 
 	/* never seen this one, need to import */
+	mutex_lock(&dev->object_name_lock);
 	obj = dev->driver->gem_prime_import(dev, dma_buf);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
-		goto out_put;
+		goto out_unlock;
+	}
+
+	if (obj->dma_buf) {
+		WARN_ON(obj->dma_buf != dma_buf);
+	} else {
+		obj->dma_buf = dma_buf;
+		get_dma_buf(dma_buf);
 	}
 
-	ret = drm_gem_handle_create(file_priv, obj, handle);
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		goto out_put;
@@ -475,6 +513,8 @@ fail:
 	 * to detach.. which seems ok..
 	 */
 	drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+	mutex_unlock(&dev->object_name_lock);
 out_put:
 	dma_buf_put(dma_buf);
 	mutex_unlock(&file_priv->prime.lock);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3b346bc..1db42c0 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,8 +683,16 @@ struct drm_gem_object {
 	void *driver_private;
 
-	/* dma buf exported from this GEM object */
-	struct dma_buf *export_dma_buf;
+	/**
+	 * dma_buf - dma buf associated with this GEM object
+	 *
+	 * Pointer to the dma-buf associated with this gem object (either
+	 * through importing or exporting). We break the resulting reference
+	 * loop when the last gem handle for this object is released.
+	 *
+	 * Protected by dev->object_name_lock
+	 */
+	struct dma_buf *dma_buf;
 
 	/**
 	 * import_attach - dma buf attachment backing this object
 	 *
-- 
1.8.3.2
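
For illustration, here is a minimal userspace sketch of the export-vs-gem-close
race this patch guards against, loosely modelled on the
igt/prime_self_import/export-vs-gem_close-race scenario mentioned above. This
is not the igt code itself: the dumb-buffer ioctl, the /dev/dri/card0 path and
the buffer size are only assumptions to keep the example self-contained. One
thread turns a GEM handle into a dma-buf fd while the main thread drops the
last handle; with the locking introduced here the kernel must survive this
without oopsing or leaking the obj->dma_buf reference.

/*
 * Hypothetical reproducer sketch, not part of the patch or of igt.
 * Build against libdrm headers, e.g.:
 *   gcc -I/usr/include/libdrm race.c -ldrm -lpthread
 */
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

#include <xf86drm.h>
#include <drm.h>
#include <drm_mode.h>

static int drm_fd;

static void *export_thread(void *arg)
{
	struct drm_prime_handle prime = {
		.handle = (__u32)(unsigned long)arg,
		.flags = DRM_CLOEXEC,
		.fd = -1,
	};

	/* Races against the GEM_CLOSE below; must never oops the kernel. */
	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) == 0)
		close(prime.fd);

	return NULL;
}

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	struct drm_gem_close gem_close = { 0 };
	pthread_t thread;

	drm_fd = open("/dev/dri/card0", O_RDWR);
	if (drm_fd < 0 || drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Export the handle in one thread ... */
	pthread_create(&thread, NULL, export_thread,
		       (void *)(unsigned long)create.handle);

	/* ... while the last gem handle goes away in this one. */
	gem_close.handle = create.handle;
	drmIoctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &gem_close);

	pthread_join(thread, NULL);
	close(drm_fd);

	return 0;
}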