All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Christian König" <christian.koenig-5C7GfCeVMHo@public.gmane.org>
To: Chris Wilson
	<chris-Y6uKTt2uX1cEflXRtASbqLVCufUGDwFn@public.gmane.org>,
	intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Cc: Alex Deucher <alexander.deucher-5C7GfCeVMHo@public.gmane.org>,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: Re: [PATCH v4] drm/amdgpu: Transfer fences to dmabuf importer
Date: Tue, 7 Aug 2018 19:57:16 +0200	[thread overview]
Message-ID: <100b177c-6bf2-ebaa-472a-8f3ba0228cb9@amd.com> (raw)
In-Reply-To: <20180807160841.31028-1-chris-Y6uKTt2uX1cEflXRtASbqLVCufUGDwFn@public.gmane.org>

Am 07.08.2018 um 18:08 schrieb Chris Wilson:
> amdgpu only uses shared-fences internally, but dmabuf importers rely on
> implicit write hazard tracking via the reservation_object.fence_excl.
> For example, the importer uses the write hazard for timing a page flip to
> only occur after the exporter has finished flushing its write into the
> surface. As such, on exporting a dmabuf, we must either flush all
> outstanding fences (for we do not know which are writes and should have
> been exclusive) or alternatively create a new exclusive fence that is
> the composite of all the existing shared fences, and so will only be
> signaled when all earlier fences are signaled (ensuring that we cannot
> be signaled before the completion of any earlier write).
>
> v2: reservation_object is already locked by amdgpu_bo_reserve()
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107341
> Testcase: igt/amd_prime/amd-to-i915
> References: 8e94a46c1770 ("drm/amdgpu: Attach exclusive fence to prime exported bo's. (v5)")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Alex Deucher <alexander.deucher@amd.com>
> Cc: "Christian König" <christian.koenig@amd.com>
> ---
>
> This time, hopefully proofread and references complete.
> -Chris
>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 68 ++++++++++++++++++++---
>   1 file changed, 60 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> index 1c5d97f4b4dd..dff2b01a3d89 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> @@ -37,6 +37,7 @@
>   #include "amdgpu_display.h"
>   #include <drm/amdgpu_drm.h>
>   #include <linux/dma-buf.h>
> +#include <linux/dma-fence-array.h>
>   
>   static const struct dma_buf_ops amdgpu_dmabuf_ops;
>   
> @@ -188,6 +189,57 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
>   	return ERR_PTR(ret);
>   }
>   
> +static int
> +__reservation_object_make_exclusive(struct reservation_object *obj)
> +{
> +	struct reservation_object_list *fobj;
> +	struct dma_fence_array *array;
> +	struct dma_fence **fences;
> +	unsigned int count, i;
> +
> +	fobj = reservation_object_get_list(obj);
> +	if (!fobj)
> +		return 0;
> +
> +	count = !!rcu_access_pointer(obj->fence_excl);
> +	count += fobj->shared_count;
> +
> +	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
> +	if (!fences)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < fobj->shared_count; i++) {
> +		struct dma_fence *f =
> +			rcu_dereference_protected(fobj->shared[i],
> +						  reservation_object_held(obj));
> +
> +		fences[i] = dma_fence_get(f);
> +	}
> +
> +	if (rcu_access_pointer(obj->fence_excl)) {
> +		struct dma_fence *f =
> +			rcu_dereference_protected(obj->fence_excl,
> +						  reservation_object_held(obj));
> +
> +		fences[i] = dma_fence_get(f);
> +	}
> +
> +	array = dma_fence_array_create(count, fences,
> +				       dma_fence_context_alloc(1), 0,
> +				       false);
> +	if (!array)
> +		goto err_fences_put;
> +
> +	reservation_object_add_excl_fence(obj, &array->base);
> +	return 0;
> +
> +err_fences_put:
> +	for (i = 0; i < count; i++)
> +		dma_fence_put(fences[i]);
> +	kfree(fences);
> +	return -ENOMEM;
> +}
> +

This can be simplified a lot. See amdgpu_pasid_free_delayed() for an 
example:
>         r = reservation_object_get_fences_rcu(resv, NULL, &count, 
> &fences);
>         if (r)
>                 goto fallback;
>
>         if (count == 0) {
>                 amdgpu_pasid_free(pasid);
>                 return;
>         }
>
>         if (count == 1) {
>                 fence = fences[0];
>                 kfree(fences);
>         } else {
>                 uint64_t context = dma_fence_context_alloc(1);
>                 struct dma_fence_array *array;
>
>                 array = dma_fence_array_create(count, fences, context,
>                                                1, false);
>                 if (!array) {
>                         kfree(fences);
>                         goto fallback;
>                 }
>                 fence = &array->base;
>         }


Regards,
Christian.

>   /**
>    * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
>    * @dma_buf: shared DMA buffer
> @@ -219,16 +271,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
>   
>   	if (attach->dev->driver != adev->dev->driver) {
>   		/*
> -		 * Wait for all shared fences to complete before we switch to future
> -		 * use of exclusive fence on this prime shared bo.
> +		 * We only create shared fences for internal use, but importers
> +		 * of the dmabuf rely on exclusive fences for implicitly
> +		 * tracking write hazards. As any of the current fences may
> +		 * correspond to a write, we need to convert all existing
> +		 * fences on the reservation object into a single exclusive
> +		 * fence.
>   		 */
> -		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
> -							true, false,
> -							MAX_SCHEDULE_TIMEOUT);
> -		if (unlikely(r < 0)) {
> -			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
> +		r = __reservation_object_make_exclusive(bo->tbo.resv);
> +		if (r)
>   			goto error_unreserve;
> -		}
>   	}
>   
>   	/* pin buffer into GTT */

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  parent reply	other threads:[~2018-08-07 17:57 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-08-07 10:45 [PATCH] drm/amdgpu: Transfer fences to dmabuf importer Chris Wilson
2018-08-07 11:01 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2018-08-07 11:01 ` ✗ Fi.CI.SPARSE: " Patchwork
     [not found] ` <20180807104500.31264-1-chris-Y6uKTt2uX1cEflXRtASbqLVCufUGDwFn@public.gmane.org>
2018-08-07 10:56   ` [PATCH] " Huang Rui
2018-08-07 11:05     ` Chris Wilson
2018-08-07 11:03   ` [PATCH v2] " Chris Wilson
2018-08-07 16:08   ` [PATCH v4] " Chris Wilson
     [not found]     ` <20180807160841.31028-1-chris-Y6uKTt2uX1cEflXRtASbqLVCufUGDwFn@public.gmane.org>
2018-08-07 17:57       ` Christian König [this message]
     [not found]         ` <100b177c-6bf2-ebaa-472a-8f3ba0228cb9-5C7GfCeVMHo@public.gmane.org>
2018-08-07 18:14           ` Chris Wilson
2018-08-07 18:18             ` Christian König
     [not found]               ` <46b0cdd8-ebe5-bcf2-9fdc-4098c4a46fd5-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-08-07 18:30                 ` Chris Wilson
2018-08-07 11:05 ` [PATCH v3] " Chris Wilson
2018-08-07 11:23   ` Christian König
2018-08-07 12:43     ` Daniel Vetter
     [not found]       ` <20180807124322.GV3008-dv86pmgwkMBes7Z6vYuT8azUEOm+Xw19@public.gmane.org>
2018-08-07 12:47         ` Daniel Vetter
2018-08-07 12:51       ` Christian König
     [not found]         ` <ca564057-a6de-4c2d-bc46-f5ee8cae2e85-5C7GfCeVMHo@public.gmane.org>
2018-08-07 12:59           ` Daniel Vetter
2018-08-07 13:17             ` Christian König
2018-08-07 13:37               ` Daniel Vetter
2018-08-07 13:54                 ` Christian König
     [not found]                   ` <de8d5d6c-1862-0e92-6a4f-6a88e3939a08-5C7GfCeVMHo@public.gmane.org>
2018-08-07 14:04                     ` Daniel Vetter
2018-08-07 11:17 ` ✓ Fi.CI.BAT: success for " Patchwork
2018-08-07 11:21 ` ✗ Fi.CI.SPARSE: warning for drm/amdgpu: Transfer fences to dmabuf importer (rev3) Patchwork
2018-08-07 11:37 ` ✓ Fi.CI.BAT: success " Patchwork
2018-08-07 12:27 ` ✓ Fi.CI.IGT: " Patchwork
2018-08-07 16:14 ` ✗ Fi.CI.CHECKPATCH: warning for drm/amdgpu: Transfer fences to dmabuf importer (rev4) Patchwork
2018-08-07 16:15 ` ✗ Fi.CI.SPARSE: " Patchwork
2018-08-07 18:32 ` [PATCH v5] drm/amdgpu: Transfer fences to dmabuf importer Chris Wilson
2018-08-07 18:53   ` Christian König
2018-08-07 18:54 ` ✗ Fi.CI.CHECKPATCH: warning for drm/amdgpu: Transfer fences to dmabuf importer (rev5) Patchwork
2018-08-07 19:09 ` ✓ Fi.CI.BAT: success " Patchwork
2018-08-07 19:36 ` [PATCH v6] drm/amdgpu: Transfer fences to dmabuf importer Chris Wilson
2018-08-07 21:33 ` ✗ Fi.CI.CHECKPATCH: warning for drm/amdgpu: Transfer fences to dmabuf importer (rev6) Patchwork
2018-08-07 21:48 ` ✓ Fi.CI.BAT: success " Patchwork
2018-08-07 23:27 ` ✓ Fi.CI.IGT: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=100b177c-6bf2-ebaa-472a-8f3ba0228cb9@amd.com \
    --to=christian.koenig-5c7gfcevmho@public.gmane.org \
    --cc=alexander.deucher-5C7GfCeVMHo@public.gmane.org \
    --cc=amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=chris-Y6uKTt2uX1cEflXRtASbqLVCufUGDwFn@public.gmane.org \
    --cc=dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    --cc=intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.