From: "Zeng, Oak" <oak.zeng@intel.com>
To: "Brost, Matthew" <matthew.brost@intel.com>
Cc: "intel-xe@lists.freedesktop.org" <intel-xe@lists.freedesktop.org>
Subject: RE: [PATCH v4 02/30] drm/xe: Add ops_execute function which returns a fence
Date: Fri, 22 Mar 2024 19:39:52 +0000	[thread overview]
Message-ID: <SA1PR11MB69911B9EFBD4BB94F2EBF77192312@SA1PR11MB6991.namprd11.prod.outlook.com> (raw)
In-Reply-To: <Zf3Aa/22uOZyFs9q@DUT025-TGLU.fm.intel.com>



> -----Original Message-----
> From: Brost, Matthew <matthew.brost@intel.com>
> Sent: Friday, March 22, 2024 1:31 PM
> To: Zeng, Oak <oak.zeng@intel.com>
> Cc: intel-xe@lists.freedesktop.org
> Subject: Re: [PATCH v4 02/30] drm/xe: Add ops_execute function which returns a fence
> 
> On Fri, Mar 22, 2024 at 10:11:41AM -0600, Zeng, Oak wrote:
> >
> >
> > > -----Original Message-----
> > > From: Intel-xe <intel-xe-bounces@lists.freedesktop.org> On Behalf Of Matthew Brost
> > > Sent: Friday, March 8, 2024 12:08 AM
> > > To: intel-xe@lists.freedesktop.org
> > > Cc: Brost, Matthew <matthew.brost@intel.com>
> > > Subject: [PATCH v4 02/30] drm/xe: Add ops_execute function which returns a fence
> > >
> > > Add ops_execute function which returns a fence. This will be helpful to
> > > initiate all binds (VM bind IOCTL, rebinds in exec IOCTL, rebinds in
> > > preempt rebind worker, and rebinds in pagefaults) via a gpuva ops list.
> > > Returning a fence is needed in various paths.
> > >
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_vm.c | 211 +++++++++++++++++++------------------
> > >  1 file changed, 111 insertions(+), 100 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index 3b5dc6de07f7..fb73afcab3b7 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -1789,16 +1789,17 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
> > >  	return NULL;
> > >  }
> > >
> > > -static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > > -			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> > > -			u32 num_syncs, bool immediate, bool first_op,
> > > -			bool last_op)
> > > +static struct dma_fence *
> > > +xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
> > > +	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
> > > +	   bool immediate, bool first_op, bool last_op)
> > >  {
> > >  	struct dma_fence *fence;
> > >  	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
> > >  	struct xe_user_fence *ufence;
> > >
> > >  	xe_vm_assert_held(vm);
> > > +	xe_bo_assert_held(bo);
> > >
> > >  	ufence = find_ufence_get(syncs, num_syncs);
> > >  	if (vma->ufence && ufence)
> > > @@ -1810,7 +1811,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > >  		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
> > >  				       last_op);
> > >  		if (IS_ERR(fence))
> > > -			return PTR_ERR(fence);
> > > +			return fence;
> > >  	} else {
> > >  		int i;
> > >
> > > @@ -1825,26 +1826,14 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > >
> > >  	if (last_op)
> > >  		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> > > -	dma_fence_put(fence);
> > > -
> > > -	return 0;
> > > -}
> > > -
> > > -static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
> > > -		      struct xe_bo *bo, struct xe_sync_entry *syncs,
> > > -		      u32 num_syncs, bool immediate, bool first_op,
> > > -		      bool last_op)
> > > -{
> > > -	xe_vm_assert_held(vm);
> > > -	xe_bo_assert_held(bo);
> > >
> > > -	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
> > > -			    last_op);
> > > +	return fence;
> > >  }
> > >
> > > -static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
> > > -			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> > > -			u32 num_syncs, bool first_op, bool last_op)
> > > +static struct dma_fence *
> > > +xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
> > > +	     struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> > > +	     u32 num_syncs, bool first_op, bool last_op)
> > >  {
> > >  	struct dma_fence *fence;
> > >  	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
> > > @@ -1854,14 +1843,13 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
> > >
> > >  	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op,
> > > last_op);
> > >  	if (IS_ERR(fence))
> > > -		return PTR_ERR(fence);
> > > +		return fence;
> > >
> > >  	xe_vma_destroy(vma, fence);
> > >  	if (last_op)
> > >  		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> > > -	dma_fence_put(fence);
> > >
> > > -	return 0;
> > > +	return fence;
> > >  }
> > >
> > >  #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
> > > @@ -2004,10 +1992,11 @@ static const u32 region_to_mem_type[] = {
> > >  	XE_PL_VRAM1,
> > >  };
> > >
> > > -static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > > -			  struct xe_exec_queue *q, u32 region,
> > > -			  struct xe_sync_entry *syncs, u32 num_syncs,
> > > -			  bool first_op, bool last_op)
> > > +static struct dma_fence *
> > > +xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > > +	       struct xe_exec_queue *q, u32 region,
> > > +	       struct xe_sync_entry *syncs, u32 num_syncs,
> > > +	       bool first_op, bool last_op)
> > >  {
> > >  	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
> > >  	int err;
> > > @@ -2017,27 +2006,24 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > >  	if (!xe_vma_has_no_bo(vma)) {
> > >  		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
> > >  		if (err)
> > > -			return err;
> > > +			return ERR_PTR(err);
> > >  	}
> > >
> > >  	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
> > >  		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
> > >  				  true, first_op, last_op);
> > >  	} else {
> > > +		struct dma_fence *fence =
> > > +			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
> > >  		int i;
> > >
> > >  		/* Nothing to do, signal fences now */
> > >  		if (last_op) {
> > > -			for (i = 0; i < num_syncs; i++) {
> > > -				struct dma_fence *fence =
> > > -					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
> > > -
> > > +			for (i = 0; i < num_syncs; i++)
> > >  				xe_sync_entry_signal(&syncs[i], NULL, fence);
> > > -				dma_fence_put(fence);
> > > -			}
> > >  		}
> > >
> > > -		return 0;
> > > +		return fence;
> > >  	}
> > >  }
> > >
> > > @@ -2484,10 +2470,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
> > >  	return 0;
> > >  }
> > >
> > > -static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > > -		      struct xe_vma_op *op)
> > > +static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > > +				    struct xe_vma_op *op)
> > >  {
> > > -	int err;
> > > +	struct dma_fence *fence = NULL;
> > >
> > >  	lockdep_assert_held_write(&vm->lock);
> > >  	xe_vm_assert_held(vm);
> > > @@ -2495,11 +2481,11 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > >
> > >  	switch (op->base.op) {
> > >  	case DRM_GPUVA_OP_MAP:
> > > -		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
> > > -				 op->syncs, op->num_syncs,
> > > -				 !xe_vm_in_fault_mode(vm),
> > > -				 op->flags & XE_VMA_OP_FIRST,
> > > -				 op->flags & XE_VMA_OP_LAST);
> > > +		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
> > > +				   op->syncs, op->num_syncs,
> > > +				   !xe_vm_in_fault_mode(vm),
> > > +				   op->flags & XE_VMA_OP_FIRST,
> > > +				   op->flags & XE_VMA_OP_LAST);
> > >  		break;
> > >  	case DRM_GPUVA_OP_REMAP:
> > >  	{
> > > @@ -2509,37 +2495,39 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > >  		if (!op->remap.unmap_done) {
> > >  			if (prev || next)
> > >  				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
> > > -			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > > -					   op->num_syncs,
> > > -					   op->flags & XE_VMA_OP_FIRST,
> > > -					   op->flags & XE_VMA_OP_LAST &&
> > > -					   !prev && !next);
> > > -			if (err)
> > > +			fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > > +					     op->num_syncs,
> > > +					     op->flags & XE_VMA_OP_FIRST,
> > > +					     op->flags & XE_VMA_OP_LAST &&
> > > +					     !prev && !next);
> > > +			if (IS_ERR(fence))
> > >  				break;
> > >  			op->remap.unmap_done = true;
> > >  		}
> > >
> > >  		if (prev) {
> > >  			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
> > > -			err = xe_vm_bind(vm, op->remap.prev, op->q,
> > > -					 xe_vma_bo(op->remap.prev), op->syncs,
> > > -					 op->num_syncs, true, false,
> > > -					 op->flags & XE_VMA_OP_LAST && !next);
> > > +			dma_fence_put(fence);
> >
> >
> > So you drop the previous fence. I assume a later operation will need to wait
> > on the dma-fence of the previous operation to complete. Is it safe to only
> > wait for the last fence? Shouldn't we wait on all the fences, e.g. chain the
> > fences in some way?
> >
> 
> All fences on a single xe_exec_queue should be ordered (e.g. if the last one
> completes, all fences prior are also complete).

Ok, if all fences are ordered, this code makes sense to me.
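
To capture my understanding with a small sketch (hypothetical caller code, not
part of this patch): because all fences emitted on one ordered exec_queue
signal in submission order, waiting on the single fence returned for the last
op also covers every earlier op:

	struct dma_fence *fence;
	long err;

	/* ops_execute() hands back the fence of the last executed op. */
	fence = ops_execute(vm, ops_list, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* One interruptible wait covers the whole ops_list on an ordered queue. */
	err = dma_fence_wait(fence, true);
	dma_fence_put(fence);

	return err;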

> 
> Technically this is broken on tip, and at this point in the series too, as
> we mix job fences and TLB invalidation fences on a single xe_exec_queue.
> This series gets around to fixing this once we convert an IOCTL into a single
> bind job. So this patch, while not perfect, is not regressing anything and is
> working toward 100% correctness.
> 
> > > +			fence = xe_vm_bind(vm, op->remap.prev, op->q,
> > > +					   xe_vma_bo(op->remap.prev), op->syncs,
> > > +					   op->num_syncs, true, false,
> > > +					   op->flags & XE_VMA_OP_LAST && !next);
> > >  			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
> > > -			if (err)
> > > +			if (IS_ERR(fence))
> > >  				break;
> > >  			op->remap.prev = NULL;
> > >  		}
> > >
> > >  		if (next) {
> > >  			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
> > > -			err = xe_vm_bind(vm, op->remap.next, op->q,
> > > -					 xe_vma_bo(op->remap.next),
> > > -					 op->syncs, op->num_syncs,
> > > -					 true, false,
> > > -					 op->flags & XE_VMA_OP_LAST);
> > > +			dma_fence_put(fence);
> >
> >
> > Same comment as above
> 
> Same answer.
> 
> > > +			fence = xe_vm_bind(vm, op->remap.next, op->q,
> > > +					   xe_vma_bo(op->remap.next),
> > > +					   op->syncs, op->num_syncs,
> > > +					   true, false,
> > > +					   op->flags & XE_VMA_OP_LAST);
> > >  			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
> > > -			if (err)
> > > +			if (IS_ERR(fence))
> > >  				break;
> > >  			op->remap.next = NULL;
> > >  		}
> > > @@ -2547,34 +2535,36 @@ static int op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > >  		break;
> > >  	}
> > >  	case DRM_GPUVA_OP_UNMAP:
> > > -		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > > -				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
> > > -				   op->flags & XE_VMA_OP_LAST);
> > > +		fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > > +				     op->num_syncs, op->flags & XE_VMA_OP_FIRST,
> > > +				     op->flags & XE_VMA_OP_LAST);
> > >  		break;
> > >  	case DRM_GPUVA_OP_PREFETCH:
> > > -		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
> > > -				     op->syncs, op->num_syncs,
> > > -				     op->flags & XE_VMA_OP_FIRST,
> > > -				     op->flags & XE_VMA_OP_LAST);
> > > +		fence = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
> > > +				       op->syncs, op->num_syncs,
> > > +				       op->flags & XE_VMA_OP_FIRST,
> > > +				       op->flags & XE_VMA_OP_LAST);
> > >  		break;
> > >  	default:
> > >  		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
> > >  	}
> > >
> > > -	if (err)
> > > +	if (IS_ERR(fence))
> > >  		trace_xe_vma_fail(vma);
> > >
> > > -	return err;
> > > +	return fence;
> > >  }
> > >
> > > -static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > > -			       struct xe_vma_op *op)
> > > +static struct dma_fence *
> > > +__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > > +		    struct xe_vma_op *op)
> > >  {
> > > +	struct dma_fence *fence;
> > >  	int err;
> > >
> > >  retry_userptr:
> > > -	err = op_execute(vm, vma, op);
> > > -	if (err == -EAGAIN) {
> > > +	fence = op_execute(vm, vma, op);
> > > +	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
> > >  		lockdep_assert_held_write(&vm->lock);
> > >
> > >  		if (op->base.op == DRM_GPUVA_OP_REMAP) {
> > > @@ -2591,22 +2581,24 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > >  			if (!err)
> > >  				goto retry_userptr;
> > >
> > > +			fence = ERR_PTR(err);
> > >  			trace_xe_vma_fail(vma);
> > >  		}
> > >  	}
> > >
> > > -	return err;
> > > +	return fence;
> > >  }
> > >
> > > -static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
> > > +static struct dma_fence *
> > > +xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
> > >  {
> > > -	int ret = 0;
> > > +	struct dma_fence *fence = ERR_PTR(-ENOMEM);
> > >
> > >  	lockdep_assert_held_write(&vm->lock);
> > >
> > >  	switch (op->base.op) {
> > >  	case DRM_GPUVA_OP_MAP:
> > > -		ret = __xe_vma_op_execute(vm, op->map.vma, op);
> > > +		fence = __xe_vma_op_execute(vm, op->map.vma, op);
> > >  		break;
> > >  	case DRM_GPUVA_OP_REMAP:
> > >  	{
> > > @@ -2619,23 +2611,23 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
> > >  		else
> > >  			vma = op->remap.next;
> > >
> > > -		ret = __xe_vma_op_execute(vm, vma, op);
> > > +		fence = __xe_vma_op_execute(vm, vma, op);
> > >  		break;
> > >  	}
> > >  	case DRM_GPUVA_OP_UNMAP:
> > > -		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
> > > -					  op);
> > > +		fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
> > > +					    op);
> > >  		break;
> > >  	case DRM_GPUVA_OP_PREFETCH:
> > > -		ret = __xe_vma_op_execute(vm,
> > > -					  gpuva_to_vma(op->base.prefetch.va),
> > > -					  op);
> > > +		fence = __xe_vma_op_execute(vm,
> > > +					    gpuva_to_vma(op->base.prefetch.va),
> > > +					    op);
> > >  		break;
> > >  	default:
> > >  		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
> > >  	}
> > >
> > > -	return ret;
> > > +	return fence;
> > >  }
> > >
> > >  static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
> > > @@ -2803,11 +2795,35 @@ static int vm_bind_ioctl_ops_lock(struct drm_exec *exec,
> > >  	return 0;
> > >  }
> > >
> > > +static struct dma_fence *ops_execute(struct xe_vm *vm,
> > > +				     struct list_head *ops_list,
> > > +				     bool cleanup)
> > > +{
> > > +	struct xe_vma_op *op, *next;
> > > +	struct dma_fence *fence = NULL;
> > > +
> > > +	list_for_each_entry_safe(op, next, ops_list, link) {
> > > +		if (!IS_ERR(fence)) {
> > > +			dma_fence_put(fence);
> > > +			fence = xe_vma_op_execute(vm, op);
> >
> > So you only return the fence of the last operation. In the later code, do you
> > use this fence to wait for *all* operations to finish?
> >
> 
> Same answer as above, all fences on an exec_queue should be ordered.
> 
> > > +		}
> > > +		if (IS_ERR(fence)) {
> > > +			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
> > > +				 op->base.op, PTR_ERR(fence));
> > > +			fence = ERR_PTR(-ENOSPC);
> >
> > So even if there is an error, you don't break out of the loop. Is that to
> > perform the cleanup below?
> >
> > Once an error happens for one operation, you seem to print the same error
> > message for all the remaining operations... because fence =
> > xe_vma_op_execute(vm, op) is not called anymore after the first error.
> >
> 
> Yes.

Is this problematic though? Let's say you have 2 ops in the list and op_execute fails on op1. You will then print something like:

VM op1 failed with xxx
VM op1 failed with xxx
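
If we want to avoid that, a possible tweak (just a sketch, untested) would be
to warn only at the point the execute call itself fails, and keep looping
purely for cleanup:

	list_for_each_entry_safe(op, next, ops_list, link) {
		if (!IS_ERR(fence)) {
			dma_fence_put(fence);
			fence = xe_vma_op_execute(vm, op);
			if (IS_ERR(fence)) {
				/* Warn once, only for the op that actually failed. */
				drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
					 op->base.op, PTR_ERR(fence));
				fence = ERR_PTR(-ENOSPC);
			}
		}
		if (cleanup)
			xe_vma_op_cleanup(vm, op);
	}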


Oak

> 
> Matt
> 
> >
> > Oak
> >
> > > +		}
> > > +		if (cleanup)
> > > +			xe_vma_op_cleanup(vm, op);
> > > +	}
> > > +
> > > +	return fence;
> > > +}
> > > +
> > >  static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
> > >  				     struct list_head *ops_list)
> > >  {
> > >  	struct drm_exec exec;
> > > -	struct xe_vma_op *op, *next;
> > > +	struct dma_fence *fence;
> > >  	int err;
> > >
> > >  	lockdep_assert_held_write(&vm->lock);
> > > @@ -2820,19 +2836,14 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
> > >  		if (err)
> > >  			goto unlock;
> > >
> > > -		list_for_each_entry_safe(op, next, ops_list, link) {
> > > -			err = xe_vma_op_execute(vm, op);
> > > -			if (err) {
> > > -				drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
> > > -					 op->base.op, err);
> > > -				/*
> > > -				 * FIXME: Killing VM rather than proper error handling
> > > -				 */
> > > -				xe_vm_kill(vm, false);
> > > -				err = -ENOSPC;
> > > -				goto unlock;
> > > -			}
> > > -			xe_vma_op_cleanup(vm, op);
> > > +		fence = ops_execute(vm, ops_list, true);
> > > +		if (IS_ERR(fence)) {
> > > +			err = PTR_ERR(fence);
> > > +			/* FIXME: Killing VM rather than proper error handling */
> > > +			xe_vm_kill(vm, false);
> > > +			goto unlock;
> > > +		} else {
> > > +			dma_fence_put(fence);
> > >  		}
> > >  	}
> > >
> > > --
> > > 2.34.1
> >
