From: Matthew Brost <matthew.brost@intel.com>
To: "Zeng, Oak" <oak.zeng@intel.com>
Cc: "intel-xe@lists.freedesktop.org" <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v4 05/30] drm/xe: Update xe_vm_rebind to use dummy VMA operations
Date: Fri, 22 Mar 2024 22:51:31 +0000
Message-ID: <Zf4LcyPsHYCuo9tE@DUT025-TGLU.fm.intel.com>
In-Reply-To: <SA1PR11MB69912CD0231A3F2D7F5DC94A92312@SA1PR11MB6991.namprd11.prod.outlook.com>

On Fri, Mar 22, 2024 at 03:23:08PM -0600, Zeng, Oak wrote:
> 
> 
> > -----Original Message-----
> > From: Intel-xe <intel-xe-bounces@lists.freedesktop.org> On Behalf Of Matthew
> > Brost
> > Sent: Friday, March 8, 2024 12:08 AM
> > To: intel-xe@lists.freedesktop.org
> > Cc: Brost, Matthew <matthew.brost@intel.com>
> > Subject: [PATCH v4 05/30] drm/xe: Update xe_vm_rebind to use dummy VMA
> > operations
> > 
> > All bind interfaces are transitioning to use VMA ops, update
> > xe_vm_rebind to use VMA ops.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_vm.c       |  47 +++++---
> >  drivers/gpu/drm/xe/xe_vm_types.h | 189 ++++++++++++++++---------------
> >  2 files changed, 132 insertions(+), 104 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index e342af6b51b1..0bb807c05d7b 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -755,10 +755,22 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
> >  		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
> >  }
> > 
> > -static struct dma_fence *
> > -xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> > -	       struct xe_sync_entry *syncs, u32 num_syncs,
> > -	       bool first_op, bool last_op);
> > +static void xe_vm_populate_dummy_rebind(struct xe_vm *vm, struct xe_vma *vma)
> > +{
> > +	vm->dummy_ops.op.base.op = DRM_GPUVA_OP_MAP;
> > +	vm->dummy_ops.op.base.map.va.addr = vma->gpuva.va.addr;
> > +	vm->dummy_ops.op.base.map.va.range = vma->gpuva.va.range;
> > +	vm->dummy_ops.op.base.map.gem.obj = vma->gpuva.gem.obj;
> > +	vm->dummy_ops.op.base.map.gem.offset = vma->gpuva.gem.offset;
> > +	vm->dummy_ops.op.map.vma = vma;
> > +	vm->dummy_ops.op.map.immediate = true;
> > +	vm->dummy_ops.op.map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
> > +	vm->dummy_ops.op.map.is_null = xe_vma_is_null(vma);
> > +}
> > +
> > +static struct dma_fence *ops_execute(struct xe_vm *vm,
> > +				     struct xe_vma_ops *vops,
> > +				     bool cleanup);
> > 
> >  struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
> >  {
> > @@ -780,7 +792,9 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
> >  			trace_xe_vma_rebind_worker(vma);
> >  		else
> >  			trace_xe_vma_rebind_exec(vma);
> > -		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
> > +
> > +		xe_vm_populate_dummy_rebind(vm, vma);
> > +		fence = ops_execute(vm, &vm->dummy_ops.vops, false);
> >  		if (IS_ERR(fence))
> >  			return fence;
> >  	}
> > @@ -1289,6 +1303,11 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
> >  	}
> >  }
> > 
> > +static void xe_vma_ops_init(struct xe_vma_ops *vops)
> > +{
> > +	INIT_LIST_HEAD(&vops->list);
> > +}
> 
> this already showed up in patch 4... you just added it there, then moved it to another location in patch 5...
> 
> can this be better organized?
> 

Yes.

> 
> > +
> >  struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> >  {
> >  	struct drm_gem_object *vm_resv_obj;
> > @@ -1310,6 +1329,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> >  	init_rwsem(&vm->lock);
> >  	mutex_init(&vm->snap_mutex);
> > 
> > +	xe_vma_ops_init(&vm->dummy_ops.vops);
> > +	INIT_LIST_HEAD(&vm->dummy_ops.op.link);
> > +	list_add(&vm->dummy_ops.op.link, &vm->dummy_ops.vops.list);
> > +
> >  	INIT_LIST_HEAD(&vm->rebind_list);
> > 
> >  	INIT_LIST_HEAD(&vm->userptr.repin_list);
> > @@ -2140,6 +2163,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
> >  		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> > 
> >  		if (__op->op == DRM_GPUVA_OP_MAP) {
> > +			op->map.immediate = !xe_vm_in_fault_mode(vm);
> >  			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
> >  			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
> >  			op->map.pat_index = pat_index;
> > @@ -2465,7 +2489,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >  {
> >  	struct dma_fence *fence = NULL;
> > 
> > -	lockdep_assert_held_write(&vm->lock);
> > +	lockdep_assert_held(&vm->lock);
> >  	xe_vm_assert_held(vm);
> >  	xe_bo_assert_held(xe_vma_bo(vma));
> > 
> > @@ -2473,7 +2497,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >  	case DRM_GPUVA_OP_MAP:
> >  		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
> >  				   op->syncs, op->num_syncs,
> > -				   !xe_vm_in_fault_mode(vm),
> > +				   op->map.immediate,
> >  				   op->flags & XE_VMA_OP_FIRST,
> >  				   op->flags & XE_VMA_OP_LAST);
> >  		break;
> > @@ -2554,7 +2578,7 @@ __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >  retry_userptr:
> >  	fence = op_execute(vm, vma, op);
> >  	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
> > -		lockdep_assert_held_write(&vm->lock);
> > +		lockdep_assert_held(&vm->lock);
> > 
> >  		if (op->base.op == DRM_GPUVA_OP_REMAP) {
> >  			if (!op->remap.unmap_done)
> > @@ -2583,7 +2607,7 @@ xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
> >  {
> >  	struct dma_fence *fence = ERR_PTR(-ENOMEM);
> > 
> > -	lockdep_assert_held_write(&vm->lock);
> > +	lockdep_assert_held(&vm->lock);
> > 
> >  	switch (op->base.op) {
> >  	case DRM_GPUVA_OP_MAP:
> > @@ -2992,11 +3016,6 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
> >  	return err;
> >  }
> > 
> > -static void xe_vma_ops_init(struct xe_vma_ops *vops)
> > -{
> > -	INIT_LIST_HEAD(&vops->list);
> > -}
> > -
> >  int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> >  {
> >  	struct xe_device *xe = to_xe_device(dev);
> > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > index cc3dce893f1e..7ef9e632154a 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > @@ -18,6 +18,7 @@
> >  #include "xe_range_fence.h"
> > 
> >  struct xe_bo;
> > +struct xe_device;
> >  struct xe_sync_entry;
> >  struct xe_user_fence;
> >  struct xe_vm;
> > @@ -124,7 +125,96 @@ struct xe_userptr_vma {
> >  	struct xe_userptr userptr;
> >  };
> > 
> > -struct xe_device;
> > +/** struct xe_vma_op_map - VMA map operation */
> > +struct xe_vma_op_map {
> > +	/** @vma: VMA to map */
> > +	struct xe_vma *vma;
> > +	/** @immediate: Immediate bind */
> > +	bool immediate;
> > +	/** @is_null: is NULL binding */
> > +	bool is_null;
> > +	/** @dumpable: whether BO is dumped on GPU hang */
> > +	bool dumpable;
> > +	/** @pat_index: The pat index to use for this operation. */
> > +	u16 pat_index;
> > +};
> > +
> > +/** struct xe_vma_op_remap - VMA remap operation */
> > +struct xe_vma_op_remap {
> > +	/** @prev: VMA preceding part of a split mapping */
> > +	struct xe_vma *prev;
> > +	/** @next: VMA subsequent part of a split mapping */
> > +	struct xe_vma *next;
> > +	/** @start: start of the VMA unmap */
> > +	u64 start;
> > +	/** @range: range of the VMA unmap */
> > +	u64 range;
> > +	/** @skip_prev: skip prev rebind */
> > +	bool skip_prev;
> > +	/** @skip_next: skip next rebind */
> > +	bool skip_next;
> > +	/** @unmap_done: unmap operation is done */
> > +	bool unmap_done;
> > +};
> > +
> > +/** struct xe_vma_op_prefetch - VMA prefetch operation */
> > +struct xe_vma_op_prefetch {
> > +	/** @region: memory region to prefetch to */
> > +	u32 region;
> > +};
> > +
> > +/** enum xe_vma_op_flags - flags for VMA operation */
> > +enum xe_vma_op_flags {
> > +	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
> > +	XE_VMA_OP_FIRST			= BIT(0),
> > +	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
> > +	XE_VMA_OP_LAST			= BIT(1),
> > +	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
> > +	XE_VMA_OP_COMMITTED		= BIT(2),
> > +	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
> > +	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
> > +	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
> > +	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
> > +};
> > +
> > +/** struct xe_vma_op - VMA operation */
> > +struct xe_vma_op {
> > +	/** @base: GPUVA base operation */
> > +	struct drm_gpuva_op base;
> > +	/**
> > +	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
> > +	 * operation is processed
> > +	 */
> > +	struct drm_gpuva_ops *ops;
> > +	/** @q: exec queue for this operation */
> > +	struct xe_exec_queue *q;
> > +	/**
> > +	 * @syncs: syncs for this operation, only used on first and last
> > +	 * operation
> > +	 */
> > +	struct xe_sync_entry *syncs;
> > +	/** @num_syncs: number of syncs */
> > +	u32 num_syncs;
> > +	/** @link: async operation link */
> > +	struct list_head link;
> > +	/** @flags: operation flags */
> > +	enum xe_vma_op_flags flags;
> > +
> > +	union {
> > +		/** @map: VMA map operation specific data */
> > +		struct xe_vma_op_map map;
> > +		/** @remap: VMA remap operation specific data */
> > +		struct xe_vma_op_remap remap;
> > +		/** @prefetch: VMA prefetch operation specific data */
> > +		struct xe_vma_op_prefetch prefetch;
> > +	};
> > +};
> > +
> > +/** struct xe_vma_ops - VMA operations */
> > +struct xe_vma_ops {
> > +	/** @list: list of VMA operations */
> > +	struct list_head list;
> > +};
> 
> this already showed up in patch 4... you just added it there, then moved it to another location in patch 5...
> 

Yes.

> > 
> >  struct xe_vm {
> >  	/** @gpuvm: base GPUVM used to track VMAs */
> > @@ -267,99 +357,18 @@ struct xe_vm {
> >  		bool capture_once;
> >  	} error_capture;
> > 
> > +	/** @dummy_ops: dummy VMA ops to issue rebinds */
> > +	struct {
> > +		/** @dummy_ops.vops: dummy VMA ops */
> > +		struct xe_vma_ops vops;
> > +		/** @dummy_ops.op: dummy VMA op */
> > +		struct xe_vma_op op;
> > +	} dummy_ops;
> 
> Judging only from this patch, it seems you don't have to introduce this dummy_ops member to xe_vm. For example, it could be a local variable in the xe_vm_rebind function. But I will keep looking. Maybe you made it this way for future patches.
> 
>

I'm going to rewrite this (or already have rewritten it) to use a local
xe_vma_ops variable and execute all rebinds as an atomic unit.

You can ignore this patch and also [1] in this rev of the review.

[1] https://patchwork.freedesktop.org/patch/582015/?series=125608&rev=5
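
Roughly what I have in mind -- a rough, untested sketch, where
xe_vma_ops_add_rebind() is a placeholder helper (it doesn't exist yet)
that would allocate and append one MAP op to the list for each VMA:

	struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
	{
		struct xe_vma_ops vops;
		struct xe_vma *vma, *next;
		int err;

		lockdep_assert_held(&vm->lock);
		if (xe_vm_in_lr_mode(vm) && !rebind_worker)
			return NULL;

		/* Stack local, so no dummy_ops member in struct xe_vm */
		xe_vma_ops_init(&vops);

		list_for_each_entry_safe(vma, next, &vm->rebind_list,
					 combined_links.rebind) {
			/* Placeholder helper: builds one MAP op per VMA */
			err = xe_vma_ops_add_rebind(&vops, vma);
			if (err)
				return ERR_PTR(err);
		}

		/* Single submission, all rebinds execute as one unit */
		return ops_execute(vm, &vops, false);
	}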

> > +
> >  	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
> >  	bool batch_invalidate_tlb;
> >  	/** @xef: XE file handle for tracking this VM's drm client */
> >  	struct xe_file *xef;
> >  };
> > 
> > -/** struct xe_vma_op_map - VMA map operation */
> > -struct xe_vma_op_map {
> > -	/** @vma: VMA to map */
> > -	struct xe_vma *vma;
> > -	/** @is_null: is NULL binding */
> > -	bool is_null;
> > -	/** @dumpable: whether BO is dumped on GPU hang */
> > -	bool dumpable;
> > -	/** @pat_index: The pat index to use for this operation. */
> > -	u16 pat_index;
> > -};
> > -
> > -/** struct xe_vma_op_remap - VMA remap operation */
> > -struct xe_vma_op_remap {
> > -	/** @prev: VMA preceding part of a split mapping */
> > -	struct xe_vma *prev;
> > -	/** @next: VMA subsequent part of a split mapping */
> > -	struct xe_vma *next;
> > -	/** @start: start of the VMA unmap */
> > -	u64 start;
> > -	/** @range: range of the VMA unmap */
> > -	u64 range;
> > -	/** @skip_prev: skip prev rebind */
> > -	bool skip_prev;
> > -	/** @skip_next: skip next rebind */
> > -	bool skip_next;
> > -	/** @unmap_done: unmap operation in done */
> > -	bool unmap_done;
> > -};
> > -
> > -/** struct xe_vma_op_prefetch - VMA prefetch operation */
> > -struct xe_vma_op_prefetch {
> > -	/** @region: memory region to prefetch to */
> > -	u32 region;
> > -};
> > -
> > -/** enum xe_vma_op_flags - flags for VMA operation */
> > -enum xe_vma_op_flags {
> > -	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
> > -	XE_VMA_OP_FIRST			= BIT(0),
> > -	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
> > -	XE_VMA_OP_LAST			= BIT(1),
> > -	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
> > -	XE_VMA_OP_COMMITTED		= BIT(2),
> > -	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
> > -	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
> > -	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
> > -	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
> > -};
> > -
> > -/** struct xe_vma_op - VMA operation */
> > -struct xe_vma_op {
> > -	/** @base: GPUVA base operation */
> > -	struct drm_gpuva_op base;
> > -	/**
> > -	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
> > -	 * operations is processed
> > -	 */
> > -	struct drm_gpuva_ops *ops;
> > -	/** @q: exec queue for this operation */
> > -	struct xe_exec_queue *q;
> > -	/**
> > -	 * @syncs: syncs for this operation, only used on first and last
> > -	 * operation
> > -	 */
> > -	struct xe_sync_entry *syncs;
> > -	/** @num_syncs: number of syncs */
> > -	u32 num_syncs;
> > -	/** @link: async operation link */
> > -	struct list_head link;
> > -	/** @flags: operation flags */
> > -	enum xe_vma_op_flags flags;
> > -
> > -	union {
> > -		/** @map: VMA map operation specific data */
> > -		struct xe_vma_op_map map;
> > -		/** @remap: VMA remap operation specific data */
> > -		struct xe_vma_op_remap remap;
> > -		/** @prefetch: VMA prefetch operation specific data */
> > -		struct xe_vma_op_prefetch prefetch;
> > -	};
> > -};
> > -
> > -/** struct xe_vma_ops - VMA operations */
> > -struct xe_vma_ops {
> > -	/** @list: list of VMA operations */
> > -	struct list_head list;
> > -};
> 
> It seems you moved a block of code to another location. It causes more work for code review. Better to avoid this if we can.
> 

See above. With my refactor of the dummy binds, moving this code is no
longer required.

Matt

> Oak
> 
> > -
> >  #endif
> > --
> > 2.34.1
> 
