From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: <thomas.hellstrom@linux.intel.com>,
	Matthew Brost <matthew.brost@intel.com>
Subject: [PATCH v3 07/22] drm/xe: Update pagefaults to use dummy VMA operations
Date: Tue,  6 Feb 2024 15:37:14 -0800
Message-ID: <20240206233729.3173206-8-matthew.brost@intel.com>
In-Reply-To: <20240206233729.3173206-1-matthew.brost@intel.com>

All bind interfaces are transitioning to use VMA ops; update the pagefault
handler to use VMA ops as well.
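
For context, the pagefault bind path now builds a dummy rebind operation and
executes it through the common VMA ops path instead of calling
__xe_pt_bind_vma() directly. A condensed sketch of the new flow, using the
helpers touched by this patch (error handling omitted):

	/* Bind the faulting VMA only on the tile that took the fault */
	xe_vm_populate_dummy_rebind(vm, vma);
	vm->dummy_ops.op.tile_mask = BIT(tile->id);
	vm->dummy_ops.op.q = xe_tile_migrate_engine(tile);
	fence = xe_vm_ops_execute(vm, &vm->dummy_ops.vops);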

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_gt_pagefault.c |  7 ++--
 drivers/gpu/drm/xe/xe_vm.c           | 54 +++++++++++++++++++---------
 drivers/gpu/drm/xe/xe_vm.h           |  3 ++
 drivers/gpu/drm/xe/xe_vm_types.h     |  2 ++
 4 files changed, 47 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index c26e4fcca01e..4fd81a553bd0 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -19,7 +19,6 @@
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_migrate.h"
-#include "xe_pt.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
 
@@ -207,8 +206,10 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 
 	/* Bind VMA only to the GT that has faulted */
 	trace_xe_vma_pf_bind(vma);
-	fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
-				 vma->tile_present & BIT(tile->id));
+	xe_vm_populate_dummy_rebind(vm, vma);
+	vm->dummy_ops.op.tile_mask = BIT(tile->id);
+	vm->dummy_ops.op.q = xe_tile_migrate_engine(tile);
+	fence = xe_vm_ops_execute(vm, &vm->dummy_ops.vops);
 	if (IS_ERR(fence)) {
 		ret = PTR_ERR(fence);
 		goto unlock_dma_resv;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 50c85da430c6..ebe822855233 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -753,22 +753,27 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
 }
 
-static void xe_vm_populate_dummy_rebind(struct xe_vm *vm, struct xe_vma *vma)
+/**
+ * xe_vm_populate_dummy_rebind() - Populate dummy rebind VMA ops
+ * @vm: The VM.
+ * @vma: The VMA to populate dummy VMA ops for
+ *
+ * Populate dummy VMA ops which can be used to issue a rebind for the VMA
+ */
+void xe_vm_populate_dummy_rebind(struct xe_vm *vm, struct xe_vma *vma)
 {
 	vm->dummy_ops.op.base.op = DRM_GPUVA_OP_MAP;
 	vm->dummy_ops.op.base.map.va.addr = vma->gpuva.va.addr;
 	vm->dummy_ops.op.base.map.va.range = vma->gpuva.va.range;
 	vm->dummy_ops.op.base.map.gem.obj = vma->gpuva.gem.obj;
 	vm->dummy_ops.op.base.map.gem.offset = vma->gpuva.gem.offset;
+	vm->dummy_ops.op.tile_mask = vma->tile_mask;
 	vm->dummy_ops.op.map.vma = vma;
 	vm->dummy_ops.op.map.immediate = true;
 	vm->dummy_ops.op.map.read_only = xe_vma_read_only(vma);
 	vm->dummy_ops.op.map.is_null = xe_vma_is_null(vma);
 }
 
-static struct dma_fence *ops_execute(struct xe_vm *vm,
-				     struct xe_vma_ops *vops);
-
 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 {
 	struct dma_fence *fence = NULL;
@@ -791,7 +796,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 			trace_xe_vma_rebind_exec(vma);
 
 		xe_vm_populate_dummy_rebind(vm, vma);
-		fence = ops_execute(vm, &vm->dummy_ops.vops);
+		fence = xe_vm_ops_execute(vm, &vm->dummy_ops.vops);
 		if (IS_ERR(fence))
 			return fence;
 	}
@@ -1688,7 +1693,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 static struct dma_fence *
 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	       struct xe_sync_entry *syncs, u32 num_syncs,
-	       bool first_op, bool last_op)
+	       u8 tile_mask, bool first_op, bool last_op)
 {
 	struct xe_tile *tile;
 	struct dma_fence *fence;
@@ -1710,7 +1715,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	}
 
 	for_each_tile(tile, vm->xe, id) {
-		if (!(vma->tile_mask & BIT(id)))
+		if (!(tile_mask & BIT(id)))
 			goto next;
 
 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
@@ -1763,7 +1768,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 static struct dma_fence *
 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
-	   bool immediate, bool first_op, bool last_op)
+	   u8 tile_mask, bool immediate, bool first_op, bool last_op)
 {
 	struct dma_fence *fence;
 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
@@ -1772,8 +1777,8 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 	xe_bo_assert_held(bo);
 
 	if (immediate) {
-		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
-				       last_op);
+		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
+				       first_op, last_op);
 		if (IS_ERR(fence))
 			return fence;
 	} else {
@@ -1965,7 +1970,7 @@ xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 
 	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
-				  true, first_op, last_op);
+				  vma->tile_mask, true, first_op, last_op);
 	} else {
 		struct dma_fence *fence =
 			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
@@ -2270,10 +2275,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 {
 	struct xe_vma_op *last_op = NULL;
 	struct drm_gpuva_op *__op;
+	struct xe_tile *tile;
+	u8 id, tile_mask = 0;
 	int err = 0;
 
 	lockdep_assert_held_write(&vm->lock);
 
+	for_each_tile(tile, vm->xe, id)
+		tile_mask |= 0x1 << id;
+
 	drm_gpuva_for_each_op(__op, ops) {
 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 		struct xe_vma *vma;
@@ -2290,6 +2300,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 		}
 
 		op->q = q;
+		op->tile_mask = tile_mask;
 
 		switch (op->base.op) {
 		case DRM_GPUVA_OP_MAP:
@@ -2418,10 +2429,12 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 
 	switch (op->base.op) {
 	case DRM_GPUVA_OP_MAP:
+		/* FIXME: Override vma->tile_mask for page faults */
 		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
 				   op->syncs, op->num_syncs,
 				   op->map.immediate ||
 				   !xe_vm_in_fault_mode(vm),
+				   op->tile_mask,
 				   op->flags & XE_VMA_OP_FIRST,
 				   op->flags & XE_VMA_OP_LAST);
 		break;
@@ -2448,7 +2461,8 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 			dma_fence_put(fence);
 			fence = xe_vm_bind(vm, op->remap.prev, op->q,
 					   xe_vma_bo(op->remap.prev), op->syncs,
-					   op->num_syncs, true, false,
+					   op->num_syncs, op->remap.prev->tile_mask,
+					   true, false,
 					   op->flags & XE_VMA_OP_LAST && !next);
 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
 			if (IS_ERR(fence))
@@ -2462,7 +2476,7 @@ static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
 			fence = xe_vm_bind(vm, op->remap.next, op->q,
 					   xe_vma_bo(op->remap.next),
 					   op->syncs, op->num_syncs,
-					   true, false,
+					   op->remap.next->tile_mask, true, false,
 					   op->flags & XE_VMA_OP_LAST);
 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
 			if (IS_ERR(fence))
@@ -2721,8 +2735,16 @@ static int vm_bind_ioctl_ops_lock(struct drm_exec *exec,
 	return 0;
 }
 
-static struct dma_fence *ops_execute(struct xe_vm *vm,
-				     struct xe_vma_ops *vops)
+/**
+ * xe_vm_ops_execute() - Execute VMA ops
+ * @vm: The VM.
+ * @vops: VMA ops to execute
+ *
+ * Execute VMA ops binding / unbinding VMAs
+ *
+ * Return: A fence for VMA ops on success, ERR_PTR on failure
+ */
+struct dma_fence *xe_vm_ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops)
 {
 	struct xe_vma_op *op, *next;
 	struct dma_fence *fence = NULL;
@@ -2758,7 +2780,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 		if (err)
 			goto unlock;
 
-		fence = ops_execute(vm, vops);
+		fence = xe_vm_ops_execute(vm, vops);
 		if (IS_ERR(fence)) {
 			err = PTR_ERR(fence);
 			/* FIXME: Killing VM rather than proper error handling */
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index df4a82e960ff..1d9a3b13aecc 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -262,6 +262,9 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
  */
 #define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
 
+void xe_vm_populate_dummy_rebind(struct xe_vm *vm, struct xe_vma *vma);
+struct dma_fence *xe_vm_ops_execute(struct xe_vm *vm, struct xe_vma_ops *vops);
+
 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
 #define vm_dbg drm_dbg
 #else
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5020e0571108..6655a0645a18 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -184,6 +184,8 @@ struct xe_vma_op {
 	struct list_head link;
 	/** @flags: operation flags */
 	enum xe_vma_op_flags flags;
+	/** @tile_mask: Tile mask for operation */
+	u8 tile_mask;
 
 	union {
 		/** @map: VMA map operation specific data */
-- 
2.34.1



Thread overview: 28+ messages
2024-02-06 23:37 [PATCH v3 00/22] Refactor VM bind code Matthew Brost
2024-02-06 23:37 ` [PATCH v3 01/22] drm/xe: Lock all gpuva ops during VM bind IOCTL Matthew Brost
2024-02-06 23:37 ` [PATCH v3 02/22] drm/xe: Add ops_execute function which returns a fence Matthew Brost
2024-02-06 23:37 ` [PATCH v3 03/22] drm/xe: Move migrate to prefetch to op_lock function Matthew Brost
2024-02-06 23:37 ` [PATCH v3 04/22] drm/xe: Add struct xe_vma_ops abstraction Matthew Brost
2024-02-06 23:37 ` [PATCH v3 05/22] drm/xe: Update xe_vm_rebind to use dummy VMA operations Matthew Brost
2024-02-06 23:37 ` [PATCH v3 06/22] drm/xe: Simplify VM bind IOCTL error handling and cleanup Matthew Brost
2024-02-06 23:37 ` Matthew Brost [this message]
2024-02-06 23:37 ` [PATCH v3 08/22] drm/xe: s/xe_tile_migrate_engine/xe_tile_migrate_exec_queue Matthew Brost
2024-02-06 23:37 ` [PATCH v3 09/22] drm/xe: Add vm_bind_ioctl_ops_install_fences helper Matthew Brost
2024-02-06 23:37 ` [PATCH v3 10/22] drm/xe: Move setting last fence to vm_bind_ioctl_ops_install_fences Matthew Brost
2024-02-06 23:37 ` [PATCH v3 11/22] drm/xe: Add xe_gt_tlb_invalidation_range and convert PT layer to use this Matthew Brost
2024-02-06 23:37 ` [PATCH v3 12/22] drm/xe: Add some members to xe_vma_ops Matthew Brost
2024-02-06 23:37 ` [PATCH v3 13/22] drm/xe: Add xe_vm_pgtable_update_op " Matthew Brost
2024-02-06 23:37 ` [PATCH v3 14/22] drm/xe: Convert multiple bind ops into single job Matthew Brost
2024-02-06 23:37 ` [PATCH v3 15/22] drm/xe: Remove old functions defs in xe_pt.h Matthew Brost
2024-02-06 23:37 ` [PATCH v3 16/22] drm/xe: Update PT layer with better error handling Matthew Brost
2024-02-06 23:37 ` [PATCH v3 17/22] drm/xe: Update VM trace events Matthew Brost
2024-02-06 23:37 ` [PATCH v3 18/22] drm/xe: Update clear / populate arguments Matthew Brost
2024-02-06 23:37 ` [PATCH v3 19/22] drm/xe: Add __xe_migrate_update_pgtables_cpu helper Matthew Brost
2024-02-06 23:37 ` [PATCH v3 20/22] drm/xe: CPU binds for jobs Matthew Brost
2024-03-28 11:28   ` Thomas Hellström
2024-02-06 23:37 ` [PATCH v3 21/22] drm/xe: Don't use migrate exec queue for page fault binds Matthew Brost
2024-02-06 23:37 ` [PATCH v3 22/22] drm/xe: Add VM bind IOCTL error injection Matthew Brost
2024-02-07  0:12 ` ✗ CI.Patch_applied: failure for Refactor VM bind code (rev3) Patchwork
2024-02-07 22:11 ` ✓ CI.Patch_applied: success for Refactor VM bind code (rev4) Patchwork
2024-02-07 22:12 ` ✗ CI.checkpatch: warning " Patchwork
2024-02-07 22:13 ` ✗ CI.KUnit: failure " Patchwork
