* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-03-16 18:29 Lucas De Marchi
  2023-03-16 18:32 ` [Intel-xe] ✓ CI.Patch_applied: success for " Patchwork
                   ` (2 more replies)
  0 siblings, 3 replies; 21+ messages in thread
From: Lucas De Marchi @ 2023-03-16 18:29 UTC (permalink / raw)
  To: intel-xe
  Cc: Lucas De Marchi, thomas.hellstrom, mauro.chehab, maarten.lankhorst

This breakage was introduced with the 6.2 rebase due to
commit 000458b5966f ("drm: Only select I2C_ALGOBIT for drivers that
actually need it"). Make a similar selection when CONFIG_DRM_XE_DISPLAY
is enabled. Also, provide this as a fixup-only commit, to be squashed in
the next rebase. With this, the following command works again:

	./tools/testing/kunit/kunit.py build \
		--kunitconfig drivers/gpu/drm/xe/.kunitconfig

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
 drivers/gpu/drm/xe/Kconfig | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 4684e99549d3..aeaf3ce19c4f 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -44,6 +44,8 @@ config DRM_XE
 config DRM_XE_DISPLAY
 	bool "Enable display support"
 	depends on DRM_XE && EXPERT
+	select I2C
+	select I2C_ALGOBIT
 	default y
 	help
 	  Disable this option only if you want to compile out display support.
-- 
2.39.0
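
For context, display code pulls in I2C_ALGOBIT because DDC/EDID probing
typically registers a bit-banging I2C adapter. A minimal sketch of that
dependency, with the GPIO callbacks elided and every name other than the
i2c-algo-bit API itself assumed:

	#include <linux/i2c.h>
	#include <linux/i2c-algo-bit.h>

	static struct i2c_adapter ddc_adapter;	 /* adapter for one DDC channel */
	static struct i2c_algo_bit_data ddc_bit; /* setsda/setscl/getsda/getscl
						  * callbacks would be filled in
						  * here */

	static int sketch_register_ddc(void)
	{
		ddc_adapter.algo_data = &ddc_bit;
		/* i2c_bit_add_bus() lives in i2c-algo-bit, hence the
		 * "select I2C_ALGOBIT" above */
		return i2c_bit_add_bus(&ddc_adapter);
	}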


* [Intel-xe] [PATCH 2/2] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-05-01 19:03 Rodrigo Vivi
  2023-05-01 19:37 ` [Intel-xe] [PATCH] " Rodrigo Vivi
  0 siblings, 1 reply; 21+ messages in thread
From: Rodrigo Vivi @ 2023-05-01 19:03 UTC (permalink / raw)
  To: intel-xe; +Cc: Rodrigo Vivi

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/xe/Makefile      |   1 +
 drivers/gpu/drm/xe/xe_pt.c       | 110 +++++++++++++++----------------
 drivers/gpu/drm/xe/xe_pt_types.h |   4 +-
 3 files changed, 56 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 74a84080f242..b84e191ba14f 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -73,6 +73,7 @@ xe-y += xe_bb.o \
 	xe_pm.o \
 	xe_preempt_fence.o \
 	xe_pt.o \
+	xe_pt_walk.o \
 	xe_query.o \
 	xe_reg_sr.o \
 	xe_reg_whitelist.o \
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 4ee5ea2cabc9..f15282996c3b 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -5,14 +5,13 @@
 
 #include "xe_pt.h"
 
-#include <drm/drm_pt_walk.h>
-
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_migrate.h"
 #include "xe_pt_types.h"
+#include "xe_pt_walk.h"
 #include "xe_res_cursor.h"
 #include "xe_trace.h"
 #include "xe_ttm_stolen_mgr.h"
@@ -20,8 +19,8 @@
 
 struct xe_pt_dir {
 	struct xe_pt pt;
-	/** @dir: Directory structure for the drm_pt_walk functionality */
-	struct drm_pt_dir dir;
+	/** @dir: Directory structure for the xe_pt_walk functionality */
+	struct xe_ptw_dir dir;
 };
 
 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -44,7 +43,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
 
 static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
 {
-	return container_of(pt_dir->dir.entries[index], struct xe_pt, drm);
+	return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
 }
 
 /**
@@ -211,7 +210,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt,
 	int err;
 
 	size = !level ?  sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
-		XE_PDES * sizeof(struct drm_pt *);
+		XE_PDES * sizeof(struct xe_ptw *);
 	pt = kzalloc(size, GFP_KERNEL);
 	if (!pt)
 		return ERR_PTR(-ENOMEM);
@@ -227,7 +226,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt,
 	}
 	pt->bo = bo;
 	pt->level = level;
-	pt->drm.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
+	pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
 
 	XE_BUG_ON(level > XE_VM_MAX_LEVEL);
 
@@ -404,8 +403,8 @@ struct xe_pt_update {
 };
 
 struct xe_pt_stage_bind_walk {
-	/** drm: The base class. */
-	struct drm_pt_walk drm;
+	/** base: The base class. */
+	struct xe_pt_walk base;
 
 	/* Input parameters for the walk */
 	/** @vm: The vm we're building for. */
@@ -532,7 +531,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
 		struct iosys_map *map = &parent->bo->vmap;
 
 		if (unlikely(xe_child))
-			parent->drm.dir->entries[offset] = &xe_child->drm;
+			parent->base.dir->entries[offset] = &xe_child->base;
 
 		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
 		parent->num_live++;
@@ -556,7 +555,7 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
 	u64 size, dma;
 
 	/* Does the virtual range requested cover a huge pte? */
-	if (!drm_pt_covers(addr, next, level, &xe_walk->drm))
+	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
 		return false;
 
 	/* Does the DMA segment cover the whole pte? */
@@ -618,15 +617,15 @@ xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
 }
 
 static int
-xe_pt_stage_bind_entry(struct drm_pt *parent, pgoff_t offset,
+xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 		       unsigned int level, u64 addr, u64 next,
-		       struct drm_pt **child,
+		       struct xe_ptw **child,
 		       enum page_walk_action *action,
-		       struct drm_pt_walk *walk)
+		       struct xe_pt_walk *walk)
 {
 	struct xe_pt_stage_bind_walk *xe_walk =
-		container_of(walk, typeof(*xe_walk), drm);
-	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), drm);
+		container_of(walk, typeof(*xe_walk), base);
+	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
 	struct xe_pt *xe_child;
 	bool covers;
 	int ret = 0;
@@ -675,7 +674,7 @@ xe_pt_stage_bind_entry(struct drm_pt *parent, pgoff_t offset,
 		xe_walk->l0_end_addr = next;
 	}
 
-	covers = drm_pt_covers(addr, next, level, &xe_walk->drm);
+	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
 	if (covers || !*child) {
 		u64 flags = 0;
 
@@ -689,7 +688,7 @@ xe_pt_stage_bind_entry(struct drm_pt *parent, pgoff_t offset,
 		if (!covers)
 			xe_pt_populate_empty(xe_walk->gt, xe_walk->vm, xe_child);
 
-		*child = &xe_child->drm;
+		*child = &xe_child->base;
 
 		/*
 		 * Prefer the compact pagetable layout for L0 if possible.
@@ -712,7 +711,7 @@ xe_pt_stage_bind_entry(struct drm_pt *parent, pgoff_t offset,
 	return ret;
 }
 
-static const struct drm_pt_walk_ops xe_pt_stage_bind_ops = {
+static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
 	.pt_entry = xe_pt_stage_bind_entry,
 };
 
@@ -742,7 +741,7 @@ xe_pt_stage_bind(struct xe_gt *gt, struct xe_vma *vma,
 	bool is_vram = !xe_vma_is_userptr(vma) && bo && xe_bo_is_vram(bo);
 	struct xe_res_cursor curs;
 	struct xe_pt_stage_bind_walk xe_walk = {
-		.drm = {
+		.base = {
 			.ops = &xe_pt_stage_bind_ops,
 			.shifts = xe_normal_pt_shifts,
 			.max_level = XE_PT_HIGHEST_LEVEL,
@@ -787,8 +786,8 @@ xe_pt_stage_bind(struct xe_gt *gt, struct xe_vma *vma,
 		xe_res_first_sg(xe_bo_get_sg(bo), vma->bo_offset,
 				vma->end - vma->start + 1, &curs);
 
-	ret = drm_pt_walk_range(&pt->drm, pt->level, vma->start, vma->end + 1,
-				&xe_walk.drm);
+	ret = xe_pt_walk_range(&pt->base, pt->level, vma->start, vma->end + 1,
+				&xe_walk.base);
 
 	*num_entries = xe_walk.wupd.num_used_entries;
 	return ret;
@@ -814,20 +813,17 @@ xe_pt_stage_bind(struct xe_gt *gt, struct xe_vma *vma,
  * be shared page tables also at lower levels, so it adjusts the walk action
  * accordingly.
  *
- * Note that the function is not device-specific so could be made a drm
- * pagewalk helper.
- *
  * Return: true if there were non-shared entries, false otherwise.
  */
 static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
-				    struct drm_pt_walk *walk,
+				    struct xe_pt_walk *walk,
 				    enum page_walk_action *action,
 				    pgoff_t *offset, pgoff_t *end_offset)
 {
 	u64 size = 1ull << walk->shifts[level];
 
-	*offset = drm_pt_offset(addr, level, walk);
-	*end_offset = drm_pt_num_entries(addr, end, level, walk) + *offset;
+	*offset = xe_pt_offset(addr, level, walk);
+	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;
 
 	if (!level)
 		return true;
@@ -851,8 +847,8 @@ static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
 }
 
 struct xe_pt_zap_ptes_walk {
-	/** @drm: The walk base-class */
-	struct drm_pt_walk drm;
+	/** @base: The walk base-class */
+	struct xe_pt_walk base;
 
 	/* Input parameters for the walk */
 	/** @gt: The gt we're building for */
@@ -863,15 +859,15 @@ struct xe_pt_zap_ptes_walk {
 	bool needs_invalidate;
 };
 
-static int xe_pt_zap_ptes_entry(struct drm_pt *parent, pgoff_t offset,
+static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
 				unsigned int level, u64 addr, u64 next,
-				struct drm_pt **child,
+				struct xe_ptw **child,
 				enum page_walk_action *action,
-				struct drm_pt_walk *walk)
+				struct xe_pt_walk *walk)
 {
 	struct xe_pt_zap_ptes_walk *xe_walk =
-		container_of(walk, typeof(*xe_walk), drm);
-	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), drm);
+		container_of(walk, typeof(*xe_walk), base);
+	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
 	pgoff_t end_offset;
 
 	XE_BUG_ON(!*child);
@@ -893,7 +889,7 @@ static int xe_pt_zap_ptes_entry(struct drm_pt *parent, pgoff_t offset,
 	return 0;
 }
 
-static const struct drm_pt_walk_ops xe_pt_zap_ptes_ops = {
+static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
 	.pt_entry = xe_pt_zap_ptes_entry,
 };
 
@@ -916,7 +912,7 @@ static const struct drm_pt_walk_ops xe_pt_zap_ptes_ops = {
 bool xe_pt_zap_ptes(struct xe_gt *gt, struct xe_vma *vma)
 {
 	struct xe_pt_zap_ptes_walk xe_walk = {
-		.drm = {
+		.base = {
 			.ops = &xe_pt_zap_ptes_ops,
 			.shifts = xe_normal_pt_shifts,
 			.max_level = XE_PT_HIGHEST_LEVEL,
@@ -928,8 +924,8 @@ bool xe_pt_zap_ptes(struct xe_gt *gt, struct xe_vma *vma)
 	if (!(vma->gt_present & BIT(gt->info.id)))
 		return false;
 
-	(void)drm_pt_walk_shared(&pt->drm, pt->level, vma->start, vma->end + 1,
-				 &xe_walk.drm);
+	(void)xe_pt_walk_shared(&pt->base, pt->level, vma->start, vma->end + 1,
+				 &xe_walk.base);
 
 	return xe_walk.needs_invalidate;
 }
@@ -1015,7 +1011,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
 				xe_pt_destroy(xe_pt_entry(pt_dir, j_),
 					      vma->vm->flags, deferred);
 
-			pt_dir->dir.entries[j_] = &newpte->drm;
+			pt_dir->dir.entries[j_] = &newpte->base;
 		}
 		kfree(entries[i].pt_entries);
 	}
@@ -1375,8 +1371,8 @@ __xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
 }
 
 struct xe_pt_stage_unbind_walk {
-	/** @drm: The pagewalk base-class. */
-	struct drm_pt_walk drm;
+	/** @base: The pagewalk base-class. */
+	struct xe_pt_walk base;
 
 	/* Input parameters for the walk */
 	/** @gt: The gt we're unbinding from. */
@@ -1404,10 +1400,10 @@ struct xe_pt_stage_unbind_walk {
 static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
 			     const struct xe_pt *child,
 			     enum page_walk_action *action,
-			     struct drm_pt_walk *walk)
+			     struct xe_pt_walk *walk)
 {
 	struct xe_pt_stage_unbind_walk *xe_walk =
-		container_of(walk, typeof(*xe_walk), drm);
+		container_of(walk, typeof(*xe_walk), base);
 	unsigned int shift = walk->shifts[level];
 	u64 size = 1ull << shift;
 
@@ -1428,13 +1424,13 @@ static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
 	return false;
 }
 
-static int xe_pt_stage_unbind_entry(struct drm_pt *parent, pgoff_t offset,
+static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
 				    unsigned int level, u64 addr, u64 next,
-				    struct drm_pt **child,
+				    struct xe_ptw **child,
 				    enum page_walk_action *action,
-				    struct drm_pt_walk *walk)
+				    struct xe_pt_walk *walk)
 {
-	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), drm);
+	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
 
 	XE_BUG_ON(!*child);
 	XE_BUG_ON(!level && xe_child->is_compact);
@@ -1445,15 +1441,15 @@ static int xe_pt_stage_unbind_entry(struct drm_pt *parent, pgoff_t offset,
 }
 
 static int
-xe_pt_stage_unbind_post_descend(struct drm_pt *parent, pgoff_t offset,
+xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
 				unsigned int level, u64 addr, u64 next,
-				struct drm_pt **child,
+				struct xe_ptw **child,
 				enum page_walk_action *action,
-				struct drm_pt_walk *walk)
+				struct xe_pt_walk *walk)
 {
 	struct xe_pt_stage_unbind_walk *xe_walk =
-		container_of(walk, typeof(*xe_walk), drm);
-	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), drm);
+		container_of(walk, typeof(*xe_walk), base);
+	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
 	pgoff_t end_offset;
 	u64 size = 1ull << walk->shifts[--level];
 
@@ -1477,7 +1473,7 @@ xe_pt_stage_unbind_post_descend(struct drm_pt *parent, pgoff_t offset,
 	return 0;
 }
 
-static const struct drm_pt_walk_ops xe_pt_stage_unbind_ops = {
+static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
 	.pt_entry = xe_pt_stage_unbind_entry,
 	.pt_post_descend = xe_pt_stage_unbind_post_descend,
 };
@@ -1500,7 +1496,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_gt *gt, struct xe_vma *vma,
 				       struct xe_vm_pgtable_update *entries)
 {
 	struct xe_pt_stage_unbind_walk xe_walk = {
-		.drm = {
+		.base = {
 			.ops = &xe_pt_stage_unbind_ops,
 			.shifts = xe_normal_pt_shifts,
 			.max_level = XE_PT_HIGHEST_LEVEL,
@@ -1512,8 +1508,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_gt *gt, struct xe_vma *vma,
 	};
 	struct xe_pt *pt = vma->vm->pt_root[gt->info.id];
 
-	(void)drm_pt_walk_shared(&pt->drm, pt->level, vma->start, vma->end + 1,
-				 &xe_walk.drm);
+	(void)xe_pt_walk_shared(&pt->base, pt->level, vma->start, vma->end + 1,
+				 &xe_walk.base);
 
 	return xe_walk.wupd.num_used_entries;
 }
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 2bb5d0e319b7..2ed64c0a4485 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -6,7 +6,7 @@
 #ifndef _XE_PT_TYPES_H_
 #define _XE_PT_TYPES_H_
 
-#include <drm/drm_pt_walk.h>
+#include "xe_pt_walk.h"
 
 enum xe_cache_level {
 	XE_CACHE_NONE,
@@ -17,7 +17,7 @@ enum xe_cache_level {
 #define XE_VM_MAX_LEVEL 4
 
 struct xe_pt {
-	struct drm_pt drm;
+	struct xe_ptw base;
 	struct xe_bo *bo;
 	unsigned int level;
 	unsigned int num_live;
-- 
2.39.2
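
For context, the rename above keeps the usual base-class embedding
pattern: struct xe_pt embeds a struct xe_ptw called "base" (previously
"drm"), and the walk callbacks recover the containing object with
container_of(). A simplified sketch of the layout, with the fields
reduced to the minimum:

	#include <linux/kernel.h>	/* container_of() */

	struct xe_ptw_dir;

	struct xe_ptw {
		struct xe_ptw_dir *dir;	/* non-NULL only for directory levels */
	};

	struct xe_pt {
		struct xe_ptw base;	/* named "drm" before this fixup */
		unsigned int level;
	};

	static struct xe_pt *to_xe_pt(struct xe_ptw *ptw)
	{
		return container_of(ptw, struct xe_pt, base);
	}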


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-05-15 15:15 Francois Dugast
  2023-05-15 15:32 ` Lucas De Marchi
  0 siblings, 1 reply; 21+ messages in thread
From: Francois Dugast @ 2023-05-15 15:15 UTC (permalink / raw)
  To: intel-xe; +Cc: Dugast

From: "Dugast, Francois" <francois.dugast@intel.com>

The driver contains code under the GPL v2 license and code under the
MIT license.

Link: https://www.kernel.org/doc/html/latest/process/license-rules.html
Cc: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Dugast, Francois <francois.dugast@intel.com>
---
 drivers/gpu/drm/xe/xe_module.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 6860586ce7f8..ae37c229a0b7 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -79,4 +79,4 @@ module_exit(xe_exit);
 MODULE_AUTHOR("Intel Corporation");
 
 MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
+MODULE_LICENSE("Dual MIT/GPL");
-- 
2.34.1
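
For reference, "Dual MIT/GPL" is one of the identifiers recognized by
include/linux/module.h; a minimal module carrying the same tag would
look like this (the description string is made up):

	#include <linux/module.h>

	MODULE_AUTHOR("Intel Corporation");
	MODULE_DESCRIPTION("example");
	/* must match a known identifier exactly: an unrecognized string
	 * marks the module as proprietary and taints the kernel */
	MODULE_LICENSE("Dual MIT/GPL");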


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-05-31  6:19 Lucas De Marchi
  2023-05-31 13:00 ` Gustavo Sousa
                   ` (2 more replies)
  0 siblings, 3 replies; 21+ messages in thread
From: Lucas De Marchi @ 2023-05-31  6:19 UTC (permalink / raw)
  To: intel-xe; +Cc: Lucas De Marchi, Matt Roper

drm/xe/sr: Fix too many kfree() on reallocation

When re-allocating the array, the previous location shouldn't be freed.
The issue can be reproduced more easily by reducing
XE_REG_SR_GROW_STEP_DEFAULT. This was crashing kunit during cleanup
at semi-random places, depending on the number of save-restore entries.

Jointly debugged with Matt Roper.

Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
---
 drivers/gpu/drm/xe/xe_reg_sr.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 24d9c73ef279..434133444d74 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -57,7 +57,6 @@ static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
 		if (!arr)
 			return NULL;
 
-		kfree(sr->pool.arr);
 		sr->pool.arr = arr;
 		sr->pool.allocated += sr->pool.grow_step;
 	}
-- 
2.40.1
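
A sketch of the bug class being fixed, assuming the pool grows with
krealloc(), which the removed kfree() strongly suggests: on success
krealloc() already frees or reuses the old buffer, so freeing it again
is a double free. The structure and field names here are illustrative:

	#include <linux/slab.h>

	struct pool {
		void *arr;
		size_t allocated, grow_step, elem_size;
	};

	static int pool_grow(struct pool *p)
	{
		void *arr = krealloc(p->arr,
				     (p->allocated + p->grow_step) * p->elem_size,
				     GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		/* do NOT kfree(p->arr) here: krealloc() has already
		 * released or reused the old allocation */
		p->arr = arr;
		p->allocated += p->grow_step;
		return 0;
	}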


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-06-07 22:38 Ashutosh Dixit
  2023-06-07 22:49 ` Matt Roper
  0 siblings, 1 reply; 21+ messages in thread
From: Ashutosh Dixit @ 2023-06-07 22:38 UTC (permalink / raw)
  To: intel-xe

Trivial kernel-doc fix, s/vm_id/engine_id/

Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
 include/uapi/drm/xe_drm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 0ebc50beb5e59..edd29e7f39eb3 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -656,7 +656,7 @@ struct drm_xe_exec {
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
 
-	/** @vm_id: VM ID to run batch buffer in */
+	/** @engine_id: Engine ID for the batch buffer */
 	__u32 engine_id;
 
 	/** @num_syncs: Amount of struct drm_xe_sync in array. */
-- 
2.38.0
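
A hedged userspace sketch of the field the comment now documents; only
the struct fill is shown, since the surrounding ioctl call and the
remaining fields may differ between driver revisions:

	#include <stdint.h>
	#include <string.h>
	#include <drm/xe_drm.h>

	static struct drm_xe_exec fill_exec(uint32_t engine_id)
	{
		struct drm_xe_exec exec;

		memset(&exec, 0, sizeof(exec));
		exec.engine_id = engine_id;	/* an engine ID, not a VM ID,
						 * exactly the doc fix above */
		return exec;
	}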


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-07-07 17:10 Francois Dugast
  2023-07-07 20:05 ` Matthew Brost
  0 siblings, 1 reply; 21+ messages in thread
From: Francois Dugast @ 2023-07-07 17:10 UTC (permalink / raw)
  To: intel-xe; +Cc: Francois Dugast

Fix the SPDX license string so that it can be picked up by tools.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
 drivers/gpu/drm/xe/xe_trace.c | 2 +-
 drivers/gpu/drm/xe/xe_trace.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_trace.c b/drivers/gpu/drm/xe/xe_trace.c
index 1026fb37f75b..2570c0b859c4 100644
--- a/drivers/gpu/drm/xe/xe_trace.c
+++ b/drivers/gpu/drm/xe/xe_trace.c
@@ -1,4 +1,4 @@
-// SPDX-Liense-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright © 2022 Intel Corporation
  */
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 02861c26e145..7fdbcec8c781 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -1,4 +1,4 @@
-/* SPDX-Liense-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright © 2022 Intel Corporation
  */
-- 
2.34.1


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-07-24  8:12 Niranjana Vishwanathapura
  0 siblings, 0 replies; 21+ messages in thread
From: Niranjana Vishwanathapura @ 2023-07-24  8:12 UTC (permalink / raw)
  To: intel-xe

Use kvmalloc_array() instead of kmalloc() to avoid memory allocation
failure for large page arrays in xe_vma_userptr_pin_pages().

v2: Add fixup

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 62a99c393d6b..6429d6e5113d 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -72,7 +72,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	if (notifier_seq == vma->userptr.notifier_seq)
 		return 0;
 
-	pages = kmalloc(sizeof(*pages) * num_pages, GFP_KERNEL);
+	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
@@ -152,7 +152,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 
 out:
 	release_pages(pages, pinned);
-	kfree(pages);
+	kvfree(pages);
 
 	if (!(ret < 0)) {
 		vma->userptr.notifier_seq = notifier_seq;
-- 
2.21.0.rc0.32.g243a4c7e27
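
The pairing rule this patch relies on, as a minimal sketch:
kvmalloc_array() may satisfy a large request from vmalloc space instead
of failing a high-order kmalloc, so the result must be released with
kvfree(), never kfree():

	#include <linux/mm.h>
	#include <linux/slab.h>

	static struct page **alloc_page_array(unsigned long num_pages)
	{
		/* checks num_pages * sizeof(...) for overflow and falls
		 * back to vmalloc for large allocations */
		return kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL);
	}

	static void free_page_array(struct page **pages)
	{
		kvfree(pages);	/* correct for both kmalloc and vmalloc */
	}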


* [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs
@ 2023-08-03 22:00 Daniele Ceraolo Spurio
  2023-08-04  0:24 ` Matthew Brost
  0 siblings, 1 reply; 21+ messages in thread
From: Daniele Ceraolo Spurio @ 2023-08-03 22:00 UTC (permalink / raw)
  To: intel-xe

Resets can be caused by userspace (and we trigger them in our testing),
so we shouldn't print at warning level when they occur.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_guc_submit.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 193362518a62..60c311079fcc 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -843,8 +843,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 		XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL);
 		XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q));
 
-		drm_warn(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
-			 xe_sched_job_seqno(job), q->guc->id, q->flags);
+		drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
+			   xe_sched_job_seqno(job), q->guc->id, q->flags);
 		simple_error_capture(q);
 		xe_devcoredump(q);
 	} else {
@@ -1597,7 +1597,7 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
 	if (unlikely(!q))
 		return -EPROTO;
 
-	drm_warn(&xe->drm, "Engine reset: guc_id=%d", guc_id);
+	drm_info(&xe->drm, "Engine reset: guc_id=%d", guc_id);
 
 	/* FIXME: Do error capture, most likely async */
 
-- 
2.41.0
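
The convention at work here, sketched with the drm_print.h helpers (the
wrapper function is an illustration, not driver code): events that user
space can trigger at will belong at notice/info level, while warn is
reserved for conditions pointing to a driver or hardware problem:

	#include <drm/drm_print.h>

	static void report_engine_reset(struct drm_device *drm, int guc_id,
					bool user_triggered)
	{
		if (user_triggered)
			drm_info(drm, "Engine reset: guc_id=%d", guc_id);
		else
			drm_warn(drm, "Engine reset: guc_id=%d", guc_id);
	}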



Thread overview: 21+ messages
2023-03-16 18:29 [Intel-xe] [PATCH] fixup! drm/xe: Introduce a new DRM driver for Intel GPUs Lucas De Marchi
2023-03-16 18:32 ` [Intel-xe] ✓ CI.Patch_applied: success for " Patchwork
2023-03-16 18:33 ` [Intel-xe] ✗ CI.KUnit: failure " Patchwork
2023-03-16 20:53   ` Lucas De Marchi
2023-03-17  6:08 ` [Intel-xe] [PATCH] " Mauro Carvalho Chehab
2023-05-01 19:03 [Intel-xe] [PATCH 2/2] " Rodrigo Vivi
2023-05-01 19:37 ` [Intel-xe] [PATCH] " Rodrigo Vivi
2023-05-15 15:15 Francois Dugast
2023-05-15 15:32 ` Lucas De Marchi
2023-05-15 21:09   ` Rodrigo Vivi
2023-05-15 21:16     ` Rodrigo Vivi
2023-05-31  6:19 Lucas De Marchi
2023-05-31 13:00 ` Gustavo Sousa
2023-05-31 14:46 ` Matt Roper
2023-05-31 16:24 ` Lucas De Marchi
2023-06-07 22:38 Ashutosh Dixit
2023-06-07 22:49 ` Matt Roper
2023-07-07 17:10 Francois Dugast
2023-07-07 20:05 ` Matthew Brost
2023-07-24  8:12 Niranjana Vishwanathapura
2023-08-03 22:00 Daniele Ceraolo Spurio
2023-08-04  0:24 ` Matthew Brost
