[PATCH 1/7] SWDEV-275015 - drm/amdgpu: Add SMI-LIB ioctl
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: Roy Sun, yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Add definitions for the SMI ioctl.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Signed-off-by: Roy Sun <Roy.Sun@amd.com>
---
 include/uapi/drm/amdgpu_drm.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 8b832f7458f2..1d0261239627 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -56,6 +56,7 @@ extern "C" {
 #define DRM_AMDGPU_SCHED		0x15
 /* not upstream */
 #define DRM_AMDGPU_FREESYNC	        0x5d
+#define DRM_AMDGPU_SMI			0x5e
 
 #define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -74,6 +75,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
 #define DRM_IOCTL_AMDGPU_SCHED		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
 #define DRM_IOCTL_AMDGPU_FREESYNC	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
+#define DRM_IOCTL_AMDGPU_SMI		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SMI, struct drm_amdgpu_smi)
 
 /**
  * DOC: memory domains
@@ -1138,6 +1140,24 @@ struct drm_amdgpu_freesync {
 	__u32 spare[7];
 };
 
+/* Definition of the SMI handlers */
+#define AMDGPU_SMI_MAX_PAYLOAD 1024
+struct drm_amdgpu_smi_in_hdr {
+	__u32		code;
+	__u16		in_len;
+	__u16		out_len;
+};
+
+struct drm_amdgpu_smi_out_hdr {
+	__s32		status;
+};
+
+struct drm_amdgpu_smi {
+	struct drm_amdgpu_smi_in_hdr	in;
+	struct drm_amdgpu_smi_out_hdr	out;
+	__u64		payload;
+};
+
 #if defined(__cplusplus)
 }
 #endif
-- 
2.29.0
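
For context, a userspace caller drives this interface by filling the input
header and pointing `payload` at a user buffer of at most
AMDGPU_SMI_MAX_PAYLOAD bytes. Below is a minimal sketch; the query codes and
payload layout belong to the out-of-tree SMI library rather than this patch,
so the SMI_QUERY_EXAMPLE code and the smi_query() helper are hypothetical
placeholders.

  #include <stdint.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <drm/amdgpu_drm.h>

  /* Hypothetical query code; real values come from the SMI library. */
  #define SMI_QUERY_EXAMPLE	0x1

  static int smi_query(int drm_fd, void *buf, uint16_t len)
  {
  	struct drm_amdgpu_smi args;

  	if (len > AMDGPU_SMI_MAX_PAYLOAD)
  		return -1;

  	memset(&args, 0, sizeof(args));
  	args.in.code = SMI_QUERY_EXAMPLE;
  	args.in.in_len = 0;		/* no input payload */
  	args.in.out_len = len;		/* bytes the kernel may write back */
  	args.payload = (uint64_t)(uintptr_t)buf;	/* user pointer as __u64 */

  	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_SMI, &args) < 0)
  		return -1;

  	return args.out.status;	/* command status reported by the SMI core */
  }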


[PATCH 2/7] SWDEV-275015 - drm: Change scheduled fence track
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: Roy Sun, yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Update the timestamp of the scheduled fence on
HW completion of the previous fence.

This allows more accurate tracking of fence
execution in HW.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Signed-off-by: Roy Sun <Roy.Sun@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 92d8de24d0a1..952c553c077e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -671,7 +671,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 static struct drm_sched_job *
 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
-	struct drm_sched_job *job;
+	struct drm_sched_job *job, *next;
 
 	/*
 	 * Don't destroy jobs while the timeout worker is running  OR thread
@@ -690,6 +690,13 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from pending_list */
 		list_del_init(&job->list);
+		/* account for the next fence in the queue */
+		next = list_first_entry_or_null(&sched->pending_list,
+				struct drm_sched_job, list);
+		if (next) {
+			next->s_fence->scheduled.timestamp =
+				job->s_fence->finished.timestamp;
+		}
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
-- 
2.29.0
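
The practical effect of this change: a job's scheduled fence timestamp now
reflects when the HW could actually begin the job (the completion of its
predecessor), so the distance between the scheduled and finished timestamps
approximates real HW occupancy. A minimal sketch of how a consumer could
derive that interval; the sched_job_busy_ns() helper is illustrative and not
part of the patch, and it assumes both fences have already signaled.

  #include <linux/ktime.h>
  #include <linux/types.h>
  #include <drm/gpu_scheduler.h>

  /* With the back-dated 'scheduled' timestamp above, busy time is the
   * distance between HW start and HW completion.  Assumes both fences
   * have signaled.
   */
  static u64 sched_job_busy_ns(struct drm_sched_fence *s_fence)
  {
  	u64 start = ktime_to_ns(s_fence->scheduled.timestamp);
  	u64 end = ktime_to_ns(s_fence->finished.timestamp);

  	return end - start;
  }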


[PATCH 3/7] SWDEV-275015 - drm/amdgpu: SMI-LIB handlers
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: Roy Sun, yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Add fence tracking interfaces and core
structures for SMI ioctl management.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Signed-off-by: Roy Sun <Roy.Sun@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile     |  10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  12 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_smi.c | 433 ++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_smi.h |  61 ++++
 4 files changed, 514 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_smi.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_smi.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 741b68874e53..789800d16804 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -36,7 +36,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
 	-I$(FULL_AMD_DISPLAY_PATH)/include \
 	-I$(FULL_AMD_DISPLAY_PATH)/dc \
 	-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-	-I$(FULL_AMD_PATH)/amdkfd
+	-I$(FULL_AMD_PATH)/amdkfd \
+	-I$(FULL_AMD_PATH)/amdsmi/inc
 
 amdgpu-y := amdgpu_drv.o
 
@@ -56,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
-	amdgpu_fw_attestation.o amdgpu_securedisplay.o
+	amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_smi.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -236,4 +237,9 @@ amdgpu-y += $(AMD_DISPLAY_FILES)
 
 endif
 
+# SMI component
+AMDSMI_PATH := ../amdsmi
+include $(FULL_AMD_PATH)/amdsmi/Makefile
+amdgpu-y += $(AMDSMI_FILES)
+
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7e1f66120c50..b10632866ea6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -108,6 +108,7 @@
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
 #include "amdgpu_hdp.h"
+#include "amdgpu_smi.h"
 
 #define MAX_GPU_INSTANCE		16
 
@@ -268,6 +269,7 @@ struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
 struct amdgpu_atif;
+struct amdgpu_smi_proc;
 struct kfd_vm_fault_info;
 struct amdgpu_hive_info;
 
@@ -475,6 +477,9 @@ struct amdgpu_fpriv {
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
+	struct drm_file		*file;
+	struct amdgpu_smi_proc  *proc;
+	void			*smi_priv;
 };
 
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
@@ -824,6 +829,9 @@ struct amdgpu_device {
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct debugfs_blob_wrapper     debugfs_vbios_blob;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*debugfs_proc;
+#endif
 	struct amdgpu_atif		*atif;
 	struct amdgpu_atcs		atcs;
 	struct mutex			srbm_mutex;
@@ -1082,6 +1090,10 @@ struct amdgpu_device {
 
 	bool                            in_pci_err_recovery;
 	struct pci_saved_state          *pci_state;
+
+	/* SMI process tracking */
+	struct idr			procs;
+	struct mutex			proc_lock;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.c
new file mode 100644
index 000000000000..c7eb549b7ebb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
+
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
+#include "amdgpu.h"
+#include "amdgpu_smi.h"
+
+#include "smi_core.h"
+
+static const char *amdgpu_smi_ip_name[AMDGPU_HW_IP_NUM] = {
+	[AMDGPU_HW_IP_GFX]	=	"gfx",
+	[AMDGPU_HW_IP_COMPUTE]	=	"compute",
+	[AMDGPU_HW_IP_DMA]	=	"dma",
+	[AMDGPU_HW_IP_UVD]	=	"dec",
+	[AMDGPU_HW_IP_VCE]	=	"enc",
+	[AMDGPU_HW_IP_UVD_ENC]	=	"enc_1",
+	[AMDGPU_HW_IP_VCN_DEC]	=	"dec",
+	[AMDGPU_HW_IP_VCN_ENC]	=	"enc",
+	[AMDGPU_HW_IP_VCN_JPEG]	=	"jpeg",
+};
+
+struct amdgpu_smi_proc_hwip {
+	struct dentry *entry;
+	int id;
+};
+
+
+struct amdgpu_smi_proc {
+	struct amdgpu_fpriv *priv;
+	struct dentry *entry;
+	int pasid;
+	struct dentry *d_name;
+	struct dentry *d_pid;
+	struct dentry *d_mem;
+	struct dentry *d_hwdir;
+	struct amdgpu_smi_proc_hwip hwip[AMDGPU_HW_IP_NUM];
+};
+
+uint64_t amdgpu_smi_get_proc_mem(struct amdgpu_fpriv *fpriv)
+{
+	int id;
+	struct drm_gem_object *gobj;
+	uint64_t total = 0;
+
+	spin_lock(&fpriv->file->table_lock);
+	idr_for_each_entry(&fpriv->file->object_idr, gobj, id) {
+		struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+		unsigned int domain = amdgpu_mem_type_to_domain(
+				bo->tbo.mem.mem_type);
+
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+			total += amdgpu_bo_size(bo);
+	}
+	spin_unlock(&fpriv->file->table_lock);
+
+	if (fpriv->vm.process_info) {
+		struct kgd_mem *mem;
+
+		mutex_lock(&fpriv->vm.process_info->lock);
+		list_for_each_entry(mem, &fpriv->vm.process_info->kfd_bo_list,
+			    validate_list.head) {
+			struct amdgpu_bo *bo = mem->bo;
+			unsigned int domain = amdgpu_mem_type_to_domain(
+				bo->tbo.mem.mem_type);
+
+			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+				total += amdgpu_bo_size(bo);
+		}
+
+		list_for_each_entry(mem, &fpriv->vm.process_info->userptr_valid_list,
+			    validate_list.head) {
+			struct amdgpu_bo *bo = mem->bo;
+			unsigned int domain = amdgpu_mem_type_to_domain(
+				bo->tbo.mem.mem_type);
+
+			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+				total += amdgpu_bo_size(bo);
+		}
+
+		mutex_unlock(&fpriv->vm.process_info->lock);
+	}
+
+	return total;
+}
+
+static int proc_mem_show(struct seq_file *seq, void *v)
+{
+	struct amdgpu_fpriv *fpriv = seq->private;
+
+	seq_printf(seq, "%llu\n", amdgpu_smi_get_proc_mem(fpriv));
+
+	return 0;
+}
+
+static int proc_mem_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_mem_show, inode->i_private);
+}
+
+static int proc_name_show(struct seq_file *seq, void *v)
+{
+	struct amdgpu_fpriv *fpriv = seq->private;
+
+	seq_printf(seq, "%s\n", fpriv->vm.task_info.process_name);
+
+	return 0;
+}
+
+static int proc_name_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_name_show, inode->i_private);
+}
+
+static int proc_pid_show(struct seq_file *seq, void *v)
+{
+	struct amdgpu_fpriv *fpriv = seq->private;
+
+	seq_printf(seq, "%d\n", fpriv->vm.task_info.pid);
+
+	return 0;
+}
+
+static int proc_pid_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_pid_show, inode->i_private);
+}
+
+static const struct file_operations proc_name_fops = {
+	.owner = THIS_MODULE,
+	.open = proc_name_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations proc_pid_fops = {
+	.owner = THIS_MODULE,
+	.open = proc_pid_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations proc_mem_fops = {
+	.owner = THIS_MODULE,
+	.open = proc_mem_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+
+uint64_t amdgpu_smi_get_fence_usage(struct amdgpu_fpriv *fpriv, uint32_t hwip,
+		uint32_t idx, uint64_t *elapsed)
+{
+	struct amdgpu_ctx_entity *centity;
+	struct idr *idp;
+	struct amdgpu_ctx *ctx;
+	uint32_t id, j;
+	uint64_t now, t1, t2;
+	uint64_t total = 0, min = 0;
+
+
+	if (idx >= AMDGPU_MAX_ENTITY_NUM)
+		return 0;
+
+	idp = &fpriv->ctx_mgr.ctx_handles;
+
+	mutex_lock(&fpriv->ctx_mgr.lock);
+	idr_for_each_entry(idp, ctx, id) {
+		if (!ctx->entities[hwip][idx])
+			continue;
+
+		centity = ctx->entities[hwip][idx];
+
+		for (j = 0; j < amdgpu_sched_jobs; j++) {
+			struct dma_fence *fence;
+			struct drm_sched_fence *s_fence;
+
+			spin_lock(&ctx->ring_lock);
+			fence = dma_fence_get(centity->fences[j]);
+			spin_unlock(&ctx->ring_lock);
+			if (!fence)
+				continue;
+			s_fence = to_drm_sched_fence(fence);
+			if (!dma_fence_is_signaled(&s_fence->scheduled))
+				continue;
+			now = ktime_to_ns(ktime_get());
+			t1 = ktime_to_ns(s_fence->scheduled.timestamp);
+			t2 = !dma_fence_is_signaled(&s_fence->finished) ?
+				0 : ktime_to_ns(s_fence->finished.timestamp);
+			dma_fence_put(fence);
+
+			t1 = now - t1;
+			t2 = t2 == 0 ? 0 : now - t2;
+			total += t1 - t2;
+			if (t1 > min)
+				min = t1;
+		}
+
+	}
+
+	mutex_unlock(&fpriv->ctx_mgr.lock);
+
+	if (elapsed)
+		*elapsed = min;
+
+	return total;
+}
+
+static int proc_hwip_seq_show(struct seq_file *seq, void *v)
+{
+	struct amdgpu_smi_proc_hwip *hwip = seq->private;
+	struct amdgpu_smi_proc *proc = container_of(hwip,
+			struct amdgpu_smi_proc, hwip[hwip->id]);
+	struct amdgpu_fpriv *fpriv = proc->priv;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_ENTITY_NUM; i++) {
+		uint64_t total, min;
+		uint32_t perc, frac;
+
+		total = amdgpu_smi_get_fence_usage(fpriv, hwip->id, i, &min);
+
+		if ((total == 0) || (min == 0))
+			continue;
+
+		perc = div64_u64(10000 * total, min);
+		frac = perc % 100;
+
+		seq_printf(seq, "%s<%d>:%d.%d%%\n",
+				amdgpu_smi_ip_name[hwip->id],
+				i, perc/100, frac);
+	}
+	return 0;
+}
+
+static int proc_hwip_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_hwip_seq_show, inode->i_private);
+}
+
+static const struct file_operations proc_hwip_fops = {
+	.owner = THIS_MODULE,
+	.open = proc_hwip_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+uint32_t amdgpu_smi_get_ip_count(struct amdgpu_device *adev, int id)
+{
+	enum amd_ip_block_type type;
+	uint32_t count = 0;
+	int i;
+
+	switch (id) {
+	case AMDGPU_HW_IP_GFX:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		type = AMD_IP_BLOCK_TYPE_SDMA;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		type = AMD_IP_BLOCK_TYPE_VCE;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+	case AMDGPU_HW_IP_VCN_ENC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		type = (amdgpu_device_ip_get_ip_block(adev,
+			AMD_IP_BLOCK_TYPE_JPEG)) ?
+			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+		break;
+	default:
+		return 0;
+	}
+
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		if (adev->ip_blocks[i].version->type == type &&
+		    adev->ip_blocks[i].status.valid &&
+		    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+			count++;
+	return count;
+
+}
+
+int amdgpu_smi_create_proc_node(struct amdgpu_device *adev,
+		struct amdgpu_fpriv *fpriv, int pasid)
+{
+	char name[16];
+	struct amdgpu_smi_proc *proc;
+	int i, r, id;
+
+	if (!adev->debugfs_proc)
+		return -EIO;
+
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+
+	if (!proc)
+		return -ENOMEM;
+
+	if (fpriv->file) {
+		r = smi_core_open(fpriv->file);
+
+		if (r) {
+			kfree(proc);
+			return r;
+		}
+	}
+
+	id = idr_alloc(&adev->procs, fpriv, pasid, pasid + 1, GFP_KERNEL);
+
+	snprintf(name, 16, "%d", pasid);
+
+	proc->entry = debugfs_create_dir(name, adev->debugfs_proc);
+
+	if (!proc->entry) {
+		DRM_ERROR("unable to create proc %s debugfs dir\n", name);
+		kfree(proc);
+		return -EIO;
+	}
+
+	proc->pasid = id;
+
+	proc->d_name = debugfs_create_file("name", 0444, proc->entry, fpriv,
+			&proc_name_fops);
+
+	proc->d_pid = debugfs_create_file("pid", 0444, proc->entry, fpriv,
+			&proc_pid_fops);
+
+	proc->d_mem = debugfs_create_file("mem", 0444, proc->entry, fpriv,
+			&proc_mem_fops);
+
+	proc->d_hwdir = debugfs_create_dir("fences", proc->entry);
+
+	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+		if (amdgpu_smi_get_ip_count(adev, i)) {
+			proc->hwip[i].entry =
+				debugfs_create_file(amdgpu_smi_ip_name[i], 0444,
+						proc->d_hwdir, &proc->hwip[i],
+						&proc_hwip_fops);
+			proc->hwip[i].id = i;
+		}
+	}
+
+	fpriv->proc = proc;
+	proc->priv = fpriv;
+
+	return 0;
+}
+
+int amdgpu_smi_remove_proc_node(struct amdgpu_device *adev,
+		struct amdgpu_fpriv *fpriv)
+{
+	struct amdgpu_smi_proc *proc = fpriv->proc;
+	struct smi_ctx *smi_ctx = (struct smi_ctx *) fpriv->smi_priv;
+	int i;
+
+	if (!adev->debugfs_proc)
+		return -EIO;
+
+	if (proc) {
+		idr_remove(&adev->procs, proc->pasid);
+		for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
+			if (proc->hwip[i].entry)
+				debugfs_remove(proc->hwip[i].entry);
+		debugfs_remove(proc->d_hwdir);
+		debugfs_remove(proc->d_pid);
+		debugfs_remove(proc->d_name);
+		debugfs_remove(proc->d_mem);
+		debugfs_remove(proc->entry);
+		kfree(proc);
+	}
+
+	fpriv->proc = NULL;
+
+	if (smi_ctx)
+		smi_core_release(fpriv);
+
+	return 0;
+}
+
+
+int amdgpu_smi_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp)
+{
+	struct drm_amdgpu_smi *args = data;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+	return smi_core_ioctl_handler(fpriv->smi_priv, args);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.h
new file mode 100644
index 000000000000..fd6747f090a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smi.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ */
+#ifndef __AMDGPU_SMI_H__
+#define __AMDGPU_SMI_H__
+
+
+#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
+
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
+
+
+struct amdgpu_smi_proc;
+struct amdgpu_smi_ctx;
+
+int amdgpu_smi_create_proc_node(struct amdgpu_device *adev,
+				struct amdgpu_fpriv *fpriv,
+				int pasid);
+
+int amdgpu_smi_remove_proc_node(struct amdgpu_device *adev,
+				struct amdgpu_fpriv *fpriv);
+
+int amdgpu_smi_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+uint32_t amdgpu_smi_get_ip_count(struct amdgpu_device *adev, int id);
+
+uint64_t amdgpu_smi_get_fence_usage(struct amdgpu_fpriv *fpriv, uint32_t hwip,
+		uint32_t idx, uint64_t *elapsed);
+
+uint64_t amdgpu_smi_get_proc_mem(struct amdgpu_fpriv *fpriv);
+
+#endif
-- 
2.29.0
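
A note on the arithmetic in proc_hwip_seq_show(): `total` accumulates busy
nanoseconds across the entity's tracked fences, while the variable named
`min` actually ends up holding the longest elapsed window (it is only ever
raised), which then serves as the denominator. An equivalent standalone
sketch of that computation; smi_print_usage() is an illustrative name, not
a function from the patch.

  #include <linux/math64.h>
  #include <linux/seq_file.h>
  #include <linux/types.h>

  /* 10000 * total / window gives usage in hundredths of a percent,
   * split into integer and fractional parts for display.
   */
  static void smi_print_usage(struct seq_file *seq, const char *ip_name,
  			    int idx, u64 total_busy_ns, u64 window_ns)
  {
  	u32 perc = div64_u64(10000 * total_busy_ns, window_ns);

  	seq_printf(seq, "%s<%d>:%d.%d%%\n", ip_name, idx,
  		   perc / 100, perc % 100);
  }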


[PATCH 4/7] SWDEV-275015 - drm/amdgpu: SMI debugfs tracking
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: Roy Sun, yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Add a folder to the debugfs structure for tracking
per-PID fences.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Signed-off-by: Roy Sun <Roy.Sun@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index bcaf271b39bf..02534a059f42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1544,7 +1544,12 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	struct dentry *ent;
 	int r, i;
 
+	ent = debugfs_create_dir("proc", root);
 
+	if (!ent) {
+		DRM_ERROR("unable to create proc debugfs dir\n");
+		return -EIO;
+	}
 
 	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
 				  &fops_ib_preempt);
-- 
2.29.0
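
Together with the nodes created in patch 3, this yields a per-process
hierarchy under the DRM debugfs root. A sketch of the resulting layout,
assuming card 0; the pasid directory name is illustrative:

  /sys/kernel/debug/dri/0/proc/
      32768/              <- one directory per pasid
          name            <- process name
          pid             <- task pid
          mem             <- VRAM usage in bytes
          fences/
              gfx         <- per-entity usage, e.g. "gfx<0>:42.7%"
              compute
              ...         <- one file per IP block present on the ASIC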


[PATCH 5/7] SWDEV-275015 - drm/amdgpu: Track fences on KMS
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: Roy Sun, yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Create SMI fence tracking structures on KMS open.

Signed-off-by: David M Nieto <david.nieto@amd.com>
Signed-off-by: Roy Sun <Roy.Sun@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 ++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f98843eeb084..bc63a9662ca0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1679,7 +1679,8 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_FREESYNC, amdgpu_display_freesync_ioctl, DRM_MASTER)
+	DRM_IOCTL_DEF_DRV(AMDGPU_FREESYNC, amdgpu_display_freesync_ioctl, DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(AMDGPU_SMI, amdgpu_smi_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
 };
 
 static const struct drm_driver amdgpu_kms_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ada807de978b..c393cbf87656 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -41,6 +41,7 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_smi.h"
 
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
 {
@@ -210,6 +211,10 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
+	/* SMI */
+	mutex_init(&adev->proc_lock);
+	idr_init(&adev->procs);
+
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -1135,6 +1140,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
 	file_priv->driver_priv = fpriv;
+	fpriv->file = file_priv;
+
+	amdgpu_smi_create_proc_node(adev, fpriv, pasid);
 	goto out_suspend;
 
 error_vm:
@@ -1177,6 +1185,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
 	pm_runtime_get_sync(dev->dev);
 
+	amdgpu_smi_remove_proc_node(adev, fpriv);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
 		amdgpu_uvd_free_handles(adev, file_priv);
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
-- 
2.29.0
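
Because the ioctl is registered with DRM_RENDER_ALLOW, a monitoring tool can
reach it through a render node without holding DRM master. A minimal sketch
of that access path; the device path and the query code 0 are illustrative
assumptions.

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <drm/amdgpu_drm.h>

  int main(void)
  {
  	struct drm_amdgpu_smi args;
  	/* Render node: no DRM master needed thanks to DRM_RENDER_ALLOW. */
  	int fd = open("/dev/dri/renderD128", O_RDWR);

  	if (fd < 0)
  		return 1;

  	memset(&args, 0, sizeof(args));
  	args.in.code = 0;	/* hypothetical query code */
  	if (ioctl(fd, DRM_IOCTL_AMDGPU_SMI, &args) < 0)
  		perror("DRM_IOCTL_AMDGPU_SMI");
  	return 0;
  }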


[PATCH 6/7] SWDEV-275015 - drm/amdkfd: Add fence tracking
From: Roy Sun @ 2021-03-22  2:54 UTC
  To: amd-gfx; +Cc: yehonsun, David M Nieto

From: David M Nieto <david.nieto@amd.com>

Add fence tracking for amdgpu resources on gpuvm creation.

Signed-off-by: David M Nieto <david.nieto@amd.com>
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c    | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e93850f2f3b1..26e84c2d6316 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1042,13 +1042,16 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
 					  struct dma_fence **ef)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct amdgpu_fpriv *fpriv;
 	struct amdgpu_vm *new_vm;
 	int ret;
 
-	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
-	if (!new_vm)
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv)
 		return -ENOMEM;
 
+	new_vm = &fpriv->vm;
+
 	/* Initialize AMDGPU part of the VM */
 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
 	if (ret) {
@@ -1063,12 +1066,14 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
 
 	*vm = (void *) new_vm;
 
+	amdgpu_smi_create_proc_node(adev, fpriv, pasid);
+
 	return 0;
 
 init_kfd_vm_fail:
 	amdgpu_vm_fini(adev, new_vm);
 amdgpu_vm_init_fail:
-	kfree(new_vm);
+	kfree(fpriv);
 	return ret;
 }
 
@@ -1142,6 +1147,8 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+	struct amdgpu_fpriv *fpriv =
+		container_of(avm, struct amdgpu_fpriv, vm);
 
 	if (WARN_ON(!kgd || !vm))
 		return;
@@ -1149,8 +1156,10 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
 	pr_debug("Destroying process vm %p\n", vm);
 
 	/* Release the VM context */
+	amdgpu_smi_remove_proc_node(adev, fpriv);
+
 	amdgpu_vm_fini(adev, avm);
-	kfree(vm);
+	kfree(fpriv);
 }
 
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
-- 
2.29.0
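
The destroy path above can recover the enclosing amdgpu_fpriv from the
amdgpu_vm pointer only because the VM is now embedded in the fpriv rather
than allocated standalone, which makes container_of() valid. A toy sketch of
that recovery pattern, with hypothetical structure names:

  #include <stddef.h>

  struct fpriv_like {
  	int other_state;
  	struct vm_like { int dummy; } vm;	/* embedded, not a pointer */
  };

  /* Step back from a pointer to an embedded member to its container. */
  #define container_of(ptr, type, member) \
  	((type *)((char *)(ptr) - offsetof(type, member)))

  static struct fpriv_like *fpriv_from_vm(struct vm_like *vm)
  {
  	return container_of(vm, struct fpriv_like, vm);
  }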

