* [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count
@ 2020-02-05 23:28 Yong Zhao
  2020-02-05 23:28 ` [PATCH 2/6] drm/amdkfd: Avoid ambiguity by indicating it's cp queue Yong Zhao
                   ` (4 more replies)
  0 siblings, 5 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

The new name makes the code easier to understand.

Change-Id: I9064dab1d022e02780023131f940fff578a06b72
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 38 +++++++++----------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  2 +-
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c   |  4 +-
 .../amd/amdkfd/kfd_process_queue_manager.c    |  2 +-
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e8..7ef9b89f5c70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -359,7 +359,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		dqm->sdma_queue_count++;
@@ -494,7 +494,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	}
 	qpd->queue_count--;
 	if (q->properties.is_active)
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 	return retval;
 }
@@ -563,13 +563,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	/*
 	 * check active state vs. the previous state and modify
 	 * counter accordingly. map_queues_cpsch uses the
-	 * dqm->queue_count to determine whether a new runlist must be
+	 * dqm->active_queue_count to determine whether a new runlist must be
 	 * uploaded.
 	 */
 	if (q->properties.is_active && !prev_active)
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 	else if (!q->properties.is_active && prev_active)
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
 		retval = map_queues_cpsch(dqm);
@@ -618,7 +618,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -662,7 +662,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 	}
 	retval = execute_queues_cpsch(dqm,
 				qpd->is_debug ?
@@ -731,7 +731,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -786,7 +786,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,7 +899,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
+	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
 	dqm->sdma_queue_count = 0;
 	dqm->xgmi_sdma_queue_count = 0;
 
@@ -924,7 +924,7 @@ static void uninitialize(struct device_queue_manager *dqm)
 {
 	int i;
 
-	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
 
 	kfree(dqm->allocated_queues);
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -1064,7 +1064,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->processes_count = 0;
+	dqm->active_queue_count = dqm->processes_count = 0;
 	dqm->sdma_queue_count = 0;
 	dqm->xgmi_sdma_queue_count = 0;
 	dqm->active_runlist = false;
@@ -1158,7 +1158,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	dqm->queue_count++;
+	dqm->active_queue_count++;
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1172,7 +1172,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	dqm->queue_count--;
+	dqm->active_queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1244,7 +1244,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		dqm->xgmi_sdma_queue_count++;
 
 	if (q->properties.is_active) {
-		dqm->queue_count++;
+		dqm->active_queue_count++;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
@@ -1319,7 +1319,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
 
 	if (!dqm->sched_running)
 		return 0;
-	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
 		return 0;
 	if (dqm->active_runlist)
 		return 0;
@@ -1438,7 +1438,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
@@ -1648,7 +1648,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		dqm->queue_count--;
+		dqm->active_queue_count--;
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1665,7 +1665,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		}
 
 		if (q->properties.is_active)
-			dqm->queue_count--;
+			dqm->active_queue_count--;
 
 		dqm->total_queue_count--;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2d..ee3400e92c30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,7 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		saved_flags;
 	unsigned int		processes_count;
-	unsigned int		queue_count;
+	unsigned int		active_queue_count;
 	unsigned int		sdma_queue_count;
 	unsigned int		xgmi_sdma_queue_count;
 	unsigned int		total_queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee23..393c218734fd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,7 +47,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	struct kfd_dev *dev = pm->dqm->dev;
 
 	process_count = pm->dqm->processes_count;
-	queue_count = pm->dqm->queue_count;
+	queue_count = pm->dqm->active_queue_count;
 	compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
 				pm->dqm->xgmi_sdma_queue_count;
 
@@ -141,7 +141,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	pm->ib_size_bytes = alloc_size_bytes;
 
 	pr_debug("Building runlist ib process count: %d queues count %d\n",
-		pm->dqm->processes_count, pm->dqm->queue_count);
+		pm->dqm->processes_count, pm->dqm->active_queue_count);
 
 	/* build the run list ib packet */
 	list_for_each_entry(cur, queues, list) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index cb1ca11b99c3..606b1a8aacad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -266,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		if ((dev->dqm->sched_policy ==
 		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
 		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
-		(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+		(dev->dqm->active_queue_count >= get_queues_num(dev->dqm)))) {
 			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
 			retval = -EPERM;
 			goto err_create_queue;
-- 
2.17.1


* [PATCH 2/6] drm/amdkfd: Avoid ambiguity by indicating it's cp queue
  2020-02-05 23:28 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
@ 2020-02-05 23:28 ` Yong Zhao
  2020-02-05 23:28 ` [PATCH 3/6] drm/amdkfd: Count active CP queues directly Yong Zhao
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

The queues represented in queue_bitmap are CP queues only, so rename it
to cp_queue_bitmap to make that explicit.
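
For illustration only, here is a hedged sketch of how a
DECLARE_BITMAP()-style mask like cp_queue_bitmap is typically
manipulated; the size, indices and the example_count_cp_queues()
helper below are made up and not part of this patch:

#include <linux/bitmap.h>

/* Hypothetical 64-entry CP queue mask, for illustration only. */
static DECLARE_BITMAP(example_cp_queue_bitmap, 64);

static unsigned int example_count_cp_queues(void)
{
	bitmap_zero(example_cp_queue_bitmap, 64);
	/* Mark queues 8..31 as available to KFD. */
	bitmap_set(example_cp_queue_bitmap, 8, 24);
	/* Take queue 10 back, e.g. reserved for another user. */
	clear_bit(10, example_cp_queue_bitmap);

	/* The same kind of query get_cp_queues_num() performs. */
	return bitmap_weight(example_cp_queue_bitmap, 64);	/* 23 */
}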

Change-Id: I7e6a75de39718d7c4da608166b85b9377d06d1b3
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c           |  4 ++--
 .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c    | 12 ++++++------
 .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h    |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c      |  2 +-
 .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c            |  2 +-
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h      |  2 +-
 7 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..ebe4b8f88e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -126,7 +126,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		/* this is going to have a few of the MSBs set that we need to
 		 * clear
 		 */
-		bitmap_complement(gpu_resources.queue_bitmap,
+		bitmap_complement(gpu_resources.cp_queue_bitmap,
 				  adev->gfx.mec.queue_bitmap,
 				  KGD_MAX_QUEUES);
 
@@ -137,7 +137,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
-			clear_bit(i, gpu_resources.queue_bitmap);
+			clear_bit(i, gpu_resources.cp_queue_bitmap);
 
 		amdgpu_doorbell_get_kfd_info(adev,
 				&gpu_resources.doorbell_physical_address,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 7ef9b89f5c70..973581c2b401 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	/* queue is available for KFD usage if bit is 1 */
 	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
 		if (test_bit(pipe_offset + i,
-			      dqm->dev->shared_resources.queue_bitmap))
+			      dqm->dev->shared_resources.cp_queue_bitmap))
 			return true;
 	return false;
 }
 
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
 {
-	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
 				KGD_MAX_QUEUES);
 }
 
@@ -908,7 +908,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
 			if (test_bit(pipe_offset + queue,
-				     dqm->dev->shared_resources.queue_bitmap))
+				     dqm->dev->shared_resources.cp_queue_bitmap))
 				dqm->allocated_queues[pipe] |= 1 << queue;
 	}
 
@@ -1029,7 +1029,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
 			/ dqm->dev->shared_resources.num_pipe_per_mec;
 
-		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
 			continue;
 
 		/* only acquire queues from the first MEC */
@@ -1979,7 +1979,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 
 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
 			if (!test_bit(pipe_offset + queue,
-				      dqm->dev->shared_resources.queue_bitmap))
+				      dqm->dev->shared_resources.cp_queue_bitmap))
 				continue;
 
 			r = dqm->dev->kfd2kgd->hqd_dump(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ee3400e92c30..3f0fb0d28c01 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -219,7 +219,7 @@ void device_queue_manager_init_v10_navi10(
 		struct device_queue_manager_asic_ops *asic_ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 393c218734fd..377bde0e781c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -62,7 +62,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 		max_proc_per_quantum = dev->max_proc_per_quantum;
 
 	if ((process_count > max_proc_per_quantum) ||
-	    compute_queue_count > get_queues_num(pm->dqm)) {
+	    compute_queue_count > get_cp_queues_num(pm->dqm)) {
 		*over_subscription = true;
 		pr_debug("Over subscribed runlist\n");
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 606b1a8aacad..b62ee2e3344a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -266,7 +266,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		if ((dev->dqm->sched_policy ==
 		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
 		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
-		(dev->dqm->active_queue_count >= get_queues_num(dev->dqm)))) {
+		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
 			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
 			retval = -EPERM;
 			goto err_create_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 43a82cf76628..b70e6b25edc6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1318,7 +1318,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.num_gws = (dev->gpu->gws &&
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
-	dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+	dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
 
 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b1034962..55750890b73f 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
 	uint32_t num_queue_per_pipe;
 
 	/* Bit n == 1 means Queue n is available for KFD */
-	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
 
 	/* SDMA doorbell assignments (SOC15 and later chips only). Only
 	 * specific doorbells are routed to each SDMA engine. Others
-- 
2.17.1


* [PATCH 3/6] drm/amdkfd: Count active CP queues directly
  2020-02-05 23:28 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
  2020-02-05 23:28 ` [PATCH 2/6] drm/amdkfd: Avoid ambiguity by indicating it's cp queue Yong Zhao
@ 2020-02-05 23:28 ` Yong Zhao
  2020-02-05 23:28 ` [PATCH 4/6] drm/amdkfd: Fix a memory leak in queue creation error handling Yong Zhao
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

Calculating the number of active CP queues by subtracting
sdma_queue_count and xgmi_sdma_queue_count from active_queue_count is
wrong when some SDMA queues are inactive, because the SDMA counters
track all created SDMA queues while active_queue_count tracks only
active ones. Fix that by counting active CP queues directly.
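
As a rough illustration (the numbers below are hypothetical, not from a
real trace):

	/* 3 active CP queues plus 2 created SDMA queues, only 1 of which
	 * is active.
	 */
	unsigned int active_queue_count = 4;	/* 3 CP + 1 active SDMA */
	unsigned int sdma_queue_count = 2;	/* created, not active, SDMA queues */
	unsigned int xgmi_sdma_queue_count = 0;
	unsigned int active_cp_queue_count = 3;	/* tracked directly by this patch */

	/* Old calculation in pm_calc_rlib_size(): undercounts by one. */
	unsigned int old_compute_queue_count = active_queue_count -
			sdma_queue_count - xgmi_sdma_queue_count;	/* = 2 */

	/* New calculation: */
	unsigned int new_compute_queue_count = active_cp_queue_count;	/* = 3 */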

Change-Id: I5ffaa75a95cbebc984558199ba2f3db6909c52a9
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 45 +++++++++++++------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  1 +
 .../gpu/drm/amd/amdkfd/kfd_packet_manager.c   |  3 +-
 3 files changed, 33 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 973581c2b401..064108cf493b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -132,6 +132,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 						qpd->sh_mem_bases);
 }
 
+void increment_queue_count(struct device_queue_manager *dqm,
+			enum kfd_queue_type type)
+{
+	dqm->active_queue_count++;
+	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+		dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+			enum kfd_queue_type type)
+{
+	dqm->active_queue_count--;
+	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+		dqm->active_cp_queue_count--;
+}
+
 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
 {
 	struct kfd_dev *dev = qpd->dqm->dev;
@@ -359,7 +375,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		dqm->active_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		dqm->sdma_queue_count++;
@@ -494,7 +510,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	}
 	qpd->queue_count--;
 	if (q->properties.is_active)
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 	return retval;
 }
@@ -567,9 +583,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	 * uploaded.
 	 */
 	if (q->properties.is_active && !prev_active)
-		dqm->active_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 	else if (!q->properties.is_active && prev_active)
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
 		retval = map_queues_cpsch(dqm);
@@ -618,7 +634,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -662,7 +678,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 	}
 	retval = execute_queues_cpsch(dqm,
 				qpd->is_debug ?
@@ -731,7 +747,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		dqm->active_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -786,7 +802,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		dqm->active_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	dqm->active_queue_count++;
+	increment_queue_count(dqm, kq->queue->properties.type);
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	dqm->active_queue_count--;
+	decrement_queue_count(dqm, kq->queue->properties.type);
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1244,7 +1260,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		dqm->xgmi_sdma_queue_count++;
 
 	if (q->properties.is_active) {
-		dqm->active_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
+
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
@@ -1438,7 +1455,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
@@ -1648,7 +1665,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		dqm->active_queue_count--;
+		decrement_queue_count(dqm, kq->queue->properties.type);
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1665,7 +1682,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		}
 
 		if (q->properties.is_active)
-			dqm->active_queue_count--;
+			decrement_queue_count(dqm, q->properties.type);
 
 		dqm->total_queue_count--;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 3f0fb0d28c01..05e0afc04cd9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -181,6 +181,7 @@ struct device_queue_manager {
 	unsigned int		saved_flags;
 	unsigned int		processes_count;
 	unsigned int		active_queue_count;
+	unsigned int		active_cp_queue_count;
 	unsigned int		sdma_queue_count;
 	unsigned int		xgmi_sdma_queue_count;
 	unsigned int		total_queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 377bde0e781c..efdb75e7677b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -48,8 +48,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 
 	process_count = pm->dqm->processes_count;
 	queue_count = pm->dqm->active_queue_count;
-	compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
-				pm->dqm->xgmi_sdma_queue_count;
+	compute_queue_count = pm->dqm->active_cp_queue_count;
 
 	/* check if there is over subscription
 	 * Note: the arbitration between the number of VMIDs and
-- 
2.17.1


* [PATCH 4/6] drm/amdkfd: Fix a memory leak in queue creation error handling
  2020-02-05 23:28 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
  2020-02-05 23:28 ` [PATCH 2/6] drm/amdkfd: Avoid ambiguity by indicating it's cp queue Yong Zhao
  2020-02-05 23:28 ` [PATCH 3/6] drm/amdkfd: Count active CP queues directly Yong Zhao
@ 2020-02-05 23:28 ` Yong Zhao
  2020-02-05 23:28 ` [PATCH 5/6] drm/amdkfd: Only count active sdma queues Yong Zhao
  2020-02-05 23:28 ` [PATCH 6/6] drm/amdkfd: Delete excessive printings Yong Zhao
  4 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

When queue creation fails, some resources are not freed. Fix that.
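
For context, a minimal sketch of the usual unwind-on-error pattern the
fix follows; the function and names below are hypothetical and not
taken from the KFD code:

#include <linux/slab.h>

static int example_create_two(void **out_a, void **out_b)
{
	void *a, *b;

	a = kzalloc(32, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(32, GFP_KERNEL);
	if (!b)
		goto err_free_a;	/* undo everything created so far */

	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	kfree(a);
	return -ENOMEM;
}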

Change-Id: Ia24b6ad31528dceddfd4d1c58bb1d22c35d3eabf
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index b62ee2e3344a..c604a2ede3f5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -329,6 +329,9 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	return retval;
 
 err_create_queue:
+	uninit_queue(q);
+	if (kq)
+		kernel_queue_uninit(kq, false);
 	kfree(pqn);
 err_allocate_pqn:
 	/* check if queues list is empty unregister process from device */
-- 
2.17.1


* [PATCH 5/6] drm/amdkfd: Only count active sdma queues
  2020-02-05 23:28 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
                   ` (2 preceding siblings ...)
  2020-02-05 23:28 ` [PATCH 4/6] drm/amdkfd: Fix a memory leak in queue creation error handling Yong Zhao
@ 2020-02-05 23:28 ` Yong Zhao
  2020-02-05 23:39   ` Yong Zhao
  2020-02-05 23:28 ` [PATCH 6/6] drm/amdkfd: Delete excessive printings Yong Zhao
  4 siblings, 1 reply; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

The sdma_queue_count was only used to infer whether we should unmap
SDMA queues under HWS mode. However, map_queues_cpsch() maps only
active queues rather than all of them. To keep the map and unmap paths
for SDMA queues consistent, count only active SDMA queues. Meanwhile,
rename sdma_queue_count to active_sdma_queue_count to reflect the new
usage.
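
One way to read the intended invariant (the set_queue_active() helper
below is only a sketch, not part of this patch): the active_* counters
should change exactly when a queue's is_active state changes, so they
always match what map_queues_cpsch() would put in the runlist.

static void set_queue_active(struct device_queue_manager *dqm,
			     struct queue *q, bool active)
{
	if (q->properties.is_active == active)
		return;

	q->properties.is_active = active;
	if (active)
		increment_queue_count(dqm, q->properties.type);
	else
		decrement_queue_count(dqm, q->properties.type);
}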

Change-Id: I9f1c3305dad044a3c779ec0730fcf7554050de8b
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 54 ++++++++-----------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  5 +-
 .../amd/amdkfd/kfd_process_queue_manager.c    | 16 +++---
 3 files changed, 31 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 064108cf493b..cf77b866054a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -138,6 +138,10 @@ void increment_queue_count(struct device_queue_manager *dqm,
 	dqm->active_queue_count++;
 	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count++;
+	else if (type == KFD_QUEUE_TYPE_SDMA)
+		dqm->active_sdma_queue_count++;
+	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->active_xgmi_sdma_queue_count++;
 }
 
 void decrement_queue_count(struct device_queue_manager *dqm,
@@ -146,6 +150,10 @@ void decrement_queue_count(struct device_queue_manager *dqm,
 	dqm->active_queue_count--;
 	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count--;
+	else if (type == KFD_QUEUE_TYPE_SDMA)
+		dqm->active_sdma_queue_count--;
+	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->active_xgmi_sdma_queue_count--;
 }
 
 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
@@ -377,11 +385,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	if (q->properties.is_active)
 		increment_queue_count(dqm, q->properties.type);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
 	 * type or whether the queue is active.
@@ -462,15 +465,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 			q->properties.type)];
 
-	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 		deallocate_hqd(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	} else {
+	else {
 		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		return -EINVAL;
@@ -916,8 +917,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_sdma_queue_count = 0;
+	dqm->active_xgmi_sdma_queue_count = 0;
 
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1081,8 +1082,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->processes_count = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_sdma_queue_count = 0;
+	dqm->active_xgmi_sdma_queue_count = 0;
 	dqm->active_runlist = false;
 	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
 	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1254,11 +1255,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	if (q->properties.is_active) {
 		increment_queue_count(dqm, q->properties.type);
 
@@ -1367,9 +1363,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 		return retval;
 
 	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
-		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
+		dqm->active_sdma_queue_count, dqm->active_xgmi_sdma_queue_count);
 
-	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
+	if (dqm->active_sdma_queue_count > 0 || dqm->active_xgmi_sdma_queue_count)
 		unmap_sdma_queues(dqm);
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
@@ -1444,13 +1440,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	deallocate_doorbell(qpd, q);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	}
 
 	list_del(&q->list);
 	qpd->queue_count--;
@@ -1673,13 +1666,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 
 	/* Clear all user mode queues */
 	list_for_each_entry(q, &qpd->queues_list, list) {
-		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-			dqm->sdma_queue_count--;
+		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 			deallocate_sdma_queue(dqm, q);
-		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-			dqm->xgmi_sdma_queue_count--;
+		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
-		}
 
 		if (q->properties.is_active)
 			decrement_queue_count(dqm, q->properties.type);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 05e0afc04cd9..62472a186d4f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,8 +182,9 @@ struct device_queue_manager {
 	unsigned int		processes_count;
 	unsigned int		active_queue_count;
 	unsigned int		active_cp_queue_count;
-	unsigned int		sdma_queue_count;
-	unsigned int		xgmi_sdma_queue_count;
+	unsigned int		cp_queue_count;
+	unsigned int		active_sdma_queue_count;
+	unsigned int		active_xgmi_sdma_queue_count;
 	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c604a2ede3f5..941b5876f19f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,16 +241,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
 	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
-			>= get_num_sdma_queues(dev->dqm)) ||
-			(type == KFD_QUEUE_TYPE_SDMA_XGMI &&
-			dev->dqm->xgmi_sdma_queue_count
-			>= get_num_xgmi_sdma_queues(dev->dqm))) {
-			pr_debug("Over-subscription is not allowed for SDMA.\n");
-			retval = -EPERM;
-			goto err_create_queue;
-		}
-
+		/* SDMA queues are always allocated statically no matter
+		 * which scheduler mode is used. We also do not need to
+		 * check whether a SDMA queue can be allocated here, because
+		 * allocate_sdma_queue() in create_queue() has the
+		 * corresponding check logic there.
+		 */
 		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
-- 
2.17.1


* [PATCH 6/6] drm/amdkfd: Delete excessive printings
  2020-02-05 23:28 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
                   ` (3 preceding siblings ...)
  2020-02-05 23:28 ` [PATCH 5/6] drm/amdkfd: Only count active sdma queues Yong Zhao
@ 2020-02-05 23:28 ` Yong Zhao
  4 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:28 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

Those prints are either duplicated or not useful.

Change-Id: I88fbe8f5748bbd0a20bcf1f6ca67b9dde99733fe
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 2 --
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 +---
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index cf77b866054a..3bfdc9b251b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -305,8 +305,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	struct mqd_manager *mqd_mgr;
 	int retval;
 
-	print_queue(q);
-
 	dqm_lock(dqm);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 941b5876f19f..cf11f4dce98a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -253,7 +253,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		pqn->q = q;
 		pqn->kq = NULL;
 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
-		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 
@@ -274,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		pqn->q = q;
 		pqn->kq = NULL;
 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
-		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
@@ -295,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
 			pqm->process->pasid, type, retval);
 		goto err_create_queue;
 	}
-- 
2.17.1


* Re: [PATCH 5/6] drm/amdkfd: Only count active sdma queues
  2020-02-05 23:28 ` [PATCH 5/6] drm/amdkfd: Only count active sdma queues Yong Zhao
@ 2020-02-05 23:39   ` Yong Zhao
  2020-02-06  2:36     ` Yong Zhao
  0 siblings, 1 reply; 9+ messages in thread
From: Yong Zhao @ 2020-02-05 23:39 UTC (permalink / raw)
  To: Yong Zhao, amd-gfx list

One minor fix added.

Yong

On 2020-02-05 6:28 p.m., Yong Zhao wrote:
> The sdma_queue_count was only used for inferring whether we should
> unmap SDMA queues under HWS mode. In contrast, We mapped active queues
> rather than all in map_queues_cpsch(). In order to match the map and unmap
> for SDMA queues, we should just count active SDMA queues. Meanwhile,
> rename sdma_queue_count to active_sdma_queue_count to reflect the new
> usage.
>
> Change-Id: I9f1c3305dad044a3c779ec0730fcf7554050de8b
> Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
> ---
>   .../drm/amd/amdkfd/kfd_device_queue_manager.c | 54 ++++++++-----------
>   .../drm/amd/amdkfd/kfd_device_queue_manager.h |  5 +-
>   .../amd/amdkfd/kfd_process_queue_manager.c    | 16 +++---
>   3 files changed, 31 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 064108cf493b..cf77b866054a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -138,6 +138,10 @@ void increment_queue_count(struct device_queue_manager *dqm,
>   	dqm->active_queue_count++;
>   	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
>   		dqm->active_cp_queue_count++;
> +	else if (type == KFD_QUEUE_TYPE_SDMA)
> +		dqm->active_sdma_queue_count++;
> +	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
> +		dqm->active_xgmi_sdma_queue_count++;
>   }
>   
>   void decrement_queue_count(struct device_queue_manager *dqm,
> @@ -146,6 +150,10 @@ void decrement_queue_count(struct device_queue_manager *dqm,
>   	dqm->active_queue_count--;
>   	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
>   		dqm->active_cp_queue_count--;
> +	else if (type == KFD_QUEUE_TYPE_SDMA)
> +		dqm->active_sdma_queue_count--;
> +	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
> +		dqm->active_xgmi_sdma_queue_count--;
>   }
>   
>   static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
> @@ -377,11 +385,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
>   	if (q->properties.is_active)
>   		increment_queue_count(dqm, q->properties.type);
>   
> -	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
> -		dqm->sdma_queue_count++;
> -	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
> -		dqm->xgmi_sdma_queue_count++;
> -
>   	/*
>   	 * Unconditionally increment this counter, regardless of the queue's
>   	 * type or whether the queue is active.
> @@ -462,15 +465,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
>   	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
>   			q->properties.type)];
>   
> -	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
> +	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
>   		deallocate_hqd(dqm, q);
> -	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
> -		dqm->sdma_queue_count--;
> +	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>   		deallocate_sdma_queue(dqm, q);
> -	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
> -		dqm->xgmi_sdma_queue_count--;
> +	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>   		deallocate_sdma_queue(dqm, q);
> -	} else {
> +	else {
>   		pr_debug("q->properties.type %d is invalid\n",
>   				q->properties.type);
>   		return -EINVAL;
> @@ -916,8 +917,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
>   	mutex_init(&dqm->lock_hidden);
>   	INIT_LIST_HEAD(&dqm->queues);
>   	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
> -	dqm->sdma_queue_count = 0;
> -	dqm->xgmi_sdma_queue_count = 0;
> +	dqm->active_sdma_queue_count = 0;
> +	dqm->active_xgmi_sdma_queue_count = 0;
>   
>   	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
>   		int pipe_offset = pipe * get_queues_per_pipe(dqm);
> @@ -1081,8 +1082,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
>   	mutex_init(&dqm->lock_hidden);
>   	INIT_LIST_HEAD(&dqm->queues);
>   	dqm->active_queue_count = dqm->processes_count = 0;
> -	dqm->sdma_queue_count = 0;
> -	dqm->xgmi_sdma_queue_count = 0;
> +	dqm->active_sdma_queue_count = 0;
> +	dqm->active_xgmi_sdma_queue_count = 0;
>   	dqm->active_runlist = false;
>   	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
>   	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
> @@ -1254,11 +1255,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
>   	list_add(&q->list, &qpd->queues_list);
>   	qpd->queue_count++;
>   
> -	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
> -		dqm->sdma_queue_count++;
> -	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
> -		dqm->xgmi_sdma_queue_count++;
> -
>   	if (q->properties.is_active) {
>   		increment_queue_count(dqm, q->properties.type);
>   
> @@ -1367,9 +1363,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
>   		return retval;
>   
>   	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
> -		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
> +		dqm->active_sdma_queue_count, dqm->active_xgmi_sdma_queue_count);
>   
> -	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
> +	if (dqm->active_sdma_queue_count > 0 || dqm->active_xgmi_sdma_queue_count)
>   		unmap_sdma_queues(dqm);
>   
>   	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
> @@ -1444,13 +1440,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
>   
>   	deallocate_doorbell(qpd, q);
>   
> -	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
> -		dqm->sdma_queue_count--;
> +	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>   		deallocate_sdma_queue(dqm, q);
> -	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
> -		dqm->xgmi_sdma_queue_count--;
> +	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>   		deallocate_sdma_queue(dqm, q);
> -	}
>   
>   	list_del(&q->list);
>   	qpd->queue_count--;
> @@ -1673,13 +1666,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
>   
>   	/* Clear all user mode queues */
>   	list_for_each_entry(q, &qpd->queues_list, list) {
> -		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
> -			dqm->sdma_queue_count--;
> +		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>   			deallocate_sdma_queue(dqm, q);
> -		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
> -			dqm->xgmi_sdma_queue_count--;
> +		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>   			deallocate_sdma_queue(dqm, q);
> -		}
>   
>   		if (q->properties.is_active)
>   			decrement_queue_count(dqm, q->properties.type);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> index 05e0afc04cd9..62472a186d4f 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> @@ -182,8 +182,9 @@ struct device_queue_manager {
>   	unsigned int		processes_count;
>   	unsigned int		active_queue_count;
>   	unsigned int		active_cp_queue_count;
> -	unsigned int		sdma_queue_count;
> -	unsigned int		xgmi_sdma_queue_count;
> +	unsigned int		cp_queue_count;
[yz] I have deleted this item as it is actually not needed.
> +	unsigned int		active_sdma_queue_count;
> +	unsigned int		active_xgmi_sdma_queue_count;
>   	unsigned int		total_queue_count;
>   	unsigned int		next_pipe_to_allocate;
>   	unsigned int		*allocated_queues;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index c604a2ede3f5..941b5876f19f 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -241,16 +241,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
>   	switch (type) {
>   	case KFD_QUEUE_TYPE_SDMA:
>   	case KFD_QUEUE_TYPE_SDMA_XGMI:
> -		if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
> -			>= get_num_sdma_queues(dev->dqm)) ||
> -			(type == KFD_QUEUE_TYPE_SDMA_XGMI &&
> -			dev->dqm->xgmi_sdma_queue_count
> -			>= get_num_xgmi_sdma_queues(dev->dqm))) {
> -			pr_debug("Over-subscription is not allowed for SDMA.\n");
> -			retval = -EPERM;
> -			goto err_create_queue;
> -		}
> -
> +		/* SDMA queues are always allocated statically no matter
> +		 * which scheduler mode is used. We also do not need to
> +		 * check whether a SDMA queue can be allocated here, because
> +		 * allocate_sdma_queue() in create_queue() has the
> +		 * corresponding check logic there.
> +		 */
>   		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
>   		if (retval != 0)
>   			goto err_create_queue;

* Re: [PATCH 5/6] drm/amdkfd: Only count active sdma queues
  2020-02-05 23:39   ` Yong Zhao
@ 2020-02-06  2:36     ` Yong Zhao
  0 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-06  2:36 UTC (permalink / raw)
  To: Yong Zhao, amd-gfx list

Please disregard patches 5 and 6, as I have a new version of them.

Yong

On 2020-02-05 6:39 p.m., Yong Zhao wrote:
> One minor fix added.
>
> Yong
>
> On 2020-02-05 6:28 p.m., Yong Zhao wrote:
>> The sdma_queue_count was only used for inferring whether we should
>> unmap SDMA queues under HWS mode. In contrast, We mapped active queues
>> rather than all in map_queues_cpsch(). In order to match the map and 
>> unmap
>> for SDMA queues, we should just count active SDMA queues. Meanwhile,
>> rename sdma_queue_count to active_sdma_queue_count to reflect the new
>> usage.
>>
>> Change-Id: I9f1c3305dad044a3c779ec0730fcf7554050de8b
>> Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
>> ---
>>   .../drm/amd/amdkfd/kfd_device_queue_manager.c | 54 ++++++++-----------
>>   .../drm/amd/amdkfd/kfd_device_queue_manager.h |  5 +-
>>   .../amd/amdkfd/kfd_process_queue_manager.c    | 16 +++---
>>   3 files changed, 31 insertions(+), 44 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
>> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> index 064108cf493b..cf77b866054a 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> @@ -138,6 +138,10 @@ void increment_queue_count(struct 
>> device_queue_manager *dqm,
>>       dqm->active_queue_count++;
>>       if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
>>           dqm->active_cp_queue_count++;
>> +    else if (type == KFD_QUEUE_TYPE_SDMA)
>> +        dqm->active_sdma_queue_count++;
>> +    else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
>> +        dqm->active_xgmi_sdma_queue_count++;
>>   }
>>     void decrement_queue_count(struct device_queue_manager *dqm,
>> @@ -146,6 +150,10 @@ void decrement_queue_count(struct 
>> device_queue_manager *dqm,
>>       dqm->active_queue_count--;
>>       if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
>>           dqm->active_cp_queue_count--;
>> +    else if (type == KFD_QUEUE_TYPE_SDMA)
>> +        dqm->active_sdma_queue_count--;
>> +    else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
>> +        dqm->active_xgmi_sdma_queue_count--;
>>   }
>>     static int allocate_doorbell(struct qcm_process_device *qpd, 
>> struct queue *q)
>> @@ -377,11 +385,6 @@ static int create_queue_nocpsch(struct 
>> device_queue_manager *dqm,
>>       if (q->properties.is_active)
>>           increment_queue_count(dqm, q->properties.type);
>>   -    if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>> -        dqm->sdma_queue_count++;
>> -    else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>> -        dqm->xgmi_sdma_queue_count++;
>> -
>>       /*
>>        * Unconditionally increment this counter, regardless of the 
>> queue's
>>        * type or whether the queue is active.
>> @@ -462,15 +465,13 @@ static int destroy_queue_nocpsch_locked(struct 
>> device_queue_manager *dqm,
>>       mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
>>               q->properties.type)];
>>   -    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
>> +    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
>>           deallocate_hqd(dqm, q);
>> -    } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
>> -        dqm->sdma_queue_count--;
>> +    else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>>           deallocate_sdma_queue(dqm, q);
>> -    } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
>> -        dqm->xgmi_sdma_queue_count--;
>> +    else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>>           deallocate_sdma_queue(dqm, q);
>> -    } else {
>> +    else {
>>           pr_debug("q->properties.type %d is invalid\n",
>>                   q->properties.type);
>>           return -EINVAL;
>> @@ -916,8 +917,8 @@ static int initialize_nocpsch(struct 
>> device_queue_manager *dqm)
>>       mutex_init(&dqm->lock_hidden);
>>       INIT_LIST_HEAD(&dqm->queues);
>>       dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
>> -    dqm->sdma_queue_count = 0;
>> -    dqm->xgmi_sdma_queue_count = 0;
>> +    dqm->active_sdma_queue_count = 0;
>> +    dqm->active_xgmi_sdma_queue_count = 0;
>>         for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
>>           int pipe_offset = pipe * get_queues_per_pipe(dqm);
>> @@ -1081,8 +1082,8 @@ static int initialize_cpsch(struct 
>> device_queue_manager *dqm)
>>       mutex_init(&dqm->lock_hidden);
>>       INIT_LIST_HEAD(&dqm->queues);
>>       dqm->active_queue_count = dqm->processes_count = 0;
>> -    dqm->sdma_queue_count = 0;
>> -    dqm->xgmi_sdma_queue_count = 0;
>> +    dqm->active_sdma_queue_count = 0;
>> +    dqm->active_xgmi_sdma_queue_count = 0;
>>       dqm->active_runlist = false;
>>       dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
>>       dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - 
>> get_num_xgmi_sdma_queues(dqm));
>> @@ -1254,11 +1255,6 @@ static int create_queue_cpsch(struct 
>> device_queue_manager *dqm, struct queue *q,
>>       list_add(&q->list, &qpd->queues_list);
>>       qpd->queue_count++;
>>   -    if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>> -        dqm->sdma_queue_count++;
>> -    else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>> -        dqm->xgmi_sdma_queue_count++;
>> -
>>       if (q->properties.is_active) {
>>           increment_queue_count(dqm, q->properties.type);
>>   @@ -1367,9 +1363,9 @@ static int unmap_queues_cpsch(struct 
>> device_queue_manager *dqm,
>>           return retval;
>>         pr_debug("Before destroying queues, sdma queue count is : %u, 
>> xgmi sdma queue count is : %u\n",
>> -        dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
>> +        dqm->active_sdma_queue_count, 
>> dqm->active_xgmi_sdma_queue_count);
>>   -    if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
>> +    if (dqm->active_sdma_queue_count > 0 || 
>> dqm->active_xgmi_sdma_queue_count)
>>           unmap_sdma_queues(dqm);
>>         retval = pm_send_unmap_queue(&dqm->packets, 
>> KFD_QUEUE_TYPE_COMPUTE,
>> @@ -1444,13 +1440,10 @@ static int destroy_queue_cpsch(struct 
>> device_queue_manager *dqm,
>>         deallocate_doorbell(qpd, q);
>>   -    if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
>> -        dqm->sdma_queue_count--;
>> +    if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>>           deallocate_sdma_queue(dqm, q);
>> -    } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
>> -        dqm->xgmi_sdma_queue_count--;
>> +    else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>>           deallocate_sdma_queue(dqm, q);
>> -    }
>>         list_del(&q->list);
>>       qpd->queue_count--;
>> @@ -1673,13 +1666,10 @@ static int process_termination_cpsch(struct 
>> device_queue_manager *dqm,
>>         /* Clear all user mode queues */
>>       list_for_each_entry(q, &qpd->queues_list, list) {
>> -        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
>> -            dqm->sdma_queue_count--;
>> +        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>>               deallocate_sdma_queue(dqm, q);
>> -        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
>> -            dqm->xgmi_sdma_queue_count--;
>> +        else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>>               deallocate_sdma_queue(dqm, q);
>> -        }
>>             if (q->properties.is_active)
>>               decrement_queue_count(dqm, q->properties.type);
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
>> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
>> index 05e0afc04cd9..62472a186d4f 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
>> @@ -182,8 +182,9 @@ struct device_queue_manager {
>>       unsigned int        processes_count;
>>       unsigned int        active_queue_count;
>>       unsigned int        active_cp_queue_count;
>> -    unsigned int        sdma_queue_count;
>> -    unsigned int        xgmi_sdma_queue_count;
>> +    unsigned int        cp_queue_count;
> [yz] I have deleted this item as it is actually not needed.
>> +    unsigned int active_sdma_queue_count;
>> +    unsigned int        active_xgmi_sdma_queue_count;
>>       unsigned int        total_queue_count;
>>       unsigned int        next_pipe_to_allocate;
>>       unsigned int        *allocated_queues;
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
>> b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
>> index c604a2ede3f5..941b5876f19f 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
>> @@ -241,16 +241,12 @@ int pqm_create_queue(struct 
>> process_queue_manager *pqm,
>>       switch (type) {
>>       case KFD_QUEUE_TYPE_SDMA:
>>       case KFD_QUEUE_TYPE_SDMA_XGMI:
>> -        if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
>> -            >= get_num_sdma_queues(dev->dqm)) ||
>> -            (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
>> -            dev->dqm->xgmi_sdma_queue_count
>> -            >= get_num_xgmi_sdma_queues(dev->dqm))) {
>> -            pr_debug("Over-subscription is not allowed for SDMA.\n");
>> -            retval = -EPERM;
>> -            goto err_create_queue;
>> -        }
>> -
>> +        /* SDMA queues are always allocated statically no matter
>> +         * which scheduler mode is used. We also do not need to
>> +         * check whether a SDMA queue can be allocated here, because
>> +         * allocate_sdma_queue() in create_queue() has the
>> +         * corresponding check logic there.
>> +         */
>>           retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
>>           if (retval != 0)
>>               goto err_create_queue;

* [PATCH 4/6] drm/amdkfd: Fix a memory leak in queue creation error handling
  2020-02-24 22:18 [PATCH 1/6] drm/amdkfd: Rename queue_count to active_queue_count Yong Zhao
@ 2020-02-24 22:18 ` Yong Zhao
  0 siblings, 0 replies; 9+ messages in thread
From: Yong Zhao @ 2020-02-24 22:18 UTC (permalink / raw)
  To: amd-gfx; +Cc: Yong Zhao

When queue creation fails, some resources are not freed. Fix that.

Change-Id: Ia24b6ad31528dceddfd4d1c58bb1d22c35d3eabf
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index b62ee2e3344a..c604a2ede3f5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -329,6 +329,9 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	return retval;
 
 err_create_queue:
+	uninit_queue(q);
+	if (kq)
+		kernel_queue_uninit(kq, false);
 	kfree(pqn);
 err_allocate_pqn:
 	/* check if queues list is empty unregister process from device */
-- 
2.17.1

