From: Matthew Wilcox <willy@infradead.org>
To: dri-devel@lists.freedesktop.org
Cc: Matthew Wilcox <willy@infradead.org>
Subject: [PATCH 15/34] drm/amdkfd: Convert event_idr to XArray
Date: Thu, 21 Feb 2019 10:41:50 -0800
Message-ID: <20190221184226.2149-31-willy@infradead.org>
In-Reply-To: <20190221184226.2149-1-willy@infradead.org>
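
Use the XArray allocation API in place of the event IDR.  xa_alloc()
stores the new ID directly in ev->event_id, so the separate assignment
after allocation goes away, and xa_for_each() / xa_for_each_start()
replace the IDR iteration macros.  The ID ranges are expressed with
XA_LIMIT(), which also removes the cast trick the old code needed to
pass an inclusive upper bound to idr_alloc().

For reference, a minimal sketch of the allocating-XArray pattern this
conversion relies on (illustrative only; the struct and function names
below are made up, not amdkfd code):

	#include <linux/printk.h>
	#include <linux/xarray.h>

	struct my_obj { u32 id; };

	/* Equivalent to xa_init_flags(&objects, XA_FLAGS_ALLOC). */
	static DEFINE_XARRAY_ALLOC(objects);

	static int obj_install(struct my_obj *obj, u32 max_id)
	{
		/* Find a free index in [0, max_id], store obj there and
		 * write the chosen index to obj->id.  Returns 0 on
		 * success or a negative errno.
		 */
		return xa_alloc(&objects, &obj->id, obj,
				XA_LIMIT(0, max_id), GFP_KERNEL);
	}

	static struct my_obj *obj_lookup(unsigned long id)
	{
		return xa_load(&objects, id);	/* NULL if nothing at id */
	}

	static void obj_visit_all(void)
	{
		struct my_obj *obj;
		unsigned long index;

		xa_for_each(&objects, index, obj)
			pr_debug("obj %lu at %p\n", index, obj);
	}

	static void obj_remove(struct my_obj *obj)
	{
		xa_erase(&objects, obj->id);	/* caller still owns obj */
	}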

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 71 ++++++++++---------------
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h   |  2 +-
 2 files changed, 30 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index e9f0e0a1b41c..28adfb52d7ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -94,7 +94,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
 static int allocate_event_notification_slot(struct kfd_process *p,
 					    struct kfd_event *ev)
 {
-	int id;
+	int err;
 
 	if (!p->signal_page) {
 		p->signal_page = allocate_signal_page(p);
@@ -110,13 +110,12 @@ static int allocate_event_notification_slot(struct kfd_process *p,
 	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
 	 * of the event limit without breaking user mode.
 	 */
-	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
-		       GFP_KERNEL);
-	if (id < 0)
-		return id;
+	err = xa_alloc(&p->events, &ev->event_id, ev,
+			XA_LIMIT(0, p->signal_mapped_size / 8 - 1), GFP_KERNEL);
+	if (err < 0)
+		return err;
 
-	ev->event_id = id;
-	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
+	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
 
 	return 0;
 }
@@ -127,7 +126,7 @@ static int allocate_event_notification_slot(struct kfd_process *p,
  */
 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
 {
-	return idr_find(&p->event_idr, id);
+	return xa_load(&p->events, id);
 }
 
 /**
@@ -162,7 +161,7 @@ static struct kfd_event *lookup_signaled_event_by_partial_id(
 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
 			return NULL;
 
-		return idr_find(&p->event_idr, id);
+		return xa_load(&p->events, id);
 	}
 
 	/* General case for partial IDs: Iterate over all matching IDs
@@ -172,7 +171,7 @@ static struct kfd_event *lookup_signaled_event_by_partial_id(
 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
 			continue;
 
-		ev = idr_find(&p->event_idr, id);
+		ev = xa_load(&p->events, id);
 	}
 
 	return ev;
@@ -211,26 +210,15 @@ static int create_signal_event(struct file *devkfd,
 
 static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
 {
-	/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
-	 * intentional integer overflow to -1 without a compiler
-	 * warning. idr_alloc treats a negative value as "maximum
-	 * signed integer".
-	 */
-	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
-			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
-			   GFP_KERNEL);
-
-	if (id < 0)
-		return id;
-	ev->event_id = id;
-
-	return 0;
+	return xa_alloc(&p->events, &ev->event_id, ev,
+			XA_LIMIT(KFD_FIRST_NONSIGNAL_EVENT_ID,
+				KFD_LAST_NONSIGNAL_EVENT_ID), GFP_KERNEL);
 }
 
 void kfd_event_init_process(struct kfd_process *p)
 {
 	mutex_init(&p->event_mutex);
-	idr_init(&p->event_idr);
+	xa_init_flags(&p->events, XA_FLAGS_ALLOC);
 	p->signal_page = NULL;
 	p->signal_event_count = 0;
 }
@@ -248,18 +236,18 @@ static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 	    ev->type == KFD_EVENT_TYPE_DEBUG)
 		p->signal_event_count--;
 
-	idr_remove(&p->event_idr, ev->event_id);
+	xa_erase(&p->events, ev->event_id);
 	kfree(ev);
 }
 
 static void destroy_events(struct kfd_process *p)
 {
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 
-	idr_for_each_entry(&p->event_idr, ev, id)
+	xa_for_each(&p->events, id, ev)
 		destroy_event(p, ev);
-	idr_destroy(&p->event_idr);
+	xa_destroy(&p->events);
 }
 
 /*
@@ -490,7 +478,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 		 * exhaustive search of signaled events.
 		 */
 		uint64_t *slots = page_slots(p->signal_page);
-		uint32_t id;
+		unsigned long id;
 
 		if (valid_id_bits)
 			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
@@ -498,9 +486,9 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 
 		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
 			/* With relatively few events, it's faster to
-			 * iterate over the event IDR
+			 * iterate over the event array
 			 */
-			idr_for_each_entry(&p->event_idr, ev, id) {
+			xa_for_each(&p->events, id, ev) {
 				if (id >= KFD_SIGNAL_EVENT_LIMIT)
 					break;
 
@@ -510,7 +498,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 		} else {
 			/* With relatively many events, it's faster to
 			 * iterate over the signal slots and lookup
-			 * only signaled events from the IDR.
+			 * only signaled events from the array.
 			 */
 			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
 				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
@@ -833,13 +821,12 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
 {
 	struct kfd_hsa_memory_exception_data *ev_data;
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 	bool send_signal = true;
 
 	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
 
-	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-	idr_for_each_entry_continue(&p->event_idr, ev, id)
+	xa_for_each_start(&p->events, id, ev, KFD_FIRST_NONSIGNAL_EVENT_ID)
 		if (ev->type == type) {
 			send_signal = false;
 			dev_dbg(kfd_device,
@@ -975,7 +962,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
 				struct kfd_vm_fault_info *info)
 {
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 	struct kfd_hsa_memory_exception_data memory_exception_data;
 
@@ -997,8 +984,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
 	}
 	mutex_lock(&p->event_mutex);
 
-	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-	idr_for_each_entry_continue(&p->event_idr, ev, id)
+	xa_for_each_start(&p->events, id, ev, KFD_FIRST_NONSIGNAL_EVENT_ID)
 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
 			ev->memory_exception_data = memory_exception_data;
 			set_event(ev);
@@ -1014,7 +1000,8 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 	struct kfd_process *p;
 	struct kfd_event *ev;
 	unsigned int temp;
-	uint32_t id, idx;
+	unsigned long id;
+	int idx;
 
 	/* Whole gpu reset caused by GPU hang and memory is lost */
 	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
@@ -1024,8 +1011,8 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 	idx = srcu_read_lock(&kfd_processes_srcu);
 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
 		mutex_lock(&p->event_mutex);
-		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-		idr_for_each_entry_continue(&p->event_idr, ev, id)
+		xa_for_each_start(&p->events, id, ev,
+				KFD_FIRST_NONSIGNAL_EVENT_ID)
 			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
 				ev->hw_exception_data = hw_exception_data;
 				set_event(ev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0689d4ccbbc0..9878abc6d847 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -657,7 +657,7 @@ struct kfd_process {
 	/* Event-related data */
 	struct mutex event_mutex;
 	/* Event ID allocator and lookup */
-	struct idr event_idr;
+	struct xarray events;
 	/* Event page */
 	struct kfd_signal_page *signal_page;
 	size_t signal_mapped_size;
-- 
2.20.1

Thread overview: 53+ messages
2019-02-21 18:41 [PATCH 00/34] Convert DRM to XArray Matthew Wilcox
2019-02-21 18:41 ` [PATCH 01/34] drm: Convert drm_minors_idr " Matthew Wilcox
2019-02-22  9:11   ` Daniel Vetter
2019-02-22  9:55     ` Daniel Vetter
2019-02-22 15:13     ` Matthew Wilcox
2019-02-21 18:41 ` [PATCH 02/34] drm: Convert aux_idr " Matthew Wilcox
2019-02-25 17:57   ` Ville Syrjälä
2019-02-25 18:42     ` Matthew Wilcox
2019-02-25 18:50       ` Ville Syrjälä
2019-02-21 18:41 ` [PATCH 03/34] drm: Convert object_name_idr " Matthew Wilcox
2019-02-22  9:17   ` Daniel Vetter
2019-02-21 18:41 ` [PATCH 04/34] drm: Convert object_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 05/34] drm: Convert syncobj_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 06/34] drm: Convert magic_map " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 07/34] drm: Convert lessee_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 08/34] drm: Remove linked lists for lessees Matthew Wilcox
2019-02-21 18:41 ` [PATCH 09/34] drm: Convert ctx_idr to XArray Matthew Wilcox
2019-02-21 18:41 ` [PATCH 10/34] drm: Convert tile_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 11/34] drm: Convert crtc_idr " Matthew Wilcox
2019-02-22  9:40   ` Daniel Vetter
2019-02-22 15:32     ` Matthew Wilcox
2019-02-22 17:12       ` Daniel Vetter
2019-02-21 18:41 ` [PATCH 12/34] drm/agp: Convert bo_list_handles " Matthew Wilcox
2019-02-25 16:06   ` Christian König
2019-02-25 16:39     ` Matthew Wilcox
2019-02-21 18:41 ` [PATCH 13/34] drm/amdgpu: Convert ctx_handles " Matthew Wilcox
2019-02-25 16:07   ` Christian König
2019-02-25 16:39     ` Matthew Wilcox
2019-02-25 16:59       ` Koenig, Christian
2019-02-25 18:47         ` Matthew Wilcox
2019-02-21 18:41 ` [PATCH 14/34] drm/amdgpu: Convert pasid_idr " Matthew Wilcox
2019-02-21 18:41 ` Matthew Wilcox [this message]
2019-02-21 18:41 ` [PATCH 16/34] drm/amdkfd: Convert alloc_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 17/34] drm/etnaviv: Convert fence_idr " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 18/34] drm/i915: Convert handles_vma " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 19/34] drm/i915: Convert spt_tree " Matthew Wilcox
2019-02-21 18:41 ` [PATCH 20/34] drm/i915: Convert page_track_tree " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 21/34] drm/i915: Convert get_page " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 22/34] drm/i915: Convert object_idr to IDA Matthew Wilcox
2019-02-21 18:42 ` [PATCH 23/34] drm/i915: Convert context_idr to XArray Matthew Wilcox
2019-02-21 18:42 ` [PATCH 24/34] drm/i915: Convert metrics_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 25/34] drm/i915: Convert vgpus_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 26/34] drm/qxl: Convert release_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 27/34] drm/qxl: Convert surf_id_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 28/34] drm/tegra: Convert contexts IDR " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 29/34] drm/vc4: Convert perfmon " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 30/34] drm/sis: Convert object_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 31/34] drm/vgem: Convert fence_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 32/34] drm/via: Convert object_idr " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 33/34] drm/vmwgfx: Convert base IDR " Matthew Wilcox
2019-02-21 18:42 ` [PATCH 34/34] drm/vmwgfx: Convert res_idr " Matthew Wilcox
2019-02-22  9:54 ` [PATCH 00/34] Convert DRM " Daniel Vetter
2019-02-24  4:21   ` Matthew Wilcox
