linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next 4/7] RDMA/rxe: Replace pool_lock by xa_lock
Date: Wed, 20 Oct 2021 17:05:47 -0500	[thread overview]
Message-ID: <20211020220549.36145-5-rpearsonhpe@gmail.com> (raw)
In-Reply-To: <20211020220549.36145-1-rpearsonhpe@gmail.com>

In rxe_pool.c, xa_alloc_bh(), xa_erase_bh(), and their variants already include:
	spin_lock_bh()
	__xa_alloc()
	spin_unlock_bh()
So we are double locking. Replacing pool_lock by xa_lock, using xa_lock in
all the places that were previously locked by pool_lock, and dropping the
double locks is a performance improvement.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 54 ++++++++++++++--------------
 1 file changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ba5c600fa9e8..1b7269dd6d9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -133,8 +133,6 @@ int rxe_pool_init(
 
 	atomic_set(&pool->num_elem, 0);
 
-	rwlock_init(&pool->pool_lock);
-
 	if (info->flags & RXE_POOL_XARRAY) {
 		xa_init_flags(&pool->xarray.xa, XA_FLAGS_ALLOC);
 		pool->xarray.limit.max = info->max_index;
@@ -292,9 +290,9 @@ static void *__rxe_alloc_locked(struct rxe_pool *pool)
 	elem->obj = obj;
 
 	if (pool->flags & RXE_POOL_XARRAY) {
-		err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
-					 pool->xarray.limit,
-					 &pool->xarray.next, GFP_KERNEL);
+		err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+					pool->xarray.limit,
+					&pool->xarray.next, GFP_KERNEL);
 		if (err)
 			goto err;
 	}
@@ -359,9 +357,9 @@ void *rxe_alloc(struct rxe_pool *pool)
 {
 	void *obj;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_alloc_locked(pool);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -370,9 +368,9 @@ void *rxe_alloc_with_key(struct rxe_pool *pool, void *key)
 {
 	void *obj;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_alloc_with_key_locked(pool, key);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -381,7 +379,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 {
 	int err;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto err;
 
@@ -389,9 +387,9 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 	elem->obj = (u8 *)elem - pool->elem_offset;
 
 	if (pool->flags & RXE_POOL_XARRAY) {
-		err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
-					 pool->xarray.limit,
-					 &pool->xarray.next, GFP_KERNEL);
+		err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+					pool->xarray.limit,
+					&pool->xarray.next, GFP_KERNEL);
 		if (err)
 			goto err;
 	}
@@ -403,13 +401,13 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 	}
 
 	refcount_set(&elem->refcnt, 1);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return 0;
 
 err:
 	atomic_dec(&pool->num_elem);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 	return -EINVAL;
 }
 
@@ -442,9 +440,9 @@ static void *__rxe_get_index(struct rxe_pool *pool, u32 index)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = __rxe_get_index_locked(pool, index);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -465,9 +463,9 @@ static void *__rxe_get_xarray(struct rxe_pool *pool, u32 index)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = __rxe_get_xarray_locked(pool, index);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -523,9 +521,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_pool_get_key_locked(pool, key);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -546,9 +544,9 @@ int __rxe_add_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_add_ref_locked(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return ret;
 }
@@ -569,9 +567,9 @@ int __rxe_drop_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_drop_ref_locked(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return ret;
 }
@@ -584,7 +582,7 @@ static int __rxe_fini(struct rxe_pool_elem *elem)
 	done = refcount_dec_if_one(&elem->refcnt);
 	if (done) {
 		if (pool->flags & RXE_POOL_XARRAY)
-			xa_erase(&pool->xarray.xa, elem->index);
+			__xa_erase(&pool->xarray.xa, elem->index);
 		if (pool->flags & RXE_POOL_INDEX)
 			rxe_drop_index(elem);
 		if (pool->flags & RXE_POOL_KEY)
@@ -621,9 +619,9 @@ int __rxe_fini_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_fini(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	if (!ret) {
 		if (pool->cleanup)
-- 
2.30.2


  parent reply	other threads:[~2021-10-20 22:07 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-10-20 22:05 [PATCH for-next 0/7] Replace red-black tree by xarray Bob Pearson
2021-10-20 22:05 ` [PATCH for-next 1/7] RDMA/rxe: Replace irqsave locks with bh locks Bob Pearson
2021-10-20 22:05 ` [PATCH for-next 2/7] RDMA/rxe: Cleanup rxe_pool_entry Bob Pearson
2021-10-20 22:05 ` [PATCH for-next 3/7] RDMA/rxe: Add xarray support to rxe_pool.c Bob Pearson
2021-10-21 11:53   ` Dennis Dalessandro
2021-10-21 17:02     ` Bob Pearson
2021-10-20 22:05 ` Bob Pearson [this message]
2021-10-20 22:05 ` [PATCH for-next 5/7] RDMA/rxe: Convert remaining pools to xarrays Bob Pearson
2021-10-20 22:05 ` [PATCH for-next 6/7] RDMA/rxe: Remove old index code from rxe_pool.c Bob Pearson
2021-10-20 22:05 ` [PATCH for-next 7/7] RDMA/rxe: Rename XARRAY as INDEX Bob Pearson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211020220549.36145-5-rpearsonhpe@gmail.com \
    --to=rpearsonhpe@gmail.com \
    --cc=jgg@nvidia.com \
    --cc=linux-rdma@vger.kernel.org \
    --cc=zyjzyj2000@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).