From: Max Gurtovoy <maxg@mellanox.com>
To: linux-nvme@lists.infradead.org, sagi@grimberg.me, hch@lst.de,
	loberman@redhat.com, bvanassche@acm.org,
	linux-rdma@vger.kernel.org
Cc: rgirase@redhat.com, vladimirk@mellanox.com, shlomin@mellanox.com,
	leonro@mellanox.com, dledford@redhat.com, jgg@mellanox.com,
	oren@mellanox.com, kbusch@kernel.org,
	Max Gurtovoy <maxg@mellanox.com>,
	idanb@mellanox.com
Subject: [PATCH v2 1/5] IB/core: add a simple SRQ pool per PD
Date: Wed, 18 Mar 2020 17:02:53 +0200
Message-ID: <20200318150257.198402-2-maxg@mellanox.com>
In-Reply-To: <20200318150257.198402-1-maxg@mellanox.com>

ULPs can use this API to create/destroy SRQs with the same
characteristics in order to implement logic that saves resources
without a significant performance penalty (e.g. create an SRQ per
completion vector and use shared receive buffers for multiple
controllers of the ULP).
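
A minimal usage sketch for a ULP caller (hypothetical code; the queue
depth, SGE count and per-vector sizing are illustrative assumptions,
not part of this patch):

	#include <rdma/srq_pool.h>

	static int ulp_setup_srqs(struct ib_device *dev, struct ib_pd *pd)
	{
		struct ib_srq_init_attr srq_attr = {};
		struct ib_srq *srq;
		int ret;

		srq_attr.attr.max_wr = 1024;	/* assumed receive queue depth */
		srq_attr.attr.max_sge = 1;	/* assumed single-SGE receives */

		/* create one SRQ per completion vector, stashed in the PD */
		ret = rdma_srq_pool_init(pd, dev->num_comp_vectors, &srq_attr);
		if (ret)
			return ret;

		/* a queue takes an SRQ out of the pool for its QP ... */
		srq = rdma_srq_pool_get(pd);
		if (srq) {
			/* ... and returns it once the QP is torn down */
			rdma_srq_pool_put(pd, srq);
		}

		/* all SRQs must be returned before the pool is destroyed */
		rdma_srq_pool_destroy(pd);
		return 0;
	}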

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
---
 drivers/infiniband/core/Makefile   |  2 +-
 drivers/infiniband/core/srq_pool.c | 75 ++++++++++++++++++++++++++++++++++++++
 drivers/infiniband/core/verbs.c    |  3 ++
 include/rdma/ib_verbs.h            |  4 ++
 include/rdma/srq_pool.h            | 18 +++++++++
 5 files changed, 101 insertions(+), 1 deletion(-)
 create mode 100644 drivers/infiniband/core/srq_pool.c
 create mode 100644 include/rdma/srq_pool.h

diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d1b14887..ca377b0 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -12,7 +12,7 @@ ib_core-y :=			packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
 				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
 				multicast.o mad.o smi.o agent.o mad_rmpp.o \
 				nldev.o restrack.o counters.o ib_core_uverbs.o \
-				trace.o
+				trace.o srq_pool.o
 
 ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
 ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/srq_pool.c b/drivers/infiniband/core/srq_pool.c
new file mode 100644
index 0000000..68321f0
--- /dev/null
+++ b/drivers/infiniband/core/srq_pool.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/srq_pool.h>
+
+struct ib_srq *rdma_srq_pool_get(struct ib_pd *pd)
+{
+	struct ib_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->srq_lock, flags);
+	srq = list_first_entry_or_null(&pd->srqs, struct ib_srq, pd_entry);
+	if (srq)
+		list_del(&srq->pd_entry);
+	spin_unlock_irqrestore(&pd->srq_lock, flags);
+
+	return srq;
+}
+EXPORT_SYMBOL(rdma_srq_pool_get);
+
+void rdma_srq_pool_put(struct ib_pd *pd, struct ib_srq *srq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->srq_lock, flags);
+	list_add(&srq->pd_entry, &pd->srqs);
+	spin_unlock_irqrestore(&pd->srq_lock, flags);
+}
+EXPORT_SYMBOL(rdma_srq_pool_put);
+
+int rdma_srq_pool_init(struct ib_pd *pd, int nr,
+		struct ib_srq_init_attr *srq_attr)
+{
+	struct ib_srq *srq;
+	unsigned long flags;
+	int ret, i;
+
+	for (i = 0; i < nr; i++) {
+		srq = ib_create_srq(pd, srq_attr);
+		if (IS_ERR(srq)) {
+			ret = PTR_ERR(srq);
+			goto out;
+		}
+
+		spin_lock_irqsave(&pd->srq_lock, flags);
+		list_add_tail(&srq->pd_entry, &pd->srqs);
+		spin_unlock_irqrestore(&pd->srq_lock, flags);
+	}
+
+	return 0;
+out:
+	rdma_srq_pool_destroy(pd);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_srq_pool_init);
+
+void rdma_srq_pool_destroy(struct ib_pd *pd)
+{
+	struct ib_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->srq_lock, flags);
+	while (!list_empty(&pd->srqs)) {
+		srq = list_first_entry(&pd->srqs, struct ib_srq, pd_entry);
+		list_del(&srq->pd_entry);
+
+		spin_unlock_irqrestore(&pd->srq_lock, flags);
+		ib_destroy_srq(srq);
+		spin_lock_irqsave(&pd->srq_lock, flags);
+	}
+	spin_unlock_irqrestore(&pd->srq_lock, flags);
+}
+EXPORT_SYMBOL(rdma_srq_pool_destroy);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e62c9df..0bb69d2 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -272,6 +272,8 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
 	pd->flags = flags;
+	spin_lock_init(&pd->srq_lock);
+	INIT_LIST_HEAD(&pd->srqs);
 
 	pd->res.type = RDMA_RESTRACK_PD;
 	rdma_restrack_set_task(&pd->res, caller);
@@ -340,6 +342,7 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
 		pd->__internal_mr = NULL;
 	}
 
+	WARN_ON_ONCE(!list_empty(&pd->srqs));
 	/* uverbs manipulates usecnt with proper locking, while the kabi
 	   requires the caller to guarantee we can't race here. */
 	WARN_ON(atomic_read(&pd->usecnt));
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1f779fa..1dcfefb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1517,6 +1517,9 @@ struct ib_pd {
 
 	u32			unsafe_global_rkey;
 
+	spinlock_t		srq_lock;
+	struct list_head	srqs;
+
 	/*
 	 * Implementation details of the RDMA core, don't use in drivers:
 	 */
@@ -1585,6 +1588,7 @@ struct ib_srq {
 	void		       *srq_context;
 	enum ib_srq_type	srq_type;
 	atomic_t		usecnt;
+	struct list_head	pd_entry; /* srq pool entry */
 
 	struct {
 		struct ib_cq   *cq;
diff --git a/include/rdma/srq_pool.h b/include/rdma/srq_pool.h
new file mode 100644
index 0000000..ee83896
--- /dev/null
+++ b/include/rdma/srq_pool.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) */
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_SRQ_POOL_H
+#define _RDMA_SRQ_POOL_H
+
+#include <rdma/ib_verbs.h>
+
+struct ib_srq *rdma_srq_pool_get(struct ib_pd *pd);
+void rdma_srq_pool_put(struct ib_pd *pd, struct ib_srq *srq);
+
+int rdma_srq_pool_init(struct ib_pd *pd, int nr,
+		struct ib_srq_init_attr *srq_attr);
+void rdma_srq_pool_destroy(struct ib_pd *pd);
+
+#endif /* _RDMA_SRQ_POOL_H */
-- 
1.8.3.1

Thread overview: 28+ messages
2020-03-18 15:02 [PATCH v2 0/5] nvmet-rdma/srpt: SRQ per completion vector Max Gurtovoy
2020-03-18 15:02 ` Max Gurtovoy [this message]
2020-03-20  5:59   ` [PATCH v2 1/5] IB/core: add a simple SRQ pool per PD Sagi Grimberg
2020-03-20 13:21     ` Max Gurtovoy
2020-03-20 14:27     ` Leon Romanovsky
2020-03-18 15:02 ` [PATCH v2 2/5] nvmet-rdma: add srq pointer to rdma_cmd Max Gurtovoy
2020-03-18 23:32   ` Jason Gunthorpe
2020-03-19  8:48     ` Max Gurtovoy
2020-03-19  9:14       ` Leon Romanovsky
2020-03-19 10:55         ` Max Gurtovoy
2020-03-19 11:54       ` Jason Gunthorpe
2020-03-19 14:08         ` Konstantin Ryabitsev
2020-03-19 21:58         ` Konstantin Ryabitsev
2020-03-19  4:05   ` Bart Van Assche
2020-03-18 15:02 ` [PATCH v2 3/5] nvmet-rdma: use SRQ per completion vector Max Gurtovoy
2020-03-19  4:09   ` Bart Van Assche
2020-03-19  9:15     ` Max Gurtovoy
2020-03-19 11:56       ` Jason Gunthorpe
2020-03-19 12:48         ` Max Gurtovoy
2020-03-19 13:53           ` Jason Gunthorpe
2020-03-19 14:49             ` Bart Van Assche
     [not found]               ` <50dd8f5d-d092-54bc-236d-1e702fb95240@mellanox.com>
     [not found]                 ` <6e3cc1c4-b24e-f607-42b3-5b83dd8c312c@mellanox.com>
2020-03-19 16:27                   ` Max Gurtovoy
2020-03-20  5:47   ` Sagi Grimberg
2020-03-18 15:02 ` [PATCH v2 4/5] RDMA/srpt: use ib_alloc_cq instead of ib_alloc_cq_any Max Gurtovoy
2020-03-19  4:15   ` Bart Van Assche
2020-03-18 15:02 ` [PATCH v2 5/5] RDMA/srpt: use SRQ per completion vector Max Gurtovoy
2020-03-19  4:20   ` Bart Van Assche
2020-03-19  4:02 ` [PATCH v2 0/5] nvmet-rdma/srpt: " Bart Van Assche
