* [PATCH rdma-next 0/4] Define and use mana queues for CQs and WQs
@ 2024-03-13 13:24 Konstantin Taranov
  2024-03-13 13:24 ` [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues Konstantin Taranov
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-13 13:24 UTC (permalink / raw)
  To: kotaranov, sharmaajay, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

This patch series reduces code duplication by introducing the notion of
mana ib queues (struct mana_ib_queue) and corresponding helpers to create
and destroy them.
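
For illustration, a rough sketch of the resulting call pattern, abbreviated
from the CQ path in patch 2 (surrounding code and error logging omitted):

	/* A queue bundles the umem, the gdma_region and the hardware id. */
	err = mana_ib_create_queue(mdev, ucmd.buf_addr,
				   cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
	if (err)
		return err;

	...

	/* Teardown releases the dma region and the umem in one call. */
	mana_ib_destroy_queue(mdev, &cq->queue);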

Konstantin Taranov (4):
  RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  RDMA:mana_ib: Use struct mana_ib_queue for CQs
  RDMA/mana_ib: Use struct mana_ib_queue for WQs
  RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs

 drivers/infiniband/hw/mana/cq.c      | 52 ++++-------------
 drivers/infiniband/hw/mana/main.c    | 40 +++++++++++++
 drivers/infiniband/hw/mana/mana_ib.h | 31 ++++++----
 drivers/infiniband/hw/mana/qp.c      | 86 ++++++++++------------------
 drivers/infiniband/hw/mana/wq.c      | 31 ++--------
 5 files changed, 104 insertions(+), 136 deletions(-)


base-commit: 96d9cbe2f2ff7abde021bac75eafaceabe9a51fa
-- 
2.43.0


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  2024-03-13 13:24 [PATCH rdma-next 0/4] Define and use mana queues for CQs and WQs Konstantin Taranov
@ 2024-03-13 13:24 ` Konstantin Taranov
  2024-03-15 16:45   ` Long Li
  2024-03-17  6:42   ` Zhu Yanjun
  2024-03-13 13:24 ` [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for CQs Konstantin Taranov
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-13 13:24 UTC (permalink / raw)
  To: kotaranov, sharmaajay, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

Introduce helpers to work with mana ib queues (struct mana_ib_queue).
A queue always consists of umem, gdma_region, and id.
A queue can be used for a WQ or a CQ.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/main.c    | 40 ++++++++++++++++++++++++++++
 drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
 2 files changed, 50 insertions(+)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee..0ec940b97 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue)
+{
+	struct ib_umem *umem;
+	int err;
+
+	queue->umem = NULL;
+	queue->id = INVALID_QUEUE_ID;
+	queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(umem)) {
+		err = PTR_ERR(umem);
+		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+		return err;
+	}
+
+	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+	if (err) {
+		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+		goto free_umem;
+	}
+	queue->umem = umem;
+
+	ibdev_dbg(&mdev->ib_dev,
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
+		  err, queue->gdma_region);
+
+	return 0;
+free_umem:
+	ib_umem_release(umem);
+	return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+	ib_umem_release(queue->umem);
+}
+
 static int
 mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 			    struct gdma_context *gc,
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eeb..859fd3bfc 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
 	u32 max_inline_data_size;
 };
 
+struct mana_ib_queue {
+	struct ib_umem *umem;
+	u64 gdma_region;
+	u64 id;
+};
+
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
@@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
 struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for CQs
  2024-03-13 13:24 [PATCH rdma-next 0/4] Define and use mana queues for CQs and WQs Konstantin Taranov
  2024-03-13 13:24 ` [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues Konstantin Taranov
@ 2024-03-13 13:24 ` Konstantin Taranov
  2024-03-15 16:54   ` Long Li
  2024-03-13 13:24 ` [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs Konstantin Taranov
  2024-03-13 13:24 ` [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs Konstantin Taranov
  3 siblings, 1 reply; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-13 13:24 UTC (permalink / raw)
  To: kotaranov, sharmaajay, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

Use struct mana_ib_queue and its helpers for CQs

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/cq.c      | 52 ++++++----------------------
 drivers/infiniband/hw/mana/mana_ib.h |  4 +--
 drivers/infiniband/hw/mana/qp.c      | 26 +++++++-------
 3 files changed, 24 insertions(+), 58 deletions(-)

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d..c9129218f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	}
 
 	cq->cqe = attr->cqe;
-	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
-			       IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(cq->umem)) {
-		err = PTR_ERR(cq->umem);
-		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
-			  err);
-		return err;
-	}
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
 	if (err) {
-		ibdev_dbg(ibdev,
-			  "Failed to create dma region for create cq, %d\n",
-			  err);
-		goto err_release_umem;
+		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+		return err;
 	}
 
-	ibdev_dbg(ibdev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, cq->gdma_region);
-
-	/*
-	 * The CQ ID is not known at this time. The ID is generated at create_qp
-	 */
-	cq->id = INVALID_QUEUE_ID;
-
 	return 0;
-
-err_release_umem:
-	ib_umem_release(cq->umem);
-	return err;
 }
 
 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
 	struct gdma_context *gc;
-	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 	gc = mdev_to_gc(mdev);
 
-	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
-	if (err) {
-		ibdev_dbg(ibdev,
-			  "Failed to destroy dma region, %d\n", err);
-		return err;
-	}
-
-	if (cq->id != INVALID_QUEUE_ID) {
-		kfree(gc->cq_table[cq->id]);
-		gc->cq_table[cq->id] = NULL;
+	if (cq->queue.id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->queue.id]);
+		gc->cq_table[cq->queue.id] = NULL;
 	}
 
-	ib_umem_release(cq->umem);
+	mana_ib_destroy_queue(mdev, &cq->queue);
 
 	return 0;
 }
@@ -114,7 +82,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	struct gdma_queue *gdma_cq;
 
 	/* Create CQ table entry */
-	WARN_ON(gc->cq_table[cq->id]);
+	WARN_ON(gc->cq_table[cq->queue.id]);
 	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
 	if (!gdma_cq)
 		return -ENOMEM;
@@ -122,7 +90,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	gdma_cq->cq.context = cq;
 	gdma_cq->type = GDMA_CQ;
 	gdma_cq->cq.callback = mana_ib_cq_handler;
-	gdma_cq->id = cq->id;
-	gc->cq_table[cq->id] = gdma_cq;
+	gdma_cq->id = cq->queue.id;
+	gc->cq_table[cq->queue.id] = gdma_cq;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 859fd3bfc..6acb5c281 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -88,10 +88,8 @@ struct mana_ib_mr {
 
 struct mana_ib_cq {
 	struct ib_cq ibcq;
-	struct ib_umem *umem;
+	struct mana_ib_queue queue;
 	int cqe;
-	u64 gdma_region;
-	u64 id;
 	u32 comp_vector;
 };
 
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745..d7485ee6a 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		wq_spec.gdma_region = wq->gdma_region;
 		wq_spec.queue_size = wq->wq_buf_size;
 
-		cq_spec.gdma_region = cq->gdma_region;
+		cq_spec.gdma_region = cq->queue.gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
 		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 
 		/* The GDMA regions are now owned by the WQ object */
 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
-		cq->gdma_region = GDMA_INVALID_DMA_REGION;
+		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
 		wq->id = wq_spec.queue_index;
-		cq->id = cq_spec.queue_index;
+		cq->queue.id = cq_spec.queue_index;
 
 		ibdev_dbg(&mdev->ib_dev,
 			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-			  ret, wq->rx_object, wq->id, cq->id);
+			  ret, wq->rx_object, wq->id, cq->queue.id);
 
-		resp.entries[i].cqid = cq->id;
+		resp.entries[i].cqid = cq->queue.id;
 		resp.entries[i].wqid = wq->id;
 
 		mana_ind_table[i] = wq->rx_object;
@@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		if (ret)
 			goto fail;
 
-		gdma_cq_allocated[i] = gc->cq_table[cq->id];
+		gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
 	}
 	resp.num_entries = i;
 
@@ -264,7 +264,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		gc->cq_table[cq->id] = NULL;
+		gc->cq_table[cq->queue.id] = NULL;
 		kfree(gdma_cq_allocated[i]);
 
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	wq_spec.gdma_region = qp->sq_gdma_region;
 	wq_spec.queue_size = ucmd.sq_buf_size;
 
-	cq_spec.gdma_region = send_cq->gdma_region;
+	cq_spec.gdma_region = send_cq->queue.gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
 	eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	/* The GDMA regions are now owned by the WQ object */
 	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
-	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
 	qp->sq_id = wq_spec.queue_index;
-	send_cq->id = cq_spec.queue_index;
+	send_cq->queue.id = cq_spec.queue_index;
 
 	/* Create CQ table entry */
 	err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-		  qp->tx_object, qp->sq_id, send_cq->id);
+		  qp->tx_object, qp->sq_id, send_cq->queue.id);
 
 	resp.sqid = qp->sq_id;
-	resp.cqid = send_cq->id;
+	resp.cqid = send_cq->queue.id;
 	resp.tx_vp_offset = pd->tx_vp_offset;
 
 	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 err_release_gdma_cq:
 	kfree(gdma_cq);
-	gc->cq_table[send_cq->id] = NULL;
+	gc->cq_table[send_cq->queue.id] = NULL;
 
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs
  2024-03-13 13:24 [PATCH rdma-next 0/4] Define and use mana queues for CQs and WQs Konstantin Taranov
  2024-03-13 13:24 ` [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues Konstantin Taranov
  2024-03-13 13:24 ` [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for CQs Konstantin Taranov
@ 2024-03-13 13:24 ` Konstantin Taranov
  2024-03-15 16:56   ` Long Li
  2024-03-13 13:24 ` [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs Konstantin Taranov
  3 siblings, 1 reply; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-13 13:24 UTC (permalink / raw)
  To: kotaranov, sharmaajay, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

Use struct mana_ib_queue and its helpers for WQs

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/mana_ib.h |  4 +---
 drivers/infiniband/hw/mana/qp.c      | 10 ++++-----
 drivers/infiniband/hw/mana/wq.c      | 31 ++++------------------------
 3 files changed, 10 insertions(+), 35 deletions(-)

diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6acb5c281..a8953ee80 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -59,11 +59,9 @@ struct mana_ib_dev {
 
 struct mana_ib_wq {
 	struct ib_wq ibwq;
-	struct ib_umem *umem;
+	struct mana_ib_queue queue;
 	int wqe;
 	u32 wq_buf_size;
-	u64 gdma_region;
-	u64 id;
 	mana_handle_t rx_object;
 };
 
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index d7485ee6a..f606caa75 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -194,7 +194,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		ibcq = ibwq->cq;
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		wq_spec.gdma_region = wq->gdma_region;
+		wq_spec.gdma_region = wq->queue.gdma_region;
 		wq_spec.queue_size = wq->wq_buf_size;
 
 		cq_spec.gdma_region = cq->queue.gdma_region;
@@ -212,18 +212,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		}
 
 		/* The GDMA regions are now owned by the WQ object */
-		wq->gdma_region = GDMA_INVALID_DMA_REGION;
+		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
-		wq->id = wq_spec.queue_index;
+		wq->queue.id = wq_spec.queue_index;
 		cq->queue.id = cq_spec.queue_index;
 
 		ibdev_dbg(&mdev->ib_dev,
 			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-			  ret, wq->rx_object, wq->id, cq->queue.id);
+			  ret, wq->rx_object, wq->queue.id, cq->queue.id);
 
 		resp.entries[i].cqid = cq->queue.id;
-		resp.entries[i].wqid = wq->id;
+		resp.entries[i].wqid = wq->queue.id;
 
 		mana_ind_table[i] = wq->rx_object;
 
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 7c9c69962..f959f4b92 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 		container_of(pd->device, struct mana_ib_dev, ib_dev);
 	struct mana_ib_create_wq ucmd = {};
 	struct mana_ib_wq *wq;
-	struct ib_umem *umem;
 	int err;
 
 	if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 
 	ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
 
-	umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
-			   IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(umem)) {
-		err = PTR_ERR(umem);
+	err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to get umem for create wq, err %d\n", err);
+			  "Failed to create queue for create wq, %d\n", err);
 		goto err_free_wq;
 	}
 
-	wq->umem = umem;
 	wq->wqe = init_attr->max_wr;
 	wq->wq_buf_size = ucmd.wq_buf_size;
 	wq->rx_object = INVALID_MANA_HANDLE;
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
-	if (err) {
-		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to create dma region for create wq, %d\n",
-			  err);
-		goto err_release_umem;
-	}
-
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, wq->gdma_region);
-
-	/* WQ ID is returned at wq_create time, doesn't know the value yet */
-
 	return &wq->ibwq;
 
-err_release_umem:
-	ib_umem_release(umem);
-
 err_free_wq:
 	kfree(wq);
 
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 
 	mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
 
-	mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
-	ib_umem_release(wq->umem);
+	mana_ib_destroy_queue(mdev, &wq->queue);
 
 	kfree(wq);
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs
  2024-03-13 13:24 [PATCH rdma-next 0/4] Define and use mana queues for CQs and WQs Konstantin Taranov
                   ` (2 preceding siblings ...)
  2024-03-13 13:24 ` [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs Konstantin Taranov
@ 2024-03-13 13:24 ` Konstantin Taranov
  2024-03-15 17:03   ` Long Li
  3 siblings, 1 reply; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-13 13:24 UTC (permalink / raw)
  To: kotaranov, sharmaajay, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

Use struct mana_ib_queue and its helpers for RAW QPs

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/mana_ib.h | 13 +++----
 drivers/infiniband/hw/mana/qp.c      | 54 ++++++++--------------------
 2 files changed, 22 insertions(+), 45 deletions(-)

diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index a8953ee80..ddb6e73e3 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -91,15 +91,16 @@ struct mana_ib_cq {
 	u32 comp_vector;
 };
 
+struct mana_ib_raw_qp {
+	/* Work queue info */
+	struct mana_ib_queue queue;
+	mana_handle_t tx_object;
+};
+
 struct mana_ib_qp {
 	struct ib_qp ibqp;
 
-	/* Work queue info */
-	struct ib_umem *sq_umem;
-	int sqe;
-	u64 sq_gdma_region;
-	u64 sq_id;
-	mana_handle_t tx_object;
+	struct mana_ib_raw_qp sq;
 
 	/* The port on the IB device, starting with 1 */
 	u32 port;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index f606caa75..5818b665e 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -297,7 +297,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	struct mana_obj_spec cq_spec = {};
 	struct mana_port_context *mpc;
 	struct net_device *ndev;
-	struct ib_umem *umem;
 	struct mana_eq *eq;
 	int eq_vec;
 	u32 port;
@@ -346,32 +345,15 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
 		  ucmd.sq_buf_addr, ucmd.port);
 
-	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
-			   IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(umem)) {
-		err = PTR_ERR(umem);
-		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to get umem for create qp-raw, err %d\n",
-			  err);
-		goto err_free_vport;
-	}
-	qp->sq_umem = umem;
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
-						    &qp->sq_gdma_region);
+	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->sq.queue);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to create dma region for create qp-raw, %d\n",
-			  err);
-		goto err_release_umem;
+			  "Failed to create queue for create qp-raw, err %d\n", err);
+		goto err_free_vport;
 	}
 
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, qp->sq_gdma_region);
-
 	/* Create a WQ on the same port handle used by the Ethernet */
-	wq_spec.gdma_region = qp->sq_gdma_region;
+	wq_spec.gdma_region = qp->sq.queue.gdma_region;
 	wq_spec.queue_size = ucmd.sq_buf_size;
 
 	cq_spec.gdma_region = send_cq->queue.gdma_region;
@@ -382,19 +364,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	cq_spec.attached_eq = eq->eq->id;
 
 	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
-				 &cq_spec, &qp->tx_object);
+				 &cq_spec, &qp->sq.tx_object);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed to create wq for create raw-qp, err %d\n",
 			  err);
-		goto err_destroy_dma_region;
+		goto err_destroy_queue;
 	}
 
 	/* The GDMA regions are now owned by the WQ object */
-	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
+	qp->sq.queue.gdma_region = GDMA_INVALID_DMA_REGION;
 	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
-	qp->sq_id = wq_spec.queue_index;
+	qp->sq.queue.id = wq_spec.queue_index;
 	send_cq->queue.id = cq_spec.queue_index;
 
 	/* Create CQ table entry */
@@ -404,9 +386,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-		  qp->tx_object, qp->sq_id, send_cq->queue.id);
+		  qp->sq.tx_object, qp->sq.queue.id, send_cq->queue.id);
 
-	resp.sqid = qp->sq_id;
+	resp.sqid = qp->sq.queue.id;
 	resp.cqid = send_cq->queue.id;
 	resp.tx_vp_offset = pd->tx_vp_offset;
 
@@ -425,13 +407,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	gc->cq_table[send_cq->queue.id] = NULL;
 
 err_destroy_wq_obj:
-	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->sq.tx_object);
 
-err_destroy_dma_region:
-	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-
-err_release_umem:
-	ib_umem_release(umem);
+err_destroy_queue:
+	mana_ib_destroy_queue(mdev, &qp->sq.queue);
 
 err_free_vport:
 	mana_ib_uncfg_vport(mdev, pd, port);
@@ -505,12 +484,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
 	mpc = netdev_priv(ndev);
 	pd = container_of(ibpd, struct mana_ib_pd, ibpd);
 
-	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->sq.tx_object);
 
-	if (qp->sq_umem) {
-		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-		ib_umem_release(qp->sq_umem);
-	}
+	mana_ib_destroy_queue(mdev, &qp->sq.queue);
 
 	mana_ib_uncfg_vport(mdev, pd, qp->port);
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* RE: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  2024-03-13 13:24 ` [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues Konstantin Taranov
@ 2024-03-15 16:45   ` Long Li
  2024-03-17  6:42   ` Zhu Yanjun
  1 sibling, 0 replies; 13+ messages in thread
From: Long Li @ 2024-03-15 16:45 UTC (permalink / raw)
  To: Konstantin Taranov, Konstantin Taranov, sharmaajay, jgg, leon
  Cc: linux-rdma, linux-kernel



> -----Original Message-----
> From: Konstantin Taranov <kotaranov@linux.microsoft.com>
> Sent: Wednesday, March 13, 2024 6:25 AM
> To: Konstantin Taranov <kotaranov@microsoft.com>;
> sharmaajay@microsoft.com; Long Li <longli@microsoft.com>; jgg@ziepe.ca;
> leon@kernel.org
> Cc: linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and
> destroy mana queues
> 
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> A queue always consists of umem, gdma_region, and id.
> A queue can be used for a WQ or a CQ.
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>

Reviewed-by: Long Li <longli@microsoft.com>


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for CQs
  2024-03-13 13:24 ` [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for CQs Konstantin Taranov
@ 2024-03-15 16:54   ` Long Li
  0 siblings, 0 replies; 13+ messages in thread
From: Long Li @ 2024-03-15 16:54 UTC (permalink / raw)
  To: Konstantin Taranov, Konstantin Taranov, sharmaajay, jgg, leon
  Cc: linux-rdma, linux-kernel



> -----Original Message-----
> From: Konstantin Taranov <kotaranov@linux.microsoft.com>
> Sent: Wednesday, March 13, 2024 6:25 AM
> To: Konstantin Taranov <kotaranov@microsoft.com>;
> sharmaajay@microsoft.com; Long Li <longli@microsoft.com>; jgg@ziepe.ca;
> leon@kernel.org
> Cc: linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: [PATCH rdma-next 2/4] RDMA:mana_ib: Use struct mana_ib_queue for
> CQs
> 
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Use struct mana_ib_queue and its helpers for CQs
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>

Reviewed-by: Long Li <longli@microsoft.com>


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs
  2024-03-13 13:24 ` [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs Konstantin Taranov
@ 2024-03-15 16:56   ` Long Li
  0 siblings, 0 replies; 13+ messages in thread
From: Long Li @ 2024-03-15 16:56 UTC (permalink / raw)
  To: Konstantin Taranov, Konstantin Taranov, sharmaajay, jgg, leon
  Cc: linux-rdma, linux-kernel



> -----Original Message-----
> From: Konstantin Taranov <kotaranov@linux.microsoft.com>
> Sent: Wednesday, March 13, 2024 6:25 AM
> To: Konstantin Taranov <kotaranov@microsoft.com>;
> sharmaajay@microsoft.com; Long Li <longli@microsoft.com>; jgg@ziepe.ca;
> leon@kernel.org
> Cc: linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: [PATCH rdma-next 3/4] RDMA/mana_ib: Use struct mana_ib_queue for
> WQs
> 
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Use struct mana_ib_queue and its helpers for WQs
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>

Reviewed-by: Long Li <longli@microsoft.com>


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs
  2024-03-13 13:24 ` [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs Konstantin Taranov
@ 2024-03-15 17:03   ` Long Li
  2024-03-18  9:21     ` Konstantin Taranov
  0 siblings, 1 reply; 13+ messages in thread
From: Long Li @ 2024-03-15 17:03 UTC (permalink / raw)
  To: Konstantin Taranov, Konstantin Taranov, sharmaajay, jgg, leon
  Cc: linux-rdma, linux-kernel

> +struct mana_ib_raw_qp {
> +	/* Work queue info */
> +	struct mana_ib_queue queue;
> +	mana_handle_t tx_object;
> +};
> +
>  struct mana_ib_qp {
>  	struct ib_qp ibqp;
> 
> -	/* Work queue info */
> -	struct ib_umem *sq_umem;
> -	int sqe;
> -	u64 sq_gdma_region;
> -	u64 sq_id;
> -	mana_handle_t tx_object;
> +	struct mana_ib_raw_qp sq;

What's the naming scheme for RC? Maybe use raw_sq here?

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  2024-03-13 13:24 ` [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues Konstantin Taranov
  2024-03-15 16:45   ` Long Li
@ 2024-03-17  6:42   ` Zhu Yanjun
  2024-03-18  9:31     ` [EXTERNAL] " Konstantin Taranov
  1 sibling, 1 reply; 13+ messages in thread
From: Zhu Yanjun @ 2024-03-17  6:42 UTC (permalink / raw)
  To: Konstantin Taranov, kotaranov, sharmaajay, longli, jgg, leon
  Cc: linux-rdma, linux-kernel

On 2024/3/13 14:24, Konstantin Taranov wrote:
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> A queue always consists of umem, gdma_region, and id.
> A queue can be used for a WQ or a CQ.
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> ---
>   drivers/infiniband/hw/mana/main.c    | 40 ++++++++++++++++++++++++++++
>   drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
>   2 files changed, 50 insertions(+)
> 
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index 71e33feee..0ec940b97 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
>   		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
>   }
>   
> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> +			 struct mana_ib_queue *queue)
> +{
> +	struct ib_umem *umem;
> +	int err;
> +
> +	queue->umem = NULL;
> +	queue->id = INVALID_QUEUE_ID;
> +	queue->gdma_region = GDMA_INVALID_DMA_REGION;
> +
> +	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
> +	if (IS_ERR(umem)) {
> +		err = PTR_ERR(umem);
> +		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
> +		return err;
> +	}
> +
> +	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
> +	if (err) {
> +		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
> +		goto free_umem;
> +	}
> +	queue->umem = umem;
> +
> +	ibdev_dbg(&mdev->ib_dev,
> +		  "create_dma_region ret %d gdma_region 0x%llx\n",
> +		  err, queue->gdma_region);
> +
> +	return 0;
> +free_umem:
> +	ib_umem_release(umem);
> +	return err;
> +}
> +
> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
> +{
> +	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);

The function mana_ib_gd_destroy_dma_region calls mana_gd_destroy_dma_region.
In mana_gd_destroy_dma_region, if mana_gd_send_request fails (or the response
carries an error status), -EPROTO is returned. The relevant code is shown
below. Should mana_ib_destroy_queue also handle this error?

mana_ib_gd_destroy_dma_region -> mana_gd_destroy_dma_region

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
{
...
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
			err, resp.hdr.status);
		return -EPROTO;
	}
...
}

Zhu Yanjun

> +	ib_umem_release(queue->umem);
> +}
> +
>   static int
>   mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
>   			    struct gdma_context *gc,
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index f83390eeb..859fd3bfc 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
>   	u32 max_inline_data_size;
>   };
>   
> +struct mana_ib_queue {
> +	struct ib_umem *umem;
> +	u64 gdma_region;
> +	u64 id;
> +};
> +
>   struct mana_ib_dev {
>   	struct ib_device ib_dev;
>   	struct gdma_dev *gdma_dev;
> @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
>   int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
>   				  mana_handle_t gdma_region);
>   
> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> +			 struct mana_ib_queue *queue);
> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
> +
>   struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>   				struct ib_wq_init_attr *init_attr,
>   				struct ib_udata *udata);


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct mana_ib_queue for RAW QPs
  2024-03-15 17:03   ` Long Li
@ 2024-03-18  9:21     ` Konstantin Taranov
  0 siblings, 0 replies; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-18  9:21 UTC (permalink / raw)
  To: Long Li, Konstantin Taranov, sharmaajay, jgg, leon
  Cc: linux-rdma, linux-kernel

> From: Long Li <longli@microsoft.com>
> Sent: Friday, 15 March 2024 18:04
> To: Konstantin Taranov <kotaranov@linux.microsoft.com>; Konstantin
> Taranov <kotaranov@microsoft.com>; sharmaajay@microsoft.com;
> jgg@ziepe.ca; leon@kernel.org
> Cc: linux-rdma@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: RE: [PATCH rdma-next 4/4] RDMA/mana_ib: Use struct
> mana_ib_queue for RAW QPs
> 
> > +struct mana_ib_raw_qp {
> > +	/* Work queue info */
> > +	struct mana_ib_queue queue;
> > +	mana_handle_t tx_object;
> > +};
> > +
> >  struct mana_ib_qp {
> >  	struct ib_qp ibqp;
> >
> > -	/* Work queue info */
> > -	struct ib_umem *sq_umem;
> > -	int sqe;
> > -	u64 sq_gdma_region;
> > -	u64 sq_id;
> > -	mana_handle_t tx_object;
> > +	struct mana_ib_raw_qp sq;
> 
> What's the naming scheme for RC? Maybe use raw_sq here?

The plan is to use struct mana_ib_rc_qp for RC QPs.
But I think mana_ib_raw_sq is a good proposal for RAW packets; I will make that change in v2.
Thanks!


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [EXTERNAL] Re: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  2024-03-17  6:42   ` Zhu Yanjun
@ 2024-03-18  9:31     ` Konstantin Taranov
  2024-03-18 13:56       ` Zhu Yanjun
  0 siblings, 1 reply; 13+ messages in thread
From: Konstantin Taranov @ 2024-03-18  9:31 UTC (permalink / raw)
  To: Zhu Yanjun, Konstantin Taranov, sharmaajay, Long Li, jgg, leon
  Cc: linux-rdma, linux-kernel

> > From: Konstantin Taranov <kotaranov@microsoft.com>
> >
> > Introduce helpers to work with mana ib queues (struct mana_ib_queue).
> > A queue always consists of umem, gdma_region, and id.
> > A queue can be used for a WQ or a CQ.
> >
> > Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> > ---
> >   drivers/infiniband/hw/mana/main.c    | 40
> ++++++++++++++++++++++++++++
> >   drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
> >   2 files changed, 50 insertions(+)
> >
> > diff --git a/drivers/infiniband/hw/mana/main.c
> > b/drivers/infiniband/hw/mana/main.c
> > index 71e33feee..0ec940b97 100644
> > --- a/drivers/infiniband/hw/mana/main.c
> > +++ b/drivers/infiniband/hw/mana/main.c
> > @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct
> ib_ucontext *ibcontext)
> >               ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
> >   }
> >
> > +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> > +                      struct mana_ib_queue *queue) {
> > +     struct ib_umem *umem;
> > +     int err;
> > +
> > +     queue->umem = NULL;
> > +     queue->id = INVALID_QUEUE_ID;
> > +     queue->gdma_region = GDMA_INVALID_DMA_REGION;
> > +
> > +     umem = ib_umem_get(&mdev->ib_dev, addr, size,
> IB_ACCESS_LOCAL_WRITE);
> > +     if (IS_ERR(umem)) {
> > +             err = PTR_ERR(umem);
> > +             ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
> > +             return err;
> > +     }
> > +
> > +     err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue-
> >gdma_region);
> > +     if (err) {
> > +             ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n",
> err);
> > +             goto free_umem;
> > +     }
> > +     queue->umem = umem;
> > +
> > +     ibdev_dbg(&mdev->ib_dev,
> > +               "create_dma_region ret %d gdma_region 0x%llx\n",
> > +               err, queue->gdma_region);
> > +
> > +     return 0;
> > +free_umem:
> > +     ib_umem_release(umem);
> > +     return err;
> > +}
> > +
> > +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
> > +mana_ib_queue *queue) {
> > +     mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
> 
> The function mana_ib_gd_destroy_dma_region will call
> mana_gd_destroy_dma_region. In the function
> mana_gd_destroy_dma_region, the function mana_gd_send_request will
> return the error -EPROTO.
> The procedure is as below. So the function mana_ib_destroy_queue should
> also handle this error?

Thanks for the comment!
This error can be ignored and it was ignored before this commit.
I checked the corresponding Windows driver code, and it is also intentionally ignored there.
I can add a comment that the error is ignored intentionally if you want. 
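
For instance, something along these lines (illustrative wording only):

	void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
	{
		/*
		 * The error of mana_ib_gd_destroy_dma_region() is ignored
		 * intentionally; it was also ignored before this change.
		 */
		mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
		ib_umem_release(queue->umem);
	}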

> 
> mana_ib_gd_destroy_dma_region --- > mana_gd_destroy_dma_region
> 
>   693 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64
> dma_region_handle)
>   694 {
> 
> ...
> 
>   706         err = mana_gd_send_request(gc, sizeof(req), &req,
> sizeof(resp), &resp);
>   707         if (err || resp.hdr.status) {
>   708                 dev_err(gc->dev, "Failed to destroy DMA region:
> %d, 0x%x\n",
>   709                         err, resp.hdr.status);
>   710                 return -EPROTO;
>   711         }
> 
> ...
> 
>   714 }
> 
> Zhu Yanjun
> 
> > +     ib_umem_release(queue->umem);
> > +}
> > +
> >   static int
> >   mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
> >                           struct gdma_context *gc, diff --git
> > a/drivers/infiniband/hw/mana/mana_ib.h
> > b/drivers/infiniband/hw/mana/mana_ib.h
> > index f83390eeb..859fd3bfc 100644
> > --- a/drivers/infiniband/hw/mana/mana_ib.h
> > +++ b/drivers/infiniband/hw/mana/mana_ib.h
> > @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
> >       u32 max_inline_data_size;
> >   };
> >
> > +struct mana_ib_queue {
> > +     struct ib_umem *umem;
> > +     u64 gdma_region;
> > +     u64 id;
> > +};
> > +
> >   struct mana_ib_dev {
> >       struct ib_device ib_dev;
> >       struct gdma_dev *gdma_dev;
> > @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct
> mana_ib_dev *dev, struct ib_umem *umem,
> >   int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
> >                                 mana_handle_t gdma_region);
> >
> > +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
> > +                      struct mana_ib_queue *queue); void
> > +mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
> mana_ib_queue
> > +*queue);
> > +
> >   struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
> >                               struct ib_wq_init_attr *init_attr,
> >                               struct ib_udata *udata);


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [EXTERNAL] Re: [PATCH rdma-next 1/4] RDMA/mana_ib: Introduce helpers to create and destroy mana queues
  2024-03-18  9:31     ` [EXTERNAL] " Konstantin Taranov
@ 2024-03-18 13:56       ` Zhu Yanjun
  0 siblings, 0 replies; 13+ messages in thread
From: Zhu Yanjun @ 2024-03-18 13:56 UTC (permalink / raw)
  To: Konstantin Taranov, Konstantin Taranov, sharmaajay, Long Li, jgg, leon
  Cc: linux-rdma, linux-kernel


On 18.03.24 10:31, Konstantin Taranov wrote:
>>> From: Konstantin Taranov <kotaranov@microsoft.com>
>>>
>>> Introduce helpers to work with mana ib queues (struct mana_ib_queue).
>>> A queue always consists of umem, gdma_region, and id.
>>> A queue can be used for a WQ or a CQ.
>>>
>>> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
>>> ---
>>>    drivers/infiniband/hw/mana/main.c    | 40
>> ++++++++++++++++++++++++++++
>>>    drivers/infiniband/hw/mana/mana_ib.h | 10 +++++++
>>>    2 files changed, 50 insertions(+)
>>>
>>> diff --git a/drivers/infiniband/hw/mana/main.c
>>> b/drivers/infiniband/hw/mana/main.c
>>> index 71e33feee..0ec940b97 100644
>>> --- a/drivers/infiniband/hw/mana/main.c
>>> +++ b/drivers/infiniband/hw/mana/main.c
>>> @@ -237,6 +237,46 @@ void mana_ib_dealloc_ucontext(struct
>> ib_ucontext *ibcontext)
>>>                ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
>>>    }
>>>
>>> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
>>> +                      struct mana_ib_queue *queue) {
>>> +     struct ib_umem *umem;
>>> +     int err;
>>> +
>>> +     queue->umem = NULL;
>>> +     queue->id = INVALID_QUEUE_ID;
>>> +     queue->gdma_region = GDMA_INVALID_DMA_REGION;
>>> +
>>> +     umem = ib_umem_get(&mdev->ib_dev, addr, size,
>> IB_ACCESS_LOCAL_WRITE);
>>> +     if (IS_ERR(umem)) {
>>> +             err = PTR_ERR(umem);
>>> +             ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
>>> +             return err;
>>> +     }
>>> +
>>> +     err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue-
>>> gdma_region);
>>> +     if (err) {
>>> +             ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n",
>> err);
>>> +             goto free_umem;
>>> +     }
>>> +     queue->umem = umem;
>>> +
>>> +     ibdev_dbg(&mdev->ib_dev,
>>> +               "create_dma_region ret %d gdma_region 0x%llx\n",
>>> +               err, queue->gdma_region);
>>> +
>>> +     return 0;
>>> +free_umem:
>>> +     ib_umem_release(umem);
>>> +     return err;
>>> +}
>>> +
>>> +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
>>> +mana_ib_queue *queue) {
>>> +     mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
>> The function mana_ib_gd_destroy_dma_region will call
>> mana_gd_destroy_dma_region. In the function
>> mana_gd_destroy_dma_region, the function mana_gd_send_request will
>> return the error -EPROTO.
>> The procedure is as below. So the function mana_ib_destroy_queue should
>> also handle this error?
> Thanks for the comment!
> This error can be ignored and it was ignored before this commit.
> I checked the corresponding Windows driver code, and it is also intentionally ignored there.
> I can add a comment that the error is ignored intentionally if you want.

Sure. Thanks a lot.

Zhu Yanjun

>
>> mana_ib_gd_destroy_dma_region --- > mana_gd_destroy_dma_region
>>
>>    693 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64
>> dma_region_handle)
>>    694 {
>>
>> ...
>>
>>    706         err = mana_gd_send_request(gc, sizeof(req), &req,
>> sizeof(resp), &resp);
>>    707         if (err || resp.hdr.status) {
>>    708                 dev_err(gc->dev, "Failed to destroy DMA region:
>> %d, 0x%x\n",
>>    709                         err, resp.hdr.status);
>>    710                 return -EPROTO;
>>    711         }
>>
>> ...
>>
>>    714 }
>>
>> Zhu Yanjun
>>
>>> +     ib_umem_release(queue->umem);
>>> +}
>>> +
>>>    static int
>>>    mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
>>>                            struct gdma_context *gc, diff --git
>>> a/drivers/infiniband/hw/mana/mana_ib.h
>>> b/drivers/infiniband/hw/mana/mana_ib.h
>>> index f83390eeb..859fd3bfc 100644
>>> --- a/drivers/infiniband/hw/mana/mana_ib.h
>>> +++ b/drivers/infiniband/hw/mana/mana_ib.h
>>> @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
>>>        u32 max_inline_data_size;
>>>    };
>>>
>>> +struct mana_ib_queue {
>>> +     struct ib_umem *umem;
>>> +     u64 gdma_region;
>>> +     u64 id;
>>> +};
>>> +
>>>    struct mana_ib_dev {
>>>        struct ib_device ib_dev;
>>>        struct gdma_dev *gdma_dev;
>>> @@ -169,6 +175,10 @@ int mana_ib_create_dma_region(struct
>> mana_ib_dev *dev, struct ib_umem *umem,
>>>    int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
>>>                                  mana_handle_t gdma_region);
>>>
>>> +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
>>> +                      struct mana_ib_queue *queue); void
>>> +mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct
>> mana_ib_queue
>>> +*queue);
>>> +
>>>    struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
>>>                                struct ib_wq_init_attr *init_attr,
>>>                                struct ib_udata *udata);

^ permalink raw reply	[flat|nested] 13+ messages in thread
