* [PATCH for-next 0/4] RDMA/hns: Refactor codes of CQ
From: Weihang Li @ 2019-11-18  2:34 UTC
  To: dledford, jgg; +Cc: linux-rdma, linuxarm

This series redefines, renames, and removes some interfaces to improve
the readability and maintainability of the CQ code in hip08.

The 4th patch conflicts with "RDMA/hns: Add support for reporting wc
as software mode"; we will send a rebased version if either of them
is applied.

Yixian Liu (4):
  RDMA/hns: Redefine interfaces used in creating cq
  RDMA/hns: Redefine the member of hns_roce_cq struct
  RDMA/hns: Rename the functions used inside creating cq
  RDMA/hns: Delete unnecessary callback functions for cq

 drivers/infiniband/hw/hns/hns_roce_cmd.h    |   4 +-
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 271 +++++++++++-----------------
 drivers/infiniband/hw/hns/hns_roce_device.h |  27 +--
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  26 ++-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  13 +-
 drivers/infiniband/hw/hns/hns_roce_main.c   |   4 +-
 6 files changed, 141 insertions(+), 204 deletions(-)

-- 
2.8.1


* [PATCH for-next 1/4] RDMA/hns: Redefine interfaces used in creating cq
From: Weihang Li @ 2019-11-18  2:34 UTC
  To: dledford, jgg; +Cc: linux-rdma, linuxarm

From: Yixian Liu <liuyixian@huawei.com>

Some interfaces are defined with unnecessary input parameters, such
as "nent" and "vector". This patch redefines these interfaces to
make the code simpler and more readable.
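
A minimal sketch of the pattern (hypothetical, simplified types, not
the real driver structs): values that used to travel as extra
parameters are stored in the cq object once at create time, so the
internal helpers only need the cq itself:

    #include <stdint.h>

    struct hr_cq {
        uint32_t cq_depth;  /* replaces the "nent" parameter */
        int vector;         /* replaces the "vector" parameter */
    };

    /* Before: alloc(dev, nent, mtt, cq, vector). After: the cq
     * carries its own state, so the signature shrinks to this.
     */
    static int cq_alloc(struct hr_cq *cq)
    {
        if (cq->vector < 0)  /* validation happens once, at create */
            return -1;
        return 0;
    }

    int main(void)
    {
        struct hr_cq cq = { .cq_depth = 64, .vector = 0 };
        return cq_alloc(&cq);
    }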

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
---
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 83 ++++++++++++++---------------
 drivers/infiniband/hw/hns/hns_roce_device.h |  6 +--
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  9 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 10 ++--
 4 files changed, 53 insertions(+), 55 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index cde2960..87d9b0f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -82,9 +82,8 @@ static int hns_roce_hw_create_cq(struct hns_roce_dev *dev,
 				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
-			     struct hns_roce_mtt *hr_mtt,
-			     struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
+			     struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_hem_table *mtt_table;
@@ -103,18 +102,13 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 		mtt_table = &hr_dev->mr_table.mtt_table;
 
 	mtts = hns_roce_table_find(hr_dev, mtt_table,
-				   hr_mtt->first_seg, &dma_handle);
+				   hr_cq->hr_buf.hr_mtt.first_seg,
+				   &dma_handle);
 	if (!mtts) {
 		dev_err(dev, "Failed to find mtt for CQ buf.\n");
 		return -EINVAL;
 	}
 
-	if (vector >= hr_dev->caps.num_comp_vectors) {
-		dev_err(dev, "Invalid vector(0x%x) for CQ alloc.\n", vector);
-		return -EINVAL;
-	}
-	hr_cq->vector = vector;
-
 	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
 	if (ret) {
 		dev_err(dev, "Num of CQ out of range.\n");
@@ -143,8 +137,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 		goto err_xa;
 	}
 
-	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
-			      nent, vector);
+	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
 
 	/* Send mailbox to hw */
 	ret = hns_roce_hw_create_cq(hr_dev, mailbox, hr_cq->cqn);
@@ -210,15 +203,18 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 }
 
 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
-				   struct ib_udata *udata,
-				   struct hns_roce_cq_buf *buf,
-				   struct ib_umem **umem, u64 buf_addr, int cqe)
+				   struct hns_roce_cq *hr_cq,
+				   struct hns_roce_ib_create_cq ucmd,
+				   struct ib_udata *udata)
 {
-	int ret;
+	struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
+	struct ib_umem **umem = &hr_cq->umem;
 	u32 page_shift;
 	u32 npages;
+	int ret;
 
-	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
+	*umem = ib_umem_get(udata, ucmd.buf_addr,
+			    hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
 			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
@@ -257,10 +253,12 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq_buf *buf, u32 nent)
+				    struct hns_roce_cq *hr_cq)
 {
-	int ret;
+	struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
 	u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
+	u32 nent = hr_cq->cq_depth;
+	int ret;
 
 	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
 				 (1 << page_shift) * 2, &buf->hr_buf,
@@ -295,17 +293,16 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 }
 
 static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq_buf *buf, int cqe)
+				    struct hns_roce_cq *hr_cq)
 {
-	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
-			  &buf->hr_buf);
+	hns_roce_buf_free(hr_dev, hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
+			  &hr_cq->hr_buf.hr_buf);
 }
 
 static int create_user_cq(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq,
 			  struct ib_udata *udata,
-			  struct hns_roce_ib_create_cq_resp *resp,
-			  int cq_entries)
+			  struct hns_roce_ib_create_cq_resp *resp)
 {
 	struct hns_roce_ib_create_cq ucmd;
 	struct device *dev = hr_dev->dev;
@@ -319,9 +316,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Get user space address, write it into mtt table */
-	ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
-				      &hr_cq->umem, ucmd.buf_addr,
-				      cq_entries);
+	ret = hns_roce_ib_get_cq_umem(hr_dev, hr_cq, ucmd, udata);
 	if (ret) {
 		dev_err(dev, "Failed to get_cq_umem.\n");
 		return ret;
@@ -349,7 +344,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 }
 
 static int create_kernel_cq(struct hns_roce_dev *hr_dev,
-			    struct hns_roce_cq *hr_cq, int cq_entries)
+			    struct hns_roce_cq *hr_cq)
 {
 	struct device *dev = hr_dev->dev;
 	int ret;
@@ -365,7 +360,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Init mtt table and write buff address to mtt table */
-	ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+	ret = hns_roce_ib_alloc_cq_buf(hr_dev, hr_cq);
 	if (ret) {
 		dev_err(dev, "Failed to alloc_cq_buf.\n");
 		goto err_db;
@@ -403,7 +398,7 @@ static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-	hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+	hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 		hns_roce_free_db(hr_dev, &hr_cq->db);
@@ -414,11 +409,11 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 			  struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
-	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_cq_resp resp = {};
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+	struct device *dev = hr_dev->dev;
 	int vector = attr->comp_vector;
-	int cq_entries = attr->cqe;
+	u32 cq_entries = attr->cqe;
 	int ret;
 
 	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
@@ -427,21 +422,27 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 		return -EINVAL;
 	}
 
-	if (hr_dev->caps.min_cqes)
-		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+	if (vector >= hr_dev->caps.num_comp_vectors) {
+		dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+			vector, hr_dev->caps.num_comp_vectors);
+		return -EINVAL;
+	}
 
-	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
-	hr_cq->ib_cq.cqe = cq_entries - 1;
+	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+	cq_entries = roundup_pow_of_two(cq_entries);
+	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+	hr_cq->cq_depth = cq_entries;
+	hr_cq->vector = vector;
 	spin_lock_init(&hr_cq->lock);
 
 	if (udata) {
-		ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+		ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
 		if (ret) {
 			dev_err(dev, "Create cq failed in user mode!\n");
 			goto err_cq;
 		}
 	} else {
-		ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+		ret = create_kernel_cq(hr_dev, hr_cq);
 		if (ret) {
 			dev_err(dev, "Create cq failed in kernel mode!\n");
 			goto err_cq;
@@ -449,8 +450,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	}
 
 	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
-				hr_cq, vector);
+	ret = hns_roce_cq_alloc(hr_dev,	hr_cq);
 	if (ret) {
 		dev_err(dev, "Alloc CQ failed(%d).\n", ret);
 		goto err_dbmap;
@@ -468,7 +468,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	/* Get created cq handler and carry out event */
 	hr_cq->comp = hns_roce_ib_cq_comp;
 	hr_cq->event = hns_roce_ib_cq_event;
-	hr_cq->cq_depth = cq_entries;
 
 	if (udata) {
 		resp.cqn = hr_cq->cqn;
@@ -515,7 +514,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 					       &hr_cq->db);
 	} else {
 		/* Free the buff of stored cq */
-		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+		hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 			hns_roce_free_db(hr_dev, &hr_cq->db);
 	}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a1b712e..8f628a7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -812,8 +812,8 @@ struct hns_roce_caps {
 	int		max_qp_init_rdma;
 	int		max_qp_dest_rdma;
 	int		num_cqs;
-	int		max_cqes;
-	int		min_cqes;
+	u32		max_cqes;
+	u32		min_cqes;
 	u32		min_wqes;
 	int		reserved_cqs;
 	int		reserved_srqs;
@@ -944,7 +944,7 @@ struct hns_roce_hw {
 	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
 	void (*write_cqc)(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
-			  dma_addr_t dma_handle, int nent, u32 vector);
+			  dma_addr_t dma_handle);
 	int (*set_hem)(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, int obj, int step_idx);
 	int (*clear_hem)(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 89a4c3a..600d9f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1990,7 +1990,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
 
 	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
 	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
-		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+		!!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
 }
 
 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
@@ -2073,8 +2073,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 
 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 				  struct hns_roce_cq *hr_cq, void *mb_buf,
-				  u64 *mtts, dma_addr_t dma_handle, int nent,
-				  u32 vector)
+				  u64 *mtts, dma_addr_t dma_handle)
 {
 	struct hns_roce_cq_context *cq_context = NULL;
 	struct hns_roce_buf_list *tptr_buf;
@@ -2109,9 +2108,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->cqc_byte_12,
 		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
 		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
-		       ilog2((unsigned int)nent));
+		       ilog2(hr_cq->cq_depth));
 	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
-		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
 
 	cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 907c951..77c9d7f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2457,7 +2457,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 
 	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
 	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
-		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+		!!(n & hr_cq->cq_depth)) ? cqe : NULL;
 }
 
 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2550,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 
 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 				  struct hns_roce_cq *hr_cq, void *mb_buf,
-				  u64 *mtts, dma_addr_t dma_handle, int nent,
-				  u32 vector)
+				  u64 *mtts, dma_addr_t dma_handle)
 {
 	struct hns_roce_v2_cq_context *cq_context;
 
@@ -2563,9 +2562,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
 		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
-		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+		       V2_CQC_BYTE_4_SHIFT_S,
+		       ilog2(hr_cq->cq_depth));
 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
-		       V2_CQC_BYTE_4_CEQN_S, vector);
+		       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
 
 	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
 		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
-- 
2.8.1


* [PATCH for-next 2/4] RDMA/hns: Redefine the member of hns_roce_cq struct
From: Weihang Li @ 2019-11-18  2:34 UTC
  To: dledford, jgg; +Cc: linux-rdma, linuxarm

From: Yixian Liu <liuyixian@huawei.com>

There is no need to package buf and mtt into hns_roce_cq_buf; it only
makes the code more complex. Delete this struct and move buf and mtt
directly into hns_roce_cq. Furthermore, add a size member to
hns_roce_buf to avoid repeatedly recalculating the buffer size
wherever it is needed.
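
A minimal sketch of the layout change (hypothetical, simplified types;
the real structs live in hns_roce_device.h):

    #include <stdint.h>

    struct hr_buf {
        uint32_t size;          /* new member: cached byte size */
    };

    struct hr_mtt {
        unsigned long first_seg;
    };

    struct hr_cq {
        struct hr_buf buf;      /* was wrapped in hns_roce_cq_buf */
        struct hr_mtt mtt;      /* now a direct member too */
        uint32_t cq_depth;
    };

    int main(void)
    {
        struct hr_cq cq = { .cq_depth = 64 };
        uint32_t entry_sz = 32;

        /* computed once at create time, reused at free time */
        cq.buf.size = cq.cq_depth * entry_sz;
        return cq.buf.size == 2048 ? 0 : 1;
    }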

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
---
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 73 ++++++++++++-----------------
 drivers/infiniband/hw/hns/hns_roce_device.h |  9 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  9 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  3 +-
 4 files changed, 38 insertions(+), 56 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 87d9b0f..e9c9453 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -101,9 +101,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
 	else
 		mtt_table = &hr_dev->mr_table.mtt_table;
 
-	mtts = hns_roce_table_find(hr_dev, mtt_table,
-				   hr_cq->hr_buf.hr_mtt.first_seg,
+	mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
 				   &dma_handle);
+
 	if (!mtts) {
 		dev_err(dev, "Failed to find mtt for CQ buf.\n");
 		return -EINVAL;
@@ -207,45 +207,36 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 				   struct hns_roce_ib_create_cq ucmd,
 				   struct ib_udata *udata)
 {
-	struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
+	struct hns_roce_buf *buf = &hr_cq->buf;
+	struct hns_roce_mtt *mtt = &hr_cq->mtt;
 	struct ib_umem **umem = &hr_cq->umem;
-	u32 page_shift;
 	u32 npages;
 	int ret;
 
-	*umem = ib_umem_get(udata, ucmd.buf_addr,
-			    hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
+	*umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
 			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
 	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+		mtt->mtt_type = MTT_TYPE_CQE;
 	else
-		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
-	if (hr_dev->caps.cqe_buf_pg_sz) {
-		npages = (ib_umem_page_count(*umem) +
-			(1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
-			(1 << hr_dev->caps.cqe_buf_pg_sz);
-		page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
-		ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
-					&buf->hr_mtt);
-	} else {
-		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-					PAGE_SHIFT, &buf->hr_mtt);
-	}
+		mtt->mtt_type = MTT_TYPE_WQE;
+
+	npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+			      1 << hr_dev->caps.cqe_buf_pg_sz);
+	ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
 	if (ret)
 		goto err_buf;
 
-	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+	ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
 	if (ret)
 		goto err_mtt;
 
 	return 0;
 
 err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, mtt);
 
 err_buf:
 	ib_umem_release(*umem);
@@ -255,39 +246,36 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_cq *hr_cq)
 {
-	struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
-	u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
-	u32 nent = hr_cq->cq_depth;
+	struct hns_roce_buf *buf = &hr_cq->buf;
+	struct hns_roce_mtt *mtt = &hr_cq->mtt;
 	int ret;
 
-	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
-				 (1 << page_shift) * 2, &buf->hr_buf,
-				 page_shift);
+	ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+				 buf, buf->page_shift);
 	if (ret)
 		goto out;
 
 	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+		mtt->mtt_type = MTT_TYPE_CQE;
 	else
-		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+		mtt->mtt_type = MTT_TYPE_WQE;
 
-	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
-				buf->hr_buf.page_shift, &buf->hr_mtt);
+	ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
 	if (ret)
 		goto err_buf;
 
-	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+	ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
 	if (ret)
 		goto err_mtt;
 
 	return 0;
 
 err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, mtt);
 
 err_buf:
-	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
-			  &buf->hr_buf);
+	hns_roce_buf_free(hr_dev, buf->size, buf);
+
 out:
 	return ret;
 }
@@ -295,8 +283,7 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_cq *hr_cq)
 {
-	hns_roce_buf_free(hr_dev, hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
-			  &hr_cq->hr_buf.hr_buf);
+	hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
 }
 
 static int create_user_cq(struct hns_roce_dev *hr_dev,
@@ -337,7 +324,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 	return 0;
 
 err_mtt:
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 	ib_umem_release(hr_cq->umem);
 
 	return ret;
@@ -390,14 +377,14 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
 	    (udata->outlen >= sizeof(*resp)))
 		hns_roce_db_unmap_user(context, &hr_cq->db);
 
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 	ib_umem_release(hr_cq->umem);
 }
 
 static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 	hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
@@ -433,6 +420,8 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
 	hr_cq->cq_depth = cq_entries;
 	hr_cq->vector = vector;
+	hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+	hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
 	spin_lock_init(&hr_cq->lock);
 
 	if (udata) {
@@ -502,7 +491,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	}
 
 	hns_roce_free_cq(hr_dev, hr_cq);
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 
 	ib_umem_release(hr_cq->umem);
 	if (udata) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 8f628a7..608ec59 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -448,6 +448,7 @@ struct hns_roce_buf {
 	struct hns_roce_buf_list	*page_list;
 	int				nbufs;
 	u32				npages;
+	u32				size;
 	int				page_shift;
 };
 
@@ -479,14 +480,10 @@ struct hns_roce_db {
 	int		order;
 };
 
-struct hns_roce_cq_buf {
-	struct hns_roce_buf hr_buf;
-	struct hns_roce_mtt hr_mtt;
-};
-
 struct hns_roce_cq {
 	struct ib_cq			ib_cq;
-	struct hns_roce_cq_buf		hr_buf;
+	struct hns_roce_buf		buf;
+	struct hns_roce_mtt		mtt;
 	struct hns_roce_db		db;
 	u8				db_en;
 	spinlock_t			lock;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 600d9f9..9a00361 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1980,8 +1980,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
-				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
 }
 
 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -3655,7 +3654,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct device *dev = &hr_dev->pdev->dev;
 	u32 cqe_cnt_ori;
 	u32 cqe_cnt_cur;
-	u32 cq_buf_size;
 	int wait_time = 0;
 
 	hns_roce_free_cq(hr_dev, hr_cq);
@@ -3683,13 +3681,12 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		wait_time++;
 	}
 
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 
 	ib_umem_release(hr_cq->umem);
 	if (!udata) {
 		/* Free the buff of stored cq */
-		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
-		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+		hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
 	}
 }
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 77c9d7f..1026ac6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2447,8 +2447,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
-				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
 }
 
 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
-- 
2.8.1


* [PATCH for-next 3/4] RDMA/hns: Rename the functions used inside creating cq
From: Weihang Li @ 2019-11-18  2:34 UTC
  To: dledford, jgg; +Cc: linux-rdma, linuxarm

From: Yixian Liu <liuyixian@huawei.com>

The current function names are not accurate. For example,
hns_roce_free_cq actually frees the cqc, so rename these functions
accordingly. Furthermore, functions used only inside one file can be
named without the hns_roce_ prefix, which makes the exported verbs
symbols more eye-catching.
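
For reference, the renames made by this patch are:

  hns_roce_cq_alloc()        -> hns_roce_alloc_cqc()
  hns_roce_free_cq()         -> hns_roce_free_cqc()
  hns_roce_ib_get_cq_umem()  -> get_cq_umem()
  hns_roce_ib_alloc_cq_buf() -> alloc_cq_buf()
  hns_roce_ib_free_cq_buf()  -> free_cq_buf()
  hns_roce_ib_create_cq()    -> hns_roce_create_cq()
  hns_roce_ib_destroy_cq()   -> hns_roce_destroy_cq()
  HNS_ROCE_CMD_CREATE_CQ     -> HNS_ROCE_CMD_CREATE_CQC
  HNS_ROCE_CMD_DESTROY_CQ    -> HNS_ROCE_CMD_DESTROY_CQC

The thin wrappers hns_roce_hw_create_cq() and hns_roce_hw_destroy_cq()
are not renamed but inlined into their only callers.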

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
---
 drivers/infiniband/hw/hns/hns_roce_cmd.h    |  4 +-
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 66 ++++++++++-------------------
 drivers/infiniband/hw/hns/hns_roce_device.h |  9 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  8 ++--
 drivers/infiniband/hw/hns/hns_roce_main.c   |  4 +-
 5 files changed, 35 insertions(+), 56 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index cd3ed2b..1915bac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -119,8 +119,8 @@ enum {
 	HNS_ROCE_CMD_DESTROY_MPT	= 0xf,
 
 	/* CQ commands */
-	HNS_ROCE_CMD_CREATE_CQ		= 0x16,
-	HNS_ROCE_CMD_DESTROY_CQ		= 0x17,
+	HNS_ROCE_CMD_CREATE_CQC		= 0x16,
+	HNS_ROCE_CMD_DESTROY_CQC	= 0x17,
 
 	/* QP/EE commands */
 	HNS_ROCE_CMD_RST2INIT_QP	= 0x19,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index e9c9453..9174d33 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -73,17 +73,8 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
 	}
 }
 
-static int hns_roce_hw_create_cq(struct hns_roce_dev *dev,
-				 struct hns_roce_cmd_mailbox *mailbox,
-				 unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-				 HNS_ROCE_CMD_CREATE_CQ,
-				 HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
-			     struct hns_roce_cq *hr_cq)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+			      struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_hem_table *mtt_table;
@@ -140,7 +131,8 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
 	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
 
 	/* Send mailbox to hw */
-	ret = hns_roce_hw_create_cq(hr_dev, mailbox, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+			HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	if (ret) {
 		dev_err(dev,
@@ -168,22 +160,15 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_hw_destroy_cq(struct hns_roce_dev *dev,
-				  struct hns_roce_cmd_mailbox *mailbox,
-				  unsigned long cq_num)
-{
-	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_CQ,
-				 HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
 	int ret;
 
-	ret = hns_roce_hw_destroy_cq(hr_dev, NULL, hr_cq->cqn);
+	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+				HNS_ROCE_CMD_DESTROY_CQC,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
 	if (ret)
 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
@@ -202,10 +187,9 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
-				   struct hns_roce_cq *hr_cq,
-				   struct hns_roce_ib_create_cq ucmd,
-				   struct ib_udata *udata)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		       struct hns_roce_ib_create_cq ucmd,
+		       struct ib_udata *udata)
 {
 	struct hns_roce_buf *buf = &hr_cq->buf;
 	struct hns_roce_mtt *mtt = &hr_cq->mtt;
@@ -243,8 +227,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_buf *buf = &hr_cq->buf;
 	struct hns_roce_mtt *mtt = &hr_cq->mtt;
@@ -280,8 +263,7 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
-				    struct hns_roce_cq *hr_cq)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
 }
@@ -303,7 +285,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Get user space address, write it into mtt table */
-	ret = hns_roce_ib_get_cq_umem(hr_dev, hr_cq, ucmd, udata);
+	ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
 	if (ret) {
 		dev_err(dev, "Failed to get_cq_umem.\n");
 		return ret;
@@ -347,7 +329,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
 	}
 
 	/* Init mtt table and write buff address to mtt table */
-	ret = hns_roce_ib_alloc_cq_buf(hr_dev, hr_cq);
+	ret = alloc_cq_buf(hr_dev, hr_cq);
 	if (ret) {
 		dev_err(dev, "Failed to alloc_cq_buf.\n");
 		goto err_db;
@@ -385,15 +367,14 @@ static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-	hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
+	free_cq_buf(hr_dev, hr_cq);
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 		hns_roce_free_db(hr_dev, &hr_cq->db);
 }
 
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
-			  const struct ib_cq_init_attr *attr,
-			  struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+		       struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_ib_create_cq_resp resp = {};
@@ -438,8 +419,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 		}
 	}
 
-	/* Allocate cq index, fill cq_context */
-	ret = hns_roce_cq_alloc(hr_dev,	hr_cq);
+	ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
 	if (ret) {
 		dev_err(dev, "Alloc CQ failed(%d).\n", ret);
 		goto err_dbmap;
@@ -468,7 +448,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	return 0;
 
 err_cqc:
-	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_free_cqc(hr_dev, hr_cq);
 
 err_dbmap:
 	if (udata)
@@ -480,7 +460,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
 	return ret;
 }
 
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -490,7 +470,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 		return;
 	}
 
-	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_free_cqc(hr_dev, hr_cq);
 	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
 
 	ib_umem_release(hr_cq->umem);
@@ -503,7 +483,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 					       &hr_cq->db);
 	} else {
 		/* Free the buff of stored cq */
-		hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
+		free_cq_buf(hr_dev, hr_cq);
 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
 			hns_roce_free_db(hr_dev, &hr_cq->db);
 	}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 608ec59..152701e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1240,12 +1240,11 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 __be32 send_ieth(const struct ib_send_wr *wr);
 int to_hr_qp_type(int qp_type);
 
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
-			  const struct ib_cq_init_attr *attr,
-			  struct ib_udata *udata);
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+		       struct ib_udata *udata);
 
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 
 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 			 struct ib_udata *udata, unsigned long virt,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 9a00361..2a2b211 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -732,7 +732,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	if (!cq)
 		return -ENOMEM;
 
-	ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
+	ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
 	if (ret) {
 		dev_err(dev, "Create cq for reserved loop qp failed!");
 		goto alloc_cq_failed;
@@ -868,7 +868,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	kfree(pd);
 
 alloc_mem_failed:
-	hns_roce_ib_destroy_cq(cq, NULL);
+	hns_roce_destroy_cq(cq, NULL);
 alloc_cq_failed:
 	kfree(cq);
 	return ret;
@@ -897,7 +897,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 				i, ret);
 	}
 
-	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
+	hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
 	kfree(&free_mr->mr_free_cq->ib_cq);
 	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
 	kfree(&free_mr->mr_free_pd->ibpd);
@@ -3656,7 +3656,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	u32 cqe_cnt_cur;
 	int wait_time = 0;
 
-	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_free_cqc(hr_dev, hr_cq);
 
 	/*
 	 * Before freeing cq buffer, we need to ensure that the outstanding CQE
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 066f01c..854ef6e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -419,14 +419,14 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.alloc_pd = hns_roce_alloc_pd,
 	.alloc_ucontext = hns_roce_alloc_ucontext,
 	.create_ah = hns_roce_create_ah,
-	.create_cq = hns_roce_ib_create_cq,
+	.create_cq = hns_roce_create_cq,
 	.create_qp = hns_roce_create_qp,
 	.dealloc_pd = hns_roce_dealloc_pd,
 	.dealloc_ucontext = hns_roce_dealloc_ucontext,
 	.del_gid = hns_roce_del_gid,
 	.dereg_mr = hns_roce_dereg_mr,
 	.destroy_ah = hns_roce_destroy_ah,
-	.destroy_cq = hns_roce_ib_destroy_cq,
+	.destroy_cq = hns_roce_destroy_cq,
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
 	.fill_res_entry = hns_roce_fill_res_entry,
 	.get_dma_mr = hns_roce_get_dma_mr,
-- 
2.8.1


* [PATCH for-next 4/4] RDMA/hns: Delete unnecessary callback functions for cq
From: Weihang Li @ 2019-11-18  2:34 UTC
  To: dledford, jgg; +Cc: linux-rdma, linuxarm

From: Yixian Liu <liuyixian@huawei.com>

Currently, when a cq event occurs, the event processing function first
calls our own callback functions, which in turn call the ib callback
functions. We can call the ib callback functions directly instead.
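
A minimal sketch of the indirection being removed (hypothetical,
simplified types; the real comp_handler is installed by the ib core):

    #include <stdio.h>

    struct ib_cq {
        void (*comp_handler)(struct ib_cq *cq, void *ctx);
        void *cq_context;
    };

    struct hr_cq {
        struct ib_cq ib_cq;
        /* before: void (*comp)(struct hr_cq *cq); -- a private hook
         * that only forwarded to ib_cq.comp_handler; now removed
         */
    };

    static void on_completion(struct ib_cq *cq, void *ctx)
    {
        (void)cq; (void)ctx;
        printf("completion\n");
    }

    static void cq_completion_event(struct hr_cq *hr_cq)
    {
        struct ib_cq *ibcq = &hr_cq->ib_cq;

        /* before: hr_cq->comp(hr_cq); after: call the handler directly */
        if (ibcq->comp_handler)
            ibcq->comp_handler(ibcq, ibcq->cq_context);
    }

    int main(void)
    {
        struct hr_cq cq = { .ib_cq = { .comp_handler = on_completion } };
        cq_completion_event(&cq);
        return 0;
    }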

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
---
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 91 ++++++++++++-----------------
 drivers/infiniband/hw/hns/hns_roce_device.h |  3 -
 2 files changed, 36 insertions(+), 58 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 9174d33..af1d882 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,40 +39,6 @@
 #include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
-	struct ib_cq *ibcq = &hr_cq->ib_cq;
-
-	ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
-				 enum hns_roce_event event_type)
-{
-	struct hns_roce_dev *hr_dev;
-	struct ib_event event;
-	struct ib_cq *ibcq;
-
-	ibcq = &hr_cq->ib_cq;
-	hr_dev = to_hr_dev(ibcq->device);
-
-	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(hr_dev->dev,
-			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
-			event_type, hr_cq->cqn);
-		return;
-	}
-
-	if (ibcq->event_handler) {
-		event.device = ibcq->device;
-		event.event = IB_EVENT_CQ_ERR;
-		event.element.cq = ibcq;
-		ibcq->event_handler(&event, ibcq->cq_context);
-	}
-}
-
 static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
@@ -434,10 +400,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	if (!udata && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;
 
-	/* Get created cq handler and carry out event */
-	hr_cq->comp = hns_roce_ib_cq_comp;
-	hr_cq->event = hns_roce_ib_cq_event;
-
 	if (udata) {
 		resp.cqn = hr_cq->cqn;
 		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -491,38 +453,57 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_cq *ibcq;
 
-	cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
-	if (!cq) {
-		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+			 cqn);
 		return;
 	}
 
-	++cq->arm_sn;
-	cq->comp(cq);
+	++hr_cq->arm_sn;
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->comp_handler)
+		ibcq->comp_handler(ibcq, ibcq->cq_context);
 }
 
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
-	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_event event;
+	struct ib_cq *ibcq;
 
-	cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+		return;
+	}
 
-	if (!cq) {
-		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+			event_type, cqn);
 		return;
 	}
 
-	cq->event(cq, (enum hns_roce_event)event_type);
+	atomic_inc(&hr_cq->refcount);
 
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->event_handler) {
+		event.device = ibcq->device;
+		event.element.cq = ibcq;
+		event.event = IB_EVENT_CQ_ERR;
+		ibcq->event_handler(&event, ibcq->cq_context);
+	}
+
+	if (atomic_dec_and_test(&hr_cq->refcount))
+		complete(&hr_cq->free);
 }
 
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 152701e..5617434 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -488,9 +488,6 @@ struct hns_roce_cq {
 	u8				db_en;
 	spinlock_t			lock;
 	struct ib_umem			*umem;
-	void (*comp)(struct hns_roce_cq *cq);
-	void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
 	u32				cq_depth;
 	u32				cons_index;
 	u32				*set_ci_db;
-- 
2.8.1


* Re: [PATCH for-next 0/4] RDMA/hns: Refactor codes of CQ
From: Jason Gunthorpe @ 2019-11-22 20:48 UTC
  To: Weihang Li; +Cc: dledford, linux-rdma, linuxarm

On Mon, Nov 18, 2019 at 10:34:49AM +0800, Weihang Li wrote:
> This series redefines, renames, and removes some interfaces to improve
> the readability and maintainability of the CQ code in hip08.
> 
> The 4th patch conflicts with "RDMA/hns: Add support for reporting wc
> as software mode"; we will send a rebased version if either of them
> is applied.
> 
> Yixian Liu (4):
>   RDMA/hns: Redefine interfaces used in creating cq
>   RDMA/hns: Redefine the member of hns_roce_cq struct
>   RDMA/hns: Rename the functions used inside creating cq
>   RDMA/hns: Delete unnecessary callback functions for cq

Applied to for-next, thanks

Jason
