All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09
@ 2020-09-03 13:16 Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 1/4] RDMA/hns: Add support for EQE in size of 64 Bytes Weihang Li
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Weihang Li @ 2020-09-03 13:16 UTC (permalink / raw)
  To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm

HIP09 supports larger entries/contexts to improve bus performance and to
exchange more information with the hardware. The capability changes from
HIP08 to HIP09 are as follows:
- CEQE: 4B -> 64B
- AEQE: 16B -> 64B
- CQE: 32B -> 64B
- QPC: 256B -> 512B
- SCCC: 32B -> 64B

Previous discussions can be found at:
v2: https://patchwork.kernel.org/cover/11726269/
v1: https://patchwork.kernel.org/cover/11718143/

Changes since v2:
- Fix comments from Jason about passing cap_flags to the userspace and drop
  #1 from this series.
- Add a new patch to support SCCC in size of 64 Bytes.

Changes since v1:
- Fix comments from Lang Cheng about redundant comments and type of
  reserved fields in structure of eqe.
- Rename some variables.

Wenpeng Liang (3):
  RDMA/hns: Add support for EQE in size of 64 Bytes
  RDMA/hns: Add support for CQE in size of 64 Bytes
  RDMA/hns: Add support for QPC in size of 512 Bytes

Yangyang Li (1):
  RDMA/hns: Add support for SCCC in size of 64 Bytes

 drivers/infiniband/hw/hns/hns_roce_cq.c     |  19 +++-
 drivers/infiniband/hw/hns/hns_roce_device.h |  27 +++--
 drivers/infiniband/hw/hns/hns_roce_hem.c    |   2 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  17 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |   4 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 147 ++++++++++++++++++++++------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  38 +++++--
 drivers/infiniband/hw/hns/hns_roce_main.c   |  10 +-
 drivers/infiniband/hw/hns/hns_roce_qp.c     |   2 +-
 include/uapi/rdma/hns-abi.h                 |   4 +-
 10 files changed, 205 insertions(+), 65 deletions(-)

-- 
2.8.1


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v3 for-next 1/4] RDMA/hns: Add support for EQE in size of 64 Bytes
  2020-09-03 13:16 [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09 Weihang Li
@ 2020-09-03 13:16 ` Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE " Weihang Li
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Weihang Li @ 2020-09-03 13:16 UTC (permalink / raw)
  To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm

From: Wenpeng Liang <liangwenpeng@huawei.com>

The new version of RoCEE supports using CEQE in size of 4B or 64B, and AEQE
in size of 16B or 64B. Bus performance can be improved by using a larger
EQE size.

Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 14 ++++++++----
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  | 10 ++++-----
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 33 ++++++++++++++++++++++-------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  7 ++++--
 4 files changed, 44 insertions(+), 20 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 4f1dd91..cbf3478 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -37,8 +37,8 @@
 
 #define DRV_NAME "hns_roce"
 
-/* hip08 is a pci device */
 #define PCI_REVISION_ID_HIP08			0x21
+#define PCI_REVISION_ID_HIP09			0x30
 
 #define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
 
@@ -76,8 +76,10 @@
 #define HNS_ROCE_CEQ				0
 #define HNS_ROCE_AEQ				1
 
-#define HNS_ROCE_CEQ_ENTRY_SIZE			0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE			0x10
+#define HNS_ROCE_CEQE_SIZE 0x4
+#define HNS_ROCE_AEQE_SIZE 0x10
+
+#define HNS_ROCE_V3_EQE_SIZE 0x40
 
 #define HNS_ROCE_SL_SHIFT			28
 #define HNS_ROCE_TCLASS_SHIFT			20
@@ -679,7 +681,8 @@ enum {
 };
 
 struct hns_roce_ceqe {
-	__le32			comp;
+	__le32	comp;
+	__le32	rsv[15];
 };
 
 struct hns_roce_aeqe {
@@ -716,6 +719,7 @@ struct hns_roce_aeqe {
 			u8	rsv0;
 		} __packed cmd;
 	 } event;
+	__le32 rsv[12];
 };
 
 struct hns_roce_eq {
@@ -810,6 +814,8 @@ struct hns_roce_caps {
 	u32		pbl_hop_num;
 	int		aeqe_depth;
 	int		ceqe_depth;
+	u32		aeqe_size;
+	u32		ceqe_size;
 	enum ib_mtu	max_mtu;
 	u32		qpc_bt_num;
 	u32		qpc_timer_bt_num;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index aeb3a6f..83c07c2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -3775,8 +3775,7 @@ static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
 
 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
 {
-	unsigned long off = (entry & (eq->entries - 1)) *
-			     HNS_ROCE_AEQ_ENTRY_SIZE;
+	unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
 
 	return (struct hns_roce_aeqe *)((u8 *)
 		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
@@ -3881,8 +3880,7 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
 
 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
 {
-	unsigned long off = (entry & (eq->entries - 1)) *
-			     HNS_ROCE_CEQ_ENTRY_SIZE;
+	unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
 
 	return (struct hns_roce_ceqe *)((u8 *)
 			(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
@@ -4253,7 +4251,7 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
 				       CEQ_REG_OFFSET * i;
 			eq->entries = hr_dev->caps.ceqe_depth;
 			eq->log_entries = ilog2(eq->entries);
-			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+			eq->eqe_size = HNS_ROCE_CEQE_SIZE;
 		} else {
 			/* AEQ */
 			eq_table->eqc_base[i] = hr_dev->reg_base +
@@ -4263,7 +4261,7 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
 				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
 			eq->entries = hr_dev->caps.aeqe_depth;
 			eq->log_entries = ilog2(eq->entries);
-			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+			eq->eqe_size = HNS_ROCE_AEQE_SIZE;
 		}
 	}
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 96e08b4..71eee67 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1739,6 +1739,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->gid_table_len[0]	= HNS_ROCE_V2_GID_INDEX_NUM;
 	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
 	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
+	caps->aeqe_size		= HNS_ROCE_AEQE_SIZE;
+	caps->ceqe_size		= HNS_ROCE_CEQE_SIZE;
 	caps->local_ca_ack_delay = 0;
 	caps->max_mtu = IB_MTU_4096;
 
@@ -1764,6 +1766,11 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->sccc_ba_pg_sz	  = 0;
 	caps->sccc_buf_pg_sz	  = 0;
 	caps->sccc_hop_num	  = HNS_ROCE_SCCC_HOP_NUM;
+
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+	}
 }
 
 static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
@@ -1958,6 +1965,8 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
 	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
 	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+	caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+	caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
 	caps->mtt_ba_pg_sz = 0;
 	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
 	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
@@ -1981,6 +1990,11 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
 					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
 
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+	}
+
 	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
 		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
 		   HEM_TYPE_QPC);
@@ -5242,7 +5256,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
 
 	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
 				   (eq->cons_index & (eq->entries - 1)) *
-				   HNS_ROCE_AEQ_ENTRY_SIZE);
+				   eq->eqe_size);
 
 	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
 		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
@@ -5342,7 +5356,8 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
 
 	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
 				   (eq->cons_index & (eq->entries - 1)) *
-				   HNS_ROCE_CEQ_ENTRY_SIZE);
+				   eq->eqe_size);
+
 	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
 		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
 }
@@ -5618,14 +5633,16 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
 	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
 		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
 
-	/* set nex_eqe_ba[43:12] */
-	roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+	roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
 		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
 
-	/* set nex_eqe_ba[63:44] */
-	roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+	roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
 		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
 
+	roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
+		       HNS_ROCE_EQC_EQE_SIZE_S,
+		       eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
+
 	return 0;
 }
 
@@ -5816,7 +5833,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
 			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
 			eq->type_flag = HNS_ROCE_CEQ;
 			eq->entries = hr_dev->caps.ceqe_depth;
-			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+			eq->eqe_size = hr_dev->caps.ceqe_size;
 			eq->irq = hr_dev->irq[i + other_num + aeq_num];
 			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
 			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
@@ -5825,7 +5842,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
 			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
 			eq->type_flag = HNS_ROCE_AEQ;
 			eq->entries = hr_dev->caps.aeqe_depth;
-			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+			eq->eqe_size = hr_dev->caps.aeqe_size;
 			eq->irq = hr_dev->irq[i - comp_num + other_num];
 			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
 			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index ac29be4..f98c55a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1777,8 +1777,8 @@ struct hns_roce_eq_context {
 	__le32	byte_28;
 	__le32	byte_32;
 	__le32	byte_36;
-	__le32	nxt_eqe_ba0;
-	__le32	nxt_eqe_ba1;
+	__le32	byte_40;
+	__le32	byte_44;
 	__le32	rsv[5];
 };
 
@@ -1920,6 +1920,9 @@ struct hns_roce_eq_context {
 #define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
 #define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
 
+#define HNS_ROCE_EQC_EQE_SIZE_S 20
+#define HNS_ROCE_EQC_EQE_SIZE_M GENMASK(21, 20)
+
 #define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
 #define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
 
-- 
2.8.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE in size of 64 Bytes
  2020-09-03 13:16 [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09 Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 1/4] RDMA/hns: Add support for EQE in size of 64 Bytes Weihang Li
@ 2020-09-03 13:16 ` Weihang Li
  2020-09-15 20:08   ` Jason Gunthorpe
  2020-09-03 13:16 ` [PATCH v3 for-next 3/4] RDMA/hns: Add support for QPC in size of 512 Bytes Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 4/4] RDMA/hns: Add support for SCCC in size of 64 Bytes Weihang Li
  3 siblings, 1 reply; 7+ messages in thread
From: Weihang Li @ 2020-09-03 13:16 UTC (permalink / raw)
  To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm

From: Wenpeng Liang <liangwenpeng@huawei.com>

The new version of RoCEE supports using CQE in size of 32B or 64B. Bus
performance can be improved by using a larger CQE size.

Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_cq.c     | 19 ++++++++++++++++++-
 drivers/infiniband/hw/hns/hns_roce_device.h |  6 +++++-
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  5 ++---
 drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |  2 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 20 +++++++++++++-------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  7 +++++--
 drivers/infiniband/hw/hns/hns_roce_main.c   |  2 ++
 include/uapi/rdma/hns-abi.h                 |  4 +++-
 8 files changed, 49 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index e87d616..9a2f745 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -150,7 +150,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 	int err;
 
 	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
-	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
 	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
 	buf_attr.region_count = 1;
 	buf_attr.fixed_page = true;
@@ -224,6 +224,21 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 	}
 }
 
+static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+			 struct hns_roce_ib_create_cq *ucmd)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+	if (udata) {
+		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
+			hr_cq->cqe_size = ucmd->cqe_size;
+		else
+			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
+	} else {
+		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+	}
+}
+
 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		       struct ib_udata *udata)
 {
@@ -266,6 +281,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		}
 	}
 
+	set_cqe_size(hr_cq, udata, &ucmd);
+
 	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
 	if (ret) {
 		ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cbf3478..2e4f6b1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -81,6 +81,9 @@
 
 #define HNS_ROCE_V3_EQE_SIZE 0x40
 
+#define HNS_ROCE_V2_CQE_SIZE 32
+#define HNS_ROCE_V3_CQE_SIZE 64
+
 #define HNS_ROCE_SL_SHIFT			28
 #define HNS_ROCE_TCLASS_SHIFT			20
 #define HNS_ROCE_FLOW_LABEL_MASK		0xfffff
@@ -469,6 +472,7 @@ struct hns_roce_cq {
 	void __iomem			*cq_db_l;
 	u16				*tptr_addr;
 	int				arm_sn;
+	int				cqe_size;
 	unsigned long			cqn;
 	u32				vector;
 	atomic_t			refcount;
@@ -796,7 +800,7 @@ struct hns_roce_caps {
 	int		num_pds;
 	int		reserved_pds;
 	u32		mtt_entry_sz;
-	u32		cq_entry_sz;
+	u32		cqe_sz;
 	u32		page_size_cap;
 	u32		reserved_lkey;
 	int		mtpt_entry_sz;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 83c07c2..f2fcea0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1476,7 +1476,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	caps->cqc_entry_sz	= HNS_ROCE_V1_CQC_ENTRY_SIZE;
 	caps->mtpt_entry_sz	= HNS_ROCE_V1_MTPT_ENTRY_SIZE;
 	caps->mtt_entry_sz	= HNS_ROCE_V1_MTT_ENTRY_SIZE;
-	caps->cq_entry_sz	= HNS_ROCE_V1_CQE_ENTRY_SIZE;
+	caps->cqe_sz		= HNS_ROCE_V1_CQE_SIZE;
 	caps->page_size_cap	= HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
 	caps->reserved_lkey	= 0;
 	caps->reserved_pds	= 0;
@@ -1897,8 +1897,7 @@ static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
 
 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(hr_cq->mtr.kmem,
-				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
 }
 
 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 52307b2..5996892 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -74,7 +74,7 @@
 #define HNS_ROCE_V1_MTPT_ENTRY_SIZE			64
 #define HNS_ROCE_V1_MTT_ENTRY_SIZE			64
 
-#define HNS_ROCE_V1_CQE_ENTRY_SIZE			32
+#define HNS_ROCE_V1_CQE_SIZE				32
 #define HNS_ROCE_V1_PAGE_SIZE_SUPPORT			0xFFFFF000
 
 #define HNS_ROCE_V1_TABLE_CHUNK_SIZE			(1 << 17)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 71eee67..8f7e85d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1690,7 +1690,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
 	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
 	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
-	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
+	caps->cqe_sz		= HNS_ROCE_V2_CQE_SIZE;
 	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
 	caps->reserved_lkey	= 0;
 	caps->reserved_pds	= 0;
@@ -1770,6 +1770,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
 		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
 	}
 }
 
@@ -1862,7 +1863,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
 	caps->max_rq_desc_sz	     = resp_a->max_rq_desc_sz;
 	caps->max_srq_desc_sz	     = resp_a->max_srq_desc_sz;
-	caps->cq_entry_sz	     = resp_a->cq_entry_sz;
+	caps->cqe_sz		     = HNS_ROCE_V2_CQE_SIZE;
 
 	caps->mtpt_entry_sz	     = resp_b->mtpt_entry_sz;
 	caps->irrl_entry_sz	     = resp_b->irrl_entry_sz;
@@ -1993,6 +1994,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
 		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
 	}
 
 	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
@@ -2771,8 +2773,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(hr_cq->mtr.kmem,
-				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
 }
 
 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2872,6 +2873,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
 		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
 
+	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
+		       V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
+		       HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
+
 	cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
 
 	roce_set_field(cq_context->byte_16_hop_addr,
@@ -3039,7 +3044,8 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
 }
 
 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
-			   struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+			   struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
+			   struct ib_wc *wc)
 {
 	static const struct {
 		u32 cqe_status;
@@ -3080,7 +3086,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 
 	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
-		       sizeof(*cqe), false);
+		       cq->cqe_size, false);
 
 	/*
 	 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
@@ -3177,7 +3183,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
 		++wq->tail;
 	}
 
-	get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+	get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
 	if (unlikely(wc->status != IB_WC_SUCCESS))
 		return 0;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f98c55a..ca6b055 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -86,7 +86,6 @@
 #define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
 #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
 #define HNS_ROCE_V2_IDX_ENTRY_SZ		4
-#define HNS_ROCE_V2_CQE_ENTRY_SIZE		32
 #define HNS_ROCE_V2_SCCC_ENTRY_SZ		32
 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ		PAGE_SIZE
 #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
@@ -309,6 +308,9 @@ struct hns_roce_v2_cq_context {
 #define	V2_CQC_BYTE_8_CQN_S 0
 #define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
 
+#define V2_CQC_BYTE_8_CQE_SIZE_S 27
+#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27)
+
 #define	V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
 #define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
 
@@ -896,6 +898,7 @@ struct hns_roce_v2_cqe {
 	u8	smac[4];
 	__le32	byte_28;
 	__le32	byte_32;
+	__le32	rsv[8];
 };
 
 #define	V2_CQE_BYTE_4_OPCODE_S 0
@@ -1571,7 +1574,7 @@ struct hns_roce_query_pf_caps_a {
 	u8 max_sq_desc_sz;
 	u8 max_rq_desc_sz;
 	u8 max_srq_desc_sz;
-	u8 cq_entry_sz;
+	u8 cqe_sz;
 };
 
 struct hns_roce_query_pf_caps_b {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 5907cfd..73bdec7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -323,6 +323,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 		mutex_init(&context->page_mutex);
 	}
 
+	resp.cqe_size = hr_dev->caps.cqe_sz;
+
 	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
 	if (ret)
 		goto error_fail_copy_to_udata;
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index eb76b38..9ec85f7 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -39,6 +39,8 @@
 struct hns_roce_ib_create_cq {
 	__aligned_u64 buf_addr;
 	__aligned_u64 db_addr;
+	__u32 cqe_size;
+	__u32 reserved;
 };
 
 struct hns_roce_ib_create_cq_resp {
@@ -73,7 +75,7 @@ struct hns_roce_ib_create_qp_resp {
 
 struct hns_roce_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
-	__u32	reserved;
+	__u32	cqe_size;
 };
 
 struct hns_roce_ib_alloc_pd_resp {
-- 
2.8.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 for-next 3/4] RDMA/hns: Add support for QPC in size of 512 Bytes
  2020-09-03 13:16 [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09 Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 1/4] RDMA/hns: Add support for EQE in size of 64 Bytes Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE " Weihang Li
@ 2020-09-03 13:16 ` Weihang Li
  2020-09-03 13:16 ` [PATCH v3 for-next 4/4] RDMA/hns: Add support for SCCC in size of 64 Bytes Weihang Li
  3 siblings, 0 replies; 7+ messages in thread
From: Weihang Li @ 2020-09-03 13:16 UTC (permalink / raw)
  To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm

From: Wenpeng Liang <liangwenpeng@huawei.com>

The new version of RoCEE supports using QPC in size of 256B or 512B, so
that HIP09 can support new congestion control algorithms by using QPC in
a larger size.

Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  5 ++-
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  2 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |  2 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 66 +++++++++++++++++++++++------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  | 16 ++++++-
 drivers/infiniband/hw/hns/hns_roce_main.c   |  2 +-
 6 files changed, 75 insertions(+), 18 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2e4f6b1..ddf54d2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -84,6 +84,9 @@
 #define HNS_ROCE_V2_CQE_SIZE 32
 #define HNS_ROCE_V3_CQE_SIZE 64
 
+#define HNS_ROCE_V2_QPC_SZ 256
+#define HNS_ROCE_V3_QPC_SZ 512
+
 #define HNS_ROCE_SL_SHIFT			28
 #define HNS_ROCE_TCLASS_SHIFT			20
 #define HNS_ROCE_FLOW_LABEL_MASK		0xfffff
@@ -804,7 +807,7 @@ struct hns_roce_caps {
 	u32		page_size_cap;
 	u32		reserved_lkey;
 	int		mtpt_entry_sz;
-	int		qpc_entry_sz;
+	int		qpc_sz;
 	int		irrl_entry_sz;
 	int		trrl_entry_sz;
 	int		cqc_entry_sz;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index f2fcea0..a3f5346 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1471,7 +1471,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	caps->max_qp_dest_rdma	= HNS_ROCE_V1_MAX_QP_DEST_RDMA;
 	caps->max_sq_desc_sz	= HNS_ROCE_V1_MAX_SQ_DESC_SZ;
 	caps->max_rq_desc_sz	= HNS_ROCE_V1_MAX_RQ_DESC_SZ;
-	caps->qpc_entry_sz	= HNS_ROCE_V1_QPC_ENTRY_SIZE;
+	caps->qpc_sz		= HNS_ROCE_V1_QPC_SIZE;
 	caps->irrl_entry_sz	= HNS_ROCE_V1_IRRL_ENTRY_SIZE;
 	caps->cqc_entry_sz	= HNS_ROCE_V1_CQC_ENTRY_SIZE;
 	caps->mtpt_entry_sz	= HNS_ROCE_V1_MTPT_ENTRY_SIZE;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 5996892..ffd0156 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -68,7 +68,7 @@
 #define HNS_ROCE_V1_COMP_EQE_NUM			0x8000
 #define HNS_ROCE_V1_ASYNC_EQE_NUM			0x400
 
-#define HNS_ROCE_V1_QPC_ENTRY_SIZE			256
+#define HNS_ROCE_V1_QPC_SIZE				256
 #define HNS_ROCE_V1_IRRL_ENTRY_SIZE			8
 #define HNS_ROCE_V1_CQC_ENTRY_SIZE			64
 #define HNS_ROCE_V1_MTPT_ENTRY_SIZE			64
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8f7e85d..2e2cfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1682,7 +1682,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
 	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
 	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
-	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
+	caps->qpc_sz		= HNS_ROCE_V2_QPC_SZ;
 	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
 	caps->trrl_entry_sz	= HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
 	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -1771,6 +1771,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
 	}
 }
 
@@ -1873,7 +1874,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->idx_entry_sz	     = resp_b->idx_entry_sz;
 	caps->sccc_entry_sz	     = resp_b->scc_ctx_entry_sz;
 	caps->max_mtu		     = resp_b->max_mtu;
-	caps->qpc_entry_sz	     = le16_to_cpu(resp_b->qpc_entry_sz);
+	caps->qpc_sz		     = HNS_ROCE_V2_QPC_SZ;
 	caps->min_cqes		     = resp_b->min_cqes;
 	caps->min_wqes		     = resp_b->min_wqes;
 	caps->page_size_cap	     = le32_to_cpu(resp_b->page_size_cap);
@@ -1995,9 +1996,10 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
 	}
 
-	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
+	calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
 		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
 		   HEM_TYPE_QPC);
 	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
@@ -2034,6 +2036,35 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	return 0;
 }
 
+static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cmq_desc desc;
+	struct hns_roce_cfg_entry_size *cfg_size =
+				  (struct hns_roce_cfg_entry_size *)desc.data;
+
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
+				      false);
+
+	cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
+	cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
+
+	return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
+{
+	int ret;
+
+	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+		return 0;
+
+	ret = hns_roce_config_qpc_size(hr_dev);
+	if (ret)
+		dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
+
+	return ret;
+}
+
 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_caps *caps = &hr_dev->caps;
@@ -2106,9 +2137,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	}
 
 	ret = hns_roce_v2_set_bt(hr_dev);
-	if (ret)
-		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
-			ret);
+	if (ret) {
+		dev_err(hr_dev->dev,
+			"Configure bt attribute fail, ret = %d.\n", ret);
+		return ret;
+	}
+
+	/* Configure the size of QPC, SCCC, etc. */
+	ret = hns_roce_config_entry_size(hr_dev);
 
 	return ret;
 }
@@ -3534,16 +3570,21 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
 				 struct hns_roce_v2_qp_context *context,
+				 struct hns_roce_v2_qp_context *qpc_mask,
 				 struct hns_roce_qp *hr_qp)
 {
 	struct hns_roce_cmd_mailbox *mailbox;
+	int qpc_size;
 	int ret;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memcpy(mailbox->buf, context, sizeof(*context) * 2);
+	/* The qpc size of HIP08 is only 256B, which is half of HIP09 */
+	qpc_size = hr_dev->caps.qpc_sz;
+	memcpy(mailbox->buf, context, qpc_size);
+	memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
 
 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
 				HNS_ROCE_CMD_MODIFY_QPC,
@@ -4338,7 +4379,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
 	}
 
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
-		memset(qpc_mask, 0, sizeof(*qpc_mask));
+		memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
 		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
 					qpc_mask);
 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
@@ -4561,8 +4602,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 	 * we should set all bits of the relevant fields in context mask to
 	 * 0 at the same time, else set them to 0x1.
 	 */
-	memset(context, 0, sizeof(*context));
-	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+	memset(context, 0, hr_dev->caps.qpc_sz);
+	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
+
 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
 					 new_state, context, qpc_mask);
 	if (ret)
@@ -4612,7 +4654,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		       V2_QPC_BYTE_60_QP_ST_S, 0);
 
 	/* SW pass context to HW */
-	ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
+	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
 	if (ret) {
 		ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
 		goto out;
@@ -4675,7 +4717,7 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
 	if (ret)
 		goto out;
 
-	memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+	memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
 
 out:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index ca6b055..32c5ddc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -77,7 +77,6 @@
 #define HNS_ROCE_V2_MAX_SQ_DESC_SZ		64
 #define HNS_ROCE_V2_MAX_RQ_DESC_SZ		16
 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ		64
-#define HNS_ROCE_V2_QPC_ENTRY_SZ		256
 #define HNS_ROCE_V2_IRRL_ENTRY_SZ		64
 #define HNS_ROCE_V2_TRRL_ENTRY_SZ		48
 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ	100
@@ -228,6 +227,7 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_CFG_TMOUT_LLM			= 0x8404,
 	HNS_ROCE_OPC_QUERY_PF_TIMER_RES			= 0x8406,
 	HNS_ROCE_OPC_QUERY_PF_CAPS_NUM                  = 0x8408,
+	HNS_ROCE_OPC_CFG_ENTRY_SIZE			= 0x8409,
 	HNS_ROCE_OPC_CFG_SGID_TB			= 0x8500,
 	HNS_ROCE_OPC_CFG_SMAC_TB			= 0x8501,
 	HNS_ROCE_OPC_POST_MB				= 0x8504,
@@ -514,6 +514,7 @@ struct hns_roce_v2_qp_context {
 	__le32	byte_248_ack_psn;
 	__le32	byte_252_err_txcqn;
 	__le32	byte_256_sqflush_rqcqe;
+	__le32	ext[64];
 };
 
 #define	V2_QPC_BYTE_4_TST_S 0
@@ -1540,6 +1541,17 @@ struct hns_roce_cfg_sgid_tb {
 	__le32	vf_sgid_h;
 	__le32	vf_sgid_type_rsv;
 };
+
+enum {
+	HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+};
+
+struct hns_roce_cfg_entry_size {
+	__le32	type;
+	__le32	rsv[4];
+	__le32	size;
+};
+
 #define CFG_SGID_TB_TABLE_IDX_S 0
 #define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
 
@@ -1586,7 +1598,7 @@ struct hns_roce_query_pf_caps_b {
 	u8 idx_entry_sz;
 	u8 scc_ctx_entry_sz;
 	u8 max_mtu;
-	__le16 qpc_entry_sz;
+	__le16 qpc_sz;
 	__le16 qpc_timer_entry_sz;
 	__le16 cqc_timer_entry_sz;
 	u8 min_cqes;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 73bdec7..a0551c9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -589,7 +589,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 	}
 
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
-				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
 				      hr_dev->caps.num_qps, 1);
 	if (ret) {
 		dev_err(dev, "Failed to init QP context memory, aborting.\n");
-- 
2.8.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v3 for-next 4/4] RDMA/hns: Add support for SCCC in size of 64 Bytes
  2020-09-03 13:16 [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09 Weihang Li
                   ` (2 preceding siblings ...)
  2020-09-03 13:16 ` [PATCH v3 for-next 3/4] RDMA/hns: Add support for QPC in size of 512 Bytes Weihang Li
@ 2020-09-03 13:16 ` Weihang Li
  3 siblings, 0 replies; 7+ messages in thread
From: Weihang Li @ 2020-09-03 13:16 UTC (permalink / raw)
  To: dledford, jgg; +Cc: leon, linux-rdma, linuxarm

From: Yangyang Li <liyangyang20@huawei.com>

For HIP09, the size of the SCCC (Soft Congestion Control Context) is
increased from 32 Bytes to 64 Bytes. The hardware will get the configuration
of the SCCC from the driver instead of using a fixed value.

Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  2 +-
 drivers/infiniband/hw/hns/hns_roce_hem.c    |  2 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 30 +++++++++++++++++++++++++----
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  8 ++++++--
 drivers/infiniband/hw/hns/hns_roce_main.c   |  6 +++---
 drivers/infiniband/hw/hns/hns_roce_qp.c     |  2 +-
 6 files changed, 38 insertions(+), 12 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ddf54d2..7bee2de 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -811,7 +811,7 @@ struct hns_roce_caps {
 	int		irrl_entry_sz;
 	int		trrl_entry_sz;
 	int		cqc_entry_sz;
-	int		sccc_entry_sz;
+	int		sccc_sz;
 	int		qpc_timer_entry_sz;
 	int		cqc_timer_entry_sz;
 	int		srqc_entry_sz;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c8db6f8..c10966f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1027,7 +1027,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 	if (hr_dev->caps.cqc_timer_entry_sz)
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->cqc_timer_table);
-	if (hr_dev->caps.sccc_entry_sz)
+	if (hr_dev->caps.sccc_sz)
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->qp_table.sccc_table);
 	if (hr_dev->caps.trrl_entry_sz)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 2e2cfb4..82bf495 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1762,7 +1762,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->cqc_timer_buf_pg_sz = 0;
 	caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
 
-	caps->sccc_entry_sz	  = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+	caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
 	caps->sccc_ba_pg_sz	  = 0;
 	caps->sccc_buf_pg_sz	  = 0;
 	caps->sccc_hop_num	  = HNS_ROCE_SCCC_HOP_NUM;
@@ -1872,7 +1872,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->cqc_entry_sz	     = resp_b->cqc_entry_sz;
 	caps->srqc_entry_sz	     = resp_b->srqc_entry_sz;
 	caps->idx_entry_sz	     = resp_b->idx_entry_sz;
-	caps->sccc_entry_sz	     = resp_b->scc_ctx_entry_sz;
+	caps->sccc_sz		     = resp_b->sccc_sz;
 	caps->max_mtu		     = resp_b->max_mtu;
 	caps->qpc_sz		     = HNS_ROCE_V2_QPC_SZ;
 	caps->min_cqes		     = resp_b->min_cqes;
@@ -1997,6 +1997,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
 		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
 		caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+		caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
 	}
 
 	calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
@@ -2016,7 +2017,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
 	caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
 	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
 
-	calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
+	calc_pg_sz(caps->num_qps, caps->sccc_sz,
 		   caps->sccc_hop_num, caps->sccc_bt_num,
 		   &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
 		   HEM_TYPE_SCCC);
@@ -2051,6 +2052,21 @@ static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
 	return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
+static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_cmq_desc desc;
+	struct hns_roce_cfg_entry_size *cfg_size =
+				  (struct hns_roce_cfg_entry_size *)desc.data;
+
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
+				      false);
+
+	cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
+	cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
+
+	return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -2059,8 +2075,14 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
 		return 0;
 
 	ret = hns_roce_config_qpc_size(hr_dev);
-	if (ret)
+	if (ret) {
 		dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = hns_roce_config_sccc_size(hr_dev);
+	if (ret)
+		dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
 
 	return ret;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 32c5ddc..a964d04 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -85,7 +85,10 @@
 #define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
 #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
 #define HNS_ROCE_V2_IDX_ENTRY_SZ		4
-#define HNS_ROCE_V2_SCCC_ENTRY_SZ		32
+
+#define HNS_ROCE_V2_SCCC_SZ			32
+#define HNS_ROCE_V3_SCCC_SZ			64
+
 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ		PAGE_SIZE
 #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
 #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED		0xFFFFF000
@@ -1544,6 +1547,7 @@ struct hns_roce_cfg_sgid_tb {
 
 enum {
 	HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+	HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
 };
 
 struct hns_roce_cfg_entry_size {
@@ -1596,7 +1600,7 @@ struct hns_roce_query_pf_caps_b {
 	u8 cqc_entry_sz;
 	u8 srqc_entry_sz;
 	u8 idx_entry_sz;
-	u8 scc_ctx_entry_sz;
+	u8 sccc_sz;
 	u8 max_mtu;
 	__le16 qpc_sz;
 	__le16 qpc_timer_entry_sz;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index a0551c9..ad0ce11c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -640,11 +640,11 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 		}
 	}
 
-	if (hr_dev->caps.sccc_entry_sz) {
+	if (hr_dev->caps.sccc_sz) {
 		ret = hns_roce_init_hem_table(hr_dev,
 					      &hr_dev->qp_table.sccc_table,
 					      HEM_TYPE_SCCC,
-					      hr_dev->caps.sccc_entry_sz,
+					      hr_dev->caps.sccc_sz,
 					      hr_dev->caps.num_qps, 1);
 		if (ret) {
 			dev_err(dev,
@@ -684,7 +684,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
 
 err_unmap_ctx:
-	if (hr_dev->caps.sccc_entry_sz)
+	if (hr_dev->caps.sccc_sz)
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->qp_table.sccc_table);
 err_unmap_srq:
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 975281f..a343930 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -288,7 +288,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		}
 	}
 
-	if (hr_dev->caps.sccc_entry_sz) {
+	if (hr_dev->caps.sccc_sz) {
 		/* Alloc memory for SCC CTX */
 		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
 					 hr_qp->qpn);
-- 
2.8.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE in size of 64 Bytes
  2020-09-03 13:16 ` [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE " Weihang Li
@ 2020-09-15 20:08   ` Jason Gunthorpe
  2020-09-16  3:02     ` liweihang
  0 siblings, 1 reply; 7+ messages in thread
From: Jason Gunthorpe @ 2020-09-15 20:08 UTC (permalink / raw)
  To: Weihang Li; +Cc: dledford, leon, linux-rdma, linuxarm

On Thu, Sep 03, 2020 at 09:16:05PM +0800, Weihang Li wrote:
> From: Wenpeng Liang <liangwenpeng@huawei.com>
> 
> The new version of RoCEE supports using CQE in size of 32B or 64B. The
> performance of bus can be improved by using larger size of CQE.
> 
> Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
> Signed-off-by: Weihang Li <liweihang@huawei.com>
>  drivers/infiniband/hw/hns/hns_roce_cq.c     | 19 ++++++++++++++++++-
>  drivers/infiniband/hw/hns/hns_roce_device.h |  6 +++++-
>  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  5 ++---
>  drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |  2 +-
>  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 20 +++++++++++++-------
>  drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  7 +++++--
>  drivers/infiniband/hw/hns/hns_roce_main.c   |  2 ++
>  include/uapi/rdma/hns-abi.h                 |  4 +++-
>  8 files changed, 49 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
> index e87d616..9a2f745 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
> @@ -150,7 +150,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
>  	int err;
>  
>  	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
> -	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
> +	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
>  	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
>  	buf_attr.region_count = 1;
>  	buf_attr.fixed_page = true;
> @@ -224,6 +224,21 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
>  	}
>  }
>  
> +static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
> +			 struct hns_roce_ib_create_cq *ucmd)
> +{
> +	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
> +
> +	if (udata) {
> +		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
> +			hr_cq->cqe_size = ucmd->cqe_size;
> +		else
> +			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
> +	} else {
> +		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
> +	}
> +}
> +
>  int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
>  		       struct ib_udata *udata)
>  {
> @@ -266,6 +281,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
>  		}
>  	}
>  
> +	set_cqe_size(hr_cq, udata, &ucmd);
> +
>  	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
>  	if (ret) {
>  		ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
> index cbf3478..2e4f6b1 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
> @@ -81,6 +81,9 @@
>  
>  #define HNS_ROCE_V3_EQE_SIZE 0x40
>  
> +#define HNS_ROCE_V2_CQE_SIZE 32
> +#define HNS_ROCE_V3_CQE_SIZE 64
> +
>  #define HNS_ROCE_SL_SHIFT			28
>  #define HNS_ROCE_TCLASS_SHIFT			20
>  #define HNS_ROCE_FLOW_LABEL_MASK		0xfffff
> @@ -469,6 +472,7 @@ struct hns_roce_cq {
>  	void __iomem			*cq_db_l;
>  	u16				*tptr_addr;
>  	int				arm_sn;
> +	int				cqe_size;
>  	unsigned long			cqn;
>  	u32				vector;
>  	atomic_t			refcount;
> @@ -796,7 +800,7 @@ struct hns_roce_caps {
>  	int		num_pds;
>  	int		reserved_pds;
>  	u32		mtt_entry_sz;
> -	u32		cq_entry_sz;
> +	u32		cqe_sz;
>  	u32		page_size_cap;
>  	u32		reserved_lkey;
>  	int		mtpt_entry_sz;
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
> index 83c07c2..f2fcea0 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
> @@ -1476,7 +1476,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
>  	caps->cqc_entry_sz	= HNS_ROCE_V1_CQC_ENTRY_SIZE;
>  	caps->mtpt_entry_sz	= HNS_ROCE_V1_MTPT_ENTRY_SIZE;
>  	caps->mtt_entry_sz	= HNS_ROCE_V1_MTT_ENTRY_SIZE;
> -	caps->cq_entry_sz	= HNS_ROCE_V1_CQE_ENTRY_SIZE;
> +	caps->cqe_sz		= HNS_ROCE_V1_CQE_SIZE;
>  	caps->page_size_cap	= HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
>  	caps->reserved_lkey	= 0;
>  	caps->reserved_pds	= 0;
> @@ -1897,8 +1897,7 @@ static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
>  
>  static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
>  {
> -	return hns_roce_buf_offset(hr_cq->mtr.kmem,
> -				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
> +	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
>  }
>  
>  static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
> index 52307b2..5996892 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
> @@ -74,7 +74,7 @@
>  #define HNS_ROCE_V1_MTPT_ENTRY_SIZE			64
>  #define HNS_ROCE_V1_MTT_ENTRY_SIZE			64
>  
> -#define HNS_ROCE_V1_CQE_ENTRY_SIZE			32
> +#define HNS_ROCE_V1_CQE_SIZE				32
>  #define HNS_ROCE_V1_PAGE_SIZE_SUPPORT			0xFFFFF000
>  
>  #define HNS_ROCE_V1_TABLE_CHUNK_SIZE			(1 << 17)
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 71eee67..8f7e85d 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -1690,7 +1690,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
>  	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
>  	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
>  	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
> -	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
> +	caps->cqe_sz		= HNS_ROCE_V2_CQE_SIZE;
>  	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
>  	caps->reserved_lkey	= 0;
>  	caps->reserved_pds	= 0;
> @@ -1770,6 +1770,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
>  	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
>  		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
>  		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
> +		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
>  	}
>  }
>  
> @@ -1862,7 +1863,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
>  	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
>  	caps->max_rq_desc_sz	     = resp_a->max_rq_desc_sz;
>  	caps->max_srq_desc_sz	     = resp_a->max_srq_desc_sz;
> -	caps->cq_entry_sz	     = resp_a->cq_entry_sz;
> +	caps->cqe_sz		     = HNS_ROCE_V2_CQE_SIZE;
>  
>  	caps->mtpt_entry_sz	     = resp_b->mtpt_entry_sz;
>  	caps->irrl_entry_sz	     = resp_b->irrl_entry_sz;
> @@ -1993,6 +1994,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
>  	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
>  		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
>  		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
> +		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
>  	}
>  
>  	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
> @@ -2771,8 +2773,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
>  
>  static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
>  {
> -	return hns_roce_buf_offset(hr_cq->mtr.kmem,
> -				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
> +	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
>  }
>  
>  static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
> @@ -2872,6 +2873,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
>  	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
>  		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
>  
> +	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
> +		       V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
> +		       HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
> +
>  	cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
>  
>  	roce_set_field(cq_context->byte_16_hop_addr,
> @@ -3039,7 +3044,8 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
>  }
>  
>  static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
> -			   struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
> +			   struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
> +			   struct ib_wc *wc)
>  {
>  	static const struct {
>  		u32 cqe_status;
> @@ -3080,7 +3086,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
>  
>  	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
>  	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
> -		       sizeof(*cqe), false);
> +		       cq->cqe_size, false);
>  
>  	/*
>  	 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
> @@ -3177,7 +3183,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
>  		++wq->tail;
>  	}
>  
> -	get_cqe_status(hr_dev, *cur_qp, cqe, wc);
> +	get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
>  	if (unlikely(wc->status != IB_WC_SUCCESS))
>  		return 0;
>  
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
> index f98c55a..ca6b055 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
> @@ -86,7 +86,6 @@
>  #define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
>  #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
>  #define HNS_ROCE_V2_IDX_ENTRY_SZ		4
> -#define HNS_ROCE_V2_CQE_ENTRY_SIZE		32
>  #define HNS_ROCE_V2_SCCC_ENTRY_SZ		32
>  #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ		PAGE_SIZE
>  #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
> @@ -309,6 +308,9 @@ struct hns_roce_v2_cq_context {
>  #define	V2_CQC_BYTE_8_CQN_S 0
>  #define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
>  
> +#define V2_CQC_BYTE_8_CQE_SIZE_S 27
> +#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27)
> +
>  #define	V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
>  #define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
>  
> @@ -896,6 +898,7 @@ struct hns_roce_v2_cqe {
>  	u8	smac[4];
>  	__le32	byte_28;
>  	__le32	byte_32;
> +	__le32	rsv[8];
>  };
>  
>  #define	V2_CQE_BYTE_4_OPCODE_S 0
> @@ -1571,7 +1574,7 @@ struct hns_roce_query_pf_caps_a {
>  	u8 max_sq_desc_sz;
>  	u8 max_rq_desc_sz;
>  	u8 max_srq_desc_sz;
> -	u8 cq_entry_sz;
> +	u8 cqe_sz;
>  };
>  
>  struct hns_roce_query_pf_caps_b {
> diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
> index 5907cfd..73bdec7 100644
> +++ b/drivers/infiniband/hw/hns/hns_roce_main.c
> @@ -323,6 +323,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
>  		mutex_init(&context->page_mutex);
>  	}
>  
> +	resp.cqe_size = hr_dev->caps.cqe_sz;
> +
>  	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
>  	if (ret)
>  		goto error_fail_copy_to_udata;
> diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
> index eb76b38..9ec85f7 100644
> +++ b/include/uapi/rdma/hns-abi.h
> @@ -39,6 +39,8 @@
>  struct hns_roce_ib_create_cq {
>  	__aligned_u64 buf_addr;
>  	__aligned_u64 db_addr;
> +	__u32 cqe_size;
> +	__u32 reserved;
>  };

This struct was made bigger, but the copy has to change to allow the
user to supply the smaller struct:

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_ib_create_cq ucmd = {};

		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));

Copies past the end of the buffer

Jason

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE in size of 64 Bytes
  2020-09-15 20:08   ` Jason Gunthorpe
@ 2020-09-16  3:02     ` liweihang
  0 siblings, 0 replies; 7+ messages in thread
From: liweihang @ 2020-09-16  3:02 UTC (permalink / raw)
  To: Jason Gunthorpe; +Cc: dledford, leon, linux-rdma, Linuxarm

On 2020/9/16 4:08, Jason Gunthorpe wrote:
> On Thu, Sep 03, 2020 at 09:16:05PM +0800, Weihang Li wrote:
>> From: Wenpeng Liang <liangwenpeng@huawei.com>
>>
>> The new version of RoCEE supports using CQE in size of 32B or 64B. The
>> performance of bus can be improved by using larger size of CQE.
>>
>> Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
>> Signed-off-by: Weihang Li <liweihang@huawei.com>
>>  drivers/infiniband/hw/hns/hns_roce_cq.c     | 19 ++++++++++++++++++-
>>  drivers/infiniband/hw/hns/hns_roce_device.h |  6 +++++-
>>  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |  5 ++---
>>  drivers/infiniband/hw/hns/hns_roce_hw_v1.h  |  2 +-
>>  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 20 +++++++++++++-------
>>  drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  7 +++++--
>>  drivers/infiniband/hw/hns/hns_roce_main.c   |  2 ++
>>  include/uapi/rdma/hns-abi.h                 |  4 +++-
>>  8 files changed, 49 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
>> index e87d616..9a2f745 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
>> @@ -150,7 +150,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
>>  	int err;
>>  
>>  	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
>> -	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
>> +	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
>>  	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
>>  	buf_attr.region_count = 1;
>>  	buf_attr.fixed_page = true;
>> @@ -224,6 +224,21 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
>>  	}
>>  }
>>  
>> +static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
>> +			 struct hns_roce_ib_create_cq *ucmd)
>> +{
>> +	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
>> +
>> +	if (udata) {
>> +		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
>> +			hr_cq->cqe_size = ucmd->cqe_size;
>> +		else
>> +			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
>> +	} else {
>> +		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
>> +	}
>> +}
>> +
>>  int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
>>  		       struct ib_udata *udata)
>>  {
>> @@ -266,6 +281,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
>>  		}
>>  	}
>>  
>> +	set_cqe_size(hr_cq, udata, &ucmd);
>> +
>>  	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
>>  	if (ret) {
>>  		ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
>> index cbf3478..2e4f6b1 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
>> @@ -81,6 +81,9 @@
>>  
>>  #define HNS_ROCE_V3_EQE_SIZE 0x40
>>  
>> +#define HNS_ROCE_V2_CQE_SIZE 32
>> +#define HNS_ROCE_V3_CQE_SIZE 64
>> +
>>  #define HNS_ROCE_SL_SHIFT			28
>>  #define HNS_ROCE_TCLASS_SHIFT			20
>>  #define HNS_ROCE_FLOW_LABEL_MASK		0xfffff
>> @@ -469,6 +472,7 @@ struct hns_roce_cq {
>>  	void __iomem			*cq_db_l;
>>  	u16				*tptr_addr;
>>  	int				arm_sn;
>> +	int				cqe_size;
>>  	unsigned long			cqn;
>>  	u32				vector;
>>  	atomic_t			refcount;
>> @@ -796,7 +800,7 @@ struct hns_roce_caps {
>>  	int		num_pds;
>>  	int		reserved_pds;
>>  	u32		mtt_entry_sz;
>> -	u32		cq_entry_sz;
>> +	u32		cqe_sz;
>>  	u32		page_size_cap;
>>  	u32		reserved_lkey;
>>  	int		mtpt_entry_sz;
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
>> index 83c07c2..f2fcea0 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
>> @@ -1476,7 +1476,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
>>  	caps->cqc_entry_sz	= HNS_ROCE_V1_CQC_ENTRY_SIZE;
>>  	caps->mtpt_entry_sz	= HNS_ROCE_V1_MTPT_ENTRY_SIZE;
>>  	caps->mtt_entry_sz	= HNS_ROCE_V1_MTT_ENTRY_SIZE;
>> -	caps->cq_entry_sz	= HNS_ROCE_V1_CQE_ENTRY_SIZE;
>> +	caps->cqe_sz		= HNS_ROCE_V1_CQE_SIZE;
>>  	caps->page_size_cap	= HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
>>  	caps->reserved_lkey	= 0;
>>  	caps->reserved_pds	= 0;
>> @@ -1897,8 +1897,7 @@ static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
>>  
>>  static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
>>  {
>> -	return hns_roce_buf_offset(hr_cq->mtr.kmem,
>> -				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
>> +	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
>>  }
>>  
>>  static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
>> index 52307b2..5996892 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
>> @@ -74,7 +74,7 @@
>>  #define HNS_ROCE_V1_MTPT_ENTRY_SIZE			64
>>  #define HNS_ROCE_V1_MTT_ENTRY_SIZE			64
>>  
>> -#define HNS_ROCE_V1_CQE_ENTRY_SIZE			32
>> +#define HNS_ROCE_V1_CQE_SIZE				32
>>  #define HNS_ROCE_V1_PAGE_SIZE_SUPPORT			0xFFFFF000
>>  
>>  #define HNS_ROCE_V1_TABLE_CHUNK_SIZE			(1 << 17)
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> index 71eee67..8f7e85d 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
>> @@ -1690,7 +1690,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
>>  	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
>>  	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
>>  	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
>> -	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
>> +	caps->cqe_sz		= HNS_ROCE_V2_CQE_SIZE;
>>  	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
>>  	caps->reserved_lkey	= 0;
>>  	caps->reserved_pds	= 0;
>> @@ -1770,6 +1770,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
>>  	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
>>  		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
>>  		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
>> +		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
>>  	}
>>  }
>>  
>> @@ -1862,7 +1863,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
>>  	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
>>  	caps->max_rq_desc_sz	     = resp_a->max_rq_desc_sz;
>>  	caps->max_srq_desc_sz	     = resp_a->max_srq_desc_sz;
>> -	caps->cq_entry_sz	     = resp_a->cq_entry_sz;
>> +	caps->cqe_sz		     = HNS_ROCE_V2_CQE_SIZE;
>>  
>>  	caps->mtpt_entry_sz	     = resp_b->mtpt_entry_sz;
>>  	caps->irrl_entry_sz	     = resp_b->irrl_entry_sz;
>> @@ -1993,6 +1994,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
>>  	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
>>  		caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
>>  		caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
>> +		caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
>>  	}
>>  
>>  	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
>> @@ -2771,8 +2773,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
>>  
>>  static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
>>  {
>> -	return hns_roce_buf_offset(hr_cq->mtr.kmem,
>> -				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
>> +	return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
>>  }
>>  
>>  static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
>> @@ -2872,6 +2873,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
>>  	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
>>  		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
>>  
>> +	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
>> +		       V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
>> +		       HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
>> +
>>  	cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
>>  
>>  	roce_set_field(cq_context->byte_16_hop_addr,
>> @@ -3039,7 +3044,8 @@ static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
>>  }
>>  
>>  static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
>> -			   struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
>> +			   struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
>> +			   struct ib_wc *wc)
>>  {
>>  	static const struct {
>>  		u32 cqe_status;
>> @@ -3080,7 +3086,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
>>  
>>  	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
>>  	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
>> -		       sizeof(*cqe), false);
>> +		       cq->cqe_size, false);
>>  
>>  	/*
>>  	 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
>> @@ -3177,7 +3183,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
>>  		++wq->tail;
>>  	}
>>  
>> -	get_cqe_status(hr_dev, *cur_qp, cqe, wc);
>> +	get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
>>  	if (unlikely(wc->status != IB_WC_SUCCESS))
>>  		return 0;
>>  
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
>> index f98c55a..ca6b055 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
>> @@ -86,7 +86,6 @@
>>  #define HNS_ROCE_V2_MTPT_ENTRY_SZ		64
>>  #define HNS_ROCE_V2_MTT_ENTRY_SZ		64
>>  #define HNS_ROCE_V2_IDX_ENTRY_SZ		4
>> -#define HNS_ROCE_V2_CQE_ENTRY_SIZE		32
>>  #define HNS_ROCE_V2_SCCC_ENTRY_SZ		32
>>  #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ		PAGE_SIZE
>>  #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
>> @@ -309,6 +308,9 @@ struct hns_roce_v2_cq_context {
>>  #define	V2_CQC_BYTE_8_CQN_S 0
>>  #define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
>>  
>> +#define V2_CQC_BYTE_8_CQE_SIZE_S 27
>> +#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27)
>> +
>>  #define	V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
>>  #define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
>>  
>> @@ -896,6 +898,7 @@ struct hns_roce_v2_cqe {
>>  	u8	smac[4];
>>  	__le32	byte_28;
>>  	__le32	byte_32;
>> +	__le32	rsv[8];
>>  };
>>  
>>  #define	V2_CQE_BYTE_4_OPCODE_S 0
>> @@ -1571,7 +1574,7 @@ struct hns_roce_query_pf_caps_a {
>>  	u8 max_sq_desc_sz;
>>  	u8 max_rq_desc_sz;
>>  	u8 max_srq_desc_sz;
>> -	u8 cq_entry_sz;
>> +	u8 cqe_sz;
>>  };
>>  
>>  struct hns_roce_query_pf_caps_b {
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
>> index 5907cfd..73bdec7 100644
>> +++ b/drivers/infiniband/hw/hns/hns_roce_main.c
>> @@ -323,6 +323,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
>>  		mutex_init(&context->page_mutex);
>>  	}
>>  
>> +	resp.cqe_size = hr_dev->caps.cqe_sz;
>> +
>>  	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
>>  	if (ret)
>>  		goto error_fail_copy_to_udata;
>> diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
>> index eb76b38..9ec85f7 100644
>> +++ b/include/uapi/rdma/hns-abi.h
>> @@ -39,6 +39,8 @@
>>  struct hns_roce_ib_create_cq {
>>  	__aligned_u64 buf_addr;
>>  	__aligned_u64 db_addr;
>> +	__u32 cqe_size;
>> +	__u32 reserved;
>>  };
> 
> This struct was made bigger, but the copy has to change to allow the
> user to supply the smaller struct:
> 
> int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
> 		       struct ib_udata *udata)
> {
> 	struct hns_roce_ib_create_cq ucmd = {};
> 
> 		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
> 
> Copies past the end of the buffer
> 
> Jason
> 

Thanks for your reminder, I will fix it.

Weihang

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2020-09-16  3:02 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-03 13:16 [PATCH v3 for-next 0/4] RDMA/hns: Extend some capabilities for HIP09 Weihang Li
2020-09-03 13:16 ` [PATCH v3 for-next 1/4] RDMA/hns: Add support for EQE in size of 64 Bytes Weihang Li
2020-09-03 13:16 ` [PATCH v3 for-next 2/4] RDMA/hns: Add support for CQE " Weihang Li
2020-09-15 20:08   ` Jason Gunthorpe
2020-09-16  3:02     ` liweihang
2020-09-03 13:16 ` [PATCH v3 for-next 3/4] RDMA/hns: Add support for QPC in size of 512 Bytes Weihang Li
2020-09-03 13:16 ` [PATCH v3 for-next 4/4] RDMA/hns: Add support for SCCC in size of 64 Bytes Weihang Li

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.