From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
To: dledford@redhat.com, jgg@ziepe.ca
Cc: linux-rdma@vger.kernel.org, linux-kernel@vger.kernel.org,
	xavier.huwei@tom.com, lijun_nudt@163.com
Subject: [PATCH rdma-next 5/5] RDMA/hns: Fix the illegal memory operation when cross page
Date: Thu, 17 May 2018 16:02:53 +0800
Message-ID: <1526544173-106587-6-git-send-email-xavier.huwei@huawei.com>
In-Reply-To: <1526544173-106587-1-git-send-email-xavier.huwei@huawei.com>

This patch fixes a potential illegal memory access that occurs when
the extended sge buffer crosses a page boundary during a post send
operation. Without this fix, the bug triggers a call trace.
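
For illustration, here is a minimal user-space sketch of the split
calculation that set_extend_sge() performs below. It assumes a 4 KiB
buffer page and the 16-byte hns_roce_v2_wqe_data_seg layout; the names
and values are illustrative only, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct hns_roce_v2_wqe_data_seg (16 bytes). */
struct wqe_data_seg {
	uint32_t lkey;
	uint32_t len;
	uint64_t addr;
};

int main(void)
{
	const unsigned int shift = 12;  /* assumed 4 KiB buffer page */
	uint64_t dseg = 0x0fe0;         /* descriptor slot near the page end */
	int extend_sge_num = 5;         /* sges beyond the two held in the WQE */

	/* Number of descriptors that still fit before the page boundary. */
	int fi_sge_num = (((((dseg >> shift) + 1) << shift) - dseg) /
			  sizeof(struct wqe_data_seg));

	if (extend_sge_num > fi_sge_num)
		printf("split: %d sges in this page, %d in the next\n",
		       fi_sge_num, extend_sge_num - fi_sge_num);
	else
		printf("all %d sges fit in this page\n", extend_sge_num);
	return 0;
}

With these values the program prints "split: 2 sges in this page, 3 in
the next", which illustrates the case the old code mishandled by writing
descriptors straight past the page boundary.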

Reported-by: Jie Chen <chenjie103@huawei.com>
Reported-by: Xiping Zhang (Francis) <zhangxiping3@huawei.com>
Fixes: b1c1583 ("RDMA/hns: Get rid of virt_to_page and vmap calls after dma_alloc_coherent")
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 70 +++++++++++++++++++++---------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h |  1 +
 2 files changed, 51 insertions(+), 20 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e1c44a6..5393149 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -52,6 +52,52 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
 	dseg->len  = cpu_to_le32(sg->length);
 }
 
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
+			   unsigned int *sge_ind)
+{
+	struct hns_roce_v2_wqe_data_seg *dseg;
+	struct ib_sge *sg;
+	int num_in_wqe = 0;
+	int extend_sge_num;
+	int fi_sge_num;
+	int se_sge_num;
+	int shift;
+	int i;
+
+	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
+	extend_sge_num = wr->num_sge - num_in_wqe;
+	sg = wr->sg_list + num_in_wqe;
+	shift = qp->hr_buf.page_shift;
+
+	/*
+	 * Check whether all of the wr->num_sge sges fall in the same page.
+	 * If not, calculate how many of them fit in the first page and how
+	 * many go in the second.
+	 */
+	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
+	fi_sge_num = (((((u64)dseg >> shift) + 1) << shift) - (u64)dseg) /
+		      sizeof(struct hns_roce_v2_wqe_data_seg);
+	if (extend_sge_num > fi_sge_num) {
+		se_sge_num = extend_sge_num - fi_sge_num;
+		for (i = 0; i < fi_sge_num; i++) {
+			set_data_seg_v2(dseg++, sg + i);
+			(*sge_ind)++;
+		}
+		dseg = get_send_extend_sge(qp,
+					   (*sge_ind) & (qp->sge.sge_cnt - 1));
+		for (i = 0; i < se_sge_num; i++) {
+			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
+			(*sge_ind)++;
+		}
+	} else {
+		for (i = 0; i < extend_sge_num; i++) {
+			set_data_seg_v2(dseg++, sg + i);
+			(*sge_ind)++;
+		}
+	}
+}
+
 static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
 			     void *wqe, unsigned int *sge_ind,
@@ -85,7 +131,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
 			     1);
 	} else {
-		if (wr->num_sge <= 2) {
+		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
 			for (i = 0; i < wr->num_sge; i++) {
 				if (likely(wr->sg_list[i].length)) {
 					set_data_seg_v2(dseg, wr->sg_list + i);
@@ -98,24 +144,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
 				     (*sge_ind) & (qp->sge.sge_cnt - 1));
 
-			for (i = 0; i < 2; i++) {
+			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
 				if (likely(wr->sg_list[i].length)) {
 					set_data_seg_v2(dseg, wr->sg_list + i);
 					dseg++;
 				}
 			}
 
-			dseg = get_send_extend_sge(qp,
-					    (*sge_ind) & (qp->sge.sge_cnt - 1));
-
-			for (i = 0; i < wr->num_sge - 2; i++) {
-				if (likely(wr->sg_list[i + 2].length)) {
-					set_data_seg_v2(dseg,
-							wr->sg_list + 2 + i);
-					dseg++;
-					(*sge_ind)++;
-				}
-			}
+			set_extend_sge(qp, wr, sge_ind);
 		}
 
 		roce_set_field(rc_sq_wqe->byte_16,
@@ -318,13 +354,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
 			       GID_LEN_V2);
 
-			dseg = get_send_extend_sge(qp,
-					    sge_ind & (qp->sge.sge_cnt - 1));
-			for (i = 0; i < wr->num_sge; i++) {
-				set_data_seg_v2(dseg + i, wr->sg_list + i);
-				sge_ind++;
-			}
-
+			set_extend_sge(qp, wr, &sge_ind);
 			ind++;
 		} else if (ibqp->qp_type == IB_QPT_RC) {
 			rc_sq_wqe = wqe;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f16df1b..97f9bef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -77,6 +77,7 @@
 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM		2
 #define HNS_ROCE_INVALID_LKEY			0x100
 #define HNS_ROCE_CMQ_TX_TIMEOUT			30000
+#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE	2
 
 #define HNS_ROCE_CONTEXT_HOP_NUM		1
 #define HNS_ROCE_MTT_HOP_NUM			1
-- 
1.9.1
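
As an aside, the "(*sge_ind) & (qp->sge.sge_cnt - 1)" expression used
throughout the patch is the usual power-of-two ring index wrap. A
minimal sketch, assuming sge_cnt is a power of two (which this masking
idiom requires); the names here are illustrative:

#include <assert.h>
#include <stdio.h>

/*
 * Map an ever-increasing producer index onto a power-of-two ring.
 * When cnt is a power of two, (idx & (cnt - 1)) equals (idx % cnt)
 * without a division, and it behaves correctly across unsigned
 * wrap-around of idx.
 */
static unsigned int ring_slot(unsigned int idx, unsigned int cnt)
{
	assert(cnt && (cnt & (cnt - 1)) == 0);	/* power of two */
	return idx & (cnt - 1);
}

int main(void)
{
	const unsigned int sge_cnt = 8;	/* illustrative queue depth */
	unsigned int ind;

	for (ind = 5; ind < 12; ind++)
		printf("sge_ind=%u -> slot %u\n", ind, ring_slot(ind, sge_cnt));
	return 0;
}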
