From: Wenpeng Liang <liangwenpeng@huawei.com>
To: <dledford@redhat.com>, <jgg@nvidia.com>
Cc: <linux-rdma@vger.kernel.org>, <linuxarm@huawei.com>,
<leon@kernel.org>, <liangwenpeng@huawei.com>,
Xi Wang <wangxi11@huawei.com>
Subject: [PATCH v4 for-next 06/12] RDMA/hns: Setup the configuration of WQE addressing to QPC
Date: Thu, 29 Jul 2021 10:19:17 +0800 [thread overview]
Message-ID: <1627525163-1683-7-git-send-email-liangwenpeng@huawei.com> (raw)
In-Reply-To: <1627525163-1683-1-git-send-email-liangwenpeng@huawei.com>
From: Xi Wang <wangxi11@huawei.com>
Add a new command to update the configuration of WQE buffer addressing to
QPC in DCA mode.
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 82 +++++++++++++++++++++++++++---
drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1 +
2 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b31b493..7e44128 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2782,6 +2782,17 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
free_dip_list(hr_dev);
}
+static inline void mbox_desc_init(struct hns_roce_post_mbox *mb, u64 in_param,
+ u64 out_param, u32 in_modifier,
+ u8 op_modifier, u16 op)
+{
+ mb->in_param_l = cpu_to_le32(in_param);
+ mb->in_param_h = cpu_to_le32(in_param >> 32);
+ mb->out_param_l = cpu_to_le32(out_param);
+ mb->out_param_h = cpu_to_le32(out_param >> 32);
+ mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
+}
+
static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier,
u16 op, u16 token, int event)
@@ -2790,17 +2801,34 @@ static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
-
- mb->in_param_l = cpu_to_le32(in_param);
- mb->in_param_h = cpu_to_le32(in_param >> 32);
- mb->out_param_l = cpu_to_le32(out_param);
- mb->out_param_h = cpu_to_le32(out_param >> 32);
- mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
+ mbox_desc_init(mb, in_param, out_param, in_modifier, op_modifier, op);
mb->token_event_en = cpu_to_le32(event << 16 | token);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
+static int hns_roce_mbox_send(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_SYNC_MB, false);
+
+ mbox_desc_init(mb, in_param, out_param, in_modifier, op_modifier, op);
+
+ /* The hardware doesn't care about the token fields when working in
+ * sync mode.
+ */
+ mb->token_event_en = 0;
+
+ /* A return value of 0 from the cmdq send indicates that the hardware
+ * has already finished the operation defined in this mbox.
+ */
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
u8 *complete_status)
{
@@ -5062,6 +5090,47 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
return ret;
}
+static int hns_roce_v2_set_dca_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_v2_qp_context *qpc, *msk;
+ dma_addr_t dma_handle;
+ int qpc_sz;
+ int ret;
+
+ qpc_sz = hr_dev->caps.qpc_sz;
+ WARN_ON(2 * qpc_sz > HNS_ROCE_MAILBOX_SIZE);
+ qpc = dma_pool_alloc(hr_dev->cmd.pool, GFP_NOWAIT, &dma_handle);
+ if (!qpc)
+ return -ENOMEM;
+
+ msk = (struct hns_roce_v2_qp_context *)((void *)qpc + qpc_sz);
+ memset(msk, 0xff, qpc_sz);
+
+ ret = config_qp_rq_buf(hr_dev, hr_qp, qpc, msk);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config rq qpc, ret = %d.\n", ret);
+ goto done;
+ }
+
+ ret = config_qp_sq_buf(hr_dev, hr_qp, qpc, msk);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config sq qpc, ret = %d.\n", ret);
+ goto done;
+ }
+
+ ret = hns_roce_mbox_send(hr_dev, dma_handle, 0, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_MODIFY_QPC);
+ if (ret)
+ ibdev_err(ibdev, "failed to modify DCA buf, ret = %d.\n", ret);
+
+done:
+ dma_pool_free(hr_dev->cmd.pool, qpc, dma_handle);
+
+ return ret;
+}
+
static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
static const enum ib_qp_state map[] = {
@@ -6239,6 +6308,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
+ .set_dca_buf = hns_roce_v2_set_dca_buf,
.modify_qp = hns_roce_v2_modify_qp,
.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
.init_eq = hns_roce_v2_init_eq_table,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index b8a09d4..3f758d6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -257,6 +257,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
+ HNS_ROCE_OPC_SYNC_MB = 0x8511,
HNS_ROCE_OPC_EXT_CFG = 0x8512,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
--
2.8.1
next prev parent reply other threads:[~2021-07-29 2:23 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-29 2:19 [PATCH v4 for-next 00/12] RDMA/hns: Add support for Dynamic Context Attachment Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 01/12] RDMA/hns: Introduce DCA for RC QP Wenpeng Liang
2021-08-19 23:54 ` Jason Gunthorpe
2021-08-21 9:37 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 02/12] RDMA/hns: Add method for shrinking DCA memory pool Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 03/12] RDMA/hns: Configure DCA mode for the userspace QP Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 04/12] RDMA/hns: Refactor QP modify flow Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 05/12] RDMA/hns: Add method for attaching WQE buffer Wenpeng Liang
2021-07-29 2:19 ` Wenpeng Liang [this message]
2021-07-29 2:19 ` [PATCH v4 for-next 07/12] RDMA/hns: Add method to detach " Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 08/12] RDMA/hns: Add method to query WQE buffer's address Wenpeng Liang
2021-08-19 23:58 ` Jason Gunthorpe
2021-08-21 9:39 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 09/12] RDMA/hns: Add a shared memory to sync DCA status Wenpeng Liang
2021-08-19 23:43 ` Jason Gunthorpe
2021-08-21 9:42 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 10/12] RDMA/hns: Sync DCA status by the shared memory Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 11/12] RDMA/nldev: Add detailed CTX information support Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 12/12] RDMA/hns: Dump detailed driver-specific UCTX Wenpeng Liang
2021-08-12 12:42 ` [PATCH v4 for-next 00/12] RDMA/hns: Add support for Dynamic Context Attachment Wenpeng Liang
2021-08-21 9:34 ` Wenpeng Liang
2021-08-22 22:25 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1627525163-1683-7-git-send-email-liangwenpeng@huawei.com \
--to=liangwenpeng@huawei.com \
--cc=dledford@redhat.com \
--cc=jgg@nvidia.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=linuxarm@huawei.com \
--cc=wangxi11@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).