From: Wenpeng Liang <liangwenpeng@huawei.com>
To: <dledford@redhat.com>, <jgg@nvidia.com>
Cc: <linux-rdma@vger.kernel.org>, <linuxarm@huawei.com>,
<leon@kernel.org>, <liangwenpeng@huawei.com>,
Xi Wang <wangxi11@huawei.com>
Subject: [PATCH v4 for-next 08/12] RDMA/hns: Add method to query WQE buffer's address
Date: Thu, 29 Jul 2021 10:19:19 +0800 [thread overview]
Message-ID: <1627525163-1683-9-git-send-email-liangwenpeng@huawei.com> (raw)
In-Reply-To: <1627525163-1683-1-git-send-email-liangwenpeng@huawei.com>
From: Xi Wang <wangxi11@huawei.com>
If a uQP works in DCA mode, the userspace driver needs to get the buffer's
address in the DCA memory pool by calling the 'HNS_IB_METHOD_DCA_MEM_QUERY'
method after the QP has been attached by calling the
'HNS_IB_METHOD_DCA_MEM_ATTACH' method.
This method returns the DCA mem object's key and the offset, which let the
userspace driver compute the WQE's virtual address in the DCA memory pool.
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_dca.c | 110 ++++++++++++++++++++++++++++++-
drivers/infiniband/hw/hns/hns_roce_dca.h | 6 ++
include/uapi/rdma/hns-abi.h | 10 +++
3 files changed, 124 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.c b/drivers/infiniband/hw/hns/hns_roce_dca.c
index dd1d6c3..7d59744 100644
--- a/drivers/infiniband/hw/hns/hns_roce_dca.c
+++ b/drivers/infiniband/hw/hns/hns_roce_dca.c
@@ -74,7 +74,11 @@ static inline void unlock_dca_page_to_active(struct hns_dca_page_state *state,
((HNS_DCA_OWN_MASK & (id)) == (HNS_DCA_OWN_MASK & (s)->buf_id))
#define dca_page_is_allocated(s, id) \
- (dca_page_is_attached(s, id) && (s)->lock)
+ (dca_page_is_attached(s, id) && (s)->lock)
+
+/* all buf id bits must be matched */
+#define dca_page_is_active(s, id) ((HNS_DCA_ID_MASK & (id)) == \
+ (s)->buf_id && !(s)->lock && (s)->active)
#define dca_page_is_inactive(s) (!(s)->lock && !(s)->active)
@@ -870,6 +874,64 @@ static int attach_dca_mem(struct hns_roce_dev *hr_dev,
return 0;
}
+struct dca_page_query_active_attr {
+ u32 buf_id;
+ u32 curr_index;
+ u32 start_index;
+ u32 page_index;
+ u32 page_count;
+ u64 mem_key;
+};
+
+static int query_dca_active_pages_proc(struct dca_mem *mem, int index,
+ void *param)
+{
+ struct hns_dca_page_state *state = &mem->states[index];
+ struct dca_page_query_active_attr *attr = param;
+
+ if (!dca_page_is_active(state, attr->buf_id))
+ return 0;
+
+ if (attr->curr_index < attr->start_index) {
+ attr->curr_index++;
+ return 0;
+ } else if (attr->curr_index > attr->start_index) {
+ return DCA_MEM_STOP_ITERATE;
+ }
+
+ /* Search first page in DCA mem */
+ attr->page_index = index;
+ attr->mem_key = mem->key;
+ /* Search active pages in continuous addresses */
+ while (index < mem->page_count) {
+ state = &mem->states[index];
+ if (!dca_page_is_active(state, attr->buf_id))
+ break;
+
+ index++;
+ attr->page_count++;
+ }
+
+ return DCA_MEM_STOP_ITERATE;
+}
+
+static int query_dca_mem(struct hns_roce_qp *hr_qp, u32 page_index,
+ struct hns_dca_query_resp *resp)
+{
+ struct hns_roce_dca_ctx *ctx = hr_qp_to_dca_ctx(hr_qp);
+ struct dca_page_query_active_attr attr = {};
+
+ attr.buf_id = hr_qp->dca_cfg.buf_id;
+ attr.start_index = page_index;
+ travel_dca_pages(ctx, &attr, query_dca_active_pages_proc);
+
+ resp->mem_key = attr.mem_key;
+ resp->mem_ofs = attr.page_index << HNS_HW_PAGE_SHIFT;
+ resp->page_count = attr.page_count;
+
+ return attr.page_count ? 0 : -ENOMEM;
+}
+
struct dca_page_free_buf_attr {
u32 buf_id;
u32 max_pages;
@@ -1202,13 +1264,57 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(HNS_IB_ATTR_DCA_MEM_DETACH_SQ_INDEX,
UVERBS_ATTR_TYPE(u32), UA_MANDATORY));
+static int UVERBS_HANDLER(HNS_IB_METHOD_DCA_MEM_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct hns_roce_qp *hr_qp = uverbs_attr_to_hr_qp(attrs);
+ struct hns_dca_query_resp resp = {};
+ u32 page_idx;
+ int ret;
+
+ if (!hr_qp)
+ return -EINVAL;
+
+ if (uverbs_copy_from(&page_idx, attrs,
+ HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX))
+ return -EFAULT;
+
+ ret = query_dca_mem(hr_qp, page_idx, &resp);
+ if (ret)
+ return ret;
+
+ if (uverbs_copy_to(attrs, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY,
+ &resp.mem_key, sizeof(resp.mem_key)) ||
+ uverbs_copy_to(attrs, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET,
+ &resp.mem_ofs, sizeof(resp.mem_ofs)) ||
+ uverbs_copy_to(attrs, HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT,
+ &resp.page_count, sizeof(resp.page_count)))
+ return -EFAULT;
+
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ HNS_IB_METHOD_DCA_MEM_QUERY,
+ UVERBS_ATTR_IDR(HNS_IB_ATTR_DCA_MEM_QUERY_HANDLE, UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ, UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY));
+
DECLARE_UVERBS_NAMED_OBJECT(HNS_IB_OBJECT_DCA_MEM,
UVERBS_TYPE_ALLOC_IDR(dca_cleanup),
&UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_REG),
&UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_DEREG),
&UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_SHRINK),
&UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_ATTACH),
- &UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_DETACH));
+ &UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_DETACH),
+ &UVERBS_METHOD(HNS_IB_METHOD_DCA_MEM_QUERY));
static bool dca_is_supported(struct ib_device *device)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_dca.h b/drivers/infiniband/hw/hns/hns_roce_dca.h
index 4493854..cb7bd6a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_dca.h
+++ b/drivers/infiniband/hw/hns/hns_roce_dca.h
@@ -50,6 +50,12 @@ struct hns_dca_detach_attr {
u32 sq_idx;
};
+struct hns_dca_query_resp {
+ u64 mem_key;
+ u32 mem_ofs;
+ u32 page_count;
+};
+
void hns_roce_register_udca(struct hns_roce_dev *hr_dev,
struct hns_roce_ucontext *uctx);
void hns_roce_unregister_udca(struct hns_roce_dev *hr_dev,
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 97ab795..7f5d2d5 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -114,6 +114,7 @@ enum hns_ib_dca_mem_methods {
HNS_IB_METHOD_DCA_MEM_SHRINK,
HNS_IB_METHOD_DCA_MEM_ATTACH,
HNS_IB_METHOD_DCA_MEM_DETACH,
+ HNS_IB_METHOD_DCA_MEM_QUERY,
};
enum hns_ib_dca_mem_reg_attrs {
@@ -149,4 +150,13 @@ enum hns_ib_dca_mem_detach_attrs {
HNS_IB_ATTR_DCA_MEM_DETACH_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
HNS_IB_ATTR_DCA_MEM_DETACH_SQ_INDEX,
};
+
+enum hns_ib_dca_mem_query_attrs {
+ HNS_IB_ATTR_DCA_MEM_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX,
+ HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY,
+ HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET,
+ HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT,
+};
+
#endif /* HNS_ABI_USER_H */
--
2.8.1
next prev parent reply other threads:[~2021-07-29 2:23 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-29 2:19 [PATCH v4 for-next 00/12] RDMA/hns: Add support for Dynamic Context Attachment Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 01/12] RDMA/hns: Introduce DCA for RC QP Wenpeng Liang
2021-08-19 23:54 ` Jason Gunthorpe
2021-08-21 9:37 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 02/12] RDMA/hns: Add method for shrinking DCA memory pool Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 03/12] RDMA/hns: Configure DCA mode for the userspace QP Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 04/12] RDMA/hns: Refactor QP modify flow Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 05/12] RDMA/hns: Add method for attaching WQE buffer Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 06/12] RDMA/hns: Setup the configuration of WQE addressing to QPC Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 07/12] RDMA/hns: Add method to detach WQE buffer Wenpeng Liang
2021-07-29 2:19 ` Wenpeng Liang [this message]
2021-08-19 23:58 ` [PATCH v4 for-next 08/12] RDMA/hns: Add method to query WQE buffer's address Jason Gunthorpe
2021-08-21 9:39 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 09/12] RDMA/hns: Add a shared memory to sync DCA status Wenpeng Liang
2021-08-19 23:43 ` Jason Gunthorpe
2021-08-21 9:42 ` Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 10/12] RDMA/hns: Sync DCA status by the shared memory Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 11/12] RDMA/nldev: Add detailed CTX information support Wenpeng Liang
2021-07-29 2:19 ` [PATCH v4 for-next 12/12] RDMA/hns: Dump detailed driver-specific UCTX Wenpeng Liang
2021-08-12 12:42 ` [PATCH v4 for-next 00/12] RDMA/hns: Add support for Dynamic Context Attachment Wenpeng Liang
2021-08-21 9:34 ` Wenpeng Liang
2021-08-22 22:25 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1627525163-1683-9-git-send-email-liangwenpeng@huawei.com \
--to=liangwenpeng@huawei.com \
--cc=dledford@redhat.com \
--cc=jgg@nvidia.com \
--cc=leon@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=linuxarm@huawei.com \
--cc=wangxi11@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).