All of lore.kernel.org
 help / color / mirror / Atom feed
From: Cheng Xu <chengyou@linux.alibaba.com>
To: jgg@ziepe.ca, dledford@redhat.com
Cc: leon@kernel.org, linux-rdma@vger.kernel.org,
	KaiShen@linux.alibaba.com, chengyou@linux.alibaba.com,
	tonylu@linux.alibaba.com, BMT@zurich.ibm.com
Subject: [PATCH for-next v4 07/12] RDMA/erdma: Add verbs header file
Date: Mon, 14 Mar 2022 14:47:34 +0800	[thread overview]
Message-ID: <20220314064739.81647-8-chengyou@linux.alibaba.com> (raw)
In-Reply-To: <20220314064739.81647-1-chengyou@linux.alibaba.com>

This header file defines the main structures and functions used for RDMA
verbs, including qp, cq, mr, ucontext, etc.

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
---
 drivers/infiniband/hw/erdma/erdma_verbs.h | 344 ++++++++++++++++++++++
 1 file changed, 344 insertions(+)
 create mode 100644 drivers/infiniband/hw/erdma/erdma_verbs.h

diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
new file mode 100644
index 000000000000..3c3f4b90715f
--- /dev/null
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+
+/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
+/*          Kai Shen <kaishen@linux.alibaba.com> */
+/* Copyright (c) 2020-2022, Alibaba Group. */
+
+#ifndef __ERDMA_VERBS_H__
+#define __ERDMA_VERBS_H__
+
+#include <linux/errno.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "erdma.h"
+#include "erdma_cm.h"
+#include "erdma_hw.h"
+
+/* RDMA capability limits reported to the core / userspace. */
+#define ERDMA_MAX_PD (128 * 1024)
+#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_ORD 128 /* max outstanding RDMA reads as initiator */
+#define ERDMA_MAX_IRD 128 /* max inbound RDMA reads as responder */
+#define ERDMA_MAX_SGE_RD 1
+#define ERDMA_MAX_FMR 0
+#define ERDMA_MAX_SRQ 0 /* not support srq now. */
+#define ERDMA_MAX_SRQ_WR 0 /* not support srq now. */
+#define ERDMA_MAX_SRQ_SGE 0 /* not support srq now. */
+#define ERDMA_MAX_CONTEXT (128 * 1024)
+#define ERDMA_MAX_SEND_SGE 6
+#define ERDMA_MAX_RECV_SGE 1
+/* Inline data must fit in the space of a full SGE list. */
+#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
+#define ERDMA_MAX_FRMR_PA 512
+
+/* Flags describing how a user mmap region must be mapped. */
+enum {
+	ERDMA_MMAP_IO_NC = 0, /* no cache */
+};
+
+/* Driver-private wrapper around the core's rdma_user_mmap_entry. */
+struct erdma_user_mmap_entry {
+	struct rdma_user_mmap_entry rdma_entry;
+	u64 address; /* address exposed to userspace via mmap — confirm phys vs. bus in erdma_mmap() */
+	u8 mmap_flag; /* one of the ERDMA_MMAP_* values above */
+};
+
+/* Per-process device context, one per userspace verbs context. */
+struct erdma_ucontext {
+	struct ib_ucontext ibucontext;
+	struct erdma_dev *dev;
+
+	/*
+	 * Doorbell layout info handed back to userspace. Naming suggests
+	 * sdb/rdb/cdb are the send/receive/completion doorbell addresses
+	 * and sdb_* describe the send doorbell slot (type, index, page,
+	 * offset) — confirm exact semantics in erdma_alloc_ucontext().
+	 */
+	u32 sdb_type;
+	u32 sdb_idx;
+	u32 sdb_page_idx;
+	u32 sdb_page_off;
+	u64 sdb;
+	u64 rdb;
+	u64 cdb;
+
+	/* mmap entries exposing the three doorbell regions to userspace */
+	struct rdma_user_mmap_entry *sq_db_mmap_entry;
+	struct rdma_user_mmap_entry *rq_db_mmap_entry;
+	struct rdma_user_mmap_entry *cq_db_mmap_entry;
+
+	/* doorbell records */
+	struct list_head dbrecords_page_list;
+	struct mutex dbrecords_page_mutex; /* serializes dbrecords_page_list */
+};
+
+/* Protection domain; pdn is the device-assigned PD number. */
+struct erdma_pd {
+	struct ib_pd ibpd;
+	u32 pdn;
+};
+
+/*
+ * Memory region (MR) definitions.
+ */
+#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
+/* Byte size of @x MTT entries; each MTT entry takes 8 bytes. */
+#define MTT_SIZE(x) ((x) << 3)
+#define ERDMA_MR_MAX_MTT_CNT 524288
+#define ERDMA_MTT_ENTRY_SIZE 8
+
+/* MR types */
+#define ERDMA_MR_TYPE_NORMAL 0
+#define ERDMA_MR_TYPE_FRMR 1
+#define ERDMA_MR_TYPE_DMA 2
+
+/* MTT layout: entries carried inline vs. in an indirect buffer */
+#define ERDMA_MR_INLINE_MTT 0
+#define ERDMA_MR_INDIRECT_MTT 1
+
+/* MR access rights: local/remote read/write */
+#define ERDMA_MR_ACC_LR BIT(0)
+#define ERDMA_MR_ACC_LW BIT(1)
+#define ERDMA_MR_ACC_RR BIT(2)
+#define ERDMA_MR_ACC_RW BIT(3)
+
+/* Describes a pinned memory area and its MTT (memory translation table). */
+struct erdma_mem {
+	struct ib_umem *umem; /* pinned userspace pages backing this area */
+	void *mtt_buf; /* indirect MTT buffer; used when mtt_type is ERDMA_MR_INDIRECT_MTT — confirm */
+	u32 mtt_type; /* ERDMA_MR_INLINE_MTT or ERDMA_MR_INDIRECT_MTT */
+	u32 page_size;
+	u32 page_offset; /* offset of the start address within the first page */
+	u32 page_cnt;
+	u32 mtt_nents;
+
+	u64 va; /* virtual start address of the area */
+	u64 len; /* length of the area in bytes */
+
+	/* MTT entries passed inline when they fit (<= 4 entries) */
+	u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
+};
+
+/* Memory region. */
+struct erdma_mr {
+	struct ib_mr ibmr;
+	struct erdma_mem mem;
+	u8 type; /* one of ERDMA_MR_TYPE_* */
+	u8 access; /* bitmask of ERDMA_MR_ACC_* rights */
+	u8 valid;
+};
+
+/*
+ * A page of doorbell records shared with userspace; linked into
+ * erdma_ucontext.dbrecords_page_list and shared between QPs/CQs of
+ * that context via refcnt.
+ */
+struct erdma_user_dbrecords_page {
+	struct list_head list;
+	struct ib_umem *umem;
+	u64 va; /* userspace virtual address of the page */
+	int refcnt; /* number of db records handed out from this page */
+};
+
+/* Queue state for a userspace-owned QP (buffers live in user memory). */
+struct erdma_uqp {
+	struct erdma_mem sq_mtt;
+	struct erdma_mem rq_mtt;
+
+	dma_addr_t sq_db_info_dma_addr;
+	dma_addr_t rq_db_info_dma_addr;
+
+	struct erdma_user_dbrecords_page *user_dbr_page;
+
+	u32 rq_offset; /* RQ offset within the queue buffer — confirm in create path */
+};
+
+/* Queue state for a kernel-owned QP. */
+struct erdma_kqp {
+	u16 sq_pi; /* SQ producer index */
+	u16 sq_ci; /* SQ consumer index */
+
+	u16 rq_pi; /* RQ producer index */
+	u16 rq_ci; /* RQ consumer index */
+
+	/* presumably per-slot wr_id tables for completions — confirm */
+	u64 *swr_tbl;
+	u64 *rwr_tbl;
+
+	/* mapped hardware doorbells */
+	void *hw_sq_db;
+	void *hw_rq_db;
+
+	void *sq_buf;
+	dma_addr_t sq_buf_dma_addr;
+
+	void *rq_buf;
+	dma_addr_t rq_buf_dma_addr;
+
+	void *sq_db_info;
+	void *rq_db_info;
+
+	u8 sig_all; /* nonzero if all WRs generate completions — confirm against sq_sig_type */
+};
+
+/*
+ * Driver QP states (iWARP-style state machine). See
+ * erdma_modify_qp_internal() for the transition rules.
+ */
+enum erdma_qp_state {
+	ERDMA_QP_STATE_IDLE = 0,
+	ERDMA_QP_STATE_RTR = 1,
+	ERDMA_QP_STATE_RTS = 2,
+	ERDMA_QP_STATE_CLOSING = 3,
+	ERDMA_QP_STATE_TERMINATE = 4,
+	ERDMA_QP_STATE_ERROR = 5,
+	ERDMA_QP_STATE_UNDEF = 7, /* note: 6 is intentionally skipped */
+	ERDMA_QP_STATE_COUNT = 8
+};
+
+/* Mask of which erdma_qp_attrs fields a modify operation touches. */
+enum erdma_qp_attr_mask {
+	ERDMA_QP_ATTR_STATE = BIT(0),
+	/* NOTE(review): BIT(1) unused — intentional hole? */
+	ERDMA_QP_ATTR_LLP_HANDLE = BIT(2),
+	ERDMA_QP_ATTR_ORD = BIT(3),
+	ERDMA_QP_ATTR_IRD = BIT(4),
+	ERDMA_QP_ATTR_SQ_SIZE = BIT(5),
+	ERDMA_QP_ATTR_RQ_SIZE = BIT(6),
+	ERDMA_QP_ATTR_MPA = BIT(7)
+};
+
+/* Software-tracked QP attributes. */
+struct erdma_qp_attrs {
+	enum erdma_qp_state state;
+	enum erdma_cc_method cc_method; /* presumably congestion-control method; see erdma_hw.h */
+	u32 sq_size;
+	u32 rq_size;
+	u32 orq_size; /* outbound read queue depth */
+	u32 irq_size; /* inbound read queue depth */
+	u32 max_send_sge;
+	u32 max_recv_sge;
+	u32 cookie;
+#define ERDMA_QP_ACTIVE 0
+#define ERDMA_QP_PASSIVE 1
+	/* connection role (active/passive), not the IB QP transport type */
+	u8 qp_type;
+	u8 pd_len; /* presumably MPA private data length — confirm in CM code */
+};
+
+/* Queue pair; lifetime is managed via ref (see erdma_qp_get/put). */
+struct erdma_qp {
+	struct ib_qp ibqp;
+	struct kref ref;
+	struct completion safe_free; /* presumably completed on final put so destroy can wait — confirm */
+	struct erdma_dev *dev;
+	struct erdma_cep *cep; /* connection endpoint (CM state) */
+	struct rw_semaphore state_lock; /* protects attrs.state transitions */
+
+	/* exactly one of these is valid, depending on kernel vs. user QP */
+	union {
+		struct erdma_kqp kern_qp;
+		struct erdma_uqp user_qp;
+	};
+
+	struct erdma_cq *scq; /* send CQ */
+	struct erdma_cq *rcq; /* receive CQ */
+
+	struct erdma_qp_attrs attrs;
+	spinlock_t lock;
+};
+
+/* Kernel CQ state. */
+struct erdma_kcq_info {
+	struct erdma_cqe *qbuf; /* CQE ring buffer */
+	dma_addr_t qbuf_dma_addr;
+	u32 ci; /* consumer index */
+	u32 owner; /* presumably CQE ownership/phase value for valid detection — confirm in poll path */
+	u32 cmdsn;
+
+	spinlock_t lock; /* serializes polling/arming */
+	u8 __iomem *db; /* mapped CQ doorbell register */
+	u64 *db_record;
+};
+
+/* Userspace CQ state; the CQE buffer is pinned user memory. */
+struct erdma_ucq_info {
+	struct erdma_mem qbuf_mtt;
+	struct erdma_user_dbrecords_page *user_dbr_page;
+	dma_addr_t db_info_dma_addr;
+};
+
+/* Completion queue. */
+struct erdma_cq {
+	struct ib_cq ibcq;
+	u32 cqn; /* device-assigned CQ number */
+
+	u32 depth;
+	u32 assoc_eqn; /* event queue this CQ reports to */
+
+	/* exactly one of these is valid, depending on kernel vs. user CQ */
+	union {
+		struct erdma_kcq_info kern_cq;
+		struct erdma_ucq_info user_cq;
+	};
+};
+
+#define QP_ID(qp) ((qp)->ibqp.qp_num)
+
+/* Look up a QP by number; returns NULL if not found. */
+static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
+{
+	/* xa_load() returns void *, no cast needed in C */
+	return xa_load(&dev->qp_xa, id);
+}
+
+/* Look up a CQ by number; returns NULL if not found. */
+static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
+{
+	return xa_load(&dev->cq_xa, id);
+}
+
+/* QP reference counting and connection-teardown helpers. */
+void erdma_qp_get(struct erdma_qp *qp);
+void erdma_qp_put(struct erdma_qp *qp);
+int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
+			     enum erdma_qp_attr_mask mask);
+void erdma_qp_llp_close(struct erdma_qp *qp);
+void erdma_qp_cm_drop(struct erdma_qp *qp);
+
+/* Convert a core ib_ucontext to the driver's erdma_ucontext. */
+static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
+{
+	return container_of(ibctx, struct erdma_ucontext, ibucontext);
+}
+
+/* Convert a core ib_pd to the driver's erdma_pd. */
+static inline struct erdma_pd *to_epd(struct ib_pd *pd)
+{
+	return container_of(pd, struct erdma_pd, ibpd);
+}
+
+/* Convert a core ib_mr to the driver's erdma_mr. */
+static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct erdma_mr, ibmr);
+}
+
+/* Convert a core ib_qp to the driver's erdma_qp. */
+static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
+{
+	return container_of(qp, struct erdma_qp, ibqp);
+}
+
+/* Convert a core ib_cq to the driver's erdma_cq. */
+static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct erdma_cq, ibcq);
+}
+
+/* Convert a core rdma_user_mmap_entry to the driver's wrapper. */
+static inline struct erdma_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *ibmmap)
+{
+	return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
+}
+
+/*
+ * Return the SQE slot for index @idx in a kernel QP's SQ ring.
+ * The mask wrap requires attrs.sq_size to be a power of two —
+ * presumably guaranteed at create time; confirm in erdma_create_qp().
+ */
+static inline void *get_sq_entry(struct erdma_qp *qp, u16 idx)
+{
+	idx &= (qp->attrs.sq_size - 1);
+	return qp->kern_qp.sq_buf + (idx << SQEBB_SHIFT);
+}
+
+/* Verbs entry points, hooked into the core via struct ib_device_ops. */
+int erdma_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *data);
+void erdma_dealloc_ucontext(struct ib_ucontext *ctx);
+int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
+		       struct ib_udata *data);
+int erdma_get_port_immutable(struct ib_device *dev, u32 port,
+			     struct ib_port_immutable *ib_port_immutable);
+int erdma_create_cq(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
+		    struct ib_udata *data);
+int erdma_query_port(struct ib_device *dev, u32 port,
+		     struct ib_port_attr *attr);
+int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
+		    union ib_gid *gid);
+int erdma_alloc_pd(struct ib_pd *pd, struct ib_udata *data);
+int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+		    struct ib_udata *data);
+int erdma_query_qp(struct ib_qp *qp, struct ib_qp_attr *attr, int mask,
+		   struct ib_qp_init_attr *init_attr);
+int erdma_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, int mask,
+		    struct ib_udata *data);
+int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int erdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+/* Memory registration. */
+struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+				u64 virt, int access, struct ib_udata *udata);
+struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
+int erdma_dereg_mr(struct ib_mr *mr, struct ib_udata *data);
+int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void erdma_qp_get_ref(struct ib_qp *qp);
+void erdma_qp_put_ref(struct ib_qp *qp);
+struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
+/* Data path. */
+int erdma_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
+		    const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+		    const struct ib_recv_wr **bad_recv_wr);
+int erdma_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+				u32 max_num_sg);
+int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		    unsigned int *sg_offset);
+void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+
+#endif
-- 
2.27.0


  parent reply	other threads:[~2022-03-14  6:48 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-03-14  6:47 [PATCH for-next v4 00/12] Elastic RDMA Adapter (ERDMA) driver Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 01/12] RDMA: Add ERDMA to rdma_driver_id definition Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 02/12] RDMA/core: Allow calling query_port when netdev isn't attached in iWarp Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 03/12] RDMA/erdma: Add the hardware related definitions Cheng Xu
2022-03-18 10:27   ` Wenpeng Liang
2022-03-19  7:53     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 04/12] RDMA/erdma: Add main include file Cheng Xu
2022-03-18 10:35   ` Wenpeng Liang
2022-03-19  8:11     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 05/12] RDMA/erdma: Add cmdq implementation Cheng Xu
2022-03-18 11:13   ` Wenpeng Liang
2022-03-19  8:38     ` Cheng Xu
2022-03-18 11:16   ` Wenpeng Liang
2022-03-18 18:17     ` Jason Gunthorpe
2022-03-19  1:26       ` Wenpeng Liang
2022-03-18 12:57   ` Wenpeng Liang
2022-03-19  9:18     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 06/12] RDMA/erdma: Add event queue implementation Cheng Xu
2022-03-18 11:43   ` Wenpeng Liang
2022-03-18 18:18     ` Jason Gunthorpe
2022-03-19  9:43       ` Cheng Xu
2022-03-21 22:23         ` Jason Gunthorpe
2022-03-22  3:06           ` Cheng Xu
2022-03-19  8:54     ` Cheng Xu
2022-03-14  6:47 ` Cheng Xu [this message]
2022-03-18 11:46   ` [PATCH for-next v4 07/12] RDMA/erdma: Add verbs header file Wenpeng Liang
2022-03-19  8:55     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 08/12] RDMA/erdma: Add verbs implementation Cheng Xu
2022-03-18 12:24   ` Wenpeng Liang
2022-03-19  9:06     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 09/12] RDMA/erdma: Add connection management (CM) support Cheng Xu
2022-03-18 12:38   ` Wenpeng Liang
2022-03-19  9:10     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 10/12] RDMA/erdma: Add the erdma module Cheng Xu
2022-03-18 12:46   ` Wenpeng Liang
2022-03-19  9:13     ` Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 11/12] RDMA/erdma: Add the ABI definitions Cheng Xu
2022-03-14  6:47 ` [PATCH for-next v4 12/12] RDMA/erdma: Add driver to kernel build environment Cheng Xu
2022-03-17  3:14   ` kernel test robot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220314064739.81647-8-chengyou@linux.alibaba.com \
    --to=chengyou@linux.alibaba.com \
    --cc=BMT@zurich.ibm.com \
    --cc=KaiShen@linux.alibaba.com \
    --cc=dledford@redhat.com \
    --cc=jgg@ziepe.ca \
    --cc=leon@kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=tonylu@linux.alibaba.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.