* [PATCH 0/2] providers/rxe: Implement the xrc transport protocol
@ 2022-09-17  3:15 Bob Pearson
  2022-09-17  3:15 ` [PATCH 1/2] Update kernel headers Bob Pearson
  2022-09-17  3:15 ` [PATCH 2/2] providers/rxe: Implement the xrc transport protocol Bob Pearson
  0 siblings, 2 replies; 9+ messages in thread
From: Bob Pearson @ 2022-09-17  3:15 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Make the changes needed to implement the xrc transport protocol:
- implement ibv_create_srq_ex
- implement ibv_open_xrcd and ibv_close_xrcd
- implement ibv_get_srq_num
- implement xrc support for the qp_ex work request interface

The patch "providers/rxe: Remove redundant num_sge fields" is
a pre-requisite for this patch.
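
For illustration (not from these patches): a minimal sketch of how an
application could drive the new verbs against rxe. The queue sizes, the
fd = -1 / O_CREAT choice (a process-private xrc domain) and the error
handling are assumptions, not taken from the series.

#include <fcntl.h>
#include <infiniband/verbs.h>

static struct ibv_srq *open_xrc_srq(struct ibv_context *ctx,
				    struct ibv_pd *pd, struct ibv_cq *cq,
				    struct ibv_xrcd **xrcd_out,
				    uint32_t *srq_num)
{
	struct ibv_xrcd_init_attr xrcd_attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD | IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = -1,		/* no backing file: private xrc domain */
		.oflags = O_CREAT,
	};
	struct ibv_srq_init_attr_ex attr = {
		.attr = { .max_wr = 64, .max_sge = 1 },
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_XRCD |
			     IBV_SRQ_INIT_ATTR_CQ | IBV_SRQ_INIT_ATTR_PD,
		.srq_type = IBV_SRQT_XRC,
		.pd = pd,
		.cq = cq,
	};
	struct ibv_xrcd *xrcd;
	struct ibv_srq *srq;

	xrcd = ibv_open_xrcd(ctx, &xrcd_attr);
	if (!xrcd)
		return NULL;

	attr.xrcd = xrcd;
	srq = ibv_create_srq_ex(ctx, &attr);
	if (!srq) {
		ibv_close_xrcd(xrcd);
		return NULL;
	}

	/* the number the sending side passes as the remote srqn */
	if (ibv_get_srq_num(srq, srq_num)) {
		ibv_destroy_srq(srq);
		ibv_close_xrcd(xrcd);
		return NULL;
	}

	*xrcd_out = xrcd;
	return srq;
}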

Bob Pearson (2):
  Update kernel headers
  providers/rxe: Implement the xrc transport protocol

 kernel-headers/rdma/ib_user_ioctl_verbs.h |   2 +
 kernel-headers/rdma/ib_user_verbs.h       |  16 +
 kernel-headers/rdma/rdma_user_rxe.h       |  14 +-
 providers/rxe/rxe-abi.h                   |   2 +
 providers/rxe/rxe.c                       | 387 +++++++++++++++-------
 providers/rxe/rxe.h                       |  21 +-
 6 files changed, 318 insertions(+), 124 deletions(-)


base-commit: 41c28b03d2b7cfc982eedd2e7491b01df984f5d7
-- 
2.34.1



* [PATCH 1/2] Update kernel headers
  2022-09-17  3:15 [PATCH 0/2] providers/rxe: Implement the xrc transport protocol Bob Pearson
@ 2022-09-17  3:15 ` Bob Pearson
  2022-09-17  3:15 ` [PATCH 2/2] providers/rxe: Implement the xrc transport protocol Bob Pearson
  1 sibling, 0 replies; 9+ messages in thread
From: Bob Pearson @ 2022-09-17  3:15 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

To commit ?? ("RDMA/rxe: Extend rxe_resp.c to support xrc qps").

These header changes repurpose the reserved word in struct rxe_send_wr
as srq_num (used only for xrc), restore the num_sge field in
struct rxe_recv_wqe, and replace the srq_num field in
struct rxe_create_srq_resp with reserved space, since the srq number
is now returned through the common verbs srq object.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 kernel-headers/rdma/rdma_user_rxe.h       | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel-headers/rdma/rdma_user_rxe.h b/kernel-headers/rdma/rdma_user_rxe.h
index 73f679df..c44db5fa 100644
--- a/kernel-headers/rdma/rdma_user_rxe.h
+++ b/kernel-headers/rdma/rdma_user_rxe.h
@@ -74,7 +74,7 @@ struct rxe_av {
 
 struct rxe_send_wr {
 	__aligned_u64		wr_id;
-	__u32			reserved;
+	__u32			srq_num;	/* xrc only */
 	__u32			opcode;
 	__u32			send_flags;
 	union {
@@ -166,7 +173,7 @@ struct rxe_send_wqe {
 
 struct rxe_recv_wqe {
 	__aligned_u64		wr_id;
-	__u32			reserved;
+	__u32			num_sge;
 	__u32			padding;
 	struct rxe_dma_info	dma;
 };
@@ -191,8 +198,7 @@ struct rxe_create_qp_resp {
 
 struct rxe_create_srq_resp {
 	struct mminfo mi;
-	__u32 srq_num;
-	__u32 reserved;
+	__u32 reserved[2];
 };
 
 struct rxe_modify_srq_cmd {
-- 
2.34.1



* [PATCH 2/2] providers/rxe: Implement the xrc transport protocol
  2022-09-17  3:15 [PATCH 0/2] providers/rxe: Implement the xrc transport protocol Bob Pearson
  2022-09-17  3:15 ` [PATCH 1/2] Update kernel headers Bob Pearson
@ 2022-09-17  3:15 ` Bob Pearson
  2022-09-26 17:58   ` Jason Gunthorpe
  1 sibling, 1 reply; 9+ messages in thread
From: Bob Pearson @ 2022-09-17  3:15 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Make changes to implement the xrc transport protocol.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
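
For illustration (not part of this patch): a sketch of posting a send
on an XRC_SEND qp through the extended work request interface wired up
here. The qp is assumed to have been created with IBV_QPT_XRC_SEND and
IBV_QP_INIT_ATTR_SEND_OPS_FLAGS; buf, mr, len and remote_srqn are
assumed to be supplied by the caller, with remote_srqn obtained on the
receiving side via ibv_get_srq_num().

#include <stdint.h>
#include <infiniband/verbs.h>

static int post_xrc_send(struct ibv_qp *qp, struct ibv_mr *mr, void *buf,
			 uint32_t len, uint32_t remote_srqn)
{
	struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp);

	ibv_wr_start(qpx);
	qpx->wr_id = 1;
	qpx->wr_flags = IBV_SEND_SIGNALED;
	ibv_wr_send(qpx);
	/* name the srq on the remote xrc target node; lands in
	 * wr_set_xrc_srqn() below */
	ibv_wr_set_xrc_srqn(qpx, remote_srqn);
	ibv_wr_set_sge(qpx, mr->lkey, (uint64_t)(uintptr_t)buf, len);

	return ibv_wr_complete(qpx);
}
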
 providers/rxe/rxe-abi.h |   2 +
 providers/rxe/rxe.c     | 387 ++++++++++++++++++++++++++++------------
 providers/rxe/rxe.h     |  21 ++-
 3 files changed, 290 insertions(+), 120 deletions(-)

diff --git a/providers/rxe/rxe-abi.h b/providers/rxe/rxe-abi.h
index 020201a9..07e90d81 100644
--- a/providers/rxe/rxe-abi.h
+++ b/providers/rxe/rxe-abi.h
@@ -51,6 +51,8 @@ DECLARE_DRV_CMD(urxe_create_qp_ex, IB_USER_VERBS_EX_CMD_CREATE_QP,
 		empty, rxe_create_qp_resp);
 DECLARE_DRV_CMD(urxe_create_srq, IB_USER_VERBS_CMD_CREATE_SRQ,
 		empty, rxe_create_srq_resp);
+DECLARE_DRV_CMD(urxe_create_srq_ex, IB_USER_VERBS_CMD_CREATE_XSRQ,
+		empty, rxe_create_srq_resp);
 DECLARE_DRV_CMD(urxe_modify_srq, IB_USER_VERBS_CMD_MODIFY_SRQ,
 		rxe_modify_srq_cmd, empty);
 DECLARE_DRV_CMD(urxe_resize_cq, IB_USER_VERBS_CMD_RESIZE_CQ,
diff --git a/providers/rxe/rxe.c b/providers/rxe/rxe.c
index 0e8f5605..4acd7140 100644
--- a/providers/rxe/rxe.c
+++ b/providers/rxe/rxe.c
@@ -574,10 +574,52 @@ static int rxe_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
 	return npolled;
 }
 
-static struct ibv_srq *rxe_create_srq(struct ibv_pd *pd,
+static struct ibv_xrcd *rxe_open_xrcd(struct ibv_context *ibcontext,
+				      struct ibv_xrcd_init_attr *attr)
+{
+	struct ibv_open_xrcd cmd;
+	struct ib_uverbs_open_xrcd_resp resp;
+	struct rxe_xrcd *xrcd;
+	struct verbs_xrcd *vxrcd;
+	int err;
+
+	xrcd = calloc(1, sizeof *xrcd);
+	if (!xrcd)
+		return NULL;
+
+	vxrcd = &xrcd->vxrcd;
+
+	err = ibv_cmd_open_xrcd(ibcontext, vxrcd, sizeof(*vxrcd), attr,
+				&cmd, sizeof cmd, &resp, sizeof resp);
+	if (err)
+		goto err_out;
+
+	return &vxrcd->xrcd;
+
+err_out:
+	errno = err;
+	free(xrcd);
+	return NULL;
+}
+
+static int rxe_close_xrcd(struct ibv_xrcd *ibxrcd)
+{
+	struct rxe_xrcd *xrcd = to_rxrcd(ibxrcd);
+	int err;
+
+	err = ibv_cmd_close_xrcd(&xrcd->vxrcd);
+	if (err)
+		return err;
+
+	free(xrcd);
+	return 0;
+}
+
+static struct ibv_srq *rxe_create_srq(struct ibv_pd *ibpd,
 				      struct ibv_srq_init_attr *attr)
 {
 	struct rxe_srq *srq;
+	struct ibv_srq *ibsrq;
 	struct ibv_create_srq cmd;
 	struct urxe_create_srq_resp resp;
 	int ret;
@@ -586,7 +628,9 @@ static struct ibv_srq *rxe_create_srq(struct ibv_pd *pd,
 	if (srq == NULL)
 		return NULL;
 
-	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr, &cmd, sizeof(cmd),
+	ibsrq = &srq->vsrq.srq;
+
+	ret = ibv_cmd_create_srq(ibpd, ibsrq, attr, &cmd, sizeof(cmd),
 				 &resp.ibv_resp, sizeof(resp));
 	if (ret) {
 		free(srq);
@@ -595,9 +639,9 @@ static struct ibv_srq *rxe_create_srq(struct ibv_pd *pd,
 
 	srq->rq.queue = mmap(NULL, resp.mi.size,
 			     PROT_READ | PROT_WRITE, MAP_SHARED,
-			     pd->context->cmd_fd, resp.mi.offset);
+			     ibpd->context->cmd_fd, resp.mi.offset);
 	if ((void *)srq->rq.queue == MAP_FAILED) {
-		ibv_cmd_destroy_srq(&srq->ibv_srq);
+		ibv_cmd_destroy_srq(ibsrq);
 		free(srq);
 		return NULL;
 	}
@@ -606,16 +650,55 @@ static struct ibv_srq *rxe_create_srq(struct ibv_pd *pd,
 	srq->rq.max_sge = attr->attr.max_sge;
 	pthread_spin_init(&srq->rq.lock, PTHREAD_PROCESS_PRIVATE);
 
-	return &srq->ibv_srq;
+	return ibsrq;
+}
+
+static struct ibv_srq *rxe_create_srq_ex(
+		struct ibv_context *ibcontext,
+		struct ibv_srq_init_attr_ex *attr_ex)
+{
+	struct rxe_srq *srq;
+	struct ibv_srq *ibsrq;
+	struct ibv_create_xsrq cmd;
+	struct urxe_create_srq_ex_resp resp;
+	int ret;
+
+	srq = calloc(1, sizeof(*srq));
+	if (srq == NULL)
+		return NULL;
+
+	ibsrq = &srq->vsrq.srq;
+
+	ret = ibv_cmd_create_srq_ex(ibcontext, &srq->vsrq, attr_ex,
+			  &cmd, sizeof(cmd), &resp.ibv_resp, sizeof(resp));
+	if (ret) {
+		free(srq);
+		return NULL;
+	}
+
+	srq->rq.queue = mmap(NULL, resp.mi.size,
+			     PROT_READ | PROT_WRITE, MAP_SHARED,
+			     ibcontext->cmd_fd, resp.mi.offset);
+	if ((void *)srq->rq.queue == MAP_FAILED) {
+		ibv_cmd_destroy_srq(ibsrq);
+		free(srq);
+		return NULL;
+	}
+
+	srq->mmap_info = resp.mi;
+	srq->rq.max_sge = attr_ex->attr.max_sge;
+	pthread_spin_init(&srq->rq.lock, PTHREAD_PROCESS_PRIVATE);
+
+	return ibsrq;
 }
 
-static int rxe_modify_srq(struct ibv_srq *ibsrq,
-		   struct ibv_srq_attr *attr, int attr_mask)
+static int rxe_modify_srq(struct ibv_srq *ibsrq, struct ibv_srq_attr *attr,
+			  int attr_mask)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct urxe_modify_srq cmd;
-	int rc = 0;
 	struct mminfo mi;
+	int err;
 
 	mi.offset = 0;
 	mi.size = 0;
@@ -624,9 +707,9 @@ static int rxe_modify_srq(struct ibv_srq *ibsrq,
 		pthread_spin_lock(&srq->rq.lock);
 
 	cmd.mmap_info_addr = (__u64)(uintptr_t) &mi;
-	rc = ibv_cmd_modify_srq(ibsrq, attr, attr_mask,
-				&cmd.ibv_cmd, sizeof(cmd));
-	if (rc)
+	err = ibv_cmd_modify_srq(ibsrq, attr, attr_mask,
+				 &cmd.ibv_cmd, sizeof(cmd));
+	if (err)
 		goto out;
 
 	if (attr_mask & IBV_SRQ_MAX_WR) {
@@ -636,7 +719,7 @@ static int rxe_modify_srq(struct ibv_srq *ibsrq,
 				     ibsrq->context->cmd_fd, mi.offset);
 
 		if ((void *)srq->rq.queue == MAP_FAILED) {
-			rc = errno;
+			err = errno;
 			srq->rq.queue = NULL;
 			srq->mmap_info.size = 0;
 			goto out;
@@ -648,30 +731,43 @@ static int rxe_modify_srq(struct ibv_srq *ibsrq,
 out:
 	if (attr_mask & IBV_SRQ_MAX_WR)
 		pthread_spin_unlock(&srq->rq.lock);
-	return rc;
+
+	return err;
+}
+
+static int rxe_get_srq_num(struct ibv_srq *ibsrq, uint32_t *srq_num)
+{
+	struct rxe_srq *srq = to_rsrq(ibsrq);
+
+	if (!srq->vsrq.xrcd)
+		return EOPNOTSUPP;
+
+	*srq_num = srq->vsrq.srq_num;
+
+	return 0;
 }
 
-static int rxe_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr)
+static int rxe_query_srq(struct ibv_srq *ibsrq, struct ibv_srq_attr *attr)
 {
 	struct ibv_query_srq cmd;
 
-	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof(cmd));
+	return ibv_cmd_query_srq(ibsrq, attr, &cmd, sizeof(cmd));
 }
 
 static int rxe_destroy_srq(struct ibv_srq *ibvsrq)
 {
-	int ret;
+	int err;
 	struct rxe_srq *srq = to_rsrq(ibvsrq);
 	struct rxe_queue_buf *q = srq->rq.queue;
 
-	ret = ibv_cmd_destroy_srq(ibvsrq);
-	if (!ret) {
+	err = ibv_cmd_destroy_srq(ibvsrq);
+	if (!err) {
 		if (srq->mmap_info.size)
 			munmap(q, srq->mmap_info.size);
 		free(srq);
 	}
 
-	return ret;
+	return err;
 }
 
 static int rxe_post_one_recv(struct rxe_wq *rq, struct ibv_recv_wr *recv_wr)
@@ -715,11 +811,11 @@ out:
 	return rc;
 }
 
-static int rxe_post_srq_recv(struct ibv_srq *ibvsrq,
+static int rxe_post_srq_recv(struct ibv_srq *ibsrq,
 			     struct ibv_recv_wr *recv_wr,
 			     struct ibv_recv_wr **bad_recv_wr)
 {
-	struct rxe_srq *srq = to_rsrq(ibvsrq);
+	struct rxe_srq *srq = to_rsrq(ibsrq);
 	int rc = 0;
 
 	pthread_spin_lock(&srq->rq.lock);
@@ -979,6 +1075,18 @@ static void wr_set_ud_addr(struct ibv_qp_ex *ibqp, struct ibv_ah *ibah,
 		memcpy(&wqe->wr.wr.ud.av, &ah->av, sizeof(ah->av));
 }
 
+static void wr_set_xrc_srqn(struct ibv_qp_ex *ibqp, uint32_t remote_srqn)
+{
+	struct rxe_qp *qp = container_of(ibqp, struct rxe_qp, vqp.qp_ex);
+	struct rxe_send_wqe *wqe = addr_from_index(qp->sq.queue,
+						   qp->cur_index - 1);
+
+	if (qp->err)
+		return;
+
+	wqe->wr.srq_num = remote_srqn;
+}
+
 static void wr_set_inline_data(struct ibv_qp_ex *ibqp, void *addr,
 			       size_t length)
 {
@@ -1118,36 +1226,54 @@ static int map_queue_pair(int cmd_fd, struct rxe_qp *qp,
 			  struct ibv_qp_init_attr *attr,
 			  struct rxe_create_qp_resp *resp)
 {
-	if (attr->srq) {
-		qp->rq.max_sge = 0;
-		qp->rq.queue = NULL;
-		qp->rq_mmap_info.size = 0;
-	} else {
-		qp->rq.max_sge = attr->cap.max_recv_sge;
-		qp->rq.queue = mmap(NULL, resp->rq_mi.size, PROT_READ | PROT_WRITE,
-				    MAP_SHARED,
-				    cmd_fd, resp->rq_mi.offset);
-		if ((void *)qp->rq.queue == MAP_FAILED)
+	switch (attr->qp_type) {
+	case IBV_QPT_RC:
+	case IBV_QPT_UC:
+	case IBV_QPT_UD:
+		if (attr->srq) {
+			qp->rq.max_sge = 0;
+			qp->rq.queue = NULL;
+			qp->rq_mmap_info.size = 0;
+		} else {
+			qp->rq.max_sge = attr->cap.max_recv_sge;
+			qp->rq.queue = mmap(NULL, resp->rq_mi.size,
+					    PROT_READ | PROT_WRITE,
+					    MAP_SHARED, cmd_fd,
+					    resp->rq_mi.offset);
+			if ((void *)qp->rq.queue == MAP_FAILED)
+				return errno;
+
+			qp->rq_mmap_info = resp->rq_mi;
+			pthread_spin_init(&qp->rq.lock,
+					  PTHREAD_PROCESS_PRIVATE);
+		}
+		/* fall through */
+	case IBV_QPT_XRC_SEND:
+		qp->sq.max_sge = attr->cap.max_send_sge;
+		qp->sq.max_inline = attr->cap.max_inline_data;
+		qp->sq.queue = mmap(NULL, resp->sq_mi.size,
+				    PROT_READ | PROT_WRITE,
+				    MAP_SHARED, cmd_fd,
+				    resp->sq_mi.offset);
+		if ((void *)qp->sq.queue == MAP_FAILED) {
+			if (qp->rq_mmap_info.size)
+				munmap(qp->rq.queue, qp->rq_mmap_info.size);
 			return errno;
+		}
 
-		qp->rq_mmap_info = resp->rq_mi;
-		pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE);
-	}
-
-	qp->sq.max_sge = attr->cap.max_send_sge;
-	qp->sq.max_inline = attr->cap.max_inline_data;
-	qp->sq.queue = mmap(NULL, resp->sq_mi.size, PROT_READ | PROT_WRITE,
-			    MAP_SHARED,
-			    cmd_fd, resp->sq_mi.offset);
-	if ((void *)qp->sq.queue == MAP_FAILED) {
-		if (qp->rq_mmap_info.size)
-			munmap(qp->rq.queue, qp->rq_mmap_info.size);
-		return errno;
+		qp->sq_mmap_info = resp->sq_mi;
+		pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE);
+		break;
+	case IBV_QPT_XRC_RECV:
+		break;
+	case IBV_QPT_RAW_PACKET:
+	case IBV_QPT_DRIVER:
+		/* not reached */
+		return EOPNOTSUPP;
+	default:
+		return EINVAL;
 	}
 
-	qp->sq_mmap_info = resp->sq_mi;
-	pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE);
-
 	return 0;
 }
 
@@ -1189,7 +1315,7 @@ err:
 enum {
 	RXE_QP_CREATE_FLAGS_SUP = 0,
 
-	RXE_QP_COMP_MASK_SUP = IBV_QP_INIT_ATTR_PD |
+	RXE_QP_COMP_MASK_SUP = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_XRCD |
 		IBV_QP_INIT_ATTR_CREATE_FLAGS | IBV_QP_INIT_ATTR_SEND_OPS_FLAGS,
 
 	RXE_SUP_RC_QP_SEND_OPS_FLAGS =
@@ -1206,6 +1332,13 @@ enum {
 
 	RXE_SUP_UD_QP_SEND_OPS_FLAGS =
 		IBV_QP_EX_WITH_SEND | IBV_QP_EX_WITH_SEND_WITH_IMM,
+
+	RXE_SUP_XRC_QP_SEND_OPS_FLAGS =
+		IBV_QP_EX_WITH_RDMA_WRITE | IBV_QP_EX_WITH_RDMA_WRITE_WITH_IMM |
+		IBV_QP_EX_WITH_SEND | IBV_QP_EX_WITH_SEND_WITH_IMM |
+		IBV_QP_EX_WITH_RDMA_READ | IBV_QP_EX_WITH_ATOMIC_CMP_AND_SWP |
+		IBV_QP_EX_WITH_ATOMIC_FETCH_AND_ADD | IBV_QP_EX_WITH_LOCAL_INV |
+		IBV_QP_EX_WITH_BIND_MW | IBV_QP_EX_WITH_SEND_WITH_INV,
 };
 
 static int check_qp_init_attr(struct ibv_qp_init_attr_ex *attr)
@@ -1220,17 +1353,28 @@ static int check_qp_init_attr(struct ibv_qp_init_attr_ex *attr)
 	if (attr->comp_mask & IBV_QP_INIT_ATTR_SEND_OPS_FLAGS) {
 		switch (attr->qp_type) {
 		case IBV_QPT_RC:
-			if (attr->send_ops_flags & ~RXE_SUP_RC_QP_SEND_OPS_FLAGS)
+			if (attr->send_ops_flags &
+					~RXE_SUP_RC_QP_SEND_OPS_FLAGS)
 				goto err;
 			break;
 		case IBV_QPT_UC:
-			if (attr->send_ops_flags & ~RXE_SUP_UC_QP_SEND_OPS_FLAGS)
+			if (attr->send_ops_flags &
+					~RXE_SUP_UC_QP_SEND_OPS_FLAGS)
 				goto err;
 			break;
 		case IBV_QPT_UD:
-			if (attr->send_ops_flags & ~RXE_SUP_UD_QP_SEND_OPS_FLAGS)
+			if (attr->send_ops_flags &
+					~RXE_SUP_UD_QP_SEND_OPS_FLAGS)
+				goto err;
+			break;
+		case IBV_QPT_XRC_SEND:
+			if (attr->send_ops_flags &
+					~RXE_SUP_XRC_QP_SEND_OPS_FLAGS)
 				goto err;
 			break;
+		case IBV_QPT_XRC_RECV:
+			goto err;
+			break;
 		default:
 			goto err;
 		}
@@ -1275,6 +1419,7 @@ static void set_qp_send_ops(struct rxe_qp *qp, uint64_t flags)
 		qp->vqp.qp_ex.wr_send_inv = wr_send_inv;
 
 	qp->vqp.qp_ex.wr_set_ud_addr = wr_set_ud_addr;
+	qp->vqp.qp_ex.wr_set_xrc_srqn = wr_set_xrc_srqn;
 	qp->vqp.qp_ex.wr_set_inline_data = wr_set_inline_data;
 	qp->vqp.qp_ex.wr_set_inline_data_list = wr_set_inline_data_list;
 	qp->vqp.qp_ex.wr_set_sge = wr_set_sge;
@@ -1286,38 +1431,38 @@ static void set_qp_send_ops(struct rxe_qp *qp, uint64_t flags)
 }
 
 static struct ibv_qp *rxe_create_qp_ex(struct ibv_context *context,
-				struct ibv_qp_init_attr_ex *attr)
+				       struct ibv_qp_init_attr_ex *attr)
 {
-	int ret;
+	int err;
 	struct rxe_qp *qp;
 	struct ibv_create_qp_ex cmd = {};
 	struct urxe_create_qp_ex_resp resp = {};
 	size_t cmd_size = sizeof(cmd);
 	size_t resp_size = sizeof(resp);
 
-	ret = check_qp_init_attr(attr);
-	if (ret)
-		goto err;
+	err = check_qp_init_attr(attr);
+	if (err)
+		goto err_out;
 
 	qp = calloc(1, sizeof(*qp));
 	if (!qp)
-		goto err;
+		goto err_out;
 
 	if (attr->comp_mask & IBV_QP_INIT_ATTR_SEND_OPS_FLAGS)
 		set_qp_send_ops(qp, attr->send_ops_flags);
 
-	ret = ibv_cmd_create_qp_ex2(context, &qp->vqp, attr,
+	err = ibv_cmd_create_qp_ex2(context, &qp->vqp, attr,
 				    &cmd, cmd_size,
 				    &resp.ibv_resp, resp_size);
-	if (ret)
+	if (err)
 		goto err_free;
 
 	qp->vqp.comp_mask |= VERBS_QP_EX;
 
-	ret = map_queue_pair(context->cmd_fd, qp,
-			     (struct ibv_qp_init_attr *)attr,
-			     &resp.drv_payload);
-	if (ret)
+	err = map_queue_pair(context->cmd_fd, qp,
+		     (struct ibv_qp_init_attr *)attr,
+		     &resp.drv_payload);
+	if (err)
 		goto err_destroy;
 
 	return &qp->vqp.qp;
@@ -1326,7 +1471,8 @@ err_destroy:
 	ibv_cmd_destroy_qp(&qp->vqp.qp);
 err_free:
 	free(qp);
-err:
+err_out:
+	errno = err;
 	return NULL;
 }
 
@@ -1397,56 +1543,57 @@ static int validate_send_wr(struct rxe_qp *qp, struct ibv_send_wr *ibwr,
 	return 0;
 }
 
-static void convert_send_wr(struct rxe_qp *qp, struct rxe_send_wr *kwr,
-					struct ibv_send_wr *uwr)
+static void convert_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+					struct ibv_send_wr *ibwr)
 {
 	struct ibv_mw *ibmw;
 	struct ibv_mr *ibmr;
 
-	memset(kwr, 0, sizeof(*kwr));
+	memset(wr, 0, sizeof(*wr));
 
-	kwr->wr_id		= uwr->wr_id;
-	kwr->opcode		= uwr->opcode;
-	kwr->send_flags		= uwr->send_flags;
-	kwr->ex.imm_data	= uwr->imm_data;
+	wr->wr_id		= ibwr->wr_id;
+	wr->srq_num		= ibwr->qp_type.xrc.remote_srqn;
+	wr->opcode		= ibwr->opcode;
+	wr->send_flags		= ibwr->send_flags;
+	wr->ex.imm_data		= ibwr->imm_data;
 
-	switch (uwr->opcode) {
+	switch (ibwr->opcode) {
 	case IBV_WR_RDMA_WRITE:
 	case IBV_WR_RDMA_WRITE_WITH_IMM:
 	case IBV_WR_RDMA_READ:
-		kwr->wr.rdma.remote_addr	= uwr->wr.rdma.remote_addr;
-		kwr->wr.rdma.rkey		= uwr->wr.rdma.rkey;
+		wr->wr.rdma.remote_addr		= ibwr->wr.rdma.remote_addr;
+		wr->wr.rdma.rkey		= ibwr->wr.rdma.rkey;
 		break;
 
 	case IBV_WR_SEND:
 	case IBV_WR_SEND_WITH_IMM:
 		if (qp_type(qp) == IBV_QPT_UD) {
-			struct rxe_ah *ah = to_rah(uwr->wr.ud.ah);
+			struct rxe_ah *ah = to_rah(ibwr->wr.ud.ah);
 
-			kwr->wr.ud.remote_qpn	= uwr->wr.ud.remote_qpn;
-			kwr->wr.ud.remote_qkey	= uwr->wr.ud.remote_qkey;
-			kwr->wr.ud.ah_num	= ah->ah_num;
+			wr->wr.ud.remote_qpn	= ibwr->wr.ud.remote_qpn;
+			wr->wr.ud.remote_qkey	= ibwr->wr.ud.remote_qkey;
+			wr->wr.ud.ah_num	= ah->ah_num;
 		}
 		break;
 
 	case IBV_WR_ATOMIC_CMP_AND_SWP:
 	case IBV_WR_ATOMIC_FETCH_AND_ADD:
-		kwr->wr.atomic.remote_addr	= uwr->wr.atomic.remote_addr;
-		kwr->wr.atomic.compare_add	= uwr->wr.atomic.compare_add;
-		kwr->wr.atomic.swap		= uwr->wr.atomic.swap;
-		kwr->wr.atomic.rkey		= uwr->wr.atomic.rkey;
+		wr->wr.atomic.remote_addr	= ibwr->wr.atomic.remote_addr;
+		wr->wr.atomic.compare_add	= ibwr->wr.atomic.compare_add;
+		wr->wr.atomic.swap		= ibwr->wr.atomic.swap;
+		wr->wr.atomic.rkey		= ibwr->wr.atomic.rkey;
 		break;
 
 	case IBV_WR_BIND_MW:
-		ibmr = uwr->bind_mw.bind_info.mr;
-		ibmw = uwr->bind_mw.mw;
-
-		kwr->wr.mw.addr = uwr->bind_mw.bind_info.addr;
-		kwr->wr.mw.length = uwr->bind_mw.bind_info.length;
-		kwr->wr.mw.mr_lkey = ibmr->lkey;
-		kwr->wr.mw.mw_rkey = ibmw->rkey;
-		kwr->wr.mw.rkey = uwr->bind_mw.rkey;
-		kwr->wr.mw.access = uwr->bind_mw.bind_info.mw_access_flags;
+		ibmr = ibwr->bind_mw.bind_info.mr;
+		ibmw = ibwr->bind_mw.mw;
+
+		wr->wr.mw.addr = ibwr->bind_mw.bind_info.addr;
+		wr->wr.mw.length = ibwr->bind_mw.bind_info.length;
+		wr->wr.mw.mr_lkey = ibmr->lkey;
+		wr->wr.mw.mw_rkey = ibmw->rkey;
+		wr->wr.mw.rkey = ibwr->bind_mw.rkey;
+		wr->wr.mw.access = ibwr->bind_mw.bind_info.mw_access_flags;
 		break;
 
 	default:
@@ -1539,6 +1686,7 @@ static int post_send_db(struct ibv_qp *ibqp)
 {
 	struct ibv_post_send cmd;
 	struct ib_uverbs_post_send_resp resp;
+	ssize_t ret;
 
 	cmd.hdr.command	= IB_USER_VERBS_CMD_POST_SEND;
 	cmd.hdr.in_words = sizeof(cmd) / 4;
@@ -1549,7 +1697,8 @@ static int post_send_db(struct ibv_qp *ibqp)
 	cmd.sge_count	= 0;
 	cmd.wqe_size	= sizeof(struct ibv_send_wr);
 
-	if (write(ibqp->context->cmd_fd, &cmd, sizeof(cmd)) != sizeof(cmd))
+	ret = write(ibqp->context->cmd_fd, &cmd, sizeof(cmd));
+	if (ret != sizeof(cmd))
 		return errno;
 
 	return 0;
@@ -1729,38 +1878,42 @@ static int rxe_destroy_ah(struct ibv_ah *ibah)
 }
 
 static const struct verbs_context_ops rxe_ctx_ops = {
-	.query_device_ex = rxe_query_device,
-	.query_port = rxe_query_port,
-	.alloc_pd = rxe_alloc_pd,
-	.dealloc_pd = rxe_dealloc_pd,
-	.reg_mr = rxe_reg_mr,
-	.dereg_mr = rxe_dereg_mr,
 	.alloc_mw = rxe_alloc_mw,
-	.dealloc_mw = rxe_dealloc_mw,
+	.alloc_pd = rxe_alloc_pd,
+	.attach_mcast = ibv_cmd_attach_mcast,
 	.bind_mw = rxe_bind_mw,
-	.create_cq = rxe_create_cq,
+	.close_xrcd = rxe_close_xrcd,
+	.create_ah = rxe_create_ah,
 	.create_cq_ex = rxe_create_cq_ex,
-	.poll_cq = rxe_poll_cq,
-	.req_notify_cq = ibv_cmd_req_notify_cq,
-	.resize_cq = rxe_resize_cq,
-	.destroy_cq = rxe_destroy_cq,
-	.create_srq = rxe_create_srq,
-	.modify_srq = rxe_modify_srq,
-	.query_srq = rxe_query_srq,
-	.destroy_srq = rxe_destroy_srq,
-	.post_srq_recv = rxe_post_srq_recv,
-	.create_qp = rxe_create_qp,
+	.create_cq = rxe_create_cq,
 	.create_qp_ex = rxe_create_qp_ex,
-	.query_qp = rxe_query_qp,
-	.modify_qp = rxe_modify_qp,
-	.destroy_qp = rxe_destroy_qp,
-	.post_send = rxe_post_send,
-	.post_recv = rxe_post_recv,
-	.create_ah = rxe_create_ah,
+	.create_qp = rxe_create_qp,
+	.create_srq = rxe_create_srq,
+	.create_srq_ex = rxe_create_srq_ex,
+	.dealloc_mw = rxe_dealloc_mw,
+	.dealloc_pd = rxe_dealloc_pd,
+	.dereg_mr = rxe_dereg_mr,
 	.destroy_ah = rxe_destroy_ah,
-	.attach_mcast = ibv_cmd_attach_mcast,
+	.destroy_cq = rxe_destroy_cq,
+	.destroy_qp = rxe_destroy_qp,
+	.destroy_srq = rxe_destroy_srq,
 	.detach_mcast = ibv_cmd_detach_mcast,
 	.free_context = rxe_free_context,
+	.get_srq_num = rxe_get_srq_num,
+	.modify_qp = rxe_modify_qp,
+	.modify_srq = rxe_modify_srq,
+	.open_xrcd = rxe_open_xrcd,
+	.poll_cq = rxe_poll_cq,
+	.post_recv = rxe_post_recv,
+	.post_send = rxe_post_send,
+	.post_srq_recv = rxe_post_srq_recv,
+	.query_device_ex = rxe_query_device,
+	.query_port = rxe_query_port,
+	.query_qp = rxe_query_qp,
+	.query_srq = rxe_query_srq,
+	.reg_mr = rxe_reg_mr,
+	.req_notify_cq = ibv_cmd_req_notify_cq,
+	.resize_cq = rxe_resize_cq,
 };
 
 static struct verbs_context *rxe_alloc_context(struct ibv_device *ibdev,
diff --git a/providers/rxe/rxe.h b/providers/rxe/rxe.h
index 6882d9c7..2023ecae 100644
--- a/providers/rxe/rxe.h
+++ b/providers/rxe/rxe.h
@@ -40,6 +40,8 @@
 #include <sys/socket.h>
 #include <netinet/in.h>
 #include <rdma/rdma_user_rxe.h>
+#include <unistd.h>
+
 #include "rxe-abi.h"
 
 struct rxe_device {
@@ -90,13 +92,21 @@ struct rxe_qp {
 	int			err;
 };
 
+struct rxe_xrcd {
+	struct verbs_xrcd	vxrcd;
+};
+
 struct rxe_srq {
-	struct ibv_srq		ibv_srq;
+	struct verbs_srq	vsrq;
 	struct mminfo		mmap_info;
 	struct rxe_wq		rq;
-	uint32_t		srq_num;
 };
 
+static inline unsigned int srq_num(struct rxe_srq *srq)
+{
+	return srq->vsrq.srq_num;
+}
+
 #define to_rxxx(xxx, type) container_of(ib##xxx, struct rxe_##type, ibv_##xxx)
 
 static inline struct rxe_context *to_rctx(struct ibv_context *ibctx)
@@ -119,9 +129,14 @@ static inline struct rxe_qp *to_rqp(struct ibv_qp *ibqp)
 	return container_of(ibqp, struct rxe_qp, vqp.qp);
 }
 
+static inline struct rxe_xrcd *to_rxrcd(struct ibv_xrcd *ibxrcd)
+{
+	return container_of(ibxrcd, struct rxe_xrcd, vxrcd.xrcd);
+}
+
 static inline struct rxe_srq *to_rsrq(struct ibv_srq *ibsrq)
 {
-	return to_rxxx(srq, srq);
+	return container_of(ibsrq, struct rxe_srq, vsrq.srq);
 }
 
 static inline struct rxe_ah *to_rah(struct ibv_ah *ibah)
-- 
2.34.1



* Re: [PATCH 2/2] providers/rxe: Implement the xrc transport protocol
  2022-09-17  3:15 ` [PATCH 2/2] providers/rxe: Implement the xrc transport protocol Bob Pearson
@ 2022-09-26 17:58   ` Jason Gunthorpe
  0 siblings, 0 replies; 9+ messages in thread
From: Jason Gunthorpe @ 2022-09-26 17:58 UTC (permalink / raw)
  To: Bob Pearson; +Cc: zyjzyj2000, lizhijian, linux-rdma

On Fri, Sep 16, 2022 at 10:15:37PM -0500, Bob Pearson wrote:
> Make changes to implement the xrc transport protocol.
> 
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  providers/rxe/rxe-abi.h |   2 +
>  providers/rxe/rxe.c     | 387 ++++++++++++++++++++++++++++------------
>  providers/rxe/rxe.h     |  21 ++-
>  3 files changed, 290 insertions(+), 120 deletions(-)

Please send as a PR to rdma-core

Jason


* [PATCH 1/2] Update kernel headers
  2022-09-13 22:30 [PATCH 0/2] providers/rxe: Remove redundant num_sge_fields Bob Pearson
@ 2022-09-13 22:30 ` Bob Pearson
  0 siblings, 0 replies; 9+ messages in thread
From: Bob Pearson @ 2022-09-13 22:30 UTC (permalink / raw)
  To: jgg, zyjzyj2000, linux-rdma; +Cc: Bob Pearson

To commit ?? ("RDMA/rxe: Remove redundant num_sge fields").

The num_sge fields in struct rxe_send_wr and struct rxe_recv_wqe
duplicate the count already carried in struct rxe_dma_info, so turn
them into reserved words.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 kernel-headers/rdma/rdma_user_rxe.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel-headers/rdma/rdma_user_rxe.h b/kernel-headers/rdma/rdma_user_rxe.h
index f09c5c9e..73f679df 100644
--- a/kernel-headers/rdma/rdma_user_rxe.h
+++ b/kernel-headers/rdma/rdma_user_rxe.h
@@ -74,7 +74,7 @@ struct rxe_av {
 
 struct rxe_send_wr {
 	__aligned_u64		wr_id;
-	__u32			num_sge;
+	__u32			reserved;
 	__u32			opcode;
 	__u32			send_flags;
 	union {
@@ -166,7 +166,7 @@ struct rxe_send_wqe {
 
 struct rxe_recv_wqe {
 	__aligned_u64		wr_id;
-	__u32			num_sge;
+	__u32			reserved;
 	__u32			padding;
 	struct rxe_dma_info	dma;
 };
-- 
2.34.1



* RE: [PATCH 1/2] Update kernel headers
  2021-06-29  6:58   ` Leon Romanovsky
@ 2021-06-29 14:09     ` Pearson, Robert B
  0 siblings, 0 replies; 9+ messages in thread
From: Pearson, Robert B @ 2021-06-29 14:09 UTC (permalink / raw)
  To: Leon Romanovsky, Gal Pressman; +Cc: Bob Pearson, jgg, zyjzyj2000, linux-rdma



-----Original Message-----
From: Leon Romanovsky <leon@kernel.org> 
Sent: Tuesday, June 29, 2021 1:58 AM
To: Gal Pressman <galpress@amazon.com>
Cc: Bob Pearson <rpearsonhpe@gmail.com>; jgg@nvidia.com; zyjzyj2000@gmail.com; linux-rdma@vger.kernel.org
Subject: Re: [PATCH 1/2] Update kernel headers

On Tue, Jun 29, 2021 at 09:36:50AM +0300, Gal Pressman wrote:
> On 29/06/2021 1:05, Bob Pearson wrote:
> > To commit ?? ("RDMA/rxe: Convert kernel UD post send to use ah_num").
> > 
> > Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > ---
> >  kernel-headers/rdma/rdma_user_rxe.h | 14 +++++++++++++-
> >  1 file changed, 13 insertions(+), 1 deletion(-)
> > 
> > diff --git a/kernel-headers/rdma/rdma_user_rxe.h 
> > b/kernel-headers/rdma/rdma_user_rxe.h
> > index e283c222..e544832e 100644
> > --- a/kernel-headers/rdma/rdma_user_rxe.h
> > +++ b/kernel-headers/rdma/rdma_user_rxe.h
> > @@ -98,6 +98,8 @@ struct rxe_send_wr {
> >  			__u32	remote_qpn;
> >  			__u32	remote_qkey;
> >  			__u16	pkey_index;
> > +			__u16	reserved;
> > +			__u32	ah_num;
> >  		} ud;
> >  		struct {
> >  			__aligned_u64	addr;
> > @@ -148,7 +150,12 @@ struct rxe_dma_info {
> >  
> >  struct rxe_send_wqe {
> >  	struct rxe_send_wr	wr;
> > -	struct rxe_av		av;
> > +	union {
> > +		struct rxe_av av;
> > +		struct {
> > +			__u32		reserved[0];
> > +		} ex;
> > +	};
> >  	__u32			status;
> >  	__u32			state;
> >  	__aligned_u64		iova;
> > @@ -168,6 +175,11 @@ struct rxe_recv_wqe {
> >  	struct rxe_dma_info	dma;
> >  };
> >  
> > +struct rxe_create_ah_resp {
> > +	__u32 ah_num;
> > +	__u32 reserved;
> > +};
> > +
> >  struct rxe_create_cq_resp {
> >  	struct mminfo mi;
> >  };
> > 
> 
> I think the second patch didn't make it to the list.

I don't know how Bob sends his patches, but it is here https://lore.kernel.org/linux-rdma/20210628220303.9938-1-rpearsonhpe@gmail.com

Thanks

There seems to be some confusion here. I will resend.
I use git send-email but I may have mistyped something.

Bob


* Re: [PATCH 1/2] Update kernel headers
  2021-06-29  6:36 ` Gal Pressman
@ 2021-06-29  6:58   ` Leon Romanovsky
  2021-06-29 14:09     ` Pearson, Robert B
  0 siblings, 1 reply; 9+ messages in thread
From: Leon Romanovsky @ 2021-06-29  6:58 UTC (permalink / raw)
  To: Gal Pressman; +Cc: Bob Pearson, jgg, zyjzyj2000, linux-rdma

On Tue, Jun 29, 2021 at 09:36:50AM +0300, Gal Pressman wrote:
> On 29/06/2021 1:05, Bob Pearson wrote:
> > To commit ?? ("RDMA/rxe: Convert kernel UD post send to use ah_num").
> > 
> > Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > ---
> >  kernel-headers/rdma/rdma_user_rxe.h | 14 +++++++++++++-
> >  1 file changed, 13 insertions(+), 1 deletion(-)
> > 
> > diff --git a/kernel-headers/rdma/rdma_user_rxe.h b/kernel-headers/rdma/rdma_user_rxe.h
> > index e283c222..e544832e 100644
> > --- a/kernel-headers/rdma/rdma_user_rxe.h
> > +++ b/kernel-headers/rdma/rdma_user_rxe.h
> > @@ -98,6 +98,8 @@ struct rxe_send_wr {
> >  			__u32	remote_qpn;
> >  			__u32	remote_qkey;
> >  			__u16	pkey_index;
> > +			__u16	reserved;
> > +			__u32	ah_num;
> >  		} ud;
> >  		struct {
> >  			__aligned_u64	addr;
> > @@ -148,7 +150,12 @@ struct rxe_dma_info {
> >  
> >  struct rxe_send_wqe {
> >  	struct rxe_send_wr	wr;
> > -	struct rxe_av		av;
> > +	union {
> > +		struct rxe_av av;
> > +		struct {
> > +			__u32		reserved[0];
> > +		} ex;
> > +	};
> >  	__u32			status;
> >  	__u32			state;
> >  	__aligned_u64		iova;
> > @@ -168,6 +175,11 @@ struct rxe_recv_wqe {
> >  	struct rxe_dma_info	dma;
> >  };
> >  
> > +struct rxe_create_ah_resp {
> > +	__u32 ah_num;
> > +	__u32 reserved;
> > +};
> > +
> >  struct rxe_create_cq_resp {
> >  	struct mminfo mi;
> >  };
> > 
> 
> I think the second patch didn't make it to the list.

I don't know how Bob sends his patches, but it is here
https://lore.kernel.org/linux-rdma/20210628220303.9938-1-rpearsonhpe@gmail.com

Thanks


* Re: [PATCH 1/2] Update kernel headers
  2021-06-28 22:05 Bob Pearson
@ 2021-06-29  6:36 ` Gal Pressman
  2021-06-29  6:58   ` Leon Romanovsky
  0 siblings, 1 reply; 9+ messages in thread
From: Gal Pressman @ 2021-06-29  6:36 UTC (permalink / raw)
  To: Bob Pearson, jgg, zyjzyj2000, linux-rdma

On 29/06/2021 1:05, Bob Pearson wrote:
> To commit ?? ("RDMA/rxe: Convert kernel UD post send to use ah_num").
> 
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  kernel-headers/rdma/rdma_user_rxe.h | 14 +++++++++++++-
>  1 file changed, 13 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel-headers/rdma/rdma_user_rxe.h b/kernel-headers/rdma/rdma_user_rxe.h
> index e283c222..e544832e 100644
> --- a/kernel-headers/rdma/rdma_user_rxe.h
> +++ b/kernel-headers/rdma/rdma_user_rxe.h
> @@ -98,6 +98,8 @@ struct rxe_send_wr {
>  			__u32	remote_qpn;
>  			__u32	remote_qkey;
>  			__u16	pkey_index;
> +			__u16	reserved;
> +			__u32	ah_num;
>  		} ud;
>  		struct {
>  			__aligned_u64	addr;
> @@ -148,7 +150,12 @@ struct rxe_dma_info {
>  
>  struct rxe_send_wqe {
>  	struct rxe_send_wr	wr;
> -	struct rxe_av		av;
> +	union {
> +		struct rxe_av av;
> +		struct {
> +			__u32		reserved[0];
> +		} ex;
> +	};
>  	__u32			status;
>  	__u32			state;
>  	__aligned_u64		iova;
> @@ -168,6 +175,11 @@ struct rxe_recv_wqe {
>  	struct rxe_dma_info	dma;
>  };
>  
> +struct rxe_create_ah_resp {
> +	__u32 ah_num;
> +	__u32 reserved;
> +};
> +
>  struct rxe_create_cq_resp {
>  	struct mminfo mi;
>  };
> 

I think the second patch didn't make it to the list.


* [PATCH 1/2] Update kernel headers
@ 2021-06-28 22:05 Bob Pearson
  2021-06-29  6:36 ` Gal Pressman
  0 siblings, 1 reply; 9+ messages in thread
From: Bob Pearson @ 2021-06-28 22:05 UTC (permalink / raw)
  To: jgg, zyjzyj2000, linux-rdma; +Cc: Bob Pearson

To commit ?? ("RDMA/rxe: Convert kernel UD post send to use ah_num").

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 kernel-headers/rdma/rdma_user_rxe.h | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel-headers/rdma/rdma_user_rxe.h b/kernel-headers/rdma/rdma_user_rxe.h
index e283c222..e544832e 100644
--- a/kernel-headers/rdma/rdma_user_rxe.h
+++ b/kernel-headers/rdma/rdma_user_rxe.h
@@ -98,6 +98,8 @@ struct rxe_send_wr {
 			__u32	remote_qpn;
 			__u32	remote_qkey;
 			__u16	pkey_index;
+			__u16	reserved;
+			__u32	ah_num;
 		} ud;
 		struct {
 			__aligned_u64	addr;
@@ -148,7 +150,12 @@ struct rxe_dma_info {
 
 struct rxe_send_wqe {
 	struct rxe_send_wr	wr;
-	struct rxe_av		av;
+	union {
+		struct rxe_av av;
+		struct {
+			__u32		reserved[0];
+		} ex;
+	};
 	__u32			status;
 	__u32			state;
 	__aligned_u64		iova;
@@ -168,6 +175,11 @@ struct rxe_recv_wqe {
 	struct rxe_dma_info	dma;
 };
 
+struct rxe_create_ah_resp {
+	__u32 ah_num;
+	__u32 reserved;
+};
+
 struct rxe_create_cq_resp {
 	struct mminfo mi;
 };
-- 
2.30.2


