linux-rdma.vger.kernel.org archive mirror
* [PATCH libmlx5 0/5] Support CQE versions
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

From: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

This series adds support for CQE (completion queue entry) versions in
libmlx5.

Non-default CQE versions are required for RAW PACKET Ethernet QPs.

The code still works on kernels that lack this support, so that other
QP types can still be created.

Haggai

Haggai Abramovsky (5):
  libmlx5: Add infrastructure for resource tracking
  libmlx5: Add QPs and XSRQs resource tracking
  libmlx5: Added new poll_cq according to the new CQE format
  libmlx5: Added QP and XSRQ create/destroy flow with user index
  libmlx5: Work with CQE version 1

 src/cq.c       | 262 +++++++++++++++++++++++++++++++++++++++++++++++----------
 src/mlx5-abi.h |   8 +-
 src/mlx5.c     |  80 ++++++++++++++++++
 src/mlx5.h     |  54 +++++++++++-
 src/verbs.c    | 122 ++++++++++++++++++++-------
 5 files changed, 447 insertions(+), 79 deletions(-)

-- 
1.8.3.1


* [PATCH libmlx5 1/5] Add infrastructure for resource tracking
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

Add a new struct, mlx5_resource, embedded in each tracked object, to
allow retrieving the object in poll_cq when we need it.
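
To make the "must be first" requirement concrete, here is a minimal
sketch (members condensed from the hunks below; rsc_to_mqp() is added
later in this series): because mlx5_resource is the first member, a
resource pointer can be converted back to its container with a plain
cast.

	struct mlx5_resource {
		enum mlx5_rsc_type	type;	/* QP, XSRQ or SRQ */
		uint32_t		rsn;	/* QPN, SRQN or user index */
	};

	struct mlx5_qp {
		struct mlx5_resource	rsc;	/* must be first */
		/* ... remaining QP state elided ... */
	};

	/* Valid only because rsc is the first member of struct mlx5_qp. */
	static inline struct mlx5_qp *rsc_to_mqp(struct mlx5_resource *rsc)
	{
		return (struct mlx5_qp *)rsc;
	}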


Signed-off-by: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 src/mlx5.h | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/src/mlx5.h b/src/mlx5.h
index 6ad79fe..4dda0bf 100644
--- a/src/mlx5.h
+++ b/src/mlx5.h
@@ -228,6 +228,18 @@ enum mlx5_alloc_type {
 	MLX5_ALLOC_TYPE_ALL
 };
 
+enum mlx5_rsc_type {
+	MLX5_RSC_TYPE_QP,
+	MLX5_RSC_TYPE_XSRQ,
+	MLX5_RSC_TYPE_SRQ,
+	MLX5_RSC_TYPE_INVAL,
+};
+
+struct mlx5_resource {
+	enum mlx5_rsc_type      type;
+	uint32_t                rsn;
+};
+
 struct mlx5_device {
 	struct verbs_device	verbs_dev;
 	int			page_size;
@@ -341,6 +353,7 @@ struct mlx5_cq {
 };
 
 struct mlx5_srq {
+	struct mlx5_resource            rsc;  /* This struct must be first */
 	struct verbs_srq		vsrq;
 	struct mlx5_buf			buf;
 	struct mlx5_spinlock		lock;
@@ -392,6 +405,7 @@ struct mlx5_mr {
 };
 
 struct mlx5_qp {
+	struct mlx5_resource            rsc; /* This struct must be first */
 	struct verbs_qp			verbs_qp;
 	struct ibv_qp			ibv_qp;
 	struct mlx5_buf                 buf;
@@ -491,7 +505,9 @@ static inline struct mlx5_cq *to_mcq(struct ibv_cq *ibcq)
 
 static inline struct mlx5_srq *to_msrq(struct ibv_srq *ibsrq)
 {
-	return (struct mlx5_srq *)ibsrq;
+	struct verbs_srq *vsrq = (struct verbs_srq *)ibsrq;
+
+	return container_of(vsrq, struct mlx5_srq, vsrq);
 }
 
 static inline struct mlx5_qp *to_mqp(struct ibv_qp *ibqp)
-- 
1.8.3.1


* [PATCH libmlx5 2/5] Add QPs and XSRQs resource tracking
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

Add a new table that stores the contexts of all QPs and XSRQs.
Insertions into and deletions from the table are keyed by the object's
user index.

This table will allow us to retrieve the objects (QPs and XSRQs) by
their user index in poll_one.
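
As a worked example of the lookup scheme (the constants and fields are
the ones added below; the index value itself is arbitrary), a 24-bit
user index splits into a page selector and a slot inside a lazily
allocated page of 4096 pointers:

	uint32_t uidx = 0x123456;		/* arbitrary 24-bit user index */
	int tind = uidx >> MLX5_UIDX_TABLE_SHIFT;	/* 0x123: which page */
	int slot = uidx & MLX5_UIDX_TABLE_MASK;		/* 0x456: slot in page */

	/* mlx5_find_uidx() below is essentially: */
	struct mlx5_resource *rsc = ctx->uidx_table[tind].refcnt ?
				    ctx->uidx_table[tind].table[slot] : NULL;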


Signed-off-by: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 src/mlx5.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/mlx5.h | 24 ++++++++++++++++++++++
 2 files changed, 91 insertions(+)

diff --git a/src/mlx5.c b/src/mlx5.c
index d023288..6ce8bd8 100644
--- a/src/mlx5.c
+++ b/src/mlx5.c
@@ -128,6 +128,73 @@ static int read_number_from_line(const char *line, int *value)
 	return 0;
 }
 
+static int32_t get_free_uidx(struct mlx5_context *ctx)
+{
+	int32_t tind;
+	int32_t i;
+
+	for (tind = 0; tind < MLX5_UIDX_TABLE_SIZE; tind++) {
+		if (ctx->uidx_table[tind].refcnt < MLX5_UIDX_TABLE_MASK)
+			break;
+	}
+
+	if (tind == MLX5_UIDX_TABLE_SIZE)
+		return -1;
+
+	if (!ctx->uidx_table[tind].refcnt)
+		return tind << MLX5_UIDX_TABLE_SHIFT;
+
+	for (i = 0; i < MLX5_UIDX_TABLE_MASK + 1; i++) {
+		if (!ctx->uidx_table[tind].table[i])
+			break;
+	}
+
+	return (tind << MLX5_UIDX_TABLE_SHIFT) | i;
+}
+
+int32_t mlx5_store_uidx(struct mlx5_context *ctx, void *rsc)
+{
+	int32_t tind;
+	int32_t ret = -1;
+	int32_t uidx;
+
+	pthread_mutex_lock(&ctx->uidx_table_mutex);
+	uidx = get_free_uidx(ctx);
+	if (uidx < 0)
+		goto out;
+
+	tind = uidx >> MLX5_UIDX_TABLE_SHIFT;
+
+	if (!ctx->uidx_table[tind].refcnt) {
+		ctx->uidx_table[tind].table = calloc(MLX5_UIDX_TABLE_MASK + 1,
+						     sizeof(void *));
+		if (!ctx->uidx_table[tind].table)
+			goto out;
+	}
+
+	++ctx->uidx_table[tind].refcnt;
+	ctx->uidx_table[tind].table[uidx & MLX5_UIDX_TABLE_MASK] = rsc;
+	ret = uidx;
+
+out:
+	pthread_mutex_unlock(&ctx->uidx_table_mutex);
+	return ret;
+}
+
+void mlx5_clear_uidx(struct mlx5_context *ctx, uint32_t uidx)
+{
+	int tind = uidx >> MLX5_UIDX_TABLE_SHIFT;
+
+	pthread_mutex_lock(&ctx->uidx_table_mutex);
+
+	if (!--ctx->uidx_table[tind].refcnt)
+		free(ctx->uidx_table[tind].table);
+	else
+		ctx->uidx_table[tind].table[uidx & MLX5_UIDX_TABLE_MASK] = NULL;
+
+	pthread_mutex_unlock(&ctx->uidx_table_mutex);
+}
+
 static int mlx5_is_sandy_bridge(int *num_cores)
 {
 	char line[128];
diff --git a/src/mlx5.h b/src/mlx5.h
index 4dda0bf..17ef51c 100644
--- a/src/mlx5.h
+++ b/src/mlx5.h
@@ -165,6 +165,12 @@ enum {
 };
 
 enum {
+	MLX5_UIDX_TABLE_SHIFT		= 12,
+	MLX5_UIDX_TABLE_MASK		= (1 << MLX5_UIDX_TABLE_SHIFT) - 1,
+	MLX5_UIDX_TABLE_SIZE		= 1 << (24 - MLX5_UIDX_TABLE_SHIFT),
+};
+
+enum {
 	MLX5_SRQ_TABLE_SHIFT		= 12,
 	MLX5_SRQ_TABLE_MASK		= (1 << MLX5_SRQ_TABLE_SHIFT) - 1,
 	MLX5_SRQ_TABLE_SIZE		= 1 << (24 - MLX5_SRQ_TABLE_SHIFT),
@@ -275,6 +281,12 @@ struct mlx5_context {
 	}				srq_table[MLX5_SRQ_TABLE_SIZE];
 	pthread_mutex_t			srq_table_mutex;
 
+	struct {
+		struct mlx5_resource  **table;
+		int                     refcnt;
+	}				uidx_table[MLX5_UIDX_TABLE_SIZE];
+	pthread_mutex_t                 uidx_table_mutex;
+
 	void			       *uar[MLX5_MAX_UAR_PAGES];
 	struct mlx5_spinlock		lock32;
 	struct mlx5_db_page	       *db_list;
@@ -612,6 +624,8 @@ void mlx5_set_sq_sizes(struct mlx5_qp *qp, struct ibv_qp_cap *cap,
 struct mlx5_qp *mlx5_find_qp(struct mlx5_context *ctx, uint32_t qpn);
 int mlx5_store_qp(struct mlx5_context *ctx, uint32_t qpn, struct mlx5_qp *qp);
 void mlx5_clear_qp(struct mlx5_context *ctx, uint32_t qpn);
+int32_t mlx5_store_uidx(struct mlx5_context *ctx, void *rsc);
+void mlx5_clear_uidx(struct mlx5_context *ctx, uint32_t uidx);
 struct mlx5_srq *mlx5_find_srq(struct mlx5_context *ctx, uint32_t srqn);
 int mlx5_store_srq(struct mlx5_context *ctx, uint32_t srqn,
 		   struct mlx5_srq *srq);
@@ -636,6 +650,16 @@ int mlx5_close_xrcd(struct ibv_xrcd *ib_xrcd);
 struct ibv_srq *mlx5_create_srq_ex(struct ibv_context *context,
 				   struct ibv_srq_init_attr_ex *attr);
 
+static inline void *mlx5_find_uidx(struct mlx5_context *ctx, uint32_t uidx)
+{
+	int tind = uidx >> MLX5_UIDX_TABLE_SHIFT;
+
+	if (likely(ctx->uidx_table[tind].refcnt))
+		return ctx->uidx_table[tind].table[uidx & MLX5_UIDX_TABLE_MASK];
+
+	return NULL;
+}
+
 static inline int mlx5_spin_lock(struct mlx5_spinlock *lock)
 {
 	if (!mlx5_single_threaded)
-- 
1.8.3.1


* [PATCH libmlx5 3/5] Added new poll_cq according to the new CQE format
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

When working with CQE version 1, the CQE format is different, so we
need a new poll_cq function that understands the new format.

When allocating the user context, we record the CQE version and use it
to choose which poll_cq function to install: the old one, which parses
the old CQE format, or the new one.
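
The dispatch itself is cheap: poll_cq() below is an always_inline
helper that takes a constant cqe_ver flag, so the compiler emits two
specialized copies and the version checks fold away. Condensed from
the context-lookup helpers below, the owner of a completion is found
roughly like this:

	struct mlx5_resource *rsc;
	uint32_t qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	uint32_t srqn_uidx = ntohl(cqe64->srqn_uidx) & 0xffffff;

	if (cqe_ver)
		/* v1: QPs and XSRQs share one namespace, the user index */
		rsc = mlx5_find_uidx(mctx, srqn_uidx);
	else
		/* v0: a set srqn means an SRQ completion, else key on QPN */
		rsc = srqn_uidx ?
		      (struct mlx5_resource *)mlx5_find_srq(mctx, srqn_uidx) :
		      (struct mlx5_resource *)mlx5_find_qp(mctx, qpn);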


Signed-off-by: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 src/cq.c       | 217 +++++++++++++++++++++++++++++++++++++++++++++------------
 src/mlx5-abi.h |   3 +-
 src/mlx5.h     |  12 ++++
 3 files changed, 187 insertions(+), 45 deletions(-)

diff --git a/src/cq.c b/src/cq.c
index 63c3ab0..a1fdac3 100644
--- a/src/cq.c
+++ b/src/cq.c
@@ -116,7 +116,7 @@ struct mlx5_cqe64 {
 	uint16_t	slid;
 	uint32_t	flags_rqpn;
 	uint8_t		rsvd28[4];
-	uint32_t	srqn;
+	uint32_t	srqn_uidx;
 	uint32_t	imm_inval_pkey;
 	uint8_t		rsvd40[4];
 	uint32_t	byte_cnt;
@@ -362,21 +362,120 @@ static void mlx5_get_cycles(uint64_t *cycles)
 }
 #endif
 
-static int mlx5_poll_one(struct mlx5_cq *cq,
-			 struct mlx5_qp **cur_qp,
+static inline struct mlx5_qp *get_req_context(struct mlx5_context *mctx,
+					      struct mlx5_resource **cur_rsc,
+					      uint32_t rsn, int cqe_ver)
+					      __attribute__((always_inline));
+static inline struct mlx5_qp *get_req_context(struct mlx5_context *mctx,
+					      struct mlx5_resource **cur_rsc,
+					      uint32_t rsn, int cqe_ver)
+{
+	if (!*cur_rsc || (rsn != (*cur_rsc)->rsn))
+		*cur_rsc = cqe_ver ? mlx5_find_uidx(mctx, rsn) :
+				      (struct mlx5_resource *)mlx5_find_qp(mctx, rsn);
+
+	return rsc_to_mqp(*cur_rsc);
+}
+
+static inline int get_resp_ctx_v1(struct mlx5_context *mctx,
+				  struct mlx5_resource **cur_rsc,
+				  struct mlx5_srq **cur_srq,
+				  uint32_t uidx, int *is_srq)
+				  __attribute__((always_inline));
+static inline int get_resp_ctx_v1(struct mlx5_context *mctx,
+				  struct mlx5_resource **cur_rsc,
+				  struct mlx5_srq **cur_srq,
+				  uint32_t uidx, int *is_srq)
+{
+	struct mlx5_qp *mqp;
+
+	if (!*cur_rsc || (uidx != (*cur_rsc)->rsn)) {
+		*cur_rsc = mlx5_find_uidx(mctx, uidx);
+		if (unlikely(!*cur_rsc))
+			return CQ_POLL_ERR;
+	}
+
+	switch ((*cur_rsc)->type) {
+	case MLX5_RSC_TYPE_QP:
+		mqp = rsc_to_mqp(*cur_rsc);
+		if (mqp->verbs_qp.qp.srq) {
+			*cur_srq = to_msrq(mqp->verbs_qp.qp.srq);
+			*is_srq = 1;
+		}
+		break;
+	case MLX5_RSC_TYPE_XSRQ:
+		*cur_srq = rsc_to_msrq(*cur_rsc);
+		*is_srq = 1;
+		break;
+	default:
+		return CQ_POLL_ERR;
+	}
+
+	return CQ_OK;
+}
+
+static inline int get_resp_ctx(struct mlx5_context *mctx,
+			       struct mlx5_resource **cur_rsc,
+			       uint32_t qpn)
+			       __attribute__((always_inline));
+static inline int get_resp_ctx(struct mlx5_context *mctx,
+			       struct mlx5_resource **cur_rsc,
+			       uint32_t qpn)
+{
+	if (!*cur_rsc || (qpn != (*cur_rsc)->rsn)) {
+		/*
+		 * We do not have to take the QP table lock here,
+		 * because CQs will be locked while QPs are removed
+		 * from the table.
+		 */
+		*cur_rsc = (struct mlx5_resource *)mlx5_find_qp(mctx, qpn);
+		if (unlikely(!*cur_rsc))
+			return CQ_POLL_ERR;
+	}
+
+	return CQ_OK;
+}
+
+static inline int get_srq_ctx(struct mlx5_context *mctx,
+			      struct mlx5_srq **cur_srq,
+			      uint32_t srqn)
+			      __attribute__((always_inline));
+static inline int get_srq_ctx(struct mlx5_context *mctx,
+			      struct mlx5_srq **cur_srq,
+			      uint32_t srqn)
+{
+	if (!*cur_srq || (srqn != (*cur_srq)->srqn)) {
+		*cur_srq = mlx5_find_srq(mctx, srqn);
+		if (unlikely(!*cur_srq))
+			return CQ_POLL_ERR;
+	}
+
+	return CQ_OK;
+}
+
+static inline int mlx5_poll_one(struct mlx5_cq *cq,
+			 struct mlx5_resource **cur_rsc,
 			 struct mlx5_srq **cur_srq,
-			 struct ibv_wc *wc)
+			 struct ibv_wc *wc, int cqe_ver)
+			 __attribute__((always_inline));
+static inline int mlx5_poll_one(struct mlx5_cq *cq,
+			 struct mlx5_resource **cur_rsc,
+			 struct mlx5_srq **cur_srq,
+			 struct ibv_wc *wc, int cqe_ver)
 {
 	struct mlx5_cqe64 *cqe64;
 	struct mlx5_wq *wq;
 	uint16_t wqe_ctr;
 	void *cqe;
 	uint32_t qpn;
-	uint32_t srqn;
+	uint32_t srqn_uidx;
 	int idx;
 	uint8_t opcode;
 	struct mlx5_err_cqe *ecqe;
 	int err;
+	int is_srq = 0;
+	struct mlx5_qp *mqp = NULL;
+	struct mlx5_context *mctx = to_mctx(cq->ibv_cq.context);
 
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
@@ -384,6 +483,7 @@ static int mlx5_poll_one(struct mlx5_cq *cq,
 
 	cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;
 
+	opcode = cqe64->op_own >> 4;
 	++cq->cons_index;
 
 	VALGRIND_MAKE_MEM_DEFINED(cqe64, sizeof *cqe64);
@@ -396,51 +496,32 @@ static int mlx5_poll_one(struct mlx5_cq *cq,
 
 #ifdef MLX5_DEBUG
 	if (mlx5_debug_mask & MLX5_DBG_CQ_CQE) {
-		FILE *fp = to_mctx(cq->ibv_cq.context)->dbg_fp;
+		FILE *fp = mctx->dbg_fp;
 
 		mlx5_dbg(fp, MLX5_DBG_CQ_CQE, "dump cqe for cqn 0x%x:\n", cq->cqn);
 		dump_cqe(fp, cqe64);
 	}
 #endif
 
-	srqn = ntohl(cqe64->srqn) & 0xffffff;
 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
-	if (srqn) {
-		if (!*cur_srq || (srqn != (*cur_srq)->srqn)) {
-			*cur_srq = mlx5_find_srq(to_mctx(cq->ibv_cq.context),
-						 srqn);
-			if (unlikely(!*cur_srq))
-				return CQ_POLL_ERR;
-		}
-	} else {
-		if (!*cur_qp || (qpn != (*cur_qp)->ibv_qp.qp_num)) {
-			/*
-			 * We do not have to take the QP table lock here,
-			 * because CQs will be locked while QPs are removed
-			 * from the table.
-			 */
-			*cur_qp = mlx5_find_qp(to_mctx(cq->ibv_cq.context),
-					       qpn);
-			if (unlikely(!*cur_qp))
-				return CQ_POLL_ERR;
-		}
-	}
-
 	wc->wc_flags = 0;
-	wc->qp_num = qpn;
 
-	opcode = cqe64->op_own >> 4;
 	switch (opcode) {
 	case MLX5_CQE_REQ:
-		wq = &(*cur_qp)->sq;
+		mqp = get_req_context(mctx, cur_rsc,
+				      (cqe_ver ? (ntohl(cqe64->srqn_uidx) & 0xffffff) : qpn),
+				      cqe_ver);
+		if (unlikely(!mqp))
+			return CQ_POLL_ERR;
+		wq = &mqp->sq;
 		wqe_ctr = ntohs(cqe64->wqe_counter);
 		idx = wqe_ctr & (wq->wqe_cnt - 1);
 		handle_good_req(wc, cqe64);
 		if (cqe64->op_own & MLX5_INLINE_SCATTER_32)
-			err = mlx5_copy_to_send_wqe(*cur_qp, wqe_ctr, cqe,
+			err = mlx5_copy_to_send_wqe(mqp, wqe_ctr, cqe,
 						    wc->byte_len);
 		else if (cqe64->op_own & MLX5_INLINE_SCATTER_64)
-			err = mlx5_copy_to_send_wqe(*cur_qp, wqe_ctr, cqe - 1,
+			err = mlx5_copy_to_send_wqe(mqp, wqe_ctr, cqe - 1,
 						    wc->byte_len);
 		else
 			err = 0;
@@ -453,20 +534,35 @@ static int mlx5_poll_one(struct mlx5_cq *cq,
 	case MLX5_CQE_RESP_SEND:
 	case MLX5_CQE_RESP_SEND_IMM:
 	case MLX5_CQE_RESP_SEND_INV:
-		wc->status = handle_responder(wc, cqe64, *cur_qp,
-					      srqn ? *cur_srq : NULL);
+		srqn_uidx = ntohl(cqe64->srqn_uidx) & 0xffffff;
+		if (cqe_ver) {
+			err = get_resp_ctx_v1(mctx, cur_rsc, cur_srq, srqn_uidx, &is_srq);
+		} else {
+			if (srqn_uidx) {
+				err = get_srq_ctx(mctx, cur_srq, srqn_uidx);
+				is_srq = 1;
+			} else {
+				err = get_resp_ctx(mctx, cur_rsc, qpn);
+			}
+		}
+		if (unlikely(err))
+			return err;
+
+		wc->status = handle_responder(wc, cqe64, rsc_to_mqp(*cur_rsc),
+					      is_srq ? *cur_srq : NULL);
 		break;
 	case MLX5_CQE_RESIZE_CQ:
 		break;
 	case MLX5_CQE_REQ_ERR:
 	case MLX5_CQE_RESP_ERR:
+		srqn_uidx = ntohl(cqe64->srqn_uidx) & 0xffffff;
 		ecqe = (struct mlx5_err_cqe *)cqe64;
 		mlx5_handle_error_cqe(ecqe, wc);
 		if (unlikely(ecqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR &&
 			     ecqe->syndrome != MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR)) {
-			FILE *fp = to_mctx(cq->ibv_cq.context)->dbg_fp;
+			FILE *fp = mctx->dbg_fp;
 			fprintf(fp, PFX "%s: got completion with error:\n",
-				to_mctx(cq->ibv_cq.context)->hostname);
+				mctx->hostname);
 			dump_cqe(fp, ecqe);
 			if (mlx5_freeze_on_error_cqe) {
 				fprintf(fp, PFX "freezing at poll cq...");
@@ -476,18 +572,35 @@ static int mlx5_poll_one(struct mlx5_cq *cq,
 		}
 
 		if (opcode == MLX5_CQE_REQ_ERR) {
-			wq = &(*cur_qp)->sq;
+			mqp = get_req_context(mctx, cur_rsc, (cqe_ver ? srqn_uidx : qpn), cqe_ver);
+			if (unlikely(!mqp))
+				return CQ_POLL_ERR;
+			wq = &mqp->sq;
 			wqe_ctr = ntohs(cqe64->wqe_counter);
 			idx = wqe_ctr & (wq->wqe_cnt - 1);
 			wc->wr_id = wq->wrid[idx];
 			wq->tail = wq->wqe_head[idx] + 1;
 		} else {
-			if (*cur_srq) {
+			if (cqe_ver) {
+				err = get_resp_ctx_v1(mctx, cur_rsc, cur_srq, srqn_uidx, &is_srq);
+			} else {
+				if (srqn_uidx) {
+					err = get_srq_ctx(mctx, cur_srq, srqn_uidx);
+					is_srq = 1;
+				} else {
+					err = get_resp_ctx(mctx, cur_rsc, qpn);
+				}
+			}
+			if (unlikely(err))
+				return CQ_POLL_ERR;
+
+			if (is_srq) {
 				wqe_ctr = ntohs(cqe64->wqe_counter);
 				wc->wr_id = (*cur_srq)->wrid[wqe_ctr];
 				mlx5_free_srq_wqe(*cur_srq, wqe_ctr);
 			} else {
-				wq = &(*cur_qp)->rq;
+				mqp = rsc_to_mqp(*cur_rsc);
+				wq = &mqp->rq;
 				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 				++wq->tail;
 			}
@@ -495,13 +608,19 @@ static int mlx5_poll_one(struct mlx5_cq *cq,
 		break;
 	}
 
+	wc->qp_num = qpn;
+
 	return CQ_OK;
 }
 
-int mlx5_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
+static inline int poll_cq(struct ibv_cq *ibcq, int ne,
+		      struct ibv_wc *wc, int cqe_ver)
+		      __attribute__((always_inline));
+static inline int poll_cq(struct ibv_cq *ibcq, int ne,
+		      struct ibv_wc *wc, int cqe_ver)
 {
 	struct mlx5_cq *cq = to_mcq(ibcq);
-	struct mlx5_qp *qp = NULL;
+	struct mlx5_resource *rsc = NULL;
 	struct mlx5_srq *srq = NULL;
 	int npolled;
 	int err = CQ_OK;
@@ -519,7 +638,7 @@ int mlx5_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
 	mlx5_spin_lock(&cq->lock);
 
 	for (npolled = 0; npolled < ne; ++npolled) {
-		err = mlx5_poll_one(cq, &qp, &srq, wc + npolled);
+		err = mlx5_poll_one(cq, &rsc, &srq, wc + npolled, cqe_ver);
 		if (err != CQ_OK)
 			break;
 	}
@@ -551,6 +670,16 @@ int mlx5_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
 	return err == CQ_POLL_ERR ? err : npolled;
 }
 
+int mlx5_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
+{
+	return poll_cq(ibcq, ne, wc, 0);
+}
+
+int mlx5_poll_cq_v1(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
+{
+	return poll_cq(ibcq, ne, wc, 1);
+}
+
 int mlx5_arm_cq(struct ibv_cq *ibvcq, int solicited)
 {
 	struct mlx5_cq *cq = to_mcq(ibvcq);
@@ -622,7 +751,7 @@ void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t rsn, struct mlx5_srq *srq)
 		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
 		cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;
 		if (is_equal_rsn(cqe64, rsn)) {
-			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
+			if (srq && (ntohl(cqe64->srqn_uidx) & 0xffffff))
 				mlx5_free_srq_wqe(srq, ntohs(cqe64->wqe_counter));
 			++nfreed;
 		} else if (nfreed) {
diff --git a/src/mlx5-abi.h b/src/mlx5-abi.h
index 9c6ec7c..28490a5 100644
--- a/src/mlx5-abi.h
+++ b/src/mlx5-abi.h
@@ -70,7 +70,8 @@ struct mlx5_alloc_ucontext_resp {
 	__u32				max_recv_wr;
 	__u32				max_srq_recv_wr;
 	__u16				num_ports;
-	__u16				reserved;
+	__u8				cqe_version;
+	__u8				reserved[5];
 };
 
 struct mlx5_alloc_pd_resp {
diff --git a/src/mlx5.h b/src/mlx5.h
index 17ef51c..ce2e581 100644
--- a/src/mlx5.h
+++ b/src/mlx5.h
@@ -306,6 +306,7 @@ struct mlx5_context {
 	char				hostname[40];
 	struct mlx5_spinlock            hugetlb_lock;
 	struct list_head                hugetlb_list;
+	uint8_t				cqe_version;
 };
 
 struct mlx5_bitmap {
@@ -544,6 +545,16 @@ static inline int max_int(int a, int b)
 	return a > b ? a : b;
 }
 
+static inline struct mlx5_qp *rsc_to_mqp(struct mlx5_resource *rsc)
+{
+	return (struct mlx5_qp *)rsc;
+}
+
+static inline struct mlx5_srq *rsc_to_msrq(struct mlx5_resource *rsc)
+{
+	return (struct mlx5_srq *)rsc;
+}
+
 int mlx5_alloc_buf(struct mlx5_buf *buf, size_t size, int page_size);
 void mlx5_free_buf(struct mlx5_buf *buf);
 int mlx5_alloc_buf_contig(struct mlx5_context *mctx, struct mlx5_buf *buf,
@@ -586,6 +597,7 @@ int mlx5_free_cq_buf(struct mlx5_context *ctx, struct mlx5_buf *buf);
 int mlx5_resize_cq(struct ibv_cq *cq, int cqe);
 int mlx5_destroy_cq(struct ibv_cq *cq);
 int mlx5_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+int mlx5_poll_cq_v1(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
 int mlx5_arm_cq(struct ibv_cq *cq, int solicited);
 void mlx5_cq_event(struct ibv_cq *cq);
 void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t qpn, struct mlx5_srq *srq);
-- 
1.8.3.1


* [PATCH libmlx5 4/5] Added QP and XSRQ create/destroy flow with user index
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

When working with CQE version 1, the library allocates a user index
for each new QP/XSRQ and passes it to the kernel.

When a QP/XSRQ is destroyed, the library frees its user index so that
it can be reused.

At this stage the library still doesn't work with CQE version 1, so we
prepare the user index in the driver data but don't pass it to the
kernel yet.
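
Condensed, the create/destroy lifecycle implemented below looks like
this (arguments and error labels elided; XRC target QPs, which get no
user index, are skipped):

	/* Create: reserve a user index and publish the object. */
	int32_t uidx = mlx5_store_uidx(ctx, qp);

	if (uidx < 0)
		return NULL;			/* index table is full */
	cmd.uidx = uidx;

	if (ibv_cmd_create_qp_ex(...)) {	/* kernel create failed */
		mlx5_clear_uidx(ctx, uidx);	/* roll the reservation back */
		return NULL;
	}
	qp->rsc.rsn = uidx;		/* poll_cq will key on this value */

	/* Destroy: free the user index so it can be reused. */
	mlx5_clear_uidx(ctx, qp->rsc.rsn);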


Signed-off-by: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 src/cq.c       |  47 +++++++++++++++++++--
 src/mlx5-abi.h |   5 +++
 src/mlx5.c     |  10 +++++
 src/mlx5.h     |   4 +-
 src/verbs.c    | 127 ++++++++++++++++++++++++++++++++++++++++++---------------
 5 files changed, 155 insertions(+), 38 deletions(-)

diff --git a/src/cq.c b/src/cq.c
index a1fdac3..32f0dd4 100644
--- a/src/cq.c
+++ b/src/cq.c
@@ -721,6 +721,47 @@ static int is_equal_rsn(struct mlx5_cqe64 *cqe64, uint32_t rsn)
 	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
 }
 
+static int is_equal_uidx(struct mlx5_cqe64 *cqe64, uint32_t uidx)
+{
+	return uidx == (ntohl(cqe64->srqn_uidx) & 0xffffff);
+}
+
+static inline int is_responder(uint8_t opcode)
+{
+	switch (opcode) {
+	case MLX5_CQE_RESP_WR_IMM:
+	case MLX5_CQE_RESP_SEND:
+	case MLX5_CQE_RESP_SEND_IMM:
+	case MLX5_CQE_RESP_SEND_INV:
+	case MLX5_CQE_RESP_ERR:
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int free_res_cqe(struct mlx5_cqe64 *cqe64, uint32_t rsn,
+			       struct mlx5_srq *srq, int cqe_version)
+{
+	if (cqe_version) {
+		if (is_equal_uidx(cqe64, rsn)) {
+			if (srq && is_responder(cqe64->op_own >> 4))
+				mlx5_free_srq_wqe(srq,
+						  ntohs(cqe64->wqe_counter));
+			return 1;
+		}
+	} else {
+		if (is_equal_rsn(cqe64, rsn)) {
+			if (srq && (ntohl(cqe64->srqn_uidx) & 0xffffff))
+				mlx5_free_srq_wqe(srq,
+						  ntohs(cqe64->wqe_counter));
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t rsn, struct mlx5_srq *srq)
 {
 	uint32_t prod_index;
@@ -728,6 +769,7 @@ void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t rsn, struct mlx5_srq *srq)
 	struct mlx5_cqe64 *cqe64, *dest64;
 	void *cqe, *dest;
 	uint8_t owner_bit;
+	int cqe_version;
 
 	if (!cq)
 		return;
@@ -747,12 +789,11 @@ void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t rsn, struct mlx5_srq *srq)
 	 * Now sweep backwards through the CQ, removing CQ entries
 	 * that match our QP by copying older entries on top of them.
 	 */
+	cqe_version = (to_mctx(cq->ibv_cq.context))->cqe_version;
 	while ((int) --prod_index - (int) cq->cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
 		cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;
-		if (is_equal_rsn(cqe64, rsn)) {
-			if (srq && (ntohl(cqe64->srqn_uidx) & 0xffffff))
-				mlx5_free_srq_wqe(srq, ntohs(cqe64->wqe_counter));
+		if (free_res_cqe(cqe64, rsn, srq, cqe_version)) {
 			++nfreed;
 		} else if (nfreed) {
 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
diff --git a/src/mlx5-abi.h b/src/mlx5-abi.h
index 28490a5..001a4be 100644
--- a/src/mlx5-abi.h
+++ b/src/mlx5-abi.h
@@ -109,6 +109,9 @@ struct mlx5_create_srq_ex {
 	__u64				buf_addr;
 	__u64				db_addr;
 	__u32				flags;
+	__u32				reserved;
+	__u32                           uidx;
+	__u32                           reserved1;
 };
 
 struct mlx5_create_qp {
@@ -119,6 +122,8 @@ struct mlx5_create_qp {
 	__u32				rq_wqe_count;
 	__u32				rq_wqe_shift;
 	__u32				flags;
+	__u32                           uidx;
+	__u32                           reserved;
 };
 
 struct mlx5_create_qp_resp {
diff --git a/src/mlx5.c b/src/mlx5.c
index 6ce8bd8..e64ba8d 100644
--- a/src/mlx5.c
+++ b/src/mlx5.c
@@ -600,11 +600,21 @@ static int mlx5_init_context(struct verbs_device *vdev,
 	context->max_recv_wr	= resp.max_recv_wr;
 	context->max_srq_recv_wr = resp.max_srq_recv_wr;
 
+	if (context->cqe_version) {
+		if (context->cqe_version == 1)
+			mlx5_ctx_ops.poll_cq = mlx5_poll_cq_v1;
+		else
+			context->cqe_version = 0;
+	}
+
 	pthread_mutex_init(&context->qp_table_mutex, NULL);
 	pthread_mutex_init(&context->srq_table_mutex, NULL);
 	for (i = 0; i < MLX5_QP_TABLE_SIZE; ++i)
 		context->qp_table[i].refcnt = 0;
 
+	for (i = 0; i < MLX5_UIDX_TABLE_SIZE; ++i)
+		context->uidx_table[i].refcnt = 0;
+
 	context->db_list = NULL;
 
 	pthread_mutex_init(&context->db_list_mutex, NULL);
diff --git a/src/mlx5.h b/src/mlx5.h
index ce2e581..3f1a05a 100644
--- a/src/mlx5.h
+++ b/src/mlx5.h
@@ -242,8 +242,8 @@ enum mlx5_rsc_type {
 };
 
 struct mlx5_resource {
-	enum mlx5_rsc_type      type;
-	uint32_t                rsn;
+	enum mlx5_rsc_type  type;
+	uint32_t            rsn;
 };
 
 struct mlx5_device {
diff --git a/src/verbs.c b/src/verbs.c
index 8ddf4e6..c2f5493 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -505,6 +505,8 @@ struct ibv_srq *mlx5_create_srq(struct ibv_pd *pd,
 	pthread_mutex_unlock(&ctx->srq_table_mutex);
 
 	srq->srqn = resp.srqn;
+	srq->rsc.rsn = resp.srqn;
+	srq->rsc.type = MLX5_RSC_TYPE_SRQ;
 
 	return ibsrq;
 
@@ -545,16 +547,22 @@ int mlx5_query_srq(struct ibv_srq *srq,
 int mlx5_destroy_srq(struct ibv_srq *srq)
 {
 	int ret;
+	struct mlx5_srq *msrq = to_msrq(srq);
+	struct mlx5_context *ctx = to_mctx(srq->context);
 
 	ret = ibv_cmd_destroy_srq(srq);
 	if (ret)
 		return ret;
 
-	mlx5_clear_srq(to_mctx(srq->context), to_msrq(srq)->srqn);
-	mlx5_free_db(to_mctx(srq->context), to_msrq(srq)->db);
-	mlx5_free_buf(&to_msrq(srq)->buf);
-	free(to_msrq(srq)->wrid);
-	free(to_msrq(srq));
+	if (ctx->cqe_version && msrq->rsc.type == MLX5_RSC_TYPE_XSRQ)
+		mlx5_clear_uidx(ctx, msrq->rsc.rsn);
+	else
+		mlx5_clear_srq(ctx, msrq->srqn);
+
+	mlx5_free_db(ctx, msrq->db);
+	mlx5_free_buf(&msrq->buf);
+	free(msrq->wrid);
+	free(msrq);
 
 	return 0;
 }
@@ -873,6 +881,11 @@ static void mlx5_free_qp_buf(struct mlx5_qp *qp)
 		free(qp->sq.wrid);
 }
 
+static inline int is_xrc_tgt(int type)
+{
+	return type == IBV_QPT_XRC_RECV;
+}
+
 struct ibv_qp *create_qp(struct ibv_context *context,
 			 struct ibv_qp_init_attr_ex *attr)
 {
@@ -937,24 +950,35 @@ struct ibv_qp *create_qp(struct ibv_context *context,
 	cmd.rq_wqe_count = qp->rq.wqe_cnt;
 	cmd.rq_wqe_shift = qp->rq.wqe_shift;
 
-	pthread_mutex_lock(&ctx->qp_table_mutex);
+	if (!ctx->cqe_version) {
+		pthread_mutex_lock(&ctx->qp_table_mutex);
+	} else if (!is_xrc_tgt(attr->qp_type)) {
+		int32_t uidx = mlx5_store_uidx(ctx, qp);
+
+		if (uidx < 0) {
+			mlx5_dbg(fp, MLX5_DBG_QP, "Couldn't find free user index\n");
+			goto err_rq_db;
+		}
+		cmd.uidx = uidx;
+	}
 
 	ret = ibv_cmd_create_qp_ex(context, &qp->verbs_qp, sizeof(qp->verbs_qp),
-				   attr, &cmd.ibv_cmd, sizeof(cmd),
+				   attr, &cmd.ibv_cmd,
+				   offsetof(struct mlx5_create_qp, uidx),
 				   &resp.ibv_resp, sizeof(resp));
 	if (ret) {
 		mlx5_dbg(fp, MLX5_DBG_QP, "ret %d\n", ret);
-		goto err_rq_db;
+		goto err_free_uidx;
 	}
 
-	if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) {
-		ret = mlx5_store_qp(ctx, ibqp->qp_num, qp);
-		if (ret) {
-			mlx5_dbg(fp, MLX5_DBG_QP, "ret %d\n", ret);
-			goto err_destroy;
+	if (!ctx->cqe_version) {
+		if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) {
+			ret = mlx5_store_qp(ctx, ibqp->qp_num, qp);
+			if (ret) {
+				mlx5_dbg(fp, MLX5_DBG_QP, "ret %d\n", ret);
+				goto err_destroy;
+			}
 		}
+		pthread_mutex_unlock(&ctx->qp_table_mutex);
 	}
-	pthread_mutex_unlock(&ctx->qp_table_mutex);
 
 	map_uuar(context, qp, resp.uuar_index);
 
@@ -968,13 +992,21 @@ struct ibv_qp *create_qp(struct ibv_context *context,
 	attr->cap.max_recv_wr = qp->rq.max_post;
 	attr->cap.max_recv_sge = qp->rq.max_gs;
 
+	qp->rsc.type = MLX5_RSC_TYPE_QP;
+	qp->rsc.rsn = (ctx->cqe_version && !is_xrc_tgt(attr->qp_type)) ?
+		      cmd.uidx : ibqp->qp_num;
+
 	return ibqp;
 
 err_destroy:
 	ibv_cmd_destroy_qp(ibqp);
 
+err_free_uidx:
+	if (!ctx->cqe_version)
+		pthread_mutex_unlock(&to_mctx(context)->qp_table_mutex);
+	else if (!is_xrc_tgt(attr->qp_type))
+		mlx5_clear_uidx(ctx, cmd.uidx);
 err_rq_db:
-	pthread_mutex_unlock(&to_mctx(context)->qp_table_mutex);
 	mlx5_free_db(to_mctx(context), qp->db);
 
 err_free_qp_buf:
@@ -1045,27 +1077,37 @@ static void mlx5_unlock_cqs(struct ibv_qp *qp)
 int mlx5_destroy_qp(struct ibv_qp *ibqp)
 {
 	struct mlx5_qp *qp = to_mqp(ibqp);
+	struct mlx5_context *ctx = to_mctx(ibqp->context);
 	int ret;
 
-	pthread_mutex_lock(&to_mctx(ibqp->context)->qp_table_mutex);
+	if (!ctx->cqe_version)
+		pthread_mutex_lock(&to_mctx(ibqp->context)->qp_table_mutex);
+
 	ret = ibv_cmd_destroy_qp(ibqp);
 	if (ret) {
-		pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
+		if (!ctx->cqe_version)
+			pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
+
 		return ret;
 	}
 
 	mlx5_lock_cqs(ibqp);
 
-	__mlx5_cq_clean(to_mcq(ibqp->recv_cq), ibqp->qp_num,
+	__mlx5_cq_clean(to_mcq(ibqp->recv_cq), qp->rsc.rsn,
 			ibqp->srq ? to_msrq(ibqp->srq) : NULL);
 	if (ibqp->send_cq != ibqp->recv_cq)
-		__mlx5_cq_clean(to_mcq(ibqp->send_cq), ibqp->qp_num, NULL);
+		__mlx5_cq_clean(to_mcq(ibqp->send_cq), qp->rsc.rsn, NULL);
 
-	if (qp->sq.wqe_cnt || qp->rq.wqe_cnt)
-		mlx5_clear_qp(to_mctx(ibqp->context), ibqp->qp_num);
+	if (!ctx->cqe_version) {
+		if (qp->sq.wqe_cnt || qp->rq.wqe_cnt)
+			mlx5_clear_qp(to_mctx(ibqp->context), ibqp->qp_num);
+	}
 
 	mlx5_unlock_cqs(ibqp);
-	pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
+	if (!ctx->cqe_version)
+		pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
+	else if (!is_xrc_tgt(ibqp->qp_type))
+		mlx5_clear_uidx(ctx, qp->rsc.rsn);
 
 	mlx5_free_db(to_mctx(ibqp->context), qp->db);
 	mlx5_free_qp_buf(qp);
@@ -1107,11 +1149,11 @@ int mlx5_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
 	    (attr_mask & IBV_QP_STATE) &&
 	    attr->qp_state == IBV_QPS_RESET) {
 		if (qp->recv_cq) {
-			mlx5_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
+			mlx5_cq_clean(to_mcq(qp->recv_cq), to_mqp(qp)->rsc.rsn,
 				      qp->srq ? to_msrq(qp->srq) : NULL);
 		}
 		if (qp->send_cq != qp->recv_cq && qp->send_cq)
-			mlx5_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
+			mlx5_cq_clean(to_mcq(qp->send_cq), to_mqp(qp)->rsc.rsn, NULL);
 
 		mlx5_init_qp_indices(to_mqp(qp));
 		db = to_mqp(qp)->db;
@@ -1233,6 +1275,7 @@ mlx5_create_xrc_srq(struct ibv_context *context,
 	struct mlx5_context *ctx;
 	int max_sge;
 	struct ibv_srq *ibsrq;
+	int uidx;
 
 	msrq = calloc(1, sizeof(*msrq));
 	if (!msrq)
@@ -1296,28 +1339,46 @@ mlx5_create_xrc_srq(struct ibv_context *context,
 		cmd.flags = MLX5_SRQ_FLAG_SIGNATURE;
 
 	attr->attr.max_sge = msrq->max_gs;
-	pthread_mutex_lock(&ctx->srq_table_mutex);
+
+	if (ctx->cqe_version) {
+		uidx = mlx5_store_uidx(ctx, msrq);
+		if (uidx < 0) {
+			mlx5_dbg(fp, MLX5_DBG_QP, "Couldn't find free user index\n");
+			goto err_free_db;
+		}
+		cmd.uidx = uidx;
+	} else {
+		pthread_mutex_lock(&ctx->srq_table_mutex);
+	}
+
 	err = ibv_cmd_create_srq_ex(context, &msrq->vsrq, sizeof(msrq->vsrq),
-				    attr, &cmd.ibv_cmd, sizeof(cmd),
+				    attr, &cmd.ibv_cmd, offsetof(struct mlx5_create_srq_ex, uidx),
 				    &resp.ibv_resp, sizeof(resp));
 	if (err)
-		goto err_free_db;
+		goto err_free_uidx;
 
-	err = mlx5_store_srq(to_mctx(context), resp.srqn, msrq);
-	if (err)
-		goto err_destroy;
+	if (!ctx->cqe_version) {
+		err = mlx5_store_srq(to_mctx(context), resp.srqn, msrq);
+		if (err)
+			goto err_destroy;
 
-	pthread_mutex_unlock(&ctx->srq_table_mutex);
+		pthread_mutex_unlock(&ctx->srq_table_mutex);
+	}
 
 	msrq->srqn = resp.srqn;
+	msrq->rsc.type = MLX5_RSC_TYPE_XSRQ;
+	msrq->rsc.rsn = ctx->cqe_version ? cmd.uidx : resp.srqn;
 
 	return ibsrq;
 
 err_destroy:
 	ibv_cmd_destroy_srq(ibsrq);
-
+err_free_uidx:
+	if (ctx->cqe_version)
+		mlx5_clear_uidx(ctx, cmd.uidx);
+	else
+		pthread_mutex_unlock(&ctx->srq_table_mutex);
 err_free_db:
-	pthread_mutex_unlock(&ctx->srq_table_mutex);
 	mlx5_free_db(ctx, msrq->db);
 
 err_free:
-- 
1.8.3.1


* [PATCH libmlx5 5/5] Work with CQE version 1
From: Haggai Abramovsky @ 2015-09-06  9:30 UTC
  To: Eli Cohen
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Doug Ledford, Haggai Abramovsky

From now on, if the kernel supports CQE version 1, the library will
choose to work with it.
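
Put together with the previous patches, the negotiation amounts to the
following (condensed from the mlx5_init_context() hunks):

	memset(&resp, 0, sizeof(resp));	/* older kernels leave cqe_version 0 */
	ibv_cmd_get_context(&context->ibv_ctx, &req.ibv_req, sizeof(req),
			    &resp.ibv_resp, sizeof(resp));

	context->cqe_version = resp.cqe_version;
	if (context->cqe_version == 1)
		mlx5_ctx_ops.poll_cq = mlx5_poll_cq_v1;	/* v1 CQE parser */
	else
		context->cqe_version = 0;	/* unknown: fall back to v0 */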


Signed-off-by: Haggai Abramovsky <hagaya-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
---
 src/mlx5.c  | 3 +++
 src/verbs.c | 5 ++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/mlx5.c b/src/mlx5.c
index e64ba8d..3bf8cb4 100644
--- a/src/mlx5.c
+++ b/src/mlx5.c
@@ -582,6 +582,8 @@ static int mlx5_init_context(struct verbs_device *vdev,
 	}
 
 	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
 	req.total_num_uuars = tot_uuars;
 	req.num_low_latency_uuars = low_lat_uuars;
 	if (ibv_cmd_get_context(&context->ibv_ctx, &req.ibv_req, sizeof req,
@@ -600,6 +602,7 @@ static int mlx5_init_context(struct verbs_device *vdev,
 	context->max_recv_wr	= resp.max_recv_wr;
 	context->max_srq_recv_wr = resp.max_srq_recv_wr;
 
+	context->cqe_version = resp.cqe_version;
 	if (context->cqe_version) {
 		if (context->cqe_version == 1)
 			mlx5_ctx_ops.poll_cq = mlx5_poll_cq_v1;
diff --git a/src/verbs.c b/src/verbs.c
index c2f5493..ea943b7 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -961,8 +961,7 @@ struct ibv_qp *create_qp(struct ibv_context *context,
 	}
 
 	ret = ibv_cmd_create_qp_ex(context, &qp->verbs_qp, sizeof(qp->verbs_qp),
-				   attr, &cmd.ibv_cmd,
-				   offsetof(struct mlx5_create_qp, uidx),
+				   attr, &cmd.ibv_cmd, sizeof(cmd),
 				   &resp.ibv_resp, sizeof(resp));
 	if (ret) {
 		mlx5_dbg(fp, MLX5_DBG_QP, "ret %d\n", ret);
@@ -1352,7 +1351,7 @@ mlx5_create_xrc_srq(struct ibv_context *context,
 	}
 
 	err = ibv_cmd_create_srq_ex(context, &msrq->vsrq, sizeof(msrq->vsrq),
-				    attr, &cmd.ibv_cmd, offsetof(struct mlx5_create_srq_ex, uidx),
+				    attr, &cmd.ibv_cmd, sizeof(cmd),
 				    &resp.ibv_resp, sizeof(resp));
 	if (err)
 		goto err_free_uidx;
-- 
1.8.3.1

