* [PATCH 2/3] iw_cxgb4: add drain_qp function
       [not found] ` <cover.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-01-14 18:24   ` Steve Wise
  2016-01-27 20:09   ` [PATCH 3/3] IB/srp: use ib_drain_qp() Steve Wise
  2016-02-05 21:13   ` [PATCH 1/3] IB: new common API for draining a queue pair Steve Wise
  2 siblings, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-01-14 18:24 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA

Add a completion object, named qp_drained, to the c4iw_qp struct.
This completion object is signaled when the last CQE is drained from
the CQs for the QP.

Add c4iw_drain_qp(), which blocks until qp_drained is completed.
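
For reference, a minimal caller-side sketch of the ordering this
implies (illustrative only, not part of this patch; the ULP function
name below is made up):

static void ulp_teardown_qp(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	/*
	 * Move the QP out of RTS first: qp_drained is only completed
	 * once the QP has left RTS and both work queues are empty.
	 */
	ib_modify_qp(qp, &attr, IB_QP_STATE);

	ib_drain_qp(qp);	/* resolves to c4iw_drain_qp() on iw_cxgb4 */
	ib_destroy_qp(qp);
}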

Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/hw/cxgb4/cq.c       | 6 +++++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 ++
 drivers/infiniband/hw/cxgb4/provider.c | 1 +
 drivers/infiniband/hw/cxgb4/qp.c       | 8 ++++++++
 4 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cf21df4..6fdcf78 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -815,8 +815,12 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		}
 	}
 out:
-	if (wq)
+	if (wq) {
+		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS &&
+			     t4_sq_empty(wq) && t4_rq_empty(wq)))
+			complete(&qhp->qp_drained);
 		spin_unlock(&qhp->lock);
+	}
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index fb2de75..fdb9d9a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -476,6 +476,7 @@ struct c4iw_qp {
 	wait_queue_head_t wait;
 	struct timer_list timer;
 	int sq_sig_all;
+	struct completion qp_drained;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -1016,6 +1017,7 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
+void c4iw_drain_qp(struct ib_qp *qp);
 
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index ec04272..0ab942f 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -564,6 +564,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.get_protocol_stats = c4iw_get_mib;
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
+	dev->ibdev.drain_qp = c4iw_drain_qp;
 
 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
 	if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e99345e..2e70c01 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1697,6 +1697,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
+	init_completion(&qhp->qp_drained);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
@@ -1888,3 +1889,10 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
 	return 0;
 }
+
+void c4iw_drain_qp(struct ib_qp *ibqp)
+{
+	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+	wait_for_completion(&qp->qp_drained);
+}
-- 
2.7.0


* [PATCH 3/3] IB/srp: use ib_drain_qp()
       [not found] ` <cover.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-01-14 18:24   ` [PATCH 2/3] iw_cxgb4: add drain_qp function Steve Wise
@ 2016-01-27 20:09   ` Steve Wise
  2016-02-05 21:13   ` [PATCH 1/3] IB: new common API for draining a queue pair Steve Wise
  2 siblings, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-01-27 20:09 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA

Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/ulp/srp/ib_srp.c | 45 ++++++-------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6..95670ae 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
 				  dev->max_pages_per_mr);
 }
 
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-	struct srp_rdma_ch *ch = cq->cq_context;
-
-	complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
-	.done		= srp_drain_done,
-};
-
 /**
  * srp_destroy_qp() - destroy an RDMA queue pair
  * @ch: SRP RDMA channel.
  *
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the qp before destroying it.  This prevents the receive
+ * completion handler from accessing the queue pair while it is
+ * being destroyed.
  */
 static void srp_destroy_qp(struct srp_rdma_ch *ch)
 {
-	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
-	static struct ib_recv_wr wr = { 0 };
-	struct ib_recv_wr *bad_wr;
-	int ret;
-
-	wr.wr_cqe = &srp_drain_cqe;
-	/* Destroying a QP and reusing ch->done is only safe if not connected */
-	WARN_ON_ONCE(ch->connected);
-
-	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
-	WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
-	if (ret)
-		goto out;
-
-	init_completion(&ch->done);
-	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
-	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
-	if (ret == 0)
-		wait_for_completion(&ch->done);
-
-out:
+	ib_drain_qp(ch->qp);
 	ib_destroy_qp(ch->qp);
 }
 
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	if (!init_attr)
 		return -ENOMEM;
 
-	/* queue_size + 1 for ib_drain_qp */
+	/* queue_size + 1 for ib_drain_qp() */
 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
 				ch->comp_vector, IB_POLL_SOFTIRQ);
 	if (IS_ERR(recv_cq)) {
@@ -516,7 +484,8 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 		goto err;
 	}
 
-	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
+	/* queue_size + 1 for ib_drain_qp() */
+	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size + 1,
 				ch->comp_vector, IB_POLL_DIRECT);
 	if (IS_ERR(send_cq)) {
 		ret = PTR_ERR(send_cq);
@@ -524,7 +493,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	}
 
 	init_attr->event_handler       = srp_qp_event;
-	init_attr->cap.max_send_wr     = m * target->queue_size;
+	init_attr->cap.max_send_wr     = m * target->queue_size + 1;
 	init_attr->cap.max_recv_wr     = target->queue_size + 1;
 	init_attr->cap.max_recv_sge    = 1;
 	init_attr->cap.max_send_sge    = 1;
-- 
2.7.0


* [PATCH 1/3] IB: new common API for draining a queue pair
       [not found] ` <cover.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-01-14 18:24   ` [PATCH 2/3] iw_cxgb4: add drain_qp function Steve Wise
  2016-01-27 20:09   ` [PATCH 3/3] IB/srp: use ib_drain_qp() Steve Wise
@ 2016-02-05 21:13   ` Steve Wise
       [not found]     ` <2da1db58d642789e8df154e34d622a37295d1ba3.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2 siblings, 1 reply; 18+ messages in thread
From: Steve Wise @ 2016-02-05 21:13 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA

From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>

Add a provider-specific drain_qp function for providers needing special
drain logic.

Add static function __ib_drain_qp(), which posts no-op WRs to the RQ and
SQ and blocks until their completions are processed.  This ensures that
the application's completions have all been processed.

Add API function ib_drain_qp(), which calls the provider-specific drain
if one exists, or __ib_drain_qp() otherwise.

Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
 include/rdma/ib_verbs.h         |  2 ++
 2 files changed, 74 insertions(+)

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d02..31b82cd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1657,3 +1657,75 @@ next_page:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for both the RQ and SQ.
+ */
+static void __ib_drain_qp(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain, sdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&rdrain.done);
+	wait_for_completion(&sdrain.done);
+}
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_qp().
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	if (qp->device->drain_qp)
+		qp->device->drain_qp(qp);
+	else
+		__ib_drain_qp(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 284b00c..d8533ab 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1846,6 +1846,7 @@ struct ib_device {
 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
 						      struct ib_mr_status *mr_status);
 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+	void			   (*drain_qp)(struct ib_qp *qp);
 
 	struct ib_dma_mapping_ops   *dma_ops;
 
@@ -3094,4 +3095,5 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		   int sg_nents,
 		   int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
-- 
2.7.0


* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]     ` <2da1db58d642789e8df154e34d622a37295d1ba3.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-05 21:49       ` Chuck Lever
       [not found]         ` <3AB519EB-3BFE-4A02-A825-F74EECD2CE19-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
  2016-02-06 16:10       ` Devesh Sharma
  2016-02-06 17:08       ` Leon Romanovsky
  2 siblings, 1 reply; 18+ messages in thread
From: Chuck Lever @ 2016-02-05 21:49 UTC (permalink / raw)
  To: Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA


> On Feb 5, 2016, at 4:13 PM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
> 
> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
> 
> Add provider-specific drain_qp function for providers needing special
> drain logic.
> 
> Add static function __ib_drain_qp() which posts noop WRs to the RQ and
> SQ and blocks until their completions are processed.  This ensures the
> applications completions have all been processed.
> 
> Add API function ib_drain_qp() which calls the provider-specific drain
> if it exists or __ib_drain_qp().
> 
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
> drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
> include/rdma/ib_verbs.h         |  2 ++
> 2 files changed, 74 insertions(+)
> 
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 5af6d02..31b82cd 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -1657,3 +1657,75 @@ next_page:
> 	return i;
> }
> EXPORT_SYMBOL(ib_sg_to_pages);
> +
> +struct ib_drain_cqe {
> +	struct ib_cqe cqe;
> +	struct completion done;
> +};
> +
> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> +{
> +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> +						cqe);
> +
> +	complete(&cqe->done);
> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
> + */
> +static void __ib_drain_qp(struct ib_qp *qp)
> +{
> +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +	struct ib_drain_cqe rdrain, sdrain;
> +	struct ib_recv_wr rwr = {}, *bad_rwr;
> +	struct ib_send_wr swr = {}, *bad_swr;
> +	int ret;
> +
> +	rwr.wr_cqe = &rdrain.cqe;
> +	rdrain.cqe.done = ib_drain_qp_done;
> +	init_completion(&rdrain.done);
> +
> +	swr.wr_cqe = &sdrain.cqe;
> +	sdrain.cqe.done = ib_drain_qp_done;

OK. ib_cqe is what hooks the completion events for these
blank WRs, so those completions are never exposed to the
RDMA consumer.

But does a consumer have to bump its SQE and RQE count
when allocating its CQs, or is that done automatically
by ib_alloc_cq() ?



> +	init_completion(&sdrain.done);
> +
> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +		return;
> +	}
> +
> +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> +		return;
> +	}
> +
> +	ret = ib_post_send(qp, &swr, &bad_swr);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +		return;
> +	}
> +
> +	wait_for_completion(&rdrain.done);
> +	wait_for_completion(&sdrain.done);
> +}
> +
> +/**
> + * ib_drain_qp() - Block until all CQEs have been consumed by the
> + *		   application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_qp().
> + */
> +void ib_drain_qp(struct ib_qp *qp)
> +{
> +	if (qp->device->drain_qp)
> +		qp->device->drain_qp(qp);
> +	else
> +		__ib_drain_qp(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_qp);
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 284b00c..d8533ab 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1846,6 +1846,7 @@ struct ib_device {
> 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
> 						      struct ib_mr_status *mr_status);
> 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
> +	void			   (*drain_qp)(struct ib_qp *qp);
> 
> 	struct ib_dma_mapping_ops   *dma_ops;
> 
> @@ -3094,4 +3095,5 @@ int ib_sg_to_pages(struct ib_mr *mr,
> 		   int sg_nents,
> 		   int (*set_page)(struct ib_mr *, u64));
> 
> +void ib_drain_qp(struct ib_qp *qp);
> #endif /* IB_VERBS_H */
> -- 
> 2.7.0
> 

--
Chuck Lever





* RE: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]         ` <3AB519EB-3BFE-4A02-A825-F74EECD2CE19-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2016-02-05 21:53           ` Steve Wise
  2016-02-05 22:00             ` Chuck Lever
  0 siblings, 1 reply; 18+ messages in thread
From: Steve Wise @ 2016-02-05 21:53 UTC (permalink / raw)
  To: 'Chuck Lever'; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

> > From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
> >
> > Add provider-specific drain_qp function for providers needing special
> > drain logic.
> >
> > Add static function __ib_drain_qp() which posts noop WRs to the RQ and
> > SQ and blocks until their completions are processed.  This ensures the
> > applications completions have all been processed.
> >
> > Add API function ib_drain_qp() which calls the provider-specific drain
> > if it exists or __ib_drain_qp().
> >
> > Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> > ---
> > drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
> > include/rdma/ib_verbs.h         |  2 ++
> > 2 files changed, 74 insertions(+)
> >
> > diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> > index 5af6d02..31b82cd 100644
> > --- a/drivers/infiniband/core/verbs.c
> > +++ b/drivers/infiniband/core/verbs.c
> > @@ -1657,3 +1657,75 @@ next_page:
> > 	return i;
> > }
> > EXPORT_SYMBOL(ib_sg_to_pages);
> > +
> > +struct ib_drain_cqe {
> > +	struct ib_cqe cqe;
> > +	struct completion done;
> > +};
> > +
> > +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> > +{
> > +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> > +						cqe);
> > +
> > +	complete(&cqe->done);
> > +}
> > +
> > +/*
> > + * Post a WR and block until its completion is reaped for both the RQ and SQ.
> > + */
> > +static void __ib_drain_qp(struct ib_qp *qp)
> > +{
> > +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> > +	struct ib_drain_cqe rdrain, sdrain;
> > +	struct ib_recv_wr rwr = {}, *bad_rwr;
> > +	struct ib_send_wr swr = {}, *bad_swr;
> > +	int ret;
> > +
> > +	rwr.wr_cqe = &rdrain.cqe;
> > +	rdrain.cqe.done = ib_drain_qp_done;
> > +	init_completion(&rdrain.done);
> > +
> > +	swr.wr_cqe = &sdrain.cqe;
> > +	sdrain.cqe.done = ib_drain_qp_done;
> 
> OK. ib_cqe is what hooks the completion events for these
> blank WRs, so those completions are never exposed to the
> RDMA consumer.
> 

Right, which means only consumers that use the new style CQ processing can make use of this.
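
The new-style CQ API dispatches each completion through the ib_cqe
embedded in the WR, roughly as sketched below (a simplification, not
verbatim kernel code), which is why the drain completions never reach
the consumer's own completion handlers:

static void dispatch_wc(struct ib_cq *cq, struct ib_wc *wc)
{
	/*
	 * wr_cqe was set by whoever posted the WR; the drain WRs point
	 * it at ib_drain_qp_done(), so their completions are consumed
	 * by the drain logic itself.
	 */
	if (wc->wr_cqe)
		wc->wr_cqe->done(cq, wc);
}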

> But does a consumer have to bump its SQE and RQE count
> when allocating its CQs, or is that done automatically
> by ib_alloc_cq() ?
>

The consumer has to make sure there is room in the SQ, RQ and CQ.
Going forward, we could enhance QP and CQ allocation to let the
consumer specify that it wants drain capability, so the consumer
doesn't have to do this itself; it could be done under the covers.
In fact, if we did that, then ib_destroy_qp() could do the drain if
need be.
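
A sketch of the sizing a consumer does today, modeled on the srp change
in patch 3/3 (the variable names are placeholders; the point is the
extra slot per queue and per CQ for the drain WRs):

/* One extra CQE and WR slot on each side for the drain WRs. */
recv_cq = ib_alloc_cq(dev, ctx, queue_size + 1, comp_vector,
		      IB_POLL_SOFTIRQ);
send_cq = ib_alloc_cq(dev, ctx, queue_size + 1, comp_vector,
		      IB_POLL_DIRECT);

init_attr->cap.max_recv_wr = queue_size + 1;
init_attr->cap.max_send_wr = queue_size + 1;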
 



* [PATCH 0/3] new ib_drain_qp() API
@ 2016-02-05 21:55 Steve Wise
       [not found] ` <cover.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Steve Wise @ 2016-02-05 21:55 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA

This series creates a new helper API for draining a queue pair.  It is a
rework of an original patch created by Christoph Hellwig as part of the CQ
API rework; that patch was dropped so that I could resubmit it with
iw_cxgb4 support.

Original thread: http://www.spinics.net/lists/linux-rdma/msg30296.html


Steve Wise (3):
  IB: new common API for draining a queue pair
  iw_cxgb4: add drain_qp function
  IB/srp: use ib_drain_qp()

 drivers/infiniband/core/verbs.c        | 72 ++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/cxgb4/cq.c       |  6 ++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  2 +
 drivers/infiniband/hw/cxgb4/provider.c |  1 +
 drivers/infiniband/hw/cxgb4/qp.c       |  8 ++++
 drivers/infiniband/ulp/srp/ib_srp.c    | 45 ++++-----------------
 include/rdma/ib_verbs.h                |  2 +
 7 files changed, 97 insertions(+), 39 deletions(-)

-- 
2.7.0


* Re: [PATCH 1/3] IB: new common API for draining a queue pair
  2016-02-05 21:53           ` Steve Wise
@ 2016-02-05 22:00             ` Chuck Lever
       [not found]               ` <53AAECEE-AC46-4E28-9FC0-DB1E567A2A50-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Chuck Lever @ 2016-02-05 22:00 UTC (permalink / raw)
  To: Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA


> On Feb 5, 2016, at 4:53 PM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
> 
>>> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
>>> 
>>> Add provider-specific drain_qp function for providers needing special
>>> drain logic.
>>> 
>>> Add static function __ib_drain_qp() which posts noop WRs to the RQ and
>>> SQ and blocks until their completions are processed.  This ensures the
>>> applications completions have all been processed.
>>> 
>>> Add API function ib_drain_qp() which calls the provider-specific drain
>>> if it exists or __ib_drain_qp().
>>> 
>>> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
>>> ---
>>> drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
>>> include/rdma/ib_verbs.h         |  2 ++
>>> 2 files changed, 74 insertions(+)
>>> 
>>> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
>>> index 5af6d02..31b82cd 100644
>>> --- a/drivers/infiniband/core/verbs.c
>>> +++ b/drivers/infiniband/core/verbs.c
>>> @@ -1657,3 +1657,75 @@ next_page:
>>> 	return i;
>>> }
>>> EXPORT_SYMBOL(ib_sg_to_pages);
>>> +
>>> +struct ib_drain_cqe {
>>> +	struct ib_cqe cqe;
>>> +	struct completion done;
>>> +};
>>> +
>>> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
>>> +{
>>> +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
>>> +						cqe);
>>> +
>>> +	complete(&cqe->done);
>>> +}
>>> +
>>> +/*
>>> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
>>> + */
>>> +static void __ib_drain_qp(struct ib_qp *qp)
>>> +{
>>> +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
>>> +	struct ib_drain_cqe rdrain, sdrain;
>>> +	struct ib_recv_wr rwr = {}, *bad_rwr;
>>> +	struct ib_send_wr swr = {}, *bad_swr;
>>> +	int ret;
>>> +
>>> +	rwr.wr_cqe = &rdrain.cqe;
>>> +	rdrain.cqe.done = ib_drain_qp_done;
>>> +	init_completion(&rdrain.done);
>>> +
>>> +	swr.wr_cqe = &sdrain.cqe;
>>> +	sdrain.cqe.done = ib_drain_qp_done;
>> 
>> OK. ib_cqe is what hooks the completion events for these
>> blank WRs, so those completions are never exposed to the
>> RDMA consumer.
>> 
> 
> Right, which means only consumers that use the new style CQ processing can make use of this.
> 
>> But does a consumer have to bump its SQE and RQE count
>> when allocating its CQs, or is that done automatically
>> by ib_alloc_cq() ?
>> 
> 
> The consumer has to make sure there is room in the SQ, RQ and CQ.

The documenting comment in front of ib_drain_qp() should
mention this, and it should also state the requirement to
use ib_alloc_cq().


> Going forward, we could enhance QP and CQ allocation to allow the
> consumer to specify it wants drain capability so the consumer doesn't have to do this. It could be done under the covers.  In fact,
> if we did that, then ib_destroy_qp() could do the drain if need be.


--
Chuck Lever





* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]               ` <53AAECEE-AC46-4E28-9FC0-DB1E567A2A50-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
@ 2016-02-05 22:20                 ` Chuck Lever
  2016-02-08 15:23                 ` Steve Wise
  1 sibling, 0 replies; 18+ messages in thread
From: Chuck Lever @ 2016-02-05 22:20 UTC (permalink / raw)
  To: Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA


> On Feb 5, 2016, at 5:00 PM, Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org> wrote:
> 
>> 
>> On Feb 5, 2016, at 4:53 PM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
>> 
>>>> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
>>>> 
>>>> Add provider-specific drain_qp function for providers needing special
>>>> drain logic.
>>>> 
>>>> Add static function __ib_drain_qp() which posts noop WRs to the RQ and
>>>> SQ and blocks until their completions are processed.  This ensures the
>>>> applications completions have all been processed.
>>>> 
>>>> Add API function ib_drain_qp() which calls the provider-specific drain
>>>> if it exists or __ib_drain_qp().
>>>> 
>>>> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
>>>> ---
>>>> drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
>>>> include/rdma/ib_verbs.h         |  2 ++
>>>> 2 files changed, 74 insertions(+)
>>>> 
>>>> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
>>>> index 5af6d02..31b82cd 100644
>>>> --- a/drivers/infiniband/core/verbs.c
>>>> +++ b/drivers/infiniband/core/verbs.c
>>>> @@ -1657,3 +1657,75 @@ next_page:
>>>> 	return i;
>>>> }
>>>> EXPORT_SYMBOL(ib_sg_to_pages);
>>>> +
>>>> +struct ib_drain_cqe {
>>>> +	struct ib_cqe cqe;
>>>> +	struct completion done;
>>>> +};
>>>> +
>>>> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
>>>> +{
>>>> +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
>>>> +						cqe);
>>>> +
>>>> +	complete(&cqe->done);
>>>> +}
>>>> +
>>>> +/*
>>>> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
>>>> + */
>>>> +static void __ib_drain_qp(struct ib_qp *qp)
>>>> +{
>>>> +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
>>>> +	struct ib_drain_cqe rdrain, sdrain;
>>>> +	struct ib_recv_wr rwr = {}, *bad_rwr;
>>>> +	struct ib_send_wr swr = {}, *bad_swr;
>>>> +	int ret;
>>>> +
>>>> +	rwr.wr_cqe = &rdrain.cqe;
>>>> +	rdrain.cqe.done = ib_drain_qp_done;
>>>> +	init_completion(&rdrain.done);
>>>> +
>>>> +	swr.wr_cqe = &sdrain.cqe;
>>>> +	sdrain.cqe.done = ib_drain_qp_done;
>>> 
>>> OK. ib_cqe is what hooks the completion events for these
>>> blank WRs, so those completions are never exposed to the
>>> RDMA consumer.
>>> 
>> 
>> Right, which means only consumers that use the new style CQ processing can make use of this.
>> 
>>> But does a consumer have to bump its SQE and RQE count
>>> when allocating its CQs, or is that done automatically
>>> by ib_alloc_cq() ?
>>> 
>> 
>> The consumer has to make sure there is room in the SQ, RQ and CQ.
> 
> The documenting comment in front of ib_drain_qp() should
> mention this, and it should also state the requirement to
> use ib_alloc_cq().

And otherwise, I'm happy to see this work!

Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>


>> Going forward, we could enhance QP and CQ allocation to allow the
>> consumer to specify it wants drain capability so the consumer doesn't have to do this. It could be done under the covers.  In fact,
>> if we did that, then ib_destroy_qp() could do the drain if need be.
> 
> 
> --
> Chuck Lever
> 
> 
> 
> 

--
Chuck Lever





* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]     ` <2da1db58d642789e8df154e34d622a37295d1ba3.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-05 21:49       ` Chuck Lever
@ 2016-02-06 16:10       ` Devesh Sharma
       [not found]         ` <CANjDDBjFY+u=8UXVCkKkK0LKSWiRcfZYnP9035m5owAYZnfn6w-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2016-02-06 17:08       ` Leon Romanovsky
  2 siblings, 1 reply; 18+ messages in thread
From: Devesh Sharma @ 2016-02-06 16:10 UTC (permalink / raw)
  To: Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

Hi Steve,


On Sat, Feb 6, 2016 at 2:43 AM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
>
> Add provider-specific drain_qp function for providers needing special
> drain logic.
>
> Add static function __ib_drain_qp() which posts noop WRs to the RQ and
> SQ and blocks until their completions are processed.  This ensures the
> applications completions have all been processed.
>
> Add API function ib_drain_qp() which calls the provider-specific drain
> if it exists or __ib_drain_qp().
>
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
>  drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
>  include/rdma/ib_verbs.h         |  2 ++
>  2 files changed, 74 insertions(+)
>
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 5af6d02..31b82cd 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -1657,3 +1657,75 @@ next_page:
>         return i;
>  }
>  EXPORT_SYMBOL(ib_sg_to_pages);
> +
> +struct ib_drain_cqe {
> +       struct ib_cqe cqe;
> +       struct completion done;
> +};
> +
> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> +{
> +       struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> +                                               cqe);
> +
> +       complete(&cqe->done);
> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
> + */
> +static void __ib_drain_qp(struct ib_qp *qp)
> +{
> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +       struct ib_drain_cqe rdrain, sdrain;
> +       struct ib_recv_wr rwr = {}, *bad_rwr;
> +       struct ib_send_wr swr = {}, *bad_swr;
> +       int ret;
> +
> +       rwr.wr_cqe = &rdrain.cqe;
> +       rdrain.cqe.done = ib_drain_qp_done;
> +       init_completion(&rdrain.done);
> +
> +       swr.wr_cqe = &sdrain.cqe;
> +       sdrain.cqe.done = ib_drain_qp_done;
> +       init_completion(&sdrain.done);
> +
> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +               return;
> +       }

I was wondering whether we really need this modify_qp here.  Generally
drain_qp is called on a QP that has already errored out.  In a graceful
teardown, rdma_disconnect takes care of modifying the QP to error, so
by the time we drain it the QP is already in the error state.

> +
> +       ret = ib_post_recv(qp, &rwr, &bad_rwr);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> +               return;
> +       }
> +
> +       ret = ib_post_send(qp, &swr, &bad_swr);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +               return;
> +       }
> +
> +       wait_for_completion(&rdrain.done);
> +       wait_for_completion(&sdrain.done);
> +}
> +
> +/**
> + * ib_drain_qp() - Block until all CQEs have been consumed by the
> + *                application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_qp().
> + */
> +void ib_drain_qp(struct ib_qp *qp)
> +{
> +       if (qp->device->drain_qp)
> +               qp->device->drain_qp(qp);
> +       else
> +               __ib_drain_qp(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_qp);
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 284b00c..d8533ab 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1846,6 +1846,7 @@ struct ib_device {
>         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
>                                                       struct ib_mr_status *mr_status);
>         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
> +       void                       (*drain_qp)(struct ib_qp *qp);
>
>         struct ib_dma_mapping_ops   *dma_ops;
>
> @@ -3094,4 +3095,5 @@ int ib_sg_to_pages(struct ib_mr *mr,
>                    int sg_nents,
>                    int (*set_page)(struct ib_mr *, u64));
>
> +void ib_drain_qp(struct ib_qp *qp);
>  #endif /* IB_VERBS_H */
> --
> 2.7.0
>

* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]     ` <2da1db58d642789e8df154e34d622a37295d1ba3.1454709317.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-05 21:49       ` Chuck Lever
  2016-02-06 16:10       ` Devesh Sharma
@ 2016-02-06 17:08       ` Leon Romanovsky
       [not found]         ` <20160206170838.GC8584-2ukJVAZIZ/Y@public.gmane.org>
  2 siblings, 1 reply; 18+ messages in thread
From: Leon Romanovsky @ 2016-02-06 17:08 UTC (permalink / raw)
  To: Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Fri, Feb 05, 2016 at 01:13:16PM -0800, Steve Wise wrote:
> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
> 
> Add provider-specific drain_qp function for providers needing special
> drain logic.
> 
> Add static function __ib_drain_qp() which posts noop WRs to the RQ and
> SQ and blocks until their completions are processed.  This ensures the
> applications completions have all been processed.
> 
> Add API function ib_drain_qp() which calls the provider-specific drain
> if it exists or __ib_drain_qp().
> 
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
>  drivers/infiniband/core/verbs.c | 72 +++++++++++++++++++++++++++++++++++++++++
>  include/rdma/ib_verbs.h         |  2 ++
>  2 files changed, 74 insertions(+)
> 

 ...

> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +		return;
> +	}
> +
> +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> +		return;
> +	}
> +
> +	ret = ib_post_send(qp, &swr, &bad_swr);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +		return;

You are returning here despite the fact that the recv WR was posted
successfully, so you could still safely wait for the rdrain completion.
Is that done on purpose?

> +	}
> +
> +	wait_for_completion(&rdrain.done);
> +	wait_for_completion(&sdrain.done);
> +}
> +

* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]         ` <20160206170838.GC8584-2ukJVAZIZ/Y@public.gmane.org>
@ 2016-02-07 11:51           ` Sagi Grimberg
       [not found]             ` <56B72FD4.3060407-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Sagi Grimberg @ 2016-02-07 11:51 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA


>> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
>> +	if (ret) {
>> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
>> +		return;
>> +	}
>> +
>> +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
>> +	if (ret) {
>> +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
>> +		return;
>> +	}
>> +
>> +	ret = ib_post_send(qp, &swr, &bad_swr);
>> +	if (ret) {
>> +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
>> +		return;
>
> You are returning here despite the fact that recv queue drained
> successfully and you can wait for completion of rdrain safely.
> Is it done on purpose?

Good catch, the completion structures are on-stack.

Steve, you need to wait for the completion of the
successful post in this case...
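
One way to restructure the error path for that, which is essentially
what v2 of the patch (later in this thread) ends up doing:

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		goto err_drain_rq;
	}

	wait_for_completion(&sdrain.done);
err_drain_rq:
	wait_for_completion(&rdrain.done);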

* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]         ` <CANjDDBjFY+u=8UXVCkKkK0LKSWiRcfZYnP9035m5owAYZnfn6w-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2016-02-07 11:53           ` Sagi Grimberg
       [not found]             ` <56B73049.5040901-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
  0 siblings, 1 reply; 18+ messages in thread
From: Sagi Grimberg @ 2016-02-07 11:53 UTC (permalink / raw)
  To: Devesh Sharma, Steve Wise; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA


>> +/*
>> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
>> + */
>> +static void __ib_drain_qp(struct ib_qp *qp)
>> +{
>> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
>> +       struct ib_drain_cqe rdrain, sdrain;
>> +       struct ib_recv_wr rwr = {}, *bad_rwr;
>> +       struct ib_send_wr swr = {}, *bad_swr;
>> +       int ret;
>> +
>> +       rwr.wr_cqe = &rdrain.cqe;
>> +       rdrain.cqe.done = ib_drain_qp_done;
>> +       init_completion(&rdrain.done);
>> +
>> +       swr.wr_cqe = &sdrain.cqe;
>> +       sdrain.cqe.done = ib_drain_qp_done;
>> +       init_completion(&sdrain.done);
>> +
>> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
>> +       if (ret) {
>> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
>> +               return;
>> +       }
>
> I was thinking if we really need this modify_qp here. generally
> drain_qp is called on
> an error'ed out QP. In a graceful tear down rdma_disconnect takes care
> to modify-qp
> to error. while in error state qp is already in error state.

We could make it conditional, but that would mean a passed-in flag,
and I don't see any harm in leaving the modify in unconditionally.

It would be better to have the modify_qp implementers do
nothing for an ERROR -> ERROR modify...
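
A sketch of what that might look like on the provider side (illustrative
only; the function and field names below are made up and not taken from
any existing driver):

static int provider_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	struct provider_qp *qp = to_provider_qp(ibqp);

	/*
	 * Treat ERROR -> ERROR as a no-op so a generic drain can always
	 * issue the modify safely.
	 */
	if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_ERR &&
	    qp->cur_state == IB_QPS_ERR)
		return 0;

	/* ... normal state transition handling ... */
	return 0;
}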

* Re: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]             ` <56B73049.5040901-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
@ 2016-02-08 10:37               ` Devesh Sharma
       [not found]                 ` <CANjDDBiGVxmMn7H-__z48hS93mHoqejiZgsry0DmLwJKxZ=MqQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2016-02-08 15:34               ` Steve Wise
  1 sibling, 1 reply; 18+ messages in thread
From: Devesh Sharma @ 2016-02-08 10:37 UTC (permalink / raw)
  To: Sagi Grimberg; +Cc: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Sun, Feb 7, 2016 at 5:23 PM, Sagi Grimberg <sagig-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org> wrote:
>
>>> +/*
>>> + * Post a WR and block until its completion is reaped for both the RQ
>>> and SQ.
>>> + */
>>> +static void __ib_drain_qp(struct ib_qp *qp)
>>> +{
>>> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
>>> +       struct ib_drain_cqe rdrain, sdrain;
>>> +       struct ib_recv_wr rwr = {}, *bad_rwr;
>>> +       struct ib_send_wr swr = {}, *bad_swr;
>>> +       int ret;
>>> +
>>> +       rwr.wr_cqe = &rdrain.cqe;
>>> +       rdrain.cqe.done = ib_drain_qp_done;
>>> +       init_completion(&rdrain.done);
>>> +
>>> +       swr.wr_cqe = &sdrain.cqe;
>>> +       sdrain.cqe.done = ib_drain_qp_done;
>>> +       init_completion(&sdrain.done);
>>> +
>>> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
>>> +       if (ret) {
>>> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
>>> +               return;
>>> +       }
>>
>>
>> I was thinking if we really need this modify_qp here. generally
>> drain_qp is called on
>> an error'ed out QP. In a graceful tear down rdma_disconnect takes care
>> to modify-qp
>> to error. while in error state qp is already in error state.
>
>
> We could get it conditional, but I don't see any harm done
> in having it as it would mean a passed in flag.
>

Since the spec allows this, I would say it's okay to let it be there.

> It would be better to have the modify_qp implementers do
> nothing for a ERROR -> ERROR modify...

The driver implementer has a choice here, which is okay.

* RE: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]               ` <53AAECEE-AC46-4E28-9FC0-DB1E567A2A50-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
  2016-02-05 22:20                 ` Chuck Lever
@ 2016-02-08 15:23                 ` Steve Wise
  1 sibling, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-02-08 15:23 UTC (permalink / raw)
  To: 'Chuck Lever'; +Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

> >> OK. ib_cqe is what hooks the completion events for these
> >> blank WRs, so those completions are never exposed to the
> >> RDMA consumer.
> >>
> >
> > Right, which means only consumers that use the new style CQ processing can make use of this.
> >
> >> But does a consumer have to bump its SQE and RQE count
> >> when allocating its CQs, or is that done automatically
> >> by ib_alloc_cq() ?
> >>
> >
> > The consumer has to make sure there is room in the SQ, RQ and CQ.
> 
> The documenting comment in front of ib_drain_qp() should
> mention this, and it should also state the requirement to
> use ib_alloc_cq().
>

Will do.
 



* RE: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]             ` <56B72FD4.3060407-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
@ 2016-02-08 15:24               ` Steve Wise
  0 siblings, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-02-08 15:24 UTC (permalink / raw)
  To: 'Sagi Grimberg', linux-rdma-u79uwXL29TY76Z2rM5mHXA



> -----Original Message-----
> From: Sagi Grimberg [mailto:sagig-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org]
> Sent: Sunday, February 07, 2016 5:52 AM
> To: Steve Wise; linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> Subject: Re: [PATCH 1/3] IB: new common API for draining a queue pair
> 
> 
> >> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> >> +	if (ret) {
> >> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> >> +		return;
> >> +	}
> >> +
> >> +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
> >> +	if (ret) {
> >> +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> >> +		return;
> >> +	}
> >> +
> >> +	ret = ib_post_send(qp, &swr, &bad_swr);
> >> +	if (ret) {
> >> +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> >> +		return;
> >
> > You are returning here despite the fact that recv queue drained
> > successfully and you can wait for completion of rdrain safely.
> > Is it done on purpose?
> 
> Good catch, the completion structures are on-stack.
> 
> Steve, you need to wait for the completion of the
> successful post in this case...

Yup.  Will do.


* RE: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]             ` <56B73049.5040901-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
  2016-02-08 10:37               ` Devesh Sharma
@ 2016-02-08 15:34               ` Steve Wise
  1 sibling, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-02-08 15:34 UTC (permalink / raw)
  To: 'Sagi Grimberg', 'Devesh Sharma'
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

> >> +/*
> >> + * Post a WR and block until its completion is reaped for both the RQ and SQ.
> >> + */
> >> +static void __ib_drain_qp(struct ib_qp *qp)
> >> +{
> >> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> >> +       struct ib_drain_cqe rdrain, sdrain;
> >> +       struct ib_recv_wr rwr = {}, *bad_rwr;
> >> +       struct ib_send_wr swr = {}, *bad_swr;
> >> +       int ret;
> >> +
> >> +       rwr.wr_cqe = &rdrain.cqe;
> >> +       rdrain.cqe.done = ib_drain_qp_done;
> >> +       init_completion(&rdrain.done);
> >> +
> >> +       swr.wr_cqe = &sdrain.cqe;
> >> +       sdrain.cqe.done = ib_drain_qp_done;
> >> +       init_completion(&sdrain.done);
> >> +
> >> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> >> +       if (ret) {
> >> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> >> +               return;
> >> +       }
> >
> > I was thinking if we really need this modify_qp here. generally
> > drain_qp is called on
> > an error'ed out QP. In a graceful tear down rdma_disconnect takes care
> > to modify-qp
> > to error. while in error state qp is already in error state.
> 
> We could get it conditional, but I don't see any harm done
> in having it as it would mean a passed in flag.
> 
> It would be better to have the modify_qp implementers do
> nothing for a ERROR -> ERROR modify...

The IBTA spec says ERROR->ERROR is allowed, so nobody should be failing
that transition.  iWARP, however, has to be different. :)  It says any
attempt to transition out of ERROR to anything other than IDLE results
in an immediate error.  But since iWARP providers will need their own
handler anyway, I think we should just leave the modify->ERROR in as-is.
Or perhaps ignore the return code.

Steve.




* RE: [PATCH 1/3] IB: new common API for draining a queue pair
       [not found]                 ` <CANjDDBiGVxmMn7H-__z48hS93mHoqejiZgsry0DmLwJKxZ=MqQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2016-02-08 15:53                   ` Steve Wise
  0 siblings, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-02-08 15:53 UTC (permalink / raw)
  To: 'Devesh Sharma', 'Sagi Grimberg'
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA

> On Sun, Feb 7, 2016 at 5:23 PM, Sagi Grimberg <sagig-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org> wrote:
> >
> >>> +/*
> >>> + * Post a WR and block until its completion is reaped for both the RQ
> >>> and SQ.
> >>> + */
> >>> +static void __ib_drain_qp(struct ib_qp *qp)
> >>> +{
> >>> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> >>> +       struct ib_drain_cqe rdrain, sdrain;
> >>> +       struct ib_recv_wr rwr = {}, *bad_rwr;
> >>> +       struct ib_send_wr swr = {}, *bad_swr;
> >>> +       int ret;
> >>> +
> >>> +       rwr.wr_cqe = &rdrain.cqe;
> >>> +       rdrain.cqe.done = ib_drain_qp_done;
> >>> +       init_completion(&rdrain.done);
> >>> +
> >>> +       swr.wr_cqe = &sdrain.cqe;
> >>> +       sdrain.cqe.done = ib_drain_qp_done;
> >>> +       init_completion(&sdrain.done);
> >>> +
> >>> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> >>> +       if (ret) {
> >>> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> >>> +               return;
> >>> +       }
> >>
> >>
> >> I was thinking if we really need this modify_qp here. generally
> >> drain_qp is called on
> >> an error'ed out QP. In a graceful tear down rdma_disconnect takes care
> >> to modify-qp
> >> to error. while in error state qp is already in error state.
> >
> >
> > We could get it conditional, but I don't see any harm done
> > in having it as it would mean a passed in flag.
> >
> 
> Since Spec allows this, I would say okay let it be there.
> 
> > It would be better to have the modify_qp implementers do
> > nothing for a ERROR -> ERROR modify...
> 
> Driver implementer has a choice here which is okay.

Ok, I'll leave it as-is then.  Thanks for the reviews!




* [PATCH 1/3] IB: new common API for draining a queue pair
       [not found] ` <cover.1454969695.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-05 21:13   ` Steve Wise
  0 siblings, 0 replies; 18+ messages in thread
From: Steve Wise @ 2016-02-05 21:13 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA; +Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>

Add a provider-specific drain_qp function for providers needing special
drain logic.

Add static function __ib_drain_qp(), which posts no-op WRs to the RQ and
SQ and blocks until their completions are processed.  This ensures that
the application's completions have all been processed.

Add API function ib_drain_qp(), which calls the provider-specific drain
if one exists, or __ib_drain_qp() otherwise.

Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/core/verbs.c | 77 +++++++++++++++++++++++++++++++++++++++++
 include/rdma/ib_verbs.h         |  2 ++
 2 files changed, 79 insertions(+)

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d02..98ddcb4 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1657,3 +1657,80 @@ next_page:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for both the RQ and SQ.
+ */
+static void __ib_drain_qp(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain, sdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		goto err_drain_rq;
+	}
+
+	wait_for_completion(&sdrain.done);
+err_drain_rq:
+	wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_qp().
+ *
+ * The consumer must ensure there is room in the CQ, SQ, and RQ
+ * for a drain work request.  The consumer must also allocate the
+ * CQ using ib_alloc_cq(), and thus must not poll the CQ directly.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	if (qp->device->drain_qp)
+		qp->device->drain_qp(qp);
+	else
+		__ib_drain_qp(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 284b00c..d8533ab 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1846,6 +1846,7 @@ struct ib_device {
 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
 						      struct ib_mr_status *mr_status);
 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+	void			   (*drain_qp)(struct ib_qp *qp);
 
 	struct ib_dma_mapping_ops   *dma_ops;
 
@@ -3094,4 +3095,5 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		   int sg_nents,
 		   int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
-- 
2.7.0

