All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 2/3] iw_cxgb4: add queue drain functions
       [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-01-14 18:24   ` Steve Wise
  2016-01-27 20:09   ` [PATCH 3/3] IB/srp: use ib_drain_rq() Steve Wise
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 22+ messages in thread
From: Steve Wise @ 2016-01-14 18:24 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA; +Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

Add completion objects, named sq_drained and rq_drained, to the c4iw_qp
struct.  The queue-specific completion object is signaled when the last
CQE is drained from the CQ for that queue.

Add c4iw_drain_sq() to block until qp->sq_drained is completed.

Add c4iw_drain_rq() to block until qp->rq_drained is completed.

Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/hw/cxgb4/cq.c       |  9 ++++++++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  4 ++++
 drivers/infiniband/hw/cxgb4/provider.c |  2 ++
 drivers/infiniband/hw/cxgb4/qp.c       | 16 ++++++++++++++++
 4 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cf21df4..b4eeb78 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		}
 	}
 out:
-	if (wq)
+	if (wq) {
+		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
+			if (t4_sq_empty(wq))
+				complete(&qhp->sq_drained);
+			if (t4_rq_empty(wq))
+				complete(&qhp->rq_drained);
+		}
 		spin_unlock(&qhp->lock);
+	}
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index fb2de75..7c6a6e1 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -476,6 +476,8 @@ struct c4iw_qp {
 	wait_queue_head_t wait;
 	struct timer_list timer;
 	int sq_sig_all;
+	struct completion rq_drained;
+	struct completion sq_drained;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -1016,6 +1018,8 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
+void c4iw_drain_rq(struct ib_qp *qp);
+void c4iw_drain_sq(struct ib_qp *qp);
 
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index ec04272..104662d 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -564,6 +564,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.get_protocol_stats = c4iw_get_mib;
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
+	dev->ibdev.drain_sq = c4iw_drain_sq;
+	dev->ibdev.drain_rq = c4iw_drain_rq;
 
 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
 	if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e99345e..7b1b1e8 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1697,6 +1697,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
+	init_completion(&qhp->sq_drained);
+	init_completion(&qhp->rq_drained);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
@@ -1888,3 +1890,17 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
 	return 0;
 }
+
+void c4iw_drain_sq(struct ib_qp *ibqp)
+{
+	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+	wait_for_completion(&qp->sq_drained);
+}
+
+void c4iw_drain_rq(struct ib_qp *ibqp)
+{
+	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+	wait_for_completion(&qp->rq_drained);
+}
-- 
2.7.0

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 3/3] IB/srp: use ib_drain_rq()
       [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-01-14 18:24   ` [PATCH 2/3] iw_cxgb4: add queue drain functions Steve Wise
@ 2016-01-27 20:09   ` Steve Wise
       [not found]     ` <c11baa726a6440549ab46b9525116d9fe74eb5a0.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-05 21:13   ` [PATCH 1/3] IB: new common API for draining queues Steve Wise
  2016-02-13 16:10   ` [PATCH v3 0/3] new ib_drain_qp() API Leon Romanovsky
  3 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-01-27 20:09 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA; +Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/ulp/srp/ib_srp.c | 40 ++++---------------------------------
 1 file changed, 4 insertions(+), 36 deletions(-)

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6..b6bf204 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
 				  dev->max_pages_per_mr);
 }
 
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-	struct srp_rdma_ch *ch = cq->cq_context;
-
-	complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
-	.done		= srp_drain_done,
-};
-
 /**
  * srp_destroy_qp() - destroy an RDMA queue pair
  * @ch: SRP RDMA channel.
  *
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the qp before destroying it.  This avoids that the receive
+ * completion handler can access the queue pair while it is
  * being destroyed.
  */
 static void srp_destroy_qp(struct srp_rdma_ch *ch)
 {
-	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
-	static struct ib_recv_wr wr = { 0 };
-	struct ib_recv_wr *bad_wr;
-	int ret;
-
-	wr.wr_cqe = &srp_drain_cqe;
-	/* Destroying a QP and reusing ch->done is only safe if not connected */
-	WARN_ON_ONCE(ch->connected);
-
-	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
-	WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
-	if (ret)
-		goto out;
-
-	init_completion(&ch->done);
-	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
-	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
-	if (ret == 0)
-		wait_for_completion(&ch->done);
-
-out:
+	ib_drain_rq(ch->qp);
 	ib_destroy_qp(ch->qp);
 }
 
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	if (!init_attr)
 		return -ENOMEM;
 
-	/* queue_size + 1 for ib_drain_qp */
+	/* queue_size + 1 for ib_drain_rq() */
 	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
 				ch->comp_vector, IB_POLL_SOFTIRQ);
 	if (IS_ERR(recv_cq)) {
-- 
2.7.0

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH 1/3] IB: new common API for draining queues
       [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-01-14 18:24   ` [PATCH 2/3] iw_cxgb4: add queue drain functions Steve Wise
  2016-01-27 20:09   ` [PATCH 3/3] IB/srp: use ib_drain_rq() Steve Wise
@ 2016-02-05 21:13   ` Steve Wise
       [not found]     ` <3e7261d1436d33320223d365974ff38945f0d558.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-13 16:10   ` [PATCH v3 0/3] new ib_drain_qp() API Leon Romanovsky
  3 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-02-05 21:13 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA; +Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>

Add provider-specific drain_sq/drain_rq functions for providers needing
special drain logic.

Add static functions __ib_drain_sq() and __ib_drain_rq() which post noop
WRs to the SQ or RQ and block until their completions are processed.
This ensures the applications completions have all been processed.

Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().

Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
---
 drivers/infiniband/core/verbs.c | 144 ++++++++++++++++++++++++++++++++++++++++
 include/rdma/ib_verbs.h         |   5 ++
 2 files changed, 149 insertions(+)

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d02..aed521e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1657,3 +1657,147 @@ next_page:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+static void wait_for_drain(struct ib_cq *cq, struct completion *c)
+{
+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		do
+			ib_process_cq_direct(cq, 1024);
+		while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
+	else
+		wait_for_completion(c);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe sdrain;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_drain(qp->send_cq, &sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	int ret;
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_drain(qp->recv_cq, &rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The consumer must ensure there is room in the CQ and SQ
+ * for a drain work requests.  Also the consumer must allocate the
+ * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
+ * issues if the CQs are using the IB_POLL_DIRECT polling context.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+	if (qp->device->drain_sq)
+		qp->device->drain_sq(qp);
+	else
+		__ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The consumer must ensure there is room in the CQ and RQ
+ * for a drain work requests.  Also the consumer must allocate the
+ * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
+ * issues if the CQs are using the IB_POLL_DIRECT polling context.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+	if (qp->device->drain_rq)
+		qp->device->drain_rq(qp);
+	else
+		__ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application on both the RQ and SQ.
+ * @qp:            queue pair to drain
+ *
+ * The consumer must ensure there is room in the CQ(s), SQ, and RQ
+ * for a drain work requests.  Also the consumer must allocate the
+ * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
+ * issues if the CQs are using the IB_POLL_DIRECT polling context.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	ib_drain_sq(qp);
+	ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 284b00c..68b7e97 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1846,6 +1846,8 @@ struct ib_device {
 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
 						      struct ib_mr_status *mr_status);
 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+	void			   (*drain_rq)(struct ib_qp *qp);
+	void			   (*drain_sq)(struct ib_qp *qp);
 
 	struct ib_dma_mapping_ops   *dma_ops;
 
@@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
 		   int sg_nents,
 		   int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_rq(struct ib_qp *qp);
+void ib_drain_sq(struct ib_qp *qp);
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
-- 
2.7.0

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v3 0/3] new ib_drain_qp() API
@ 2016-02-11 22:44 Steve Wise
       [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-02-11 22:44 UTC (permalink / raw)
  To: linux-rdma-u79uwXL29TY76Z2rM5mHXA; +Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

This series creates new helper API functions for draining a queue pair.
It is a rework of an original patch created by Christoph Hellwig as
part of the CQ API rework and was dropped to be resubmitted by me with
iw_cxgb4 support.

Original thread: http://www.spinics.net/lists/linux-rdma/msg30296.html

Changes since v2:

- created 3 drain API functions: ib_drain_rq(), ib_drain_sq(), and
ib_drain_qp()

- add provider-specific drain function pointers for the sq and rq

- refactored the code a bit

- support for IB_DIRECT_POLL CQs


Changes since v1:

- added comments to the ib_drain_qp() function header specifying the
consumer requirements

- in __ib_drain_qp(), if the ib_post_send() fails, still wait for the
recv wr to drain since we already posted it.

- CC the SRP maintainer, bart.vanassche-XdAiOPVOjtvowKkBSvOlow@public.gmane.org

---

Steve Wise (3):
  IB: new common API for draining queues
  iw_cxgb4: add queue drain functions
  IB/srp: use ib_drain_rq()

 drivers/infiniband/core/verbs.c        | 144 +++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/cxgb4/cq.c       |   9 ++-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |   4 +
 drivers/infiniband/hw/cxgb4/provider.c |   2 +
 drivers/infiniband/hw/cxgb4/qp.c       |  16 ++++
 drivers/infiniband/ulp/srp/ib_srp.c    |  40 +--------
 include/rdma/ib_verbs.h                |   5 ++
 7 files changed, 183 insertions(+), 37 deletions(-)

-- 
2.7.0

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]     ` <3e7261d1436d33320223d365974ff38945f0d558.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-11 22:59       ` Bart Van Assche
       [not found]         ` <56BD1248.80805-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
  2016-02-12  5:19       ` Devesh Sharma
  2016-02-15 21:05       ` Doug Ledford
  2 siblings, 1 reply; 22+ messages in thread
From: Bart Van Assche @ 2016-02-11 22:59 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On 02/05/2016 01:13 PM, Steve Wise wrote:
> +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> +{
> +	if (cq->poll_ctx == IB_POLL_DIRECT)
> +		do
> +			ib_process_cq_direct(cq, 1024);
> +		while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> +	else
> +		wait_for_completion(c);
> +}

Hello Steve,

Have you verified this patch with checkpatch ? I expect that checkpatch 
will report that six braces are missing from this function.

Thanks,

Bart.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 3/3] IB/srp: use ib_drain_rq()
       [not found]     ` <c11baa726a6440549ab46b9525116d9fe74eb5a0.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-11 23:01       ` Bart Van Assche
  0 siblings, 0 replies; 22+ messages in thread
From: Bart Van Assche @ 2016-02-11 23:01 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On 01/27/2016 12:09 PM, Steve Wise wrote:
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>

Reviewed-by: Bart Van Assche <bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]         ` <56BD1248.80805-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-11 23:15           ` Bart Van Assche
       [not found]             ` <56BD1624.4090309-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
  2016-02-11 23:18           ` Steve Wise
  1 sibling, 1 reply; 22+ messages in thread
From: Bart Van Assche @ 2016-02-11 23:15 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On 02/11/2016 02:59 PM, Bart Van Assche wrote:
> On 02/05/2016 01:13 PM, Steve Wise wrote:
>> +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
>> +{
>> +    if (cq->poll_ctx == IB_POLL_DIRECT)
>> +        do
>> +            ib_process_cq_direct(cq, 1024);
>> +        while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
>> +    else
>> +        wait_for_completion(c);
>> +}
>
> Hello Steve,
>
> Have you verified this patch with checkpatch ? I expect that checkpatch
> will report that six braces are missing from this function.

(replying to my own e-mail)

BTW, this patch series does not call wait_for_drain() for any queue that 
uses IB_POLL_DIRECT. This means that the "if (cq->poll_ctx == 
IB_POLL_DIRECT)" part is untested. Please do not propose untested code 
for upstream inclusion.

Thanks,

Bart.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: [PATCH 1/3] IB: new common API for draining queues
       [not found]         ` <56BD1248.80805-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
  2016-02-11 23:15           ` Bart Van Assche
@ 2016-02-11 23:18           ` Steve Wise
  1 sibling, 0 replies; 22+ messages in thread
From: Steve Wise @ 2016-02-11 23:18 UTC (permalink / raw)
  To: 'Bart Van Assche', linux-rdma-u79uwXL29TY76Z2rM5mHXA



> -----Original Message-----
> From: linux-rdma-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org [mailto:linux-rdma-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org] On Behalf Of Bart Van Assche
> Sent: Thursday, February 11, 2016 4:59 PM
> To: Steve Wise; linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> Subject: Re: [PATCH 1/3] IB: new common API for draining queues
> 
> On 02/05/2016 01:13 PM, Steve Wise wrote:
> > +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> > +{
> > +	if (cq->poll_ctx == IB_POLL_DIRECT)
> > +		do
> > +			ib_process_cq_direct(cq, 1024);
> > +		while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> > +	else
> > +		wait_for_completion(c);
> > +}
> 
> Hello Steve,
> 
> Have you verified this patch with checkpatch ? I expect that checkpatch
> will report that six braces are missing from this function.
> 
> Thanks,
> 
> Bart.

I'll run it through.  Thanks.


--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: [PATCH 1/3] IB: new common API for draining queues
       [not found]             ` <56BD1624.4090309-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-11 23:20               ` Steve Wise
  2016-02-11 23:23                 ` Bart Van Assche
  0 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-02-11 23:20 UTC (permalink / raw)
  To: 'Bart Van Assche', linux-rdma-u79uwXL29TY76Z2rM5mHXA



> -----Original Message-----
> From: linux-rdma-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org [mailto:linux-rdma-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org] On Behalf Of Bart Van Assche
> Sent: Thursday, February 11, 2016 5:16 PM
> To: Steve Wise; linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> Subject: Re: [PATCH 1/3] IB: new common API for draining queues
> 
> On 02/11/2016 02:59 PM, Bart Van Assche wrote:
> > On 02/05/2016 01:13 PM, Steve Wise wrote:
> >> +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> >> +{
> >> +    if (cq->poll_ctx == IB_POLL_DIRECT)
> >> +        do
> >> +            ib_process_cq_direct(cq, 1024);
> >> +        while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> >> +    else
> >> +        wait_for_completion(c);
> >> +}
> >
> > Hello Steve,
> >
> > Have you verified this patch with checkpatch ? I expect that checkpatch
> > will report that six braces are missing from this function.
> 
> (replying to my own e-mail)
> 
> BTW, this patch series does not call wait_for_drain() for any queue that
> uses IB_POLL_DIRECT. This means that the "if (cq->poll_ctx ==
> IB_POLL_DIRECT)" part is untested. Please do propose untested code for
> upstream inclusion.
> 

I guess you're making your point to not post code that has no user...



--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
  2016-02-11 23:20               ` Steve Wise
@ 2016-02-11 23:23                 ` Bart Van Assche
       [not found]                   ` <56BD1800.9050508-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
  0 siblings, 1 reply; 22+ messages in thread
From: Bart Van Assche @ 2016-02-11 23:23 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On 02/11/2016 03:20 PM, Steve Wise wrote:
> I guess you're making your point to not post code that has no user...

How about using WARN_ONCE(cq->poll_ctx == IB_POLL_DIRECT) instead ?

Bart.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]                   ` <56BD1800.9050508-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
@ 2016-02-12  0:32                     ` Steve Wise
  0 siblings, 0 replies; 22+ messages in thread
From: Steve Wise @ 2016-02-12  0:32 UTC (permalink / raw)
  To: Bart Van Assche, linux-rdma-u79uwXL29TY76Z2rM5mHXA



On 2/11/2016 5:23 PM, Bart Van Assche wrote:
> On 02/11/2016 03:20 PM, Steve Wise wrote:
>> I guess you're making your point to not post code that has no user...
>
> How about using WARN_ONCE(cq->poll_ctx == IB_POLL_DIRECT) instead ?

That's fine with me.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]     ` <3e7261d1436d33320223d365974ff38945f0d558.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-11 22:59       ` Bart Van Assche
@ 2016-02-12  5:19       ` Devesh Sharma
       [not found]         ` <CANjDDBg=B33kRDTZ=NnZ-cZhNwXnpJ950dLy6qY0QZBjDaNisQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2016-02-15 21:05       ` Doug Ledford
  2 siblings, 1 reply; 22+ messages in thread
From: Devesh Sharma @ 2016-02-12  5:19 UTC (permalink / raw)
  To: Steve Wise
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

On Sat, Feb 6, 2016 at 2:43 AM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
>
> Add provider-specific drain_sq/drain_rq functions for providers needing
> special drain logic.
>
> Add static functions __ib_drain_sq() and __ib_drain_rq() which post noop
> WRs to the SQ or RQ and block until their completions are processed.
> This ensures the applications completions have all been processed.
>
> Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().
>
> Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
>  drivers/infiniband/core/verbs.c | 144 ++++++++++++++++++++++++++++++++++++++++
>  include/rdma/ib_verbs.h         |   5 ++
>  2 files changed, 149 insertions(+)
>
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 5af6d02..aed521e 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -1657,3 +1657,147 @@ next_page:
>         return i;
>  }
>  EXPORT_SYMBOL(ib_sg_to_pages);
> +
> +struct ib_drain_cqe {
> +       struct ib_cqe cqe;
> +       struct completion done;
> +};
> +
> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> +{
> +       struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> +                                               cqe);
> +
> +       complete(&cqe->done);
> +}
> +
> +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> +{
> +       if (cq->poll_ctx == IB_POLL_DIRECT)
> +               do
> +                       ib_process_cq_direct(cq, 1024);
> +               while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> +       else
> +               wait_for_completion(c);
> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for the SQ.
> + */
> +static void __ib_drain_sq(struct ib_qp *qp)
> +{
> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +       struct ib_drain_cqe sdrain;
> +       struct ib_send_wr swr = {}, *bad_swr;
> +       int ret;
> +
> +       swr.wr_cqe = &sdrain.cqe;
> +       sdrain.cqe.done = ib_drain_qp_done;
> +       init_completion(&sdrain.done);
> +
> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +               return;
> +       }

Should it query qp-state and then modify if needed? I am not liking the
the extra step which is highly unlikely in a normal usage conditions?
especially for a shared CQ it is really redundant.

> +
> +       ret = ib_post_send(qp, &swr, &bad_swr);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +               return;
> +       }
> +
> +       wait_for_drain(qp->send_cq, &sdrain.done);
> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for the RQ.
> + */
> +static void __ib_drain_rq(struct ib_qp *qp)
> +{
> +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +       struct ib_drain_cqe rdrain;
> +       struct ib_recv_wr rwr = {}, *bad_rwr;
> +       int ret;
> +
> +       rwr.wr_cqe = &rdrain.cqe;
> +       rdrain.cqe.done = ib_drain_qp_done;
> +       init_completion(&rdrain.done);
> +
> +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +               return;
> +       }
> +
> +       ret = ib_post_recv(qp, &rwr, &bad_rwr);
> +       if (ret) {
> +               WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +               return;
> +       }
> +
> +       wait_for_drain(qp->recv_cq, &rdrain.done);
> +}
> +
> +/**
> + * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
> + *                application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_sq().
> + *
> + * The consumer must ensure there is room in the CQ and SQ
> + * for a drain work requests.  Also the consumer must allocate the
> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_sq(struct ib_qp *qp)
> +{
> +       if (qp->device->drain_sq)
> +               qp->device->drain_sq(qp);
> +       else
> +               __ib_drain_sq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_sq);
> +
> +/**
> + * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
> + *                application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_rq().
> + *
> + * The consumer must ensure there is room in the CQ and RQ
> + * for a drain work requests.  Also the consumer must allocate the
> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_rq(struct ib_qp *qp)
> +{
> +       if (qp->device->drain_rq)
> +               qp->device->drain_rq(qp);
> +       else
> +               __ib_drain_rq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_rq);
> +
> +/**
> + * ib_drain_qp() - Block until all CQEs have been consumed by the
> + *                application on both the RQ and SQ.
> + * @qp:            queue pair to drain
> + *
> + * The consumer must ensure there is room in the CQ(s), SQ, and RQ
> + * for a drain work requests.  Also the consumer must allocate the
> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_qp(struct ib_qp *qp)
> +{
> +       ib_drain_sq(qp);
> +       ib_drain_rq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_qp);
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 284b00c..68b7e97 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1846,6 +1846,8 @@ struct ib_device {
>         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
>                                                       struct ib_mr_status *mr_status);
>         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
> +       void                       (*drain_rq)(struct ib_qp *qp);
> +       void                       (*drain_sq)(struct ib_qp *qp);
>
>         struct ib_dma_mapping_ops   *dma_ops;
>
> @@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
>                    int sg_nents,
>                    int (*set_page)(struct ib_mr *, u64));
>
> +void ib_drain_rq(struct ib_qp *qp);
> +void ib_drain_sq(struct ib_qp *qp);
> +void ib_drain_qp(struct ib_qp *qp);
>  #endif /* IB_VERBS_H */
> --
> 2.7.0
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: [PATCH 1/3] IB: new common API for draining queues
       [not found]         ` <CANjDDBg=B33kRDTZ=NnZ-cZhNwXnpJ950dLy6qY0QZBjDaNisQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2016-02-12 15:17           ` Steve Wise
  2016-02-13 15:41             ` Devesh Sharma
  0 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-02-12 15:17 UTC (permalink / raw)
  To: 'Devesh Sharma'
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

> > +/*
> > + * Post a WR and block until its completion is reaped for the SQ.
> > + */
> > +static void __ib_drain_sq(struct ib_qp *qp)
> > +{
> > +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> > +       struct ib_drain_cqe sdrain;
> > +       struct ib_send_wr swr = {}, *bad_swr;
> > +       int ret;
> > +
> > +       swr.wr_cqe = &sdrain.cqe;
> > +       sdrain.cqe.done = ib_drain_qp_done;
> > +       init_completion(&sdrain.done);
> > +
> > +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> > +       if (ret) {
> > +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> > +               return;
> > +       }
> 
> Should it query qp-state and then modify if needed? I am not liking the
> the extra step which is highly unlikely in a normal usage conditions?
> especially for a shared CQ it is really redundant.
>

Hey Devesh, on a previous round of reviews for this series, I thought you agreed that this was fine? 

See: http://www.spinics.net/lists/linux-rdma/msg33202.html

My personal opinion is that calling query_qp() and only modifying if needed is as much work as just calling modify since ERR->ERR is basically a noop.

Steve.

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
  2016-02-12 15:17           ` Steve Wise
@ 2016-02-13 15:41             ` Devesh Sharma
  0 siblings, 0 replies; 22+ messages in thread
From: Devesh Sharma @ 2016-02-13 15:41 UTC (permalink / raw)
  To: Steve Wise
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

Hi Steve,


On Fri, Feb 12, 2016 at 8:47 PM, Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> wrote:
>> > +/*
>> > + * Post a WR and block until its completion is reaped for the SQ.
>> > + */
>> > +static void __ib_drain_sq(struct ib_qp *qp)
>> > +{
>> > +       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
>> > +       struct ib_drain_cqe sdrain;
>> > +       struct ib_send_wr swr = {}, *bad_swr;
>> > +       int ret;
>> > +
>> > +       swr.wr_cqe = &sdrain.cqe;
>> > +       sdrain.cqe.done = ib_drain_qp_done;
>> > +       init_completion(&sdrain.done);
>> > +
>> > +       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
>> > +       if (ret) {
>> > +               WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
>> > +               return;
>> > +       }
>>
>> Should it query qp-state and then modify if needed? I am not liking the
>> the extra step which is highly unlikely in a normal usage conditions?
>> especially for a shared CQ it is really redundant.
>>
>
> Hey Devesh, on a previous round of reviews for this series, I thought you agreed that this was fine?

Yes, I agreed; I was trying to give it a second thought and just posted
what I had. :)

>
> See: http://www.spinics.net/lists/linux-rdma/msg33202.html
>
> My personal opinion is that calling query_qp() and only modifying if needed is as much work as just calling modify since ERR->ERR is basically a noop.

I agree that query_qp() is an overkill.

>
> Steve.
>
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v3 0/3] new ib_drain_qp() API
       [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
                     ` (2 preceding siblings ...)
  2016-02-05 21:13   ` [PATCH 1/3] IB: new common API for draining queues Steve Wise
@ 2016-02-13 16:10   ` Leon Romanovsky
       [not found]     ` <20160213161049.GB14741-2ukJVAZIZ/Y@public.gmane.org>
  3 siblings, 1 reply; 22+ messages in thread
From: Leon Romanovsky @ 2016-02-13 16:10 UTC (permalink / raw)
  To: Steve Wise
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

On Thu, Feb 11, 2016 at 02:44:06PM -0800, Steve Wise wrote:
> This series creates new helper API functions for draining a queue pair.
> It is a rework of an original patch created by Christoph Hellwig as
> part of the CQ API rework and was dropped to be resubmitted by me with
> iw_cxgb4 support.
> 
> Original thread: http://www.spinics.net/lists/linux-rdma/msg30296.html

Hi Steve,
For future submission, can you please use versioning on all patches
submitted in the series and not only in cover letter?

In such way, internal filtering of new emails will be more informative.

Thanks.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v3 0/3] new ib_drain_qp() API
       [not found]     ` <20160213161049.GB14741-2ukJVAZIZ/Y@public.gmane.org>
@ 2016-02-13 16:32       ` Christoph Hellwig
       [not found]         ` <20160213163253.GA8843-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Hellwig @ 2016-02-13 16:32 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

On Sat, Feb 13, 2016 at 06:10:49PM +0200, Leon Romanovsky wrote:
> 
> Hi Steve,
> For future submission, can you please use versioning on all patches
> submitted in the series and not only in cover letter?
> 
> In such way, internal filtering of new emails will be more informative.

That's very unusual and not the git-send-email default for a reason.
I'd suggest you fix your 'internal filtering' instead.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v3 0/3] new ib_drain_qp() API
       [not found]         ` <20160213163253.GA8843-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
@ 2016-02-13 16:50           ` Leon Romanovsky
       [not found]             ` <20160213165001.GC14741-2ukJVAZIZ/Y@public.gmane.org>
  0 siblings, 1 reply; 22+ messages in thread
From: Leon Romanovsky @ 2016-02-13 16:50 UTC (permalink / raw)
  To: Christoph Hellwig
  Cc: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

On Sat, Feb 13, 2016 at 08:32:53AM -0800, Christoph Hellwig wrote:
> On Sat, Feb 13, 2016 at 06:10:49PM +0200, Leon Romanovsky wrote:
> > 
> > Hi Steve,
> > For future submission, can you please use versioning on all patches
> > submitted in the series and not only in cover letter?
> > 
> > In such way, internal filtering of new emails will be more informative.
> 
> That's very unusual and not the git-send-email default for a reason.
> I'd suggest you fix your 'internal filtering' instead.

From my perspective there are many top developers which are using
proposed format as you said "for a reason".

My flow is based on mutt mail reader and I'll be glad to hear how
it can be fixed.

Can you suggest how I am supposed "to fix" my filtering?
Right now, the flow is:
1. Open mutt
2. Navigate to linux-rdma mailbox
3. Hit "l" (limit open)
4. Write "new"
5. Get only new (unread) mails.
6. Read all emails and for Steve's mails only, I need to disable
limit in order to tell whether it is a new version of the patches or an old one.

P.S. get_maintainer.pl script has strange defaults too, it doesn't say
that we need to follow blindly them.

> --
> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
> the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v3 0/3] new ib_drain_qp() API
       [not found]             ` <20160213165001.GC14741-2ukJVAZIZ/Y@public.gmane.org>
@ 2016-02-14 14:51               ` Steve Wise
       [not found]                 ` <56C0946E.50100-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
  0 siblings, 1 reply; 22+ messages in thread
From: Steve Wise @ 2016-02-14 14:51 UTC (permalink / raw)
  To: Christoph Hellwig, Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ



On 2/13/2016 10:50 AM, Leon Romanovsky wrote:
> On Sat, Feb 13, 2016 at 08:32:53AM -0800, Christoph Hellwig wrote:
>> On Sat, Feb 13, 2016 at 06:10:49PM +0200, Leon Romanovsky wrote:
>>> Hi Steve,
>>> For future submission, can you please use versioning on all patches
>>> submitted in the series and not only in cover letter?
>>>
>>> In such way, internal filtering of new emails will be more informative.
>> That's very unusual and not the git-send-email default for a reason.
>> I'd suggest you fix your 'internal filtering' instead.
> From my perspective there are many top developers which are using
> proposed format as you said "for a reason".
>
> My flow is based on mutt mail reader and I'll be glad to hear how
> it can be fixed.
>
> Can you suggest me how do I suppose "to fix" my filtering?
> Right now, the flow is:
> 1. Open mutt
> 2. Navigate to linux-rdma mailbox
> 3. Hit "l" (limit open)
> 4. Write "new"
> 5. Get only new (unread) mails.
> 6. Read all emails and for Steve's mails only, I need to disable
> limit in order to say if it is new version of patches or old ones.
>
> P.S. get_maintainer.pl script has strange defaults too, it doesn't say
> that we need to follow blindly them.
>

The patches are threaded in that they all reference the cover letter 
email.  Is there a way you can thread your view with mutt?


--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v3 0/3] new ib_drain_qp() API
       [not found]                 ` <56C0946E.50100-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
@ 2016-02-14 14:58                   ` Christoph Hellwig
  0 siblings, 0 replies; 22+ messages in thread
From: Christoph Hellwig @ 2016-02-14 14:58 UTC (permalink / raw)
  To: Steve Wise
  Cc: Christoph Hellwig, Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA,
	bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

On Sun, Feb 14, 2016 at 08:51:26AM -0600, Steve Wise wrote:
> The patches are threaded in that they all reference the cover letter email.
> Is there a way you can thread your view with mutt?

mutt works perfectly fine threaded.  I'm happy to share the muttrc
I use to read dozens of mailing lists every day.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]     ` <3e7261d1436d33320223d365974ff38945f0d558.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
  2016-02-11 22:59       ` Bart Van Assche
  2016-02-12  5:19       ` Devesh Sharma
@ 2016-02-15 21:05       ` Doug Ledford
       [not found]         ` <56C23DA8.40905-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  2 siblings, 1 reply; 22+ messages in thread
From: Doug Ledford @ 2016-02-15 21:05 UTC (permalink / raw)
  To: Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA
  Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ

[-- Attachment #1: Type: text/plain, Size: 7628 bytes --]

On 02/05/2016 04:13 PM, Steve Wise wrote:
> From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
> 
> Add provider-specific drain_sq/drain_rq functions for providers needing
> special drain logic.
> 
> Add static functions __ib_drain_sq() and __ib_drain_rq() which post noop
> WRs to the SQ or RQ and block until their completions are processed.
> This ensures the applications completions have all been processed.

Except it doesn't, comments inline below...and applications is
possessive, so needs a '

> Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().
> 
> Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
> Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> ---
>  drivers/infiniband/core/verbs.c | 144 ++++++++++++++++++++++++++++++++++++++++
>  include/rdma/ib_verbs.h         |   5 ++
>  2 files changed, 149 insertions(+)
> 
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 5af6d02..aed521e 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -1657,3 +1657,147 @@ next_page:
>  	return i;
>  }
>  EXPORT_SYMBOL(ib_sg_to_pages);
> +
> +struct ib_drain_cqe {
> +	struct ib_cqe cqe;
> +	struct completion done;
> +};
> +
> +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> +{
> +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> +						cqe);
> +
> +	complete(&cqe->done);
> +}
> +
> +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> +{
> +	if (cq->poll_ctx == IB_POLL_DIRECT)
> +		do
> +			ib_process_cq_direct(cq, 1024);
> +		while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> +	else
> +		wait_for_completion(c);
> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for the SQ.
> + */
> +static void __ib_drain_sq(struct ib_qp *qp)
> +{
> +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +	struct ib_drain_cqe sdrain;
> +	struct ib_send_wr swr = {}, *bad_swr;
> +	int ret;
> +
> +	swr.wr_cqe = &sdrain.cqe;
> +	sdrain.cqe.done = ib_drain_qp_done;
> +	init_completion(&sdrain.done);
> +
> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +		return;
> +	}

Set QP to ERR state...OK...

> +	ret = ib_post_send(qp, &swr, &bad_swr);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> +		return;
> +	}

Post command to QP in ERR state (admittedly, I never do this and hadn't
even thought about whether or not it is allowed...obviously it is, but
my initial reaction would have been "won't ib_post_send return an error
when you try to post to a QP in ERR state?")....OK...

> +	wait_for_drain(qp->send_cq, &sdrain.done);

Wait for your posted WR to complete...OK...

As I mentioned in my comment above, I would have thought that the
attempt to post a send to a QP in ERR state would have returned an
error.  It must not or else this patch is worthless because of the order
of actions.  What that highlights though, is that this code will drain a
QP, but only if the caller has taken the time to stop all possible
contexts that might run on other cores and post commands to the QP.
Those commands will error out, but the caller must, none the less, take
steps to block other contexts from sending or else this drain is
useless.  That might be fine for the API, but it should be clearly
documented, and currently it isn't.

> +}
> +
> +/*
> + * Post a WR and block until its completion is reaped for the RQ.
> + */
> +static void __ib_drain_rq(struct ib_qp *qp)
> +{
> +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> +	struct ib_drain_cqe rdrain;
> +	struct ib_recv_wr rwr = {}, *bad_rwr;
> +	int ret;
> +
> +	rwr.wr_cqe = &rdrain.cqe;
> +	rdrain.cqe.done = ib_drain_qp_done;
> +	init_completion(&rdrain.done);
> +
> +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> +	if (ret) {
> +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> +		return;
> +	}
> +
> +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
> +	if (ret) {
> > +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> +		return;
> +	}
> +
> +	wait_for_drain(qp->recv_cq, &rdrain.done);
> +}
> +
> +/**
> + * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
> + *		   application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_sq().
> + *
> + * The consumer must ensure there is room in the CQ and SQ
> + * for a drain work requests.  Also the consumer must allocate the

Either requests should be singular, or you should remove the article 'a'.

> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_sq(struct ib_qp *qp)
> +{
> +	if (qp->device->drain_sq)
> +		qp->device->drain_sq(qp);
> +	else
> +		__ib_drain_sq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_sq);
> +
> +/**
> + * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
> + *		   application.
> + * @qp:            queue pair to drain
> + *
> + * If the device has a provider-specific drain function, then
> + * call that.  Otherwise call the generic drain function
> + * __ib_drain_rq().
> + *
> + * The consumer must ensure there is room in the CQ and RQ
> + * for a drain work requests.  Also the consumer must allocate the

Ditto...

> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_rq(struct ib_qp *qp)
> +{
> +	if (qp->device->drain_rq)
> +		qp->device->drain_rq(qp);
> +	else
> +		__ib_drain_rq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_rq);
> +
> +/**
> + * ib_drain_qp() - Block until all CQEs have been consumed by the
> + *		   application on both the RQ and SQ.
> + * @qp:            queue pair to drain
> + *
> + * The consumer must ensure there is room in the CQ(s), SQ, and RQ
> + * for a drain work requests.  Also the consumer must allocate the

Ditto...

> + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> + */
> +void ib_drain_qp(struct ib_qp *qp)
> +{
> +	ib_drain_sq(qp);
> +	ib_drain_rq(qp);
> +}
> +EXPORT_SYMBOL(ib_drain_qp);
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 284b00c..68b7e97 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1846,6 +1846,8 @@ struct ib_device {
>  	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
>  						      struct ib_mr_status *mr_status);
>  	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
> +	void			   (*drain_rq)(struct ib_qp *qp);
> +	void			   (*drain_sq)(struct ib_qp *qp);
>  
>  	struct ib_dma_mapping_ops   *dma_ops;
>  
> @@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
>  		   int sg_nents,
>  		   int (*set_page)(struct ib_mr *, u64));
>  
> +void ib_drain_rq(struct ib_qp *qp);
> +void ib_drain_sq(struct ib_qp *qp);
> +void ib_drain_qp(struct ib_qp *qp);
>  #endif /* IB_VERBS_H */
> 


-- 
Doug Ledford <dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
              GPG KeyID: 0E572FDD



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 884 bytes --]

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: [PATCH 1/3] IB: new common API for draining queues
       [not found]         ` <56C23DA8.40905-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
@ 2016-02-15 21:20           ` Steve Wise
  2016-02-16 11:00           ` Sagi Grimberg
  1 sibling, 0 replies; 22+ messages in thread
From: Steve Wise @ 2016-02-15 21:20 UTC (permalink / raw)
  To: 'Doug Ledford', linux-rdma-u79uwXL29TY76Z2rM5mHXA
  Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ



> -----Original Message-----
> From: Doug Ledford [mailto:dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org]
> Sent: Monday, February 15, 2016 3:06 PM
> To: Steve Wise; linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
> Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org
> Subject: Re: [PATCH 1/3] IB: new common API for draining queues
> 
> On 02/05/2016 04:13 PM, Steve Wise wrote:
> > From: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+p6NamaJ0bNTAL8bYrjMMd8@public.gmane.org>
> >
> > Add provider-specific drain_sq/drain_rq functions for providers needing
> > special drain logic.
> >
> > Add static functions __ib_drain_sq() and __ib_drain_rq() which post noop
> > WRs to the SQ or RQ and block until their completions are processed.
> > This ensures the applications completions have all been processed.
> 
> Except it doesn't, comments inline below...and applications is
> possessive, so needs a '
> 
> > Add API functions ib_drain_sq(), ib_drain_rq(), and ib_drain_qp().
> >
> > Reviewed-by: Chuck Lever <chuck.lever-QHcLZuEGTsvQT0dZR+AlfA@public.gmane.org>
> > Signed-off-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
> > ---
> >  drivers/infiniband/core/verbs.c | 144 ++++++++++++++++++++++++++++++++++++++++
> >  include/rdma/ib_verbs.h         |   5 ++
> >  2 files changed, 149 insertions(+)
> >
> > diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> > index 5af6d02..aed521e 100644
> > --- a/drivers/infiniband/core/verbs.c
> > +++ b/drivers/infiniband/core/verbs.c
> > @@ -1657,3 +1657,147 @@ next_page:
> >  	return i;
> >  }
> >  EXPORT_SYMBOL(ib_sg_to_pages);
> > +
> > +struct ib_drain_cqe {
> > +	struct ib_cqe cqe;
> > +	struct completion done;
> > +};
> > +
> > +static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
> > +{
> > +	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
> > +						cqe);
> > +
> > +	complete(&cqe->done);
> > +}
> > +
> > +static void wait_for_drain(struct ib_cq *cq, struct completion *c)
> > +{
> > +	if (cq->poll_ctx == IB_POLL_DIRECT)
> > +		do
> > +			ib_process_cq_direct(cq, 1024);
> > +		while (!wait_for_completion_timeout(c, msecs_to_jiffies(100)));
> > +	else
> > +		wait_for_completion(c);
> > +}
> > +
> > +/*
> > + * Post a WR and block until its completion is reaped for the SQ.
> > + */
> > +static void __ib_drain_sq(struct ib_qp *qp)
> > +{
> > +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> > +	struct ib_drain_cqe sdrain;
> > +	struct ib_send_wr swr = {}, *bad_swr;
> > +	int ret;
> > +
> > +	swr.wr_cqe = &sdrain.cqe;
> > +	sdrain.cqe.done = ib_drain_qp_done;
> > +	init_completion(&sdrain.done);
> > +
> > +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> > +	if (ret) {
> > +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> > +		return;
> > +	}
> 
> Set QP to ERR state...OK...
> 
> > +	ret = ib_post_send(qp, &swr, &bad_swr);
> > +	if (ret) {
> > +		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
> > +		return;
> > +	}
> 
> Post command to QP in ERR state (admittedly, I never do this and hadn't
> even thought about whether or not it is allowed...obviously it is, but
> my initial reaction would have been "won't ib_post_send return an error
> when you try to post to a QP in ERR state?")....OK...
> 

IBTA spec sez a post to a QP in ERR will result in a CQE in the CQ with status FLUSHED.  IE it mandates this behavior.

> > +	wait_for_drain(qp->send_cq, &sdrain.done);
> 
> Wait for your posted WR to complete...OK...
> 
> As I mentioned in my comment above, I would have thought that the
> attempt to post a send to a QP in ERR state would have returned an
> error.  It must not or else this patch is worthless because of the order
> of actions.  What that highlights though, is that this code will drain a
> QP, but only if the caller has taken the time to stop all possible
> contexts that might run on other cores and post commands to the QP.
> Those commands will error out, but the caller must, none the less, take
> steps to block other contexts from sending or else this drain is
> useless.  That might be fine for the API, but it should be clearly
> documented, and currently it isn't.
> 

I'll clarify this in the function header comments.

> > +}
> > +
> > +/*
> > + * Post a WR and block until its completion is reaped for the RQ.
> > + */
> > +static void __ib_drain_rq(struct ib_qp *qp)
> > +{
> > +	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
> > +	struct ib_drain_cqe rdrain;
> > +	struct ib_recv_wr rwr = {}, *bad_rwr;
> > +	int ret;
> > +
> > +	rwr.wr_cqe = &rdrain.cqe;
> > +	rdrain.cqe.done = ib_drain_qp_done;
> > +	init_completion(&rdrain.done);
> > +
> > +	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
> > +	if (ret) {
> > +		WARN_ONCE(ret, "failed to drain QP: %d\n", ret);
> > +		return;
> > +	}
> > +
> > +	ret = ib_post_recv(qp, &rwr, &bad_rwr);
> > +	if (ret) {
> > > +		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
> > +		return;
> > +	}
> > +
> > +	wait_for_drain(qp->recv_cq, &rdrain.done);
> > +}
> > +
> > +/**
> > + * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
> > + *		   application.
> > + * @qp:            queue pair to drain
> > + *
> > + * If the device has a provider-specific drain function, then
> > + * call that.  Otherwise call the generic drain function
> > + * __ib_drain_sq().
> > + *
> > + * The consumer must ensure there is room in the CQ and SQ
> > + * for a drain work requests.  Also the consumer must allocate the
> 
> Either requests should be singular, or you should remove the article 'a'.
> 

agreed (and agree to all the dittos)


> > + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> > + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> > + */
> > +void ib_drain_sq(struct ib_qp *qp)
> > +{
> > +	if (qp->device->drain_sq)
> > +		qp->device->drain_sq(qp);
> > +	else
> > +		__ib_drain_sq(qp);
> > +}
> > +EXPORT_SYMBOL(ib_drain_sq);
> > +
> > +/**
> > + * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
> > + *		   application.
> > + * @qp:            queue pair to drain
> > + *
> > + * If the device has a provider-specific drain function, then
> > + * call that.  Otherwise call the generic drain function
> > + * __ib_drain_rq().
> > + *
> > + * The consumer must ensure there is room in the CQ and RQ
> > + * for a drain work requests.  Also the consumer must allocate the
> 
> Ditto...
> 
> > + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> > + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> > + */
> > +void ib_drain_rq(struct ib_qp *qp)
> > +{
> > +	if (qp->device->drain_rq)
> > +		qp->device->drain_rq(qp);
> > +	else
> > +		__ib_drain_rq(qp);
> > +}
> > +EXPORT_SYMBOL(ib_drain_rq);
> > +
> > +/**
> > + * ib_drain_qp() - Block until all CQEs have been consumed by the
> > + *		   application on both the RQ and SQ.
> > + * @qp:            queue pair to drain
> > + *
> > + * The consumer must ensure there is room in the CQ(s), SQ, and RQ
> > + * for a drain work requests.  Also the consumer must allocate the
> 
> Ditto...
> 
> > + * CQ using ib_alloc_cq().  It is up to the consumer to handle concurrency
> > + * issues if the CQs are using the IB_POLL_DIRECT polling context.
> > + */
> > +void ib_drain_qp(struct ib_qp *qp)
> > +{
> > +	ib_drain_sq(qp);
> > +	ib_drain_rq(qp);
> > +}
> > +EXPORT_SYMBOL(ib_drain_qp);
> > diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> > index 284b00c..68b7e97 100644
> > --- a/include/rdma/ib_verbs.h
> > +++ b/include/rdma/ib_verbs.h
> > @@ -1846,6 +1846,8 @@ struct ib_device {
> >  	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
> >  						      struct ib_mr_status *mr_status);
> >  	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
> > +	void			   (*drain_rq)(struct ib_qp *qp);
> > +	void			   (*drain_sq)(struct ib_qp *qp);
> >
> >  	struct ib_dma_mapping_ops   *dma_ops;
> >
> > @@ -3094,4 +3096,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
> >  		   int sg_nents,
> >  		   int (*set_page)(struct ib_mr *, u64));
> >
> > +void ib_drain_rq(struct ib_qp *qp);
> > +void ib_drain_sq(struct ib_qp *qp);
> > +void ib_drain_qp(struct ib_qp *qp);
> >  #endif /* IB_VERBS_H */
> >
> 
> 
> --
> Doug Ledford <dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
>               GPG KeyID: 0E572FDD
> 


--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/3] IB: new common API for draining queues
       [not found]         ` <56C23DA8.40905-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  2016-02-15 21:20           ` Steve Wise
@ 2016-02-16 11:00           ` Sagi Grimberg
  1 sibling, 0 replies; 22+ messages in thread
From: Sagi Grimberg @ 2016-02-16 11:00 UTC (permalink / raw)
  To: Doug Ledford, Steve Wise, linux-rdma-u79uwXL29TY76Z2rM5mHXA
  Cc: bart.vanassche-XdAiOPVOjttBDgjK7y7TUQ


> As I mentioned in my comment above, I would have thought that the
> attempt to post a send to a QP in ERR state would have returned an
> error.  It must not or else this patch is worthless because of the order
> of actions.  What that highlights though, is that this code will drain a
> QP, but only if the caller has taken the time to stop all possible
> contexts that might run on other cores and post commands to the QP.
> Those commands will error out, but the caller must nonetheless take
> steps to block other contexts from sending or else this drain is
> useless.  That might be fine for the API, but it should be clearly
> documented, and currently it isn't.

I agree — it should be documented that if other contexts post
concurrently, then the QP drain is not guaranteed. I think that's a
valid requirement...
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2016-02-16 11:00 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-11 22:44 [PATCH v3 0/3] new ib_drain_qp() API Steve Wise
     [not found] ` <cover.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2016-01-14 18:24   ` [PATCH 2/3] iw_cxgb4: add queue drain functions Steve Wise
2016-01-27 20:09   ` [PATCH 3/3] IB/srp: use ib_drain_rq() Steve Wise
     [not found]     ` <c11baa726a6440549ab46b9525116d9fe74eb5a0.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2016-02-11 23:01       ` Bart Van Assche
2016-02-05 21:13   ` [PATCH 1/3] IB: new common API for draining queues Steve Wise
     [not found]     ` <3e7261d1436d33320223d365974ff38945f0d558.1455230646.git.swise-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2016-02-11 22:59       ` Bart Van Assche
     [not found]         ` <56BD1248.80805-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
2016-02-11 23:15           ` Bart Van Assche
     [not found]             ` <56BD1624.4090309-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
2016-02-11 23:20               ` Steve Wise
2016-02-11 23:23                 ` Bart Van Assche
     [not found]                   ` <56BD1800.9050508-XdAiOPVOjttBDgjK7y7TUQ@public.gmane.org>
2016-02-12  0:32                     ` Steve Wise
2016-02-11 23:18           ` Steve Wise
2016-02-12  5:19       ` Devesh Sharma
     [not found]         ` <CANjDDBg=B33kRDTZ=NnZ-cZhNwXnpJ950dLy6qY0QZBjDaNisQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2016-02-12 15:17           ` Steve Wise
2016-02-13 15:41             ` Devesh Sharma
2016-02-15 21:05       ` Doug Ledford
     [not found]         ` <56C23DA8.40905-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2016-02-15 21:20           ` Steve Wise
2016-02-16 11:00           ` Sagi Grimberg
2016-02-13 16:10   ` [PATCH v3 0/3] new ib_drain_qp() API Leon Romanovsky
     [not found]     ` <20160213161049.GB14741-2ukJVAZIZ/Y@public.gmane.org>
2016-02-13 16:32       ` Christoph Hellwig
     [not found]         ` <20160213163253.GA8843-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2016-02-13 16:50           ` Leon Romanovsky
     [not found]             ` <20160213165001.GC14741-2ukJVAZIZ/Y@public.gmane.org>
2016-02-14 14:51               ` Steve Wise
     [not found]                 ` <56C0946E.50100-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
2016-02-14 14:58                   ` Christoph Hellwig

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.