From: Jason Gunthorpe <jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
To: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org>
Subject: [PATCH rdma-core 04/14] cxgb4: Update to use new udma write barriers
Date: Thu, 16 Feb 2017 12:22:59 -0700 [thread overview]
Message-ID: <1487272989-8215-5-git-send-email-jgunthorpe@obsidianresearch.com> (raw)
In-Reply-To: <1487272989-8215-1-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
Based on help from Steve the barriers here are changed to consistently
bracket WC memory writes with wc_wmb() like other drivers do.
This allows some of the wc_wmb() calls that were not related to WC
memory be downgraded to wmb().
The driver was probably correct (at least for x86-64) but did not
follow the idiom established by the other drivers for working with
WC memory.
Signed-off-by: Jason Gunthorpe <jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
---
providers/cxgb4/qp.c | 20 ++++++++++++++++++--
providers/cxgb4/t4.h | 48 +++++++++++++++++++++++++++++++++++-------------
providers/cxgb4/verbs.c | 2 ++
3 files changed, 55 insertions(+), 15 deletions(-)
diff --git a/providers/cxgb4/qp.c b/providers/cxgb4/qp.c
index 700fe02c77c269..45eaca45029e60 100644
--- a/providers/cxgb4/qp.c
+++ b/providers/cxgb4/qp.c
@@ -52,7 +52,12 @@ static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
dst = (u64 *)((u8 *)wq->sq.queue + wq->sq.wq_pidx * T4_EQ_ENTRY_SIZE);
if (t4_sq_onchip(wq)) {
len16 = align(len16, 4);
- wc_wmb();
+
+ /* In onchip mode the copy below will be made to WC memory and
+ * could trigger DMA. In offchip mode the copy below only
+ * queues the WQE, DMA cannot start until t4_ring_sq_db
+ * happens */
+ mmio_wc_start();
}
while (len16) {
*dst++ = *src++;
@@ -62,7 +67,13 @@ static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
if (dst == (u64 *)&wq->sq.queue[wq->sq.size])
dst = (u64 *)wq->sq.queue;
len16--;
+
+ /* NOTE len16 cannot be large enough to write to the
+ same sq.queue memory twice in this loop */
}
+
+ if (t4_sq_onchip(wq))
+ mmio_flush_writes();
}
static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
@@ -274,7 +285,9 @@ static void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx)
int mask;
int __attribute__((unused)) ret;
- wc_wmb();
+ /* FIXME: Why do we need this barrier if the kernel is going to
+ trigger the DMA? */
+ udma_to_device_barrier();
if (qid == qhp->wq.sq.qid) {
attr.sq_psn = idx;
mask = IBV_QP_SQ_PSN;
@@ -385,8 +398,11 @@ int c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
len16, wqe);
} else
ring_kernel_db(qhp, qhp->wq.sq.qid, idx);
+ /* This write is only for debugging, the value does not matter for DMA
+ */
qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \
(qhp->wq.sq.wq_pidx);
+
pthread_spin_unlock(&qhp->lock);
return err;
}
diff --git a/providers/cxgb4/t4.h b/providers/cxgb4/t4.h
index a457e2f2921727..a845a367cfbb8c 100644
--- a/providers/cxgb4/t4.h
+++ b/providers/cxgb4/t4.h
@@ -317,9 +317,12 @@ enum {
};
struct t4_sq {
+ /* queue is either host memory or WC MMIO memory if
+ * t4_sq_onchip(). */
union t4_wr *queue;
struct t4_swsqe *sw_sq;
struct t4_swsqe *oldest_read;
+ /* udb is either UC or WC MMIO memory depending on device version. */
volatile u32 *udb;
size_t memsize;
u32 qid;
@@ -367,12 +370,6 @@ struct t4_wq {
u8 *db_offp;
};
-static inline void t4_ma_sync(struct t4_wq *wq, int page_size)
-{
- wc_wmb();
- *((volatile u32 *)wq->sq.ma_sync) = 1;
-}
-
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
@@ -444,8 +441,11 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
- if (!wq->error)
+ if (!wq->error) {
+ /* This write is only for debugging, the value does not matter
+ * for DMA */
wq->sq.queue[wq->sq.size].status.host_pidx = (wq->sq.pidx);
+ }
}
static inline void t4_sq_consume(struct t4_wq *wq)
@@ -457,10 +457,14 @@ static inline void t4_sq_consume(struct t4_wq *wq)
if (++wq->sq.cidx == wq->sq.size)
wq->sq.cidx = 0;
assert((wq->sq.cidx != wq->sq.pidx) || wq->sq.in_use == 0);
- if (!wq->error)
+ if (!wq->error){
+ /* This write is only for debugging, the value does not matter
+ * for DMA */
wq->sq.queue[wq->sq.size].status.host_cidx = wq->sq.cidx;
+ }
}
+/* Copies to WC MMIO memory */
static void copy_wqe_to_udb(volatile u32 *udb_offset, void *wqe)
{
u64 *src, *dst;
@@ -482,8 +486,8 @@ extern int t5_en_wc;
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16,
union t4_wr *wqe)
{
- wc_wmb();
if (!t4) {
+ mmio_wc_start();
if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) {
PDBG("%s: WC wq->sq.pidx = %d; len16=%d\n",
__func__, wq->sq.pidx, len16);
@@ -494,30 +498,45 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16,
writel(QID_V(wq->sq.bar2_qid) | PIDX_T5_V(inc),
wq->sq.udb);
}
- wc_wmb();
+ /* udb is WC for > t4 devices */
+ mmio_flush_writes();
return;
}
+
+ udma_to_device_barrier();
if (ma_wr) {
if (t4_sq_onchip(wq)) {
int i;
+
+ mmio_wc_start();
for (i = 0; i < 16; i++)
*(volatile u32 *)&wq->sq.queue[wq->sq.size].flits[2+i] = i;
+ mmio_flush_writes();
}
} else {
if (t4_sq_onchip(wq)) {
int i;
+
+ mmio_wc_start();
for (i = 0; i < 16; i++)
+ /* FIXME: What is this supposed to be doing?
+ * Writing to the same address multiple times
+ * with WC memory is not guaranteed to
+ * generate any more than one TLP. Why isn't
+ * writing to WC memory marked volatile? */
*(u32 *)&wq->sq.queue[wq->sq.size].flits[2] = i;
+ mmio_flush_writes();
}
}
+ /* udb is UC for t4 devices */
writel(QID_V(wq->sq.qid & wq->qid_mask) | PIDX_V(inc), wq->sq.udb);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16,
union t4_recv_wr *wqe)
{
- wc_wmb();
if (!t4) {
+ mmio_wc_start();
if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) {
PDBG("%s: WC wq->rq.pidx = %d; len16=%d\n",
__func__, wq->rq.pidx, len16);
@@ -528,9 +547,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16,
writel(QID_V(wq->rq.bar2_qid) | PIDX_T5_V(inc),
wq->rq.udb);
}
- wc_wmb();
+ /* udb is WC for > t4 devices */
+ mmio_flush_writes();
return;
}
+ /* udb is UC for t4 devices */
+ udma_to_device_barrier();
writel(QID_V(wq->rq.qid & wq->qid_mask) | PIDX_V(inc), wq->rq.udb);
}
@@ -655,7 +677,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
cq->error = 1;
assert(0);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
- rmb();
+ udma_from_device_barrier();
*cqe = &cq->queue[cq->cidx];
ret = 0;
} else
diff --git a/providers/cxgb4/verbs.c b/providers/cxgb4/verbs.c
index 32ed44c63d8402..e7620dc02ae0a7 100644
--- a/providers/cxgb4/verbs.c
+++ b/providers/cxgb4/verbs.c
@@ -573,6 +573,8 @@ static void reset_qp(struct c4iw_qp *qhp)
qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
qhp->wq.sq.oldest_read = NULL;
memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
+ if (t4_sq_onchip(&qhp->wq))
+ mmio_flush_writes();
memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
}
--
2.7.4
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
next prev parent reply other threads:[~2017-02-16 19:22 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-02-16 19:22 [PATCH rdma-core 00/14] Revise the DMA barrier macros in ibverbs Jason Gunthorpe
[not found] ` <1487272989-8215-1-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-16 19:22 ` [PATCH rdma-core 01/14] mlx5: Use stdatomic for the in_use barrier Jason Gunthorpe
2017-02-16 19:22 ` [PATCH rdma-core 02/14] Provide new names for the CPU barriers related to DMA Jason Gunthorpe
[not found] ` <1487272989-8215-3-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-16 22:07 ` Steve Wise
2017-02-17 16:37 ` Jason Gunthorpe
2017-02-16 19:22 ` [PATCH rdma-core 03/14] cxgb3: Update to use new udma write barriers Jason Gunthorpe
[not found] ` <1487272989-8215-4-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-16 21:20 ` Steve Wise
2017-02-16 21:45 ` Jason Gunthorpe
[not found] ` <20170216214527.GA13616-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-16 22:01 ` Steve Wise
2017-02-16 19:22 ` Jason Gunthorpe [this message]
[not found] ` <1487272989-8215-5-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-17 20:16 ` [PATCH rdma-core 04/14] cxgb4: " Steve Wise
2017-02-16 19:23 ` [PATCH rdma-core 05/14] hns: " Jason Gunthorpe
2017-02-16 19:23 ` [PATCH rdma-core 06/14] i40iw: Get rid of unique barrier macros Jason Gunthorpe
[not found] ` <1487272989-8215-7-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-01 17:29 ` Shiraz Saleem
[not found] ` <20170301172920.GA11340-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-01 17:55 ` Jason Gunthorpe
[not found] ` <20170301175521.GB14791-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-01 22:14 ` Shiraz Saleem
[not found] ` <20170301221420.GA18548-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-01 23:05 ` Jason Gunthorpe
[not found] ` <20170301230506.GB2820-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-03 21:45 ` Shiraz Saleem
[not found] ` <20170303214514.GA12996-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-03 22:22 ` Jason Gunthorpe
[not found] ` <20170303222244.GA678-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-06 19:16 ` Shiraz Saleem
[not found] ` <20170306191631.GB34252-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-06 19:40 ` Jason Gunthorpe
[not found] ` <20170306194052.GB31672-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-07 22:46 ` Shiraz Saleem
[not found] ` <20170307224622.GA45028-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-07 22:50 ` Jason Gunthorpe
[not found] ` <20170307225027.GA20858-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-07 23:01 ` Shiraz Saleem
[not found] ` <20170307230121.GA52428-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-07 23:11 ` Jason Gunthorpe
[not found] ` <20170307231145.GB20858-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-07 23:23 ` Shiraz Saleem
2017-03-06 18:18 ` Shiraz Saleem
[not found] ` <20170306181808.GA34252-GOXS9JX10wfOxmVO0tvppfooFf0ArEBIu+b9c/7xato@public.gmane.org>
2017-03-06 19:07 ` Jason Gunthorpe
[not found] ` <20170306190751.GA30663-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-07 23:16 ` Shiraz Saleem
2017-02-16 19:23 ` [PATCH rdma-core 07/14] mlx4: Update to use new udma write barriers Jason Gunthorpe
[not found] ` <1487272989-8215-8-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-20 17:46 ` Yishai Hadas
[not found] ` <206559e5-0488-f6d5-c4ec-bf560e0c3ba6-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-02-21 18:14 ` Jason Gunthorpe
[not found] ` <20170221181407.GA13138-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-06 14:57 ` Yishai Hadas
[not found] ` <45d2b7da-9ad6-6b37-d0b2-00f7807966b4-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-03-06 17:31 ` Jason Gunthorpe
[not found] ` <20170306173139.GA11805-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-07 16:44 ` Yishai Hadas
[not found] ` <55bcc87e-b059-65df-8079-100120865ffb-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-03-07 19:18 ` Jason Gunthorpe
[not found] ` <20170307191824.GD2228-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-08 21:27 ` Yishai Hadas
[not found] ` <6571cf34-63b9-7b83-ddb0-9279e7e20fa9-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-03-08 21:56 ` Jason Gunthorpe
[not found] ` <20170308215609.GB4109-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-09 15:42 ` Yishai Hadas
[not found] ` <4dcf0cea-3652-0df2-9d98-74e258e6170a-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-03-09 17:03 ` Jason Gunthorpe
[not found] ` <20170309170320.GA12694-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-13 15:17 ` Yishai Hadas
2017-02-16 19:23 ` [PATCH rdma-core 08/14] mlx5: " Jason Gunthorpe
[not found] ` <1487272989-8215-9-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-27 10:56 ` Yishai Hadas
[not found] ` <d5921636-1911-5588-8c59-620066bca01a-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-02-27 18:00 ` Jason Gunthorpe
[not found] ` <20170227180009.GL5891-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-28 16:02 ` Yishai Hadas
[not found] ` <2969cce4-8b51-8fcf-f099-2b42a6d40a9c-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-02-28 17:06 ` Jason Gunthorpe
[not found] ` <20170228170658.GA17995-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-02 9:34 ` Yishai Hadas
[not found] ` <24bf0e37-e032-0862-c5b9-b5a40fcfb343-LDSdmyG8hGV8YrgS2mwiifqBs+8SCbDb@public.gmane.org>
2017-03-02 17:12 ` Jason Gunthorpe
[not found] ` <20170302171210.GA8595-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-03-06 14:19 ` Yishai Hadas
2017-02-16 19:23 ` [PATCH rdma-core 09/14] nes: " Jason Gunthorpe
2017-02-16 19:23 ` [PATCH rdma-core 10/14] mthca: Update to use new mmio " Jason Gunthorpe
2017-02-16 19:23 ` [PATCH rdma-core 11/14] ocrdma: Update to use new udma " Jason Gunthorpe
[not found] ` <1487272989-8215-12-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-18 16:21 ` Devesh Sharma
2017-02-16 19:23 ` [PATCH rdma-core 12/14] qedr: " Jason Gunthorpe
[not found] ` <1487272989-8215-13-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-23 13:49 ` Amrani, Ram
[not found] ` <SN1PR07MB2207DE206738E6DD8511CEA1F8530-mikhvbZlbf8TSoR2DauN2+FPX92sqiQdvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-02-23 17:30 ` Jason Gunthorpe
[not found] ` <20170223173047.GC6688-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-24 10:01 ` Amrani, Ram
2017-02-16 19:23 ` [PATCH rdma-core 13/14] vmw_pvrdma: " Jason Gunthorpe
[not found] ` <1487272989-8215-14-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-17 18:05 ` Adit Ranadive
2017-02-16 19:23 ` [PATCH rdma-core 14/14] Remove the old barrier macros Jason Gunthorpe
[not found] ` <1487272989-8215-15-git-send-email-jgunthorpe-ePGOBjL8dl3ta4EC/59zMFaTQe2KTcn/@public.gmane.org>
2017-02-23 13:33 ` Amrani, Ram
[not found] ` <SN1PR07MB22070A48ACD50848267A5AD8F8530-mikhvbZlbf8TSoR2DauN2+FPX92sqiQdvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2017-02-23 16:59 ` Jason Gunthorpe
2017-02-28 16:00 ` [PATCH rdma-core 00/14] Revise the DMA barrier macros in ibverbs Doug Ledford
[not found] ` <1488297611.86943.215.camel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-02-28 16:38 ` Majd Dibbiny
[not found] ` <C6384D48-FC47-4046-8025-462E1CB02A34-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-02-28 17:47 ` Doug Ledford
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1487272989-8215-5-git-send-email-jgunthorpe@obsidianresearch.com \
--to=jgunthorpe-epgobjl8dl3ta4ec/59zmfatqe2ktcn/@public.gmane.org \
--cc=linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
--cc=swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).