* [PATCH rdma-next] RDMA/qedr: SRQ's bug fixes
@ 2020-07-06 11:13 Yuval Basson
From: Yuval Basson @ 2020-07-06 11:13 UTC
  To: dledford, jgg; +Cc: linux-rdma, Yuval Basson, Michal Kalderon

QPs that share an SRQ but work on different CQs and run in parallel on
different CPUs can race when maintaining the SRQ consumer count, which
leads to the FW running out of SRQs. Update the consumer count
atomically. Make sure wqe_prod is updated after sge_prod, as the FW
requires.
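
A minimal sketch of the race (illustrative only, not the driver's code):
a plain u32 increment is a non-atomic read-modify-write, so two CPUs
completing receives against the same SRQ can both load the same value
and one increment is lost:

	/* CPU0 and CPU1 run this concurrently from their CQ poll paths:
	 * both may load wr_cons_cnt == n and both store n + 1, losing
	 * one consumed element.
	 */
	srq->hw_srq.wr_cons_cnt++;

	/* With the fix, the increment is a single atomic RMW and
	 * cannot be lost.
	 */
	atomic_inc(&srq->hw_srq.wr_cons_cnt);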

Fixes: 3491c9e799fb9 ("RDMA/qedr: Add support for kernel mode SRQ's")
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Yuval Basson <ybason@marvell.com>
---
 drivers/infiniband/hw/qedr/qedr.h  |  4 ++--
 drivers/infiniband/hw/qedr/verbs.c | 23 ++++++++++++-----------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index fdf90ec..aa33202 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -344,10 +344,10 @@ struct qedr_srq_hwq_info {
 	u32 wqe_prod;
 	u32 sge_prod;
 	u32 wr_prod_cnt;
-	u32 wr_cons_cnt;
+	atomic_t wr_cons_cnt;
 	u32 num_elems;
 
-	u32 *virt_prod_pair_addr;
+	struct rdma_srq_producers *virt_prod_pair_addr;
 	dma_addr_t phy_prod_pair_addr;
 };
 
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9b9e802..394adbd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1510,6 +1510,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 	srq->dev = dev;
 	hw_srq = &srq->hw_srq;
 	spin_lock_init(&srq->lock);
+	atomic_set(&hw_srq->wr_cons_cnt, 0);
 
 	hw_srq->max_wr = init_attr->attr.max_wr;
 	hw_srq->max_sges = init_attr->attr.max_sge;
@@ -3686,7 +3687,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
 	 * count and consumer count and subtract it from max
 	 * work request supported so that we get elements left.
 	 */
-	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
+	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
 
 	return hw_srq->max_wr - used;
 }
@@ -3701,7 +3702,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 	unsigned long flags;
 	int status = 0;
 	u32 num_sge;
-	u32 offset;
 
 	spin_lock_irqsave(&srq->lock, flags);
 
@@ -3714,7 +3714,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		if (!qedr_srq_elem_left(hw_srq) ||
 		    wr->num_sge > srq->hw_srq.max_sges) {
 			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
-			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
+			       hw_srq->wr_prod_cnt,
+			       atomic_read(&hw_srq->wr_cons_cnt),
 			       wr->num_sge, srq->hw_srq.max_sges);
 			status = -ENOMEM;
 			*bad_wr = wr;
@@ -3748,22 +3749,22 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			hw_srq->sge_prod++;
 		}
 
-		/* Flush WQE and SGE information before
+		/* Update WQE and SGE information before
 		 * updating producer.
 		 */
-		wmb();
+		dma_wmb();
 
 		/* SRQ producer is 8 bytes. Need to update SGE producer index
 		 * in first 4 bytes and need to update WQE producer in
 		 * next 4 bytes.
 		 */
-		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
-		offset = offsetof(struct rdma_srq_producers, wqe_prod);
-		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
-			hw_srq->wqe_prod;
+		srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
+		/* Make sure sge producer is updated first */
+		barrier();
+		srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
 
 		/* Flush producer after updating it. */
-		wmb();
+		dma_wmb();
 		wr = wr->next;
 	}
 
@@ -4182,7 +4183,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
 	} else {
 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
 	}
-	srq->hw_srq.wr_cons_cnt++;
+	atomic_inc(&srq->hw_srq.wr_cons_cnt);
 
 	return 1;
 }
-- 
1.8.3.1
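
For reference, the producer pair that qedr_post_srq_recv() writes through
is the 8-byte structure referred to in the hunk above; a sketch of its
shape, with field types assumed (the authoritative definition lives in
the qed HSI headers):

	struct rdma_srq_producers {
		u32 sge_prod;	/* bytes 0-3: SGE producer index */
		u32 wqe_prod;	/* bytes 4-7: WQE producer index */
	};

The patch stores sge_prod before wqe_prod because the FW requires the
SGE producer to be visible first.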



* Re: [PATCH rdma-next] RDMA/qedr: SRQ's bug fixes
@ 2020-07-06 17:15 ` Jason Gunthorpe
From: Jason Gunthorpe @ 2020-07-06 17:15 UTC
  To: Yuval Basson; +Cc: dledford, linux-rdma, Michal Kalderon

On Mon, Jul 06, 2020 at 02:13:52PM +0300, Yuval Basson wrote:
> QPs that share an SRQ but work on different CQs and run in parallel on
> different CPUs can race when maintaining the SRQ consumer count, which
> leads to the FW running out of SRQs. Update the consumer count
> atomically. Make sure wqe_prod is updated after sge_prod, as the FW
> requires.
> 
> Fixes: 3491c9e799fb9 ("RDMA/qedr: Add support for kernel mode SRQ's")
> Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
> Signed-off-by: Yuval Basson <ybason@marvell.com>
>  drivers/infiniband/hw/qedr/qedr.h  |  4 ++--
>  drivers/infiniband/hw/qedr/verbs.c | 23 ++++++++++++-----------
>  2 files changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
> index fdf90ec..aa33202 100644
> +++ b/drivers/infiniband/hw/qedr/qedr.h
> @@ -344,10 +344,10 @@ struct qedr_srq_hwq_info {
>  	u32 wqe_prod;
>  	u32 sge_prod;
>  	u32 wr_prod_cnt;
> -	u32 wr_cons_cnt;
> +	atomic_t wr_cons_cnt;
>  	u32 num_elems;
>  
> -	u32 *virt_prod_pair_addr;
> +	struct rdma_srq_producers *virt_prod_pair_addr;
>  	dma_addr_t phy_prod_pair_addr;
>  };
>  
> diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
> index 9b9e802..394adbd 100644
> +++ b/drivers/infiniband/hw/qedr/verbs.c
> @@ -1510,6 +1510,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
>  	srq->dev = dev;
>  	hw_srq = &srq->hw_srq;
>  	spin_lock_init(&srq->lock);
> +	atomic_set(&hw_srq->wr_cons_cnt, 0);
>  
>  	hw_srq->max_wr = init_attr->attr.max_wr;
>  	hw_srq->max_sges = init_attr->attr.max_sge;
> @@ -3686,7 +3687,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
>  	 * count and consumer count and subtract it from max
>  	 * work request supported so that we get elements left.
>  	 */
> -	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
> +	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
>  
>  	return hw_srq->max_wr - used;
>  }
> @@ -3701,7 +3702,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
>  	unsigned long flags;
>  	int status = 0;
>  	u32 num_sge;
> -	u32 offset;
>  
>  	spin_lock_irqsave(&srq->lock, flags);
>  
> @@ -3714,7 +3714,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
>  		if (!qedr_srq_elem_left(hw_srq) ||
>  		    wr->num_sge > srq->hw_srq.max_sges) {
>  			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
> -			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
> +			       hw_srq->wr_prod_cnt,
> +			       atomic_read(&hw_srq->wr_cons_cnt),
>  			       wr->num_sge, srq->hw_srq.max_sges);
>  			status = -ENOMEM;
>  			*bad_wr = wr;
> @@ -3748,22 +3749,22 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
>  			hw_srq->sge_prod++;
>  		}
>  
> -		/* Flush WQE and SGE information before
> +		/* Update WQE and SGE information before
>  		 * updating producer.
>  		 */
> -		wmb();
> +		dma_wmb();
>  
>  		/* SRQ producer is 8 bytes. Need to update SGE producer index
>  		 * in first 4 bytes and need to update WQE producer in
>  		 * next 4 bytes.
>  		 */
> -		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
> -		offset = offsetof(struct rdma_srq_producers, wqe_prod);
> -		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
> -			hw_srq->wqe_prod;
> +		srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
> +		/* Make sure sge producer is updated first */
> +		barrier();
> +		srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;

That is not what barrier() does.

This is DMA-coherent memory, and you need to ensure that DMA observes
sge_prod before wqe_prod? That is the very definition of dma_wmb().
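
The pattern being asked for, sketched with the patch's names (the
placement of the barrier is the point, not the surrounding code):

	srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
	dma_wmb();	/* device must observe sge_prod before wqe_prod */
	srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;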

>  		/* Flush producer after updating it. */
> -		wmb();
> +		dma_wmb();
>  		wr = wr->next;

Why are there more dma_wmb()s? What dma'able memory is this protecting?

Jason


* RE: [EXT] Re: [PATCH rdma-next] RDMA/qedr: SRQ's bug fixes
@ 2020-07-07 18:11   ` Yuval Basson
From: Yuval Basson @ 2020-07-07 18:11 UTC
  To: Jason Gunthorpe; +Cc: dledford, linux-rdma, Michal Kalderon

> On Mon, Jul 06, 2020 at 02:13:52PM +0300, Yuval Basson wrote:
> > QPs that share an SRQ but work on different CQs and run in parallel on
> > different CPUs can race when maintaining the SRQ consumer count, which
> > leads to the FW running out of SRQs. Update the consumer count
> > atomically. Make sure wqe_prod is updated after sge_prod, as the FW
> > requires.
> >
> > Fixes: 3491c9e799fb9 ("RDMA/qedr: Add support for kernel mode SRQ's")
> > Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
> > Signed-off-by: Yuval Basson <ybason@marvell.com>
> > drivers/infiniband/hw/qedr/qedr.h  |  4 ++--
> > drivers/infiniband/hw/qedr/verbs.c | 23 ++++++++++++-----------
> >  2 files changed, 14 insertions(+), 13 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
> > index fdf90ec..aa33202 100644
> > +++ b/drivers/infiniband/hw/qedr/qedr.h
> > @@ -344,10 +344,10 @@ struct qedr_srq_hwq_info {
> >  	u32 wqe_prod;
> >  	u32 sge_prod;
> >  	u32 wr_prod_cnt;
> > -	u32 wr_cons_cnt;
> > +	atomic_t wr_cons_cnt;
> >  	u32 num_elems;
> >
> > -	u32 *virt_prod_pair_addr;
> > +	struct rdma_srq_producers *virt_prod_pair_addr;
> >  	dma_addr_t phy_prod_pair_addr;
> >  };
> >
> > diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
> > index 9b9e802..394adbd 100644
> > +++ b/drivers/infiniband/hw/qedr/verbs.c
> > @@ -1510,6 +1510,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
> >  	srq->dev = dev;
> >  	hw_srq = &srq->hw_srq;
> >  	spin_lock_init(&srq->lock);
> > +	atomic_set(&hw_srq->wr_cons_cnt, 0);
> >
> >  	hw_srq->max_wr = init_attr->attr.max_wr;
> >  	hw_srq->max_sges = init_attr->attr.max_sge;
> > @@ -3686,7 +3687,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
> >  	 * count and consumer count and subtract it from max
> >  	 * work request supported so that we get elements left.
> >  	 */
> > -	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
> > +	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
> >
> >  	return hw_srq->max_wr - used;
> >  }
> > @@ -3701,7 +3702,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
> >  	unsigned long flags;
> >  	int status = 0;
> >  	u32 num_sge;
> > -	u32 offset;
> >
> >  	spin_lock_irqsave(&srq->lock, flags);
> >
> > @@ -3714,7 +3714,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
> >  		if (!qedr_srq_elem_left(hw_srq) ||
> >  		    wr->num_sge > srq->hw_srq.max_sges) {
> >  			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
> > -			       hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
> > +			       hw_srq->wr_prod_cnt,
> > +			       atomic_read(&hw_srq->wr_cons_cnt),
> >  			       wr->num_sge, srq->hw_srq.max_sges);
> >  			status = -ENOMEM;
> >  			*bad_wr = wr;
> > @@ -3748,22 +3749,22 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
> >  			hw_srq->sge_prod++;
> >  		}
> >
> > -		/* Flush WQE and SGE information before
> > +		/* Update WQE and SGE information before
> >  		 * updating producer.
> >  		 */
> > -		wmb();
> > +		dma_wmb();
> >
> >  		/* SRQ producer is 8 bytes. Need to update SGE producer index
> >  		 * in first 4 bytes and need to update WQE producer in
> >  		 * next 4 bytes.
> >  		 */
> > -		*srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
> > -		offset = offsetof(struct rdma_srq_producers, wqe_prod);
> > -		*((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
> > -			hw_srq->wqe_prod;
> > +		srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
> > +		/* Make sure sge producer is updated first */
> > +		barrier();
> > +		srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
> 
> That is not what barrier() does.
> 
> This is DMA-coherent memory, and you need to ensure that DMA observes
> sge_prod before wqe_prod? That is the very definition of dma_wmb().
Yes. I thought barrier() would suffice (on x86, for instance). I'll fix it in V2.
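
For context: barrier() is a compiler-only fence. It stops the compiler
from reordering the two stores but emits no instruction, so the order a
DMA device observes remains architecture-dependent; x86's strong store
ordering merely masks the bug. Roughly (illustrative expansions, not the
exact kernel definitions):

	barrier();	/* asm volatile("" ::: "memory"): blocks compiler
			 * reordering only, no CPU or device ordering
			 */
	dma_wmb();	/* x86: a compiler barrier (stores are already
			 * ordered); arm64: dmb(oshst), a real store fence
			 */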
> 
> >  		/* Flush producer after updating it. */
> > -		wmb();
> > +		dma_wmb();
> >  		wr = wr->next;
> 
> Why are there more dma_wmb()s? What dma'able memory is this
> protecting?
Redundant. I'll remove it in V2.
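
So the V2 sequence would presumably keep one dma_wmb() before the
producer updates and one between the two producer stores, with nothing
after; a sketch of the expected shape, not the actual V2:

	/* Make WQE and SGE writes visible before any producer update */
	dma_wmb();
	srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
	/* FW must observe sge_prod before wqe_prod */
	dma_wmb();
	srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
	wr = wr->next;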
> 
> Jason
