* [PATCH v2 1/2] net/mlx5: Define interface bits for fencing UMR wqe
From: Max Gurtovoy @ 2017-05-28  7:53 UTC

HW can implement UMR wqe re-transmission in various ways.
Thus, add an HCA cap to distinguish the fence needed for UMR, to make
sure that the wqe won't fail on mkey checks.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Acked-by: Leon Romanovsky <leon@kernel.org>
---

No changes from v1 in this patch (PATCH 1/2).

---
 include/linux/mlx5/mlx5_ifc.h |   10 +++++++++-
 1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 32de072..edafedb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -766,6 +766,12 @@ enum {
 	MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+	MLX5_CAP_UMR_FENCE_STRONG	= 0x0,
+	MLX5_CAP_UMR_FENCE_SMALL	= 0x1,
+	MLX5_CAP_UMR_FENCE_NONE		= 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_202[0x1];
 	u8         ipoib_enhanced_offloads[0x1];
 	u8         ipoib_basic_offloads[0x1];
-	u8         reserved_at_205[0xa];
+	u8         reserved_at_205[0x5];
+	u8         umr_fence[0x2];
+	u8         reserved_at_20c[0x3];
 	u8         drain_sigerr[0x1];
 	u8         cmdif_checksum[0x2];
 	u8         sigerr_cqe[0x1];
-- 
1.7.1
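
A minimal sketch of the consumer side of this new capability
(illustration only, not part of the patch; it assumes the standard
MLX5_CAP_GEN() accessor over the cached cmd_hca_cap page):

	/* Sketch: map the raw 2-bit capability to a readable name.
	 * MLX5_CAP_GEN() reads the HCA capability page that mlx5_core
	 * queried from firmware at probe time.
	 */
	static const char *umr_fence_str(struct mlx5_core_dev *mdev)
	{
		switch (MLX5_CAP_GEN(mdev, umr_fence)) {
		case MLX5_CAP_UMR_FENCE_NONE:
			return "none";
		case MLX5_CAP_UMR_FENCE_SMALL:
			return "small";
		default:
			return "strong";	/* 0x0 and any future value */
		}
	}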


* [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-28  7:53 UTC

Cache the needed umr_fence and set the wqe ctrl segment
accordingly.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Acked-by: Leon Romanovsky <leon@kernel.org>
---

Changes from v1:
 - made MLX5_FENCE_MODE_STRONG_ORDERING the default value
   in mlx5_get_umr_fence.

---
 drivers/infiniband/hw/mlx5/main.c    |   14 ++++++++++++++
 drivers/infiniband/hw/mlx5/mlx5_ib.h |    1 +
 drivers/infiniband/hw/mlx5/qp.c      |   15 +++++++--------
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772d..83d1f9b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2979,6 +2979,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+	switch (umr_fence_cap) {
+	case MLX5_CAP_UMR_FENCE_NONE:
+		return MLX5_FENCE_MODE_NONE;
+	case MLX5_CAP_UMR_FENCE_SMALL:
+		return MLX5_FENCE_MODE_INITIATOR_SMALL;
+	default:
+		return MLX5_FENCE_MODE_STRONG_ORDERING;
+	}
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
 	struct ib_srq_init_attr attr;
@@ -3693,6 +3705,8 @@ static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
 
 	mlx5_ib_internal_fill_odp_caps(dev);
 
+	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
 		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
 		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 38c877b..0e08a58 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_port	*port;
 	struct mlx5_sq_bfreg     bfreg;
 	struct mlx5_sq_bfreg     fp_bfreg;
+	u8				umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1..876a429 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3738,11 +3738,10 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+static u8 get_fence(u8 fence, struct ib_send_wr *wr, struct mlx5_ib_dev *dev)
 {
-	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-		     wr->send_flags & IB_SEND_FENCE))
-		return MLX5_FENCE_MODE_STRONG_ORDERING;
+	if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR)
+		return dev->umr_fence;
 
 	if (unlikely(fence)) {
 		if (wr->send_flags & IB_SEND_FENCE)
@@ -3928,7 +3927,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				}
 
 				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
+					   nreq, get_fence(fence, wr, dev),
 					   next_fence, MLX5_OPCODE_UMR);
 				/*
 				 * SET_PSV WQEs are not signaled and solicited
@@ -3955,7 +3954,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				}
 
 				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
+					   nreq, get_fence(fence, wr, dev),
 					   next_fence, MLX5_OPCODE_SET_PSV);
 				err = begin_wqe(qp, &seg, &ctrl, wr,
 						&idx, &size, nreq);
@@ -3977,7 +3976,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				}
 
 				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
+					   nreq, get_fence(fence, wr, dev),
 					   next_fence, MLX5_OPCODE_SET_PSV);
 				num_sge = 0;
 				goto skip_psv;
@@ -4090,7 +4089,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-			   get_fence(fence, wr), next_fence,
+			   get_fence(fence, wr, dev), next_fence,
 			   mlx5_ib_opcode[wr->opcode]);
 skip_psv:
 		if (0)
-- 
1.7.1
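
To make the effect concrete: any ULP fast-registration post now inherits
the device-derived fence instead of an unconditional strong one. A hedged
sketch of such a caller (names are illustrative, not from this series;
it assumes a QP and MR already set up the usual way):

	/* Post a fast-registration WR.  With this patch, the mlx5
	 * provider derives the fence in ctrl->fm_ce_se for IB_WR_REG_MR
	 * (and IB_WR_LOCAL_INV) from dev->umr_fence, i.e. from the HCA
	 * umr_fence capability, instead of always using strong ordering.
	 */
	static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				    int access)
	{
		struct ib_send_wr *bad_wr;
		struct ib_reg_wr wr = {
			.wr = {
				.opcode	    = IB_WR_REG_MR,
				.send_flags = IB_SEND_SIGNALED,
			},
			.mr	= mr,
			.key	= mr->rkey,
			.access	= access,
		};

		return ib_post_send(qp, &wr.wr, &bad_wr);
	}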


* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Christoph Hellwig @ 2017-05-28  9:07 UTC

On Sun, May 28, 2017 at 10:53:11AM +0300, Max Gurtovoy wrote:
> Cache the needed umr_fence and set the wqe ctrl segment
> accordingly.

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

But that whole fence logic looks awkward to me.  Does the following
patch to reorder it make sense to you?

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 0e08a58de673..bdcf25410c99 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
 	struct mlx5_ib_wq	rq;
 
 	u8			sq_signal_bits;
-	u8			fm_cache;
+	u8			next_fence;
 	struct mlx5_ib_wq	sq;
 
 	/* serialize qp state modifications
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 876a42908e4d..ebb6768684de 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3738,23 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr, struct mlx5_ib_dev *dev)
-{
-	if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR)
-		return dev->umr_fence;
-
-	if (unlikely(fence)) {
-		if (wr->send_flags & IB_SEND_FENCE)
-			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-		else
-			return fence;
-	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-		return MLX5_FENCE_MODE_FENCE;
-	}
-
-	return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 		     struct mlx5_wqe_ctrl_seg **ctrl,
 		     struct ib_send_wr *wr, unsigned *idx,
@@ -3783,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
 		       struct mlx5_wqe_ctrl_seg *ctrl,
 		       u8 size, unsigned idx, u64 wr_id,
-		       int nreq, u8 fence, u8 next_fence,
-		       u32 mlx5_opcode)
+		       int nreq, u8 fence, u32 mlx5_opcode)
 {
 	u8 opmod = 0;
 
@@ -3792,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 					     mlx5_opcode | ((u32)opmod << 24));
 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
-	qp->fm_cache = next_fence;
 	if (unlikely(qp->wq_sig))
 		ctrl->signature = wq_sig(ctrl);
 
@@ -3852,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		fence = qp->fm_cache;
 		num_sge = wr->num_sge;
 		if (unlikely(num_sge > qp->sq.max_gs)) {
 			mlx5_ib_warn(dev, "\n");
@@ -3869,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
+		if (wr->opcode == IB_WR_LOCAL_INV ||
+		    wr->opcode == IB_WR_REG_MR) {
+			fence = dev->umr_fence;
+			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+		} else if (wr->send_flags & IB_SEND_FENCE) {
+			if (qp->next_fence)
+				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+			else
+				fence = MLX5_FENCE_MODE_FENCE;
+		} else {
+			fence = qp->next_fence;
+		}
+
 		switch (ibqp->qp_type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
@@ -3895,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				goto out;
 
 			case IB_WR_LOCAL_INV:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
 				set_linv_wr(qp, &seg, &size);
@@ -3903,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 
 			case IB_WR_REG_MR:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3926,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr, dev),
-					   next_fence, MLX5_OPCODE_UMR);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_UMR);
 				/*
 				 * SET_PSV WQEs are not signaled and solicited
 				 * on error
@@ -3953,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr, dev),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
 				err = begin_wqe(qp, &seg, &ctrl, wr,
 						&idx, &size, nreq);
 				if (err) {
@@ -3965,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
 						 mr->sig->psv_wire.psv_idx, &seg,
 						 &size);
@@ -3975,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr, dev),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
+				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				num_sge = 0;
 				goto skip_psv;
 
@@ -4088,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-			   get_fence(fence, wr, dev), next_fence,
+		qp->next_fence = next_fence;
+		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
 			   mlx5_ib_opcode[wr->opcode]);
 skip_psv:
 		if (0)

* Re: [PATCH v2 1/2] net/mlx5: Define interface bits for fencing UMR wqe
From: Christoph Hellwig @ 2017-05-28  9:14 UTC

On Sun, May 28, 2017 at 10:53:10AM +0300, Max Gurtovoy wrote:
> HW can implement UMR wqe re-transmission in various ways.
> Thus, add an HCA cap to distinguish the fence needed for UMR, to make
> sure that the wqe won't fail on mkey checks.
> 
> Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
> Acked-by: Leon Romanovsky <leon@kernel.org>
> ---
> 
> No changes from v1 in this patch (PATCH 1/2).

Still:

Reviewed-by: Christoph Hellwig <hch@lst.de>

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-28  9:53 UTC



On 5/28/2017 12:07 PM, Christoph Hellwig wrote:
> On Sun, May 28, 2017 at 10:53:11AM +0300, Max Gurtovoy wrote:
>> Cache the needed umr_fence and set the wqe ctrl segment
>> accordingly.
>
> Looks good,
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>

Thanks.

>
> But that whole fence logic looks awkward to me.  Does the following
> patch to reorder it make sense to you?
>


Yes, it makes sense to me.
Sagi/Leon, any comments?



* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Leon Romanovsky @ 2017-05-29 10:05 UTC

On Sun, May 28, 2017 at 12:53:00PM +0300, Max Gurtovoy wrote:
>
>
> On 5/28/2017 12:07 PM, Christoph Hellwig wrote:
> > On Sun, May 28, 2017 at 10:53:11AM +0300, Max Gurtovoy wrote:
> > > Cache the needed umr_fence and set the wqe ctrl segment
> > > accordingly.
> >
> > Looks good,
> >
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
>
> Thanks.
>
> >
> > But that whole fence logic looks awkward to me.  Does the following
> > patch to reorder it make sense to you?
> >
>
>
> Yes, it makes sense to me.
> Sagi/Leon, any comments?

Max,

Do you see any performance impact for IB_WR_RDMA_READ, IB_WR_RDMA_WRITE
and IB_WR_RDMA_WRITE_WITH_IMM flows? They don't need fences, and such a
change can cause performance losses.

Thanks



* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-29 12:21 UTC



On 5/29/2017 1:05 PM, Leon Romanovsky wrote:
> On Sun, May 28, 2017 at 12:53:00PM +0300, Max Gurtovoy wrote:
>>
>>
>> On 5/28/2017 12:07 PM, Christoph Hellwig wrote:
>>> On Sun, May 28, 2017 at 10:53:11AM +0300, Max Gurtovoy wrote:
>>>> Cache the needed umr_fence and set the wqe ctrl segment
>>>> accordingly.
>>>
>>> Looks good,
>>>
>>> Reviewed-by: Christoph Hellwig <hch@lst.de>
>>
>> Thanks.
>>
>>>
>>> But that whole fence logic looks awkward to me.  Does the following
>>> patch to reorder it make sense to you?
>>>
>>
>>
>> Yes, it makes sense to me.
>> Sagi/Leon, any comments?
>
> Max,
>
> Do you see any performance impact for IB_WR_RDMA_READ, IB_WR_RDMA_WRITE
> and IB_WR_RDMA_WRITE_WITH_IMM flows? They don't need fences, and such a
> change can cause performance losses.
>
> Thanks
>

We don't fence those WRs.
Christoph just rewrote it as more intuitive code. I don't see a logic
difference; am I wrong here?


* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Leon Romanovsky @ 2017-05-29 16:06 UTC

On Mon, May 29, 2017 at 03:21:11PM +0300, Max Gurtovoy wrote:
>
>
> On 5/29/2017 1:05 PM, Leon Romanovsky wrote:
> > On Sun, May 28, 2017 at 12:53:00PM +0300, Max Gurtovoy wrote:
> > >
> > >
> > > On 5/28/2017 12:07 PM, Christoph Hellwig wrote:
> > > > On Sun, May 28, 2017 at 10:53:11AM +0300, Max Gurtovoy wrote:
> > > > > Cache the needed umr_fence and set the wqe ctrl segment
> > > > > accordingly.
> > > >
> > > > Looks good,
> > > >
> > > > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > >
> > > Thanks.
> > >
> > > >
> > > > But that whole fence logic looks awkward to me.  Does the following
> > > > patch to reorder it make sense to you?
> > > >
> > >
> > >
> > > Yes, it makes sense to me.
> > > Sagi/Leon, any comments?
> >
> > Max,
> >
> > Do you see any performance impact for IB_WR_RDMA_READ, IB_WR_RDMA_WRITE
> > and IB_WR_RDMA_WRITE_WITH_IMM flows? They don't need fences, and such a
> > change can cause performance losses.
> >
> > Thanks
> >
>
> We don't fence those WRs.
> Christoph just rewrote it as more intuitive code. I don't see a logic
> difference; am I wrong here?

A little bit: before Christoph's suggestion, we calculated the fence only
for the paths that need one; after it, we will calculate it for all paths.

Thanks


* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Sagi Grimberg @ 2017-05-30 10:48 UTC

> +static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
> +{
> +	switch (umr_fence_cap) {
> +	case MLX5_CAP_UMR_FENCE_NONE:
> +		return MLX5_FENCE_MODE_NONE;
> +	case MLX5_CAP_UMR_FENCE_SMALL:
> +		return MLX5_FENCE_MODE_INITIATOR_SMALL;
> +	default:
> +		return MLX5_FENCE_MODE_STRONG_ORDERING;
> +	}
> +}

Where are the MLX5_CAP_UMR defines declared? Am I missing something?

What is the value of MLX5_CAP_UMR_FENCE_NONE? If it's zero, then this
change is not backwards compatible with older FW.

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Sagi Grimberg @ 2017-05-30 10:51 UTC

Leon,

>> We don't fence those WRs.
>> Christoph just rewrote it as more intuitive code. I don't see a logic
>> difference; am I wrong here?
>
> A little bit: before Christoph's suggestion, we calculated the fence only
> for the paths that need one; after it, we will calculate it for all paths.

Every WQE posted to a send queue must include a fence bit. All work
requests posted on the send queue calculate the required fence; this
used to happen at the finish_wqe() call sites via get_fence(), and
Christoph just inlined it.
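
For reference, the fence mode occupies the high bits of the ctrl
segment's fm_ce_se byte, which is why finish_wqe() can simply OR it in;
the enum below is reproduced from mlx5_ib.h of this era for readability
(consult the tree for the authoritative values):

	enum {
		MLX5_FENCE_MODE_NONE			= 0 << 5,
		MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
		MLX5_FENCE_MODE_FENCE			= 2 << 5,
		MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
		MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
	};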

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-30 11:15 UTC



On 5/30/2017 1:48 PM, Sagi Grimberg wrote:
>> +static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
>> +{
>> +    switch (umr_fence_cap) {
>> +    case MLX5_CAP_UMR_FENCE_NONE:
>> +        return MLX5_FENCE_MODE_NONE;
>> +    case MLX5_CAP_UMR_FENCE_SMALL:
>> +        return MLX5_FENCE_MODE_INITIATOR_SMALL;
>> +    default:
>> +        return MLX5_FENCE_MODE_STRONG_ORDERING;
>> +    }
>> +}
>
> Where are the MLX5_CAP_UMR defines declared? Am I missing something?
>
> What is the value of MLX5_CAP_UMR_FENCE_NONE? If it's zero, then this
> change is not backwards compatible with older FW.


They are declared in patch 1/2:

+enum {
+	MLX5_CAP_UMR_FENCE_STRONG	= 0x0,
+	MLX5_CAP_UMR_FENCE_SMALL	= 0x1,
+	MLX5_CAP_UMR_FENCE_NONE		= 0x2,
+};
+

Thanks,
Max.
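
MLX5_CAP_UMR_FENCE_STRONG being 0x0 is also what settles the
backwards-compatibility question above: assuming older firmware returns
zero in the formerly reserved bits at offset 0x205, the lookup degrades
to the conservative pre-cap behaviour. A walk-through (sketch only, not
code from the series):

	u8 cap = MLX5_CAP_GEN(mdev, umr_fence);
		/* old FW: reads 0x0 == MLX5_CAP_UMR_FENCE_STRONG */
	u8 fence = mlx5_get_umr_fence(cap);
		/* hits the default case: MLX5_FENCE_MODE_STRONG_ORDERING */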

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Sagi Grimberg @ 2017-05-30 11:22 UTC

> they are declared in patch 1/2:
>
> +enum {
> +    MLX5_CAP_UMR_FENCE_STRONG    = 0x0,
> +    MLX5_CAP_UMR_FENCE_SMALL    = 0x1,
> +    MLX5_CAP_UMR_FENCE_NONE        = 0x2,
> +};
> +

Missed that, thanks.

What's FENCE_NONE anyway? No UMR fence needed at all?

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-30 11:24 UTC



On 5/30/2017 2:22 PM, Sagi Grimberg wrote:
>> they are declared in patch 1/2:
>>
>> +enum {
>> +    MLX5_CAP_UMR_FENCE_STRONG    = 0x0,
>> +    MLX5_CAP_UMR_FENCE_SMALL    = 0x1,
>> +    MLX5_CAP_UMR_FENCE_NONE        = 0x2,
>> +};
>> +
>
> Missed that, thanks.
>
> What's FENCE_NONE anyway? No UMR fence needed at all?

Yes.

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Sagi Grimberg @ 2017-05-30 14:28 UTC


> Yes.

So for the series:

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>

Doug, can you mark this for stable?

Max, I'm not sure how far back this needs a fix,
probably since day 1.

* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Leon Romanovsky @ 2017-05-30 17:15 UTC

On Tue, May 30, 2017 at 01:51:33PM +0300, Sagi Grimberg wrote:
> Leon,
>
> > > We don't fence those WRs.
> > > Christoph just rewrote it as more intuitive code. I don't see a logic
> > > difference; am I wrong here?
> >
> > A little bit: before Christoph's suggestion, we calculated the fence only
> > for the paths that need one; after it, we will calculate it for all paths.
>
> Every WQE posted to a send queue must include a fence bit. All work
> requests posted on the send queue calculate the required fence; this
> used to happen at the finish_wqe() call sites via get_fence(), and
> Christoph just inlined it.

Sagi,

Thanks, I found my mistake. I saw that an IB_SEND_INLINE WR doesn't call
get_fence() and that we have an if() which can skip finish_wqe(), so I
thought finish_wqe() isn't always called; however, that path is only taken
on error.

Thanks again.


* Re: [PATCH v2 2/2] IB/mlx5: set UMR wqe fence according to HCA cap
From: Max Gurtovoy @ 2017-05-30 20:41 UTC



On 5/30/2017 5:28 PM, Sagi Grimberg wrote:
>
>> Yes.
>
> So for the series:
>
> Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
>
> Doug, can you mark this for stable?
>
> Max, I'm not sure how far back this needs a fix,
> probably since day 1.

Yes, I guess we should.

* Re: [PATCH v2 1/2] net/mlx5: Define interface bits for fencing UMR wqe
From: Doug Ledford @ 2017-06-01 22:51 UTC

On Sun, 2017-05-28 at 10:53 +0300, Max Gurtovoy wrote:
> HW can implement UMR wqe re-transmission in various ways.
> Thus, add an HCA cap to distinguish the fence needed for UMR, to make
> sure that the wqe won't fail on mkey checks.
> 
> Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
> Acked-by: Leon Romanovsky <leon@kernel.org>

Series, with Christoph's fixup, applied.

-- 
Doug Ledford <dledford@redhat.com>
    GPG KeyID: B826A3330E572FDD
   
Key fingerprint = AE6B 1BDA 122B 23B4 265B  1274 B826 A333 0E57 2FDD

