From: Michal Kalderon <Michal.Kalderon-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>
To: michal.kalderon-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org,
	ram.amrani-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org,
	yuval.mintz-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org,
	ariel.elior-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org,
	davem-fT/PcQaiUtIeIZ0/mPfg9Q@public.gmane.org,
	netdev-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org
Cc: Michal Kalderon
	<Michal.Kalderon-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>,
	Yuval Mintz <Yuval.Mintz-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>,
	Ariel Elior <Ariel.Elior-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>
Subject: [PATCH net-next 08/12] qed: iWARP CM add active side connect
Date: Sun, 2 Jul 2017 10:29:28 +0300	[thread overview]
Message-ID: <1498980572-29519-9-git-send-email-Michal.Kalderon@cavium.com> (raw)
In-Reply-To: <1498980572-29519-1-git-send-email-Michal.Kalderon-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>

This patch implements the active side connect:
offload a connection, process the MPA reply and send an RTR.
In some of the common passive/active functions, the active side
works in blocking mode.
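
To make the intended usage concrete, here is a minimal, illustrative
sketch of how an upper-layer driver could drive the new active-side ops.
It is not part of the patch: the my_* names, the way the qed_rdma_ops
pointer and rdma context are carried in cb_context, and the exact point
at which the RTR is sent are assumptions; only the structures, events
and ops come from the interface added below.

#include <linux/etherdevice.h>
#include <linux/qed/qed_rdma_if.h>

/* Hypothetical per-connection context the ULP passes as cb_context. */
struct my_conn_ctx {
	const struct qed_rdma_ops *ops;
	void *rdma_cxt;
};

static int my_iwarp_event_cb(void *context,
			     struct qed_iwarp_cm_event_params *event)
{
	struct my_conn_ctx *ctx = context;
	struct qed_iwarp_send_rtr_in rtr_in;

	switch (event->event) {
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		/* MPA reply arrived; negotiated ord/ird and the peer's
		 * private data are in event->cm_info.  Following the flow
		 * described above, kick the RTR send from here.
		 */
		rtr_in.ep_context = event->ep_context;
		return ctx->ops->iwarp_send_rtr(ctx->rdma_cxt, &rtr_in);
	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		/* event->status reports the final result of the handshake. */
		return event->status;
	default:
		return 0;
	}
}

static int my_active_connect(struct my_conn_ctx *ctx, struct qed_rdma_qp *qp,
			     struct qed_iwarp_cm_info *cm_info, u16 mss,
			     const u8 *local_mac, const u8 *remote_mac)
{
	struct qed_iwarp_connect_out oparams = {};
	struct qed_iwarp_connect_in iparams = {};
	int rc;

	iparams.event_cb = my_iwarp_event_cb;
	iparams.cb_context = ctx;
	iparams.qp = qp;
	iparams.cm_info = *cm_info;	/* addresses, ord/ird, private data */
	iparams.mss = mss;
	ether_addr_copy(iparams.local_mac_addr, local_mac);
	ether_addr_copy(iparams.remote_mac_addr, remote_mac);

	rc = ctx->ops->iwarp_connect(ctx->rdma_cxt, &iparams, &oparams);
	if (rc)
		return rc;

	/* oparams.ep_context identifies this endpoint in later calls. */
	return 0;
}

The hunks below switch the completion mode of the shared TCP/MPA offload
ramrods based on ep->connect_mode to implement the blocking behaviour
mentioned above.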

Signed-off-by: Michal Kalderon <Michal.Kalderon-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>
Signed-off-by: Yuval Mintz <Yuval.Mintz-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>
Signed-off-by: Ariel Elior <Ariel.Elior-YGCgFSpz5w/QT0dZR+AlfA@public.gmane.org>
---
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 240 ++++++++++++++++++++++++++--
 drivers/net/ethernet/qlogic/qed/qed_iwarp.h |   7 +
 drivers/net/ethernet/qlogic/qed/qed_rdma.c  |   4 +
 include/linux/qed/qed_rdma_if.h             |  26 +++
 4 files changed, 265 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index a6dadae..a5da9fc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -611,7 +611,10 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = ep->tcp_cid;
 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
-	init_data.comp_mode = QED_SPQ_MODE_CB;
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
+		init_data.comp_mode = QED_SPQ_MODE_CB;
+	else
+		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
 				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
@@ -711,7 +714,7 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 		   async_data->mpa_request.ulp_data_len,
-		   mpa_rev, *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata)));
+		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 
 	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 		/* Read ord/ird values from private data buffer */
@@ -801,7 +804,10 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 
-	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+		init_data.comp_mode = QED_SPQ_MODE_CB;
+	else
+		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
 				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
@@ -890,6 +896,59 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 }
 
+void
+qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct mpa_v2_hdr *mpa_v2_params;
+	union async_output *async_data;
+	u16 mpa_ird, mpa_ord;
+	u8 mpa_data_size = 0;
+
+	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
+		mpa_v2_params =
+			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
+		mpa_data_size = sizeof(*mpa_v2_params);
+		mpa_ird = ntohs(mpa_v2_params->ird);
+		mpa_ord = ntohs(mpa_v2_params->ord);
+
+		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
+		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
+	}
+	async_data = &ep->ep_buffer_virt->async_output;
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
+	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
+				       mpa_data_size;
+}
+
+void
+qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+	struct qed_iwarp_cm_event_params params;
+
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		DP_NOTICE(p_hwfn,
+			  "MPA reply event not expected on passive side!\n");
+		return;
+	}
+
+	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
+
+	qed_iwarp_parse_private_data(p_hwfn, ep);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+	params.cm_info = &ep->cm_info;
+	params.ep_context = ep;
+	params.status = 0;
+
+	ep->mpa_reply_processed = true;
+
+	ep->event_cb(ep->cb_context, &params);
+}
+
 #define QED_IWARP_CONNECT_MODE_STRING(ep) \
 	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
 
@@ -902,7 +961,13 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
 	struct qed_iwarp_cm_event_params params;
 
-	params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+	else
+		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+
+	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
+		qed_iwarp_parse_private_data(p_hwfn, ep);
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
@@ -977,6 +1042,102 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	}
 }
 
+int qed_iwarp_connect(void *rdma_cxt,
+		      struct qed_iwarp_connect_in *iparams,
+		      struct qed_iwarp_connect_out *oparams)
+{
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_iwarp_info *iwarp_info;
+	struct qed_iwarp_ep *ep;
+	u8 mpa_data_size = 0;
+	u8 ts_hdr_size = 0;
+	u32 cid;
+	int rc;
+
+	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
+	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
+		DP_NOTICE(p_hwfn,
+			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+			  iparams->qp->icid, iparams->cm_info.ord,
+			  iparams->cm_info.ird);
+
+		return -EINVAL;
+	}
+
+	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+	/* Allocate ep object */
+	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+	if (rc)
+		return rc;
+
+	rc = qed_iwarp_create_ep(p_hwfn, &ep);
+	if (rc)
+		goto err;
+
+	ep->tcp_cid = cid;
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+	ep->qp = iparams->qp;
+	ep->qp->ep = ep;
+	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
+	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
+	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
+
+	ep->cm_info.ord = iparams->cm_info.ord;
+	ep->cm_info.ird = iparams->cm_info.ird;
+
+	ep->rtr_type = iwarp_info->rtr_type;
+	if (!iwarp_info->peer2peer)
+		ep->rtr_type = MPA_RTR_TYPE_NONE;
+
+	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
+		ep->cm_info.ord = 1;
+
+	ep->mpa_rev = iwarp_info->mpa_rev;
+
+	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
+				       mpa_data_size;
+
+	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+	       iparams->cm_info.private_data,
+	       iparams->cm_info.private_data_len);
+
+	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
+		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
+
+	ep->mss = iparams->mss - ts_hdr_size;
+	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+	ep->event_cb = iparams->event_cb;
+	ep->cb_context = iparams->cb_context;
+	ep->connect_mode = TCP_CONNECT_ACTIVE;
+
+	oparams->ep_context = ep;
+
+	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
+		   iparams->qp->icid, ep->tcp_cid, rc);
+
+	if (rc) {
+		qed_iwarp_destroy_ep(p_hwfn, ep, true);
+		goto err;
+	}
+
+	return rc;
+err:
+	qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+	return rc;
+}
+
 static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iwarp_ep *ep = NULL;
@@ -1174,12 +1335,12 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
 
 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
 {
-	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	struct qed_hwfn *p_hwfn = rdma_cxt;
 	struct qed_iwarp_ep *ep;
 	u8 mpa_data_size = 0;
 	int rc;
 
-	ep = (struct qed_iwarp_ep *)iparams->ep_context;
+	ep = iparams->ep_context;
 	if (!ep) {
 		DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
 		return -EINVAL;
@@ -1799,13 +1960,19 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
 			   struct qed_iwarp_ep *ep, u8 fw_return_code)
 {
-	/* Done with the SYN packet, post back to ll2 rx */
-	qed_iwarp_ll2_post_rx(p_hwfn, ep->syn,
-			      p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
-	ep->syn = NULL;
+	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
 
-	/* If connect failed - upper layer doesn't know about it */
-	qed_iwarp_mpa_received(p_hwfn, ep);
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		/* Done with the SYN packet, post back to ll2 rx */
+		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
+
+		ep->syn = NULL;
+
+		/* If connect failed - upper layer doesn't know about it */
+		qed_iwarp_mpa_received(p_hwfn, ep);
+	} else {
+		qed_iwarp_mpa_offload(p_hwfn, ep);
+	}
 }
 
 static inline bool
@@ -1842,6 +2009,16 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
 			   ep->tcp_cid, fw_return_code);
 		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
 		break;
+		/* Async event for active side only */
+	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
+		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+			return -EINVAL;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
+			   ep->cid, fw_return_code);
+		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
+		break;
 	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
 		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
 			return -EINVAL;
@@ -1918,6 +2095,45 @@ int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
 	return 0;
 }
 
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
+{
+	struct qed_hwfn *p_hwfn = rdma_cxt;
+	struct qed_sp_init_data init_data;
+	struct qed_spq_entry *p_ent;
+	struct qed_iwarp_ep *ep;
+	struct qed_rdma_qp *qp;
+	int rc;
+
+	ep = iparams->ep_context;
+	if (!ep) {
+		DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
+		return -EINVAL;
+	}
+
+	qp = ep->qp;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+		   qp->icid, ep->tcp_cid);
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_CB;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+				 PROTOCOLID_IWARP, &init_data);
+
+	if (rc)
+		return rc;
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
+
+	return rc;
+}
+
 void
 qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 		   struct qed_rdma_query_qp_out_params *out_params)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index bedac98..148ef3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -170,6 +170,11 @@ void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 			struct qed_rdma_query_qp_out_params *out_params);
 
 int
+qed_iwarp_connect(void *rdma_cxt,
+		  struct qed_iwarp_connect_in *iparams,
+		  struct qed_iwarp_connect_out *oparams);
+
+int
 qed_iwarp_create_listen(void *rdma_cxt,
 			struct qed_iwarp_listen_in *iparams,
 			struct qed_iwarp_listen_out *oparams);
@@ -179,4 +184,6 @@ void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
 
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 29de915..6fb9951 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1772,8 +1772,12 @@ static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
 	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
 	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
 	.ll2_get_stats = &qed_ll2_get_stats,
+	.iwarp_connect = &qed_iwarp_connect,
 	.iwarp_create_listen = &qed_iwarp_create_listen,
 	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
+	.iwarp_accept = &qed_iwarp_accept,
+	.iwarp_reject = &qed_iwarp_reject,
+	.iwarp_send_rtr = &qed_iwarp_send_rtr,
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops(void)
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index c4c241f..e9514a6 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -473,6 +473,8 @@ struct qed_rdma_counters_out_params {
 enum qed_iwarp_event_type {
 	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
 	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
+	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
+	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
 };
 
 enum qed_tcp_ip_version {
@@ -503,6 +505,20 @@ struct qed_iwarp_cm_event_params {
 typedef int (*iwarp_event_handler) (void *context,
 				    struct qed_iwarp_cm_event_params *event);
 
+struct qed_iwarp_connect_in {
+	iwarp_event_handler event_cb;
+	void *cb_context;
+	struct qed_rdma_qp *qp;
+	struct qed_iwarp_cm_info cm_info;
+	u16 mss;
+	u8 remote_mac_addr[ETH_ALEN];
+	u8 local_mac_addr[ETH_ALEN];
+};
+
+struct qed_iwarp_connect_out {
+	void *ep_context;
+};
+
 struct qed_iwarp_listen_in {
 	iwarp_event_handler event_cb;
 	void *cb_context;	/* passed to event_cb */
@@ -534,6 +550,10 @@ struct qed_iwarp_reject_in {
 	u16 private_data_len;
 };
 
+struct qed_iwarp_send_rtr_in {
+	void *ep_context;
+};
+
 struct qed_roce_ll2_header {
 	void *vaddr;
 	dma_addr_t baddr;
@@ -640,6 +660,10 @@ struct qed_rdma_ops {
 	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
 				  u8 *old_mac_address, u8 *new_mac_address);
 
+	int (*iwarp_connect)(void *rdma_cxt,
+			     struct qed_iwarp_connect_in *iparams,
+			     struct qed_iwarp_connect_out *oparams);
+
 	int (*iwarp_create_listen)(void *rdma_cxt,
 				   struct qed_iwarp_listen_in *iparams,
 				   struct qed_iwarp_listen_out *oparams);
@@ -652,6 +676,8 @@ struct qed_rdma_ops {
 
 	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
 
+	int (*iwarp_send_rtr)(void *rdma_cxt,
+			      struct qed_iwarp_send_rtr_in *iparams);
 };
 
 const struct qed_rdma_ops *qed_get_rdma_ops(void);
-- 
1.8.3.1
