From: Saurav Kashyap <skashyap@marvell.com>
To: <martin.petersen@oracle.com>
Cc: <GR-QLogic-Storage-Upstream@marvell.com>,
	<linux-scsi@vger.kernel.org>, <netdev@vger.kernel.org>
Subject: [PATCH 1/8] qedf: Keep track of number of pending FLOGIs
Date: Thu, 26 Mar 2020 00:07:59 -0700
Message-ID: <20200326070806.25493-2-skashyap@marvell.com>
In-Reply-To: <20200326070806.25493-1-skashyap@marvell.com>

Problem: the port does not come back up after being brought down for
a long duration. Sequence:

- Bring the port down from the switch.
- Wait for FIP VLAN discovery retries to be exhausted; the driver
  falls back to the default VLAN (1002) and calls fcoe_ctlr_link_up.
- libfc/fcoe starts sending FLOGI frames.
- Bring the port back up; the switch discards the FLOGIs because
  the VLAN is now different.

Fix: keep track of the number of pending (unanswered) FLOGIs. Once
the count reaches a threshold, schedule a soft context reset, which
bounces the link and reruns FIP VLAN discovery (see the condensed
sketch below).

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf.h      |  2 ++
 drivers/scsi/qedf/qedf_main.c | 23 +++++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399f..042ebf6 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -388,6 +388,7 @@ struct qedf_ctx {
 	mempool_t *io_mempool;
 	struct workqueue_struct *dpc_wq;
 	struct delayed_work grcdump_work;
+	struct delayed_work stag_work;
 
 	u32 slow_sge_ios;
 	u32 fast_sge_ios;
@@ -403,6 +404,7 @@ struct qedf_ctx {
 
 	u32 flogi_cnt;
 	u32 flogi_failed;
+	u32 flogi_pending;
 
 	/* Used for fc statistics */
 	struct mutex stats_mutex;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e..ee468102 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -282,6 +282,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
 		/* Set the source MAC we will use for FCoE traffic */
 		qedf_set_data_src_addr(qedf, fp);
+		qedf->flogi_pending = 0;
 	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +308,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+			schedule_delayed_work(&qedf->stag_work, 2);
+			return NULL;
+		}
+		qedf->flogi_pending++;
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 		    arg, timeout);
 	}
@@ -850,6 +856,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 
 	qedf = lport_priv(lport);
 
+	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3205,6 +3212,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 		init_completion(&qedf->fipvlan_compl);
 		mutex_init(&qedf->stats_mutex);
 		mutex_init(&qedf->flush_mutex);
+		qedf->flogi_pending = 0;
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3235,6 +3243,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	/* Set a default prio in case DCBX doesn't converge */
 	if (qedf_default_prio > -1) {
@@ -3770,6 +3779,20 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
 }
 
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, stag_work.work);
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL\n");
+		return;
+	}
+	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+	qedf_ctx_soft_reset(qedf->lport);
+}
+
 static void qedf_shutdown(struct pci_dev *pdev)
 {
 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
-- 
1.8.3.1


