netdev.vger.kernel.org archive mirror
* [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes.
@ 2020-03-26  7:07 Saurav Kashyap
  2020-03-26  7:07 ` [PATCH 1/8] qedf: Keep track of num of pending flogi Saurav Kashyap
                   ` (7 more replies)
  0 siblings, 8 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:07 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

Hi Martin,

Kindly apply this series to the scsi tree at your earliest convenience.

Thanks,
~Saurav
 
Chad Dupuis (2):
  qedf: Add schedule recovery handler.
  qedf: Fix crash when MFW calls for protocol stats while function is
    still probing.

Javed Hasan (1):
  qedf: Fix for the deviations from the SAM-4 spec.

Saurav Kashyap (4):
  qedf: Keep track of num of pending flogi.
  qedf: Implement callback for bw_update.
  qedf: Get dev info after updating the params.
  qedf: Update the driver version to 8.42.3.5.

Sudarsana Reddy Kalluru (1):
  qed: Send BW update notifications to the protocol drivers.

 drivers/net/ethernet/qlogic/qed/qed.h      |   1 +
 drivers/net/ethernet/qlogic/qed/qed_main.c |   9 ++
 drivers/scsi/qedf/qedf.h                   |   8 +-
 drivers/scsi/qedf/qedf_io.c                |  47 +++++++---
 drivers/scsi/qedf/qedf_main.c              | 133 ++++++++++++++++++++++++++++-
 drivers/scsi/qedf/qedf_version.h           |   4 +-
 include/linux/qed/qed_if.h                 |   1 +
 7 files changed, 186 insertions(+), 17 deletions(-)

-- 
1.8.3.1



* [PATCH 1/8] qedf: Keep track of num of pending flogi.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
@ 2020-03-26  7:07 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 2/8] qedf: Fix for the deviations from the SAM-4 spec Saurav Kashyap
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:07 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

- Problem: the port does not come back up after being brought down for a
  long duration.
- Sequence: bring the port down from the switch.
- Wait for the fipvlan retries to be exhausted; the driver falls back to
  the default VLAN (1002) and calls fcoe_ctlr_link_up().
- libfc/fcoe starts sending FLOGI.
- Bring the port back up; the switch discards the FLOGI because the VLAN
  is different.
- Fix: keep track of the number of pending FLOGIs and, once it crosses a
  threshold, perform a context reset so that FIP VLAN discovery runs
  again (a short sketch follows).
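
In sketch form (the actual hunks follow), the FLOGI send path counts
consecutive unanswered attempts and schedules the deferred stag work once
a threshold is crossed; QEDF_FLOGI_RETRY_CNT is assumed to be defined
elsewhere in qedf.h, as its definition is not part of these hunks:

	/* Sketch only: in the FLOGI send path */
	if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
		/* Too many FLOGIs without an LS_ACC: trigger a soft
		 * context reset, which re-runs FIP VLAN discovery.
		 */
		schedule_delayed_work(&qedf->stag_work, 2);
		return NULL;
	}
	qedf->flogi_pending++;
	/* flogi_pending is reset to 0 when an LS_ACC is received */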

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf.h      |  2 ++
 drivers/scsi/qedf/qedf_main.c | 23 +++++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399f..042ebf6 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -388,6 +388,7 @@ struct qedf_ctx {
 	mempool_t *io_mempool;
 	struct workqueue_struct *dpc_wq;
 	struct delayed_work grcdump_work;
+	struct delayed_work stag_work;
 
 	u32 slow_sge_ios;
 	u32 fast_sge_ios;
@@ -403,6 +404,7 @@ struct qedf_ctx {
 
 	u32 flogi_cnt;
 	u32 flogi_failed;
+	u32 flogi_pending;
 
 	/* Used for fc statistics */
 	struct mutex stats_mutex;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e..ee468102 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -282,6 +282,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
 		/* Set the source MAC we will use for FCoE traffic */
 		qedf_set_data_src_addr(qedf, fp);
+		qedf->flogi_pending = 0;
 	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +308,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+			schedule_delayed_work(&qedf->stag_work, 2);
+			return NULL;
+		}
+		qedf->flogi_pending++;
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 		    arg, timeout);
 	}
@@ -850,6 +856,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 
 	qedf = lport_priv(lport);
 
+	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3205,6 +3212,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 		init_completion(&qedf->fipvlan_compl);
 		mutex_init(&qedf->stats_mutex);
 		mutex_init(&qedf->flush_mutex);
+		qedf->flogi_pending = 0;
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3235,6 +3243,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	/* Set a default prio in case DCBX doesn't converge */
 	if (qedf_default_prio > -1) {
@@ -3770,6 +3779,20 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
 }
 
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, stag_work.work);
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL");
+		return;
+	}
+	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+	qedf_ctx_soft_reset(qedf->lport);
+}
+
 static void qedf_shutdown(struct pci_dev *pdev)
 {
 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
-- 
1.8.3.1



* [PATCH 2/8] qedf: Fix for the deviations from the SAM-4 spec.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
  2020-03-26  7:07 ` [PATCH 1/8] qedf: Keep track of num of pending flogi Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 3/8] qed: Send BW update notifications to the protocol drivers Saurav Kashyap
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

From: Javed Hasan <jhasan@marvell.com>

- Raise the upper limit for the retry delay (QEDF_RETRY_DELAY_MAX) from
  20 seconds to 60 seconds.
- Log a message when I/O to the target is throttled, including the scope
  and retry delay time returned by the target as well as the delay
  actually enforced by the driver.
- Synchronize updates of fcport->retry_delay_timestamp between
  qedf_queuecommand() and qedf_scsi_completion() (a short sketch of the
  handling follows).
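
For reference, a minimal sketch of the status-qualifier handling described
above (not the exact driver code; the FCP_RSP retry delay timer field is
assumed to carry the SAM-4 scope in its upper two bits and the qualifier,
in units of 100 ms, in its lower 14 bits):

	u16 rd_tmr    = fcp_rsp->retry_delay_timer;
	u16 scope     = rd_tmr >> 14;		/* upper 2 bits */
	u16 qualifier = rd_tmr & 0x3FFF;	/* lower 14 bits, 100 ms units */

	if ((scope == 1 || scope == 2) && qualifier > 0 && qualifier <= 0x3FEF) {
		if (qualifier > QEDF_RETRY_DELAY_MAX)	/* cap at 60 seconds */
			qualifier = QEDF_RETRY_DELAY_MAX;
		/* updated under fcport->rport_lock in the patch below */
		fcport->retry_delay_timestamp = jiffies + (qualifier * HZ / 10);
	}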

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Javed Hasan <jhasan@marvell.com>
---
 drivers/scsi/qedf/qedf.h    |  2 +-
 drivers/scsi/qedf/qedf_io.c | 47 +++++++++++++++++++++++++++++++++++----------
 2 files changed, 38 insertions(+), 11 deletions(-)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 042ebf6..aaa2ac9 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -470,7 +470,7 @@ static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
 extern uint qedf_io_tracing;
 extern uint qedf_stop_io_on_error;
 extern uint qedf_link_down_tmo;
-#define QEDF_RETRY_DELAY_MAX		20 /* 2 seconds */
+#define QEDF_RETRY_DELAY_MAX		600 /* 60 seconds */
 extern bool qedf_retry_delay;
 extern uint qedf_debug;
 
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e749a2d..f0f455e 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1021,14 +1021,18 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 	atomic_inc(&fcport->ios_to_queue);
 
 	if (fcport->retry_delay_timestamp) {
+		/* Take fcport->rport_lock for resetting the delay_timestamp */
+		spin_lock_irqsave(&fcport->rport_lock, flags);
 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
 			fcport->retry_delay_timestamp = 0;
 		} else {
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 			/* If retry_delay timer is active, flow off the ML */
 			rc = SCSI_MLQUEUE_TARGET_BUSY;
 			atomic_dec(&fcport->ios_to_queue);
 			goto exit_qcmd;
 		}
+		spin_unlock_irqrestore(&fcport->rport_lock, flags);
 	}
 
 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1134,6 +1138,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	int refcount;
 	u16 scope, qualifier = 0;
 	u8 fw_residual_flag = 0;
+	unsigned long flags = 0;
+	u16 chk_scope = 0;
 
 	if (!io_req)
 		return;
@@ -1267,16 +1273,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 				/* Lower 14 bits */
 				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
 
-				if (qedf_retry_delay &&
-				    scope > 0 && qualifier > 0 &&
-				    qualifier <= 0x3FEF) {
-					/* Check we don't go over the max */
-					if (qualifier > QEDF_RETRY_DELAY_MAX)
-						qualifier =
-						    QEDF_RETRY_DELAY_MAX;
-					fcport->retry_delay_timestamp =
-					    jiffies + (qualifier * HZ / 10);
-				}
+				if (qedf_retry_delay)
+					chk_scope = 1;
 				/* Record stats */
 				if (io_req->cdb_status ==
 				    SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1285,35 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+		if (chk_scope == 1)
+			if ((scope == 1 || scope == 2) &&
+			    (qualifier > 0 && qualifier <= 0x3FEF)) {
+				/* Check we don't go over the max */
+				if (qualifier > QEDF_RETRY_DELAY_MAX) {
+					qualifier = QEDF_RETRY_DELAY_MAX;
+					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+						  "qualifier = %d\n",
+						  (fcp_rsp->retry_delay_timer &
+						  0x3FFF));
+				}
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+					  "Scope = %d and qualifier = %d",
+					  scope, qualifier);
+				/*  Take fcport->rport_lock to
+				 *  update the retry_delay_timestamp
+				 */
+				spin_lock_irqsave(&fcport->rport_lock, flags);
+				fcport->retry_delay_timestamp =
+					jiffies + (qualifier * HZ / 10);
+				spin_unlock_irqrestore(&fcport->rport_lock,
+						       flags);
+
+			} else {
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+					  "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+					  scope, qualifier);
+			}
 		break;
 	default:
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
-- 
1.8.3.1



* [PATCH 3/8] qed: Send BW update notifications to the protocol drivers.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
  2020-03-26  7:07 ` [PATCH 1/8] qedf: Keep track of num of pending flogi Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 2/8] qedf: Fix for the deviations from the SAM-4 spec Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 4/8] qedf: Implement callback for bw_update Saurav Kashyap
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

From: Sudarsana Reddy Kalluru <skalluru@marvell.com>

Management firmware (MFW) sends a notification whenever there is a change
in the bandwidth values. This patch adds driver support for passing this
information on to the upper-layer protocol drivers (e.g., qedf); a short
usage sketch follows.
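
As a usage sketch (patch 4/8 of this series adds the real qedf hook), an
upper-layer driver receives the notification by filling in the new op in
its qed_common_cb_ops and gets its registered context pointer back:

	/* Sketch only; see patch 4/8 for the actual qedf implementation. */
	static void qedf_bw_update(void *dev)
	{
		/* dev is the cookie the driver registered (its qedf_ctx);
		 * re-read the link parameters and refresh the advertised
		 * speed here.
		 */
	}

	static struct qed_fcoe_cb_ops qedf_cb_ops = {
		{
			.bw_update = qedf_bw_update,
		},
	};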

Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
---
 drivers/net/ethernet/qlogic/qed/qed.h      | 1 +
 drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +++++++++
 include/linux/qed/qed_if.h                 | 1 +
 3 files changed, 11 insertions(+)

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fa41bf0..d006639 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -1016,6 +1016,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
 int qed_fill_dev_info(struct qed_dev *cdev,
 		      struct qed_dev_info *dev_info);
 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
 u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 		   u32 input_len, u8 *input_buf,
 		   u32 max_size, u8 *unzip_buf);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2c189c6..8d82d65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1949,6 +1949,15 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
 		op->link_update(cookie, &if_link);
 }
 
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+	void *cookie = hwfn->cdev->ops_cookie;
+	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+		op->bw_update(cookie);
+}
+
 static int qed_drain(struct qed_dev *cdev)
 {
 	struct qed_hwfn *hwfn;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8f29e0d..c495637 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -817,6 +817,7 @@ struct qed_common_cb_ops {
 	void	(*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
 	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
 	void (*get_protocol_tlv_data)(void *dev, void *data);
+	void (*bw_update)(void *dev);
 };
 
 struct qed_selftest_ops {
-- 
1.8.3.1



* [PATCH 4/8] qedf: Implement callback for bw_update.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
                   ` (2 preceding siblings ...)
  2020-03-26  7:08 ` [PATCH 3/8] qed: Send BW update notifications to the protocol drivers Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 5/8] qedf: Add schedule recovery handler Saurav Kashyap
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

This implements the bw_update common callback provided by qed.
It is invoked whenever there is a change in the bandwidth.

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf_main.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index ee468102..ba66216 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -509,6 +509,32 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
 	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
 }
 
+static void qedf_bw_update(void *dev)
+{
+	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+	struct qed_link_output link;
+
+	/* Get the latest status of the link */
+	qed_ops->common->get_link(qedf->cdev, &link);
+
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
+
+	if (link.link_up) {
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+			qedf_update_link_speed(qedf, &link);
+		else
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Ignore bw update, link is down.\n");
+
+	} else {
+		QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+	}
+}
+
 static void qedf_link_update(void *dev, struct qed_link_output *link)
 {
 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
@@ -635,6 +661,7 @@ static u32 qedf_get_login_failures(void *cookie)
 static struct qed_fcoe_cb_ops qedf_cb_ops = {
 	{
 		.link_update = qedf_link_update,
+		.bw_update = qedf_bw_update,
 		.dcbx_aen = qedf_dcbx_handler,
 		.get_generic_tlv_data = qedf_get_generic_tlv_data,
 		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
-- 
1.8.3.1



* [PATCH 5/8] qedf: Add schedule recovery handler.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
                   ` (3 preceding siblings ...)
  2020-03-26  7:08 ` [PATCH 4/8] qedf: Implement callback for bw_update Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26 18:34   ` David Miller
  2020-03-26  7:08 ` [PATCH 6/8] qedf: Fix crash when MFW calls for protocol stats while function is still probing Saurav Kashyap
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

From: Chad Dupuis <cdupuis@marvell.com>

- Add a recovery handler; it is triggered by qed through the new
  schedule_recovery_handler callback.

Signed-off-by: Chad Dupuis <cdupuis@marvell.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf.h      |  3 +++
 drivers/scsi/qedf/qedf_main.c | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index aaa2ac9..a5134c7 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -387,6 +387,7 @@ struct qedf_ctx {
 #define QEDF_IO_WORK_MIN		64
 	mempool_t *io_mempool;
 	struct workqueue_struct *dpc_wq;
+	struct delayed_work recovery_work;
 	struct delayed_work grcdump_work;
 	struct delayed_work stag_work;
 
@@ -527,6 +528,8 @@ extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
 	struct fcoe_cqe *cqe);
 extern void qedf_restart_rport(struct qedf_rport *fcport);
+void qedf_schedule_recovery_handler(void *dev);
+void qedf_recovery_handler(struct work_struct *work);
 extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
 extern int qedf_post_io_req(struct qedf_rport *fcport,
 	struct qedf_ioreq *io_req);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index ba66216..b3fa21a 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -662,6 +662,7 @@ static u32 qedf_get_login_failures(void *cookie)
 	{
 		.link_update = qedf_link_update,
 		.bw_update = qedf_bw_update,
+		.schedule_recovery_handler = qedf_schedule_recovery_handler,
 		.dcbx_aen = qedf_dcbx_handler,
 		.get_generic_tlv_data = qedf_get_generic_tlv_data,
 		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -3510,6 +3511,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 		    qedf->lport->host->host_no);
 		qedf->dpc_wq = create_workqueue(host_buf);
 	}
+	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
 
 	/*
 	 * GRC dump and sysfs parameters are not reaped during the recovery
@@ -3825,6 +3827,45 @@ static void qedf_shutdown(struct pci_dev *pdev)
 	__qedf_remove(pdev, QEDF_MODE_NORMAL);
 }
 
+/*
+ * Recovery handler code
+ */
+void qedf_schedule_recovery_handler(void *dev)
+{
+	struct qedf_ctx *qedf = dev;
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+	schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+void qedf_recovery_handler(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, recovery_work.work);
+
+	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+		return;
+
+	/*
+	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
+	 * any PCI transactions.
+	 */
+	qed_ops->common->recovery_prolog(qedf->cdev);
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+	/*
+	 * Reset link and dcbx to down state since we will not get a link down
+	 * event from the MFW but calling __qedf_remove will essentially be a
+	 * link down event.
+	 */
+	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
 /* Generic TLV data callback */
 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
 {
-- 
1.8.3.1



* [PATCH 6/8] qedf: Fix crash when MFW calls for protocol stats while function is still probing.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
                   ` (4 preceding siblings ...)
  2020-03-26  7:08 ` [PATCH 5/8] qedf: Add schedule recovery handler Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 7/8] qedf: Get dev info after updating the params Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 8/8] qedf: Update the driver version to 8.42.3.5 Saurav Kashyap
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

From: Chad Dupuis <cdupuis@marvell.com>

The MFW may issue a call into qed, and then into qedf, for protocol
statistics while the function is still probing.  If this happens, some
members of struct qedf_ctx may not yet be fully initialized, which can
result in a NULL pointer dereference or a general protection fault.

To prevent this, add a new flag called QEDF_PROBING and set it while
__qedf_probe() is active. qedf_get_protocol_tlv_data() can then check
whether the function is still probing and return immediately, before any
uninitialized structures are touched (a short sketch follows).
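
In sketch form, the guard described above works as follows (the full
hunks are below):

	/* __qedf_probe(), both the normal and the recovery path */
	set_bit(QEDF_PROBING, &qedf->flags);
	/* ... allocate and initialize the qedf_ctx members ... */
	clear_bit(QEDF_PROBING, &qedf->flags);

	/* qedf_get_protocol_tlv_data(), called from qed on behalf of the MFW */
	if (test_bit(QEDF_PROBING, &qedf->flags))
		return;		/* stats request raced with the probe; ignore it */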

Signed-off-by: Chad Dupuis <cdupuis@marvell.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf.h      |  1 +
 drivers/scsi/qedf/qedf_main.c | 35 +++++++++++++++++++++++++++++++----
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index a5134c7..951425b 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -355,6 +355,7 @@ struct qedf_ctx {
 #define QEDF_GRCDUMP_CAPTURE		4
 #define QEDF_IN_RECOVERY		5
 #define QEDF_DBG_STOP_IO		6
+#define QEDF_PROBING			8
 	unsigned long flags; /* Miscellaneous state flags */
 	int fipvlan_retries;
 	u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b3fa21a..bbad015 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3196,7 +3196,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 {
 	int rc = -EINVAL;
 	struct fc_lport *lport;
-	struct qedf_ctx *qedf;
+	struct qedf_ctx *qedf = NULL;
 	struct Scsi_Host *host;
 	bool is_vf = false;
 	struct qed_ll2_params params;
@@ -3226,6 +3226,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 		/* Initialize qedf_ctx */
 		qedf = lport_priv(lport);
+		set_bit(QEDF_PROBING, &qedf->flags);
 		qedf->lport = lport;
 		qedf->ctlr.lp = lport;
 		qedf->pdev = pdev;
@@ -3250,9 +3251,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	} else {
 		/* Init pointers during recovery */
 		qedf = pci_get_drvdata(pdev);
+		set_bit(QEDF_PROBING, &qedf->flags);
 		lport = qedf->lport;
 	}
 
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
 	host = lport->host;
 
 	/* Allocate mempool for qedf_io_work structs */
@@ -3559,6 +3563,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	else
 		fc_fabric_login(lport);
 
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+	clear_bit(QEDF_PROBING, &qedf->flags);
+
 	/* All good */
 	return 0;
 
@@ -3584,6 +3592,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 err1:
 	scsi_host_put(lport->host);
 err0:
+	if (qedf) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+		clear_bit(QEDF_PROBING, &qedf->flags);
+	}
 	return rc;
 }
 
@@ -3733,11 +3746,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
 {
 	struct qedf_ctx *qedf = dev;
 	struct qed_mfw_tlv_fcoe *fcoe = data;
-	struct fc_lport *lport = qedf->lport;
-	struct Scsi_Host *host = lport->host;
-	struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+	struct fc_lport *lport;
+	struct Scsi_Host *host;
+	struct fc_host_attrs *fc_host;
 	struct fc_host_statistics *hst;
 
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is null.\n");
+		return;
+	}
+
+	if (test_bit(QEDF_PROBING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+		return;
+	}
+
+	lport = qedf->lport;
+	host = lport->host;
+	fc_host = shost_to_fc_host(host);
+
 	/* Force a refresh of the fc_host stats including offload stats */
 	hst = qedf_fc_get_host_stats(host);
 
-- 
1.8.3.1



* [PATCH 7/8] qedf: Get dev info after updating the params.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
                   ` (5 preceding siblings ...)
  2020-03-26  7:08 ` [PATCH 6/8] qedf: Fix crash when MFW calls for protocol stats while function is still probing Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  2020-03-26  7:08 ` [PATCH 8/8] qedf: Update the driver version to 8.42.3.5 Saurav Kashyap
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

- Retrieve the device info only after the PF params have been updated.

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf_main.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index bbad015..49e6d12 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3330,6 +3330,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
 
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+		goto err2;
+	}
+
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
 	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
-- 
1.8.3.1



* [PATCH 8/8] qedf: Update the driver version to 8.42.3.5.
  2020-03-26  7:07 [PATCH 0/8] qed/qedf: Firmware recovery, bw update and misc fixes Saurav Kashyap
                   ` (6 preceding siblings ...)
  2020-03-26  7:08 ` [PATCH 7/8] qedf: Get dev info after updating the params Saurav Kashyap
@ 2020-03-26  7:08 ` Saurav Kashyap
  7 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-26  7:08 UTC (permalink / raw)
  To: martin.petersen; +Cc: GR-QLogic-Storage-Upstream, linux-scsi, netdev

- Update version to 8.42.3.5.

Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
---
 drivers/scsi/qedf/qedf_version.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index b0e37af..7661a5d 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -4,9 +4,9 @@
  *  Copyright (c) 2016-2018 Cavium Inc.
  */
 
-#define QEDF_VERSION		"8.42.3.0"
+#define QEDF_VERSION		"8.42.3.5"
 #define QEDF_DRIVER_MAJOR_VER		8
 #define QEDF_DRIVER_MINOR_VER		42
 #define QEDF_DRIVER_REV_VER		3
-#define QEDF_DRIVER_ENG_VER		0
+#define QEDF_DRIVER_ENG_VER		5
 
-- 
1.8.3.1



* Re: [PATCH 5/8] qedf: Add schedule recovery handler.
  2020-03-26  7:08 ` [PATCH 5/8] qedf: Add schedule recovery handler Saurav Kashyap
@ 2020-03-26 18:34   ` David Miller
  2020-03-27  4:32     ` [EXT] " Saurav Kashyap
  0 siblings, 1 reply; 11+ messages in thread
From: David Miller @ 2020-03-26 18:34 UTC (permalink / raw)
  To: skashyap; +Cc: martin.petersen, GR-QLogic-Storage-Upstream, linux-scsi, netdev

From: Saurav Kashyap <skashyap@marvell.com>
Date: Thu, 26 Mar 2020 00:08:03 -0700

> --- a/drivers/scsi/qedf/qedf_main.c
> +++ b/drivers/scsi/qedf/qedf_main.c
> @@ -3825,6 +3827,45 @@ static void qedf_shutdown(struct pci_dev *pdev)
>  	__qedf_remove(pdev, QEDF_MODE_NORMAL);
>  }
>  
> +/*
> + * Recovery handler code
> + */
> +void qedf_schedule_recovery_handler(void *dev)
 ...
> +void qedf_recovery_handler(struct work_struct *work)

These two functions are not referenced outside of this file, mark them
static.
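
(For illustration only: the requested v2 change would presumably mark
both definitions static in qedf_main.c, add static forward declarations
where needed within that file, and drop the prototypes from qedf.h,
along the lines of:

	static void qedf_schedule_recovery_handler(void *dev);
	static void qedf_recovery_handler(struct work_struct *work);
)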


* RE: [EXT] Re: [PATCH 5/8] qedf: Add schedule recovery handler.
  2020-03-26 18:34   ` David Miller
@ 2020-03-27  4:32     ` Saurav Kashyap
  0 siblings, 0 replies; 11+ messages in thread
From: Saurav Kashyap @ 2020-03-27  4:32 UTC (permalink / raw)
  To: David Miller
  Cc: martin.petersen, GR-QLogic-Storage-Upstream, linux-scsi, netdev

Hi David,

> -----Original Message-----
> From: David Miller <davem@davemloft.net>
> Sent: Friday, March 27, 2020 12:05 AM
> To: Saurav Kashyap <skashyap@marvell.com>
> Cc: martin.petersen@oracle.com; GR-QLogic-Storage-Upstream <GR-QLogic-
> Storage-Upstream@marvell.com>; linux-scsi@vger.kernel.org;
> netdev@vger.kernel.org
> Subject: [EXT] Re: [PATCH 5/8] qedf: Add schedule recovery handler.
> 
> From: Saurav Kashyap <skashyap@marvell.com>
> Date: Thu, 26 Mar 2020 00:08:03 -0700
> 
> > --- a/drivers/scsi/qedf/qedf_main.c
> > +++ b/drivers/scsi/qedf/qedf_main.c
> > @@ -3825,6 +3827,45 @@ static void qedf_shutdown(struct pci_dev
> *pdev)
> >  	__qedf_remove(pdev, QEDF_MODE_NORMAL);
> >  }
> >
> > +/*
> > + * Recovery handler code
> > + */
> > +void qedf_schedule_recovery_handler(void *dev)
>  ...
> > +void qedf_recovery_handler(struct work_struct *work)
> 
> These two functions are not referenced outside of this file, mark them
> static.

Thanks for the feedback; I will wait for other reviews and then submit a v2.

Thanks,
~Saurav

