From: Tyrel Datwyler <tyreld@linux.ibm.com>
To: james.bottomley@hansenpartnership.com
Cc: martin.petersen@oracle.com, linux-scsi@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org,
	brking@linux.ibm.com, Tyrel Datwyler <tyreld@linux.ibm.com>
Subject: [PATCH v3 15/18] ibmvfc: send Cancel MAD down each hw scsi channel
Date: Wed,  2 Dec 2020 20:08:03 -0600	[thread overview]
Message-ID: <20201203020806.14747-16-tyreld@linux.ibm.com> (raw)
In-Reply-To: <20201203020806.14747-1-tyreld@linux.ibm.com>

In general, the client needs to send Cancel MADs and task management
commands down the same channel as the command(s) it intends to cancel
or abort. The client assigns cancel keys per LUN and thus must send a
Cancel down each channel on which commands were submitted for that
LUN. Further, the client must then wait for those Cancel completions
before submitting a LUN RESET or ABORT TASK SET.

Add a cancel event pointer and cancel response IU storage to the
ibmvfc_sub_queue struct so that the cancel routine can assign a cancel
event to each applicable queue. When in legacy CRQ mode, treat the
single CRQ as a subqueue by using an ibmvfc_sub_queue struct allocated
on the stack. Wait for the completion of each submitted cancel.
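
For readers following the flow, the change boils down to a fan-out-then-wait
pattern: issue one Cancel per hardware queue that holds outstanding commands
for the LUN, then wait for all of them before any reset or abort may proceed.
The standalone userspace sketch below models only that pattern, with pthreads
standing in for ibmvfc events and their completions; every name in it is
invented for illustration and none of it is driver code.

/*
 * Illustrative sketch only, not driver code: model each hardware queue,
 * send a "cancel" (a thread here) down every queue that has a command
 * outstanding for the LUN, then wait for all submitted cancels to
 * complete before the reset/abort step is allowed to run.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_HWQ 4

struct hwq {
	int id;
	bool has_outstanding_cmd;	/* stands in for scanning the sent list */
	bool cancel_sent;
	pthread_t cancel_thread;
};

/* Stand-in for sending a Cancel and the firmware completing it. */
static void *issue_cancel(void *arg)
{
	struct hwq *q = arg;

	printf("queue %d: cancel completed\n", q->id);
	return NULL;
}

int main(void)
{
	struct hwq queues[NUM_HWQ] = {
		{ .id = 0, .has_outstanding_cmd = true  },
		{ .id = 1, .has_outstanding_cmd = false },
		{ .id = 2, .has_outstanding_cmd = true  },
		{ .id = 3, .has_outstanding_cmd = false },
	};
	int i;

	/* Step 1: send a cancel down every queue with work in flight. */
	for (i = 0; i < NUM_HWQ; i++) {
		if (!queues[i].has_outstanding_cmd)
			continue;
		queues[i].cancel_sent = true;
		pthread_create(&queues[i].cancel_thread, NULL,
			       issue_cancel, &queues[i]);
	}

	/* Step 2: wait for every submitted cancel before reset/abort. */
	for (i = 0; i < NUM_HWQ; i++)
		if (queues[i].cancel_sent)
			pthread_join(queues[i].cancel_thread, NULL);

	printf("all cancels done, safe to send LUN RESET / ABORT TASK SET\n");
	return 0;
}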

Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
---
 drivers/scsi/ibmvscsi/ibmvfc.c | 104 ++++++++++++++++++++++-----------
 drivers/scsi/ibmvscsi/ibmvfc.h |  38 ++++++------
 2 files changed, 90 insertions(+), 52 deletions(-)

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ec3db5a6baf3..e353b9e88104 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2339,67 +2339,103 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 {
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	struct ibmvfc_event *evt, *found_evt;
-	union ibmvfc_iu rsp;
-	int rsp_rc = -EBUSY;
+	struct ibmvfc_sub_queue *scrqs;
+	struct ibmvfc_sub_queue legacy_crq;
+	int rsp_rc = 0;
 	unsigned long flags;
 	u16 status;
+	int cancel_cnt = 0;
+	int num_hwq;
+	int ret = 0;
+	int i;
 
 	ENTER;
 	spin_lock_irqsave(vhost->host->host_lock, flags);
-	found_evt = NULL;
-	list_for_each_entry(evt, &vhost->sent, queue) {
-		if (evt->cmnd && evt->cmnd->device == sdev) {
-			found_evt = evt;
+	if (vhost->using_channels && vhost->scsi_scrqs.active_queues) {
+		num_hwq = vhost->scsi_scrqs.active_queues;
+		scrqs = vhost->scsi_scrqs.scrqs;
+	} else {
+		/* Use ibmvfc_sub_queue on the stack to fake legacy CRQ as a subqueue */
+		num_hwq = 1;
+		scrqs = &legacy_crq;
+	}
+
+	for (i = 0; i < num_hwq; i++) {
+		scrqs[i].cancel_event = NULL;
+		found_evt = NULL;
+		list_for_each_entry(evt, &vhost->sent, queue) {
+			if (evt->cmnd && evt->cmnd->device == sdev && evt->hwq == i) {
+				found_evt = evt;
+				cancel_cnt++;
+				break;
+			}
+		}
+
+		if (!found_evt)
+			continue;
+
+		if (vhost->logged_in) {
+			scrqs[i].cancel_event = ibmvfc_init_tmf(vhost, sdev, type);
+			scrqs[i].cancel_event->hwq = i;
+			scrqs[i].cancel_event->sync_iu = &scrqs[i].cancel_rsp;
+			rsp_rc = ibmvfc_send_event(scrqs[i].cancel_event, vhost, default_timeout);
+			if (rsp_rc)
+				break;
+		} else {
+			rsp_rc = -EBUSY;
 			break;
 		}
 	}
 
-	if (!found_evt) {
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (!cancel_cnt) {
 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		return 0;
 	}
 
-	if (vhost->logged_in) {
-		evt = ibmvfc_init_tmf(vhost, sdev, type);
-		evt->sync_iu = &rsp;
-		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
-	}
-
-	spin_unlock_irqrestore(vhost->host->host_lock, flags);
-
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
 		/* If failure is received, the host adapter is most likely going
 		 through reset, return success so the caller will wait for the command
 		 being cancelled to get returned */
-		return 0;
+		goto free_events;
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
 
-	wait_for_completion(&evt->comp);
-	status = be16_to_cpu(rsp.mad_common.status);
-	spin_lock_irqsave(vhost->host->host_lock, flags);
-	ibmvfc_free_event(evt);
-	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	for (i = 0; i < num_hwq; i++) {
+		if (!scrqs[i].cancel_event)
+			continue;
 
-	if (status != IBMVFC_MAD_SUCCESS) {
-		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
-		switch (status) {
-		case IBMVFC_MAD_DRIVER_FAILED:
-		case IBMVFC_MAD_CRQ_ERROR:
-			/* Host adapter most likely going through reset, return success to
-			 the caller will wait for the command being cancelled to get returned */
-			return 0;
-		default:
-			return -EIO;
-		};
+		wait_for_completion(&scrqs[i].cancel_event->comp);
+		status = be16_to_cpu(scrqs[i].cancel_rsp.mad_common.status);
+
+		if (status != IBMVFC_MAD_SUCCESS) {
+			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+			switch (status) {
+			case IBMVFC_MAD_DRIVER_FAILED:
+			case IBMVFC_MAD_CRQ_ERROR:
+				/* Host adapter most likely going through reset, return success to
+				 the caller will wait for the command being cancelled to get returned */
+				goto free_events;
+			default:
+				ret = -EIO;
+				goto free_events;
+			};
+		}
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
-	return 0;
+free_events:
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	for (i = 0; i < num_hwq; i++)
+		if (scrqs[i].cancel_event)
+			ibmvfc_free_event(scrqs[i].cancel_event);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	return ret;
 }
 
 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index e0ffb0416223..980eb9afe93a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -584,6 +584,24 @@ struct ibmvfc_connection_info {
 	__be64 reserved[16];
 } __packed __aligned(8);
 
+union ibmvfc_iu {
+	struct ibmvfc_mad_common mad_common;
+	struct ibmvfc_npiv_login_mad npiv_login;
+	struct ibmvfc_npiv_logout_mad npiv_logout;
+	struct ibmvfc_discover_targets discover_targets;
+	struct ibmvfc_port_login plogi;
+	struct ibmvfc_process_login prli;
+	struct ibmvfc_move_login move_login;
+	struct ibmvfc_query_tgt query_tgt;
+	struct ibmvfc_implicit_logout implicit_logout;
+	struct ibmvfc_tmf tmf;
+	struct ibmvfc_cmd cmd;
+	struct ibmvfc_passthru_mad passthru;
+	struct ibmvfc_channel_enquiry channel_enquiry;
+	struct ibmvfc_channel_setup_mad channel_setup;
+	struct ibmvfc_connection_info connection_info;
+} __packed __aligned(8);
+
 struct ibmvfc_trace_start_entry {
 	u32 xfer_len;
 } __packed;
@@ -666,6 +684,8 @@ struct ibmvfc_sub_queue {
 	dma_addr_t msg_token;
 	int size, cur;
 	struct ibmvfc_host *vhost;
+	struct ibmvfc_event *cancel_event;
+	union ibmvfc_iu cancel_rsp;
 	unsigned long cookie;
 	unsigned long vios_cookie;
 	unsigned long hw_irq;
@@ -712,24 +732,6 @@ struct ibmvfc_async_crq_queue {
 	dma_addr_t msg_token;
 };
 
-union ibmvfc_iu {
-	struct ibmvfc_mad_common mad_common;
-	struct ibmvfc_npiv_login_mad npiv_login;
-	struct ibmvfc_npiv_logout_mad npiv_logout;
-	struct ibmvfc_discover_targets discover_targets;
-	struct ibmvfc_port_login plogi;
-	struct ibmvfc_process_login prli;
-	struct ibmvfc_move_login move_login;
-	struct ibmvfc_query_tgt query_tgt;
-	struct ibmvfc_implicit_logout implicit_logout;
-	struct ibmvfc_tmf tmf;
-	struct ibmvfc_cmd cmd;
-	struct ibmvfc_passthru_mad passthru;
-	struct ibmvfc_channel_enquiry channel_enquiry;
-	struct ibmvfc_channel_setup_mad channel_setup;
-	struct ibmvfc_connection_info connection_info;
-} __packed __aligned(8);
-
 enum ibmvfc_target_action {
 	IBMVFC_TGT_ACTION_NONE = 0,
 	IBMVFC_TGT_ACTION_INIT,
-- 
2.27.0


Thread overview: 27+ messages
2020-12-03  2:07 [PATCH v3 00/18] ibmvfc: initial MQ development Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 01/18] ibmvfc: add vhost fields and defaults for MQ enablement Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 02/18] ibmvfc: define hcall wrapper for registering a Sub-CRQ Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 03/18] ibmvfc: add Subordinate CRQ definitions Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 04/18] ibmvfc: add alloc/dealloc routines for SCSI Sub-CRQ Channels Tyrel Datwyler
2020-12-04 14:47   ` Brian King
2020-12-05  0:15     ` Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 05/18] ibmvfc: add Sub-CRQ IRQ enable/disable routine Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 06/18] ibmvfc: add handlers to drain and complete Sub-CRQ responses Tyrel Datwyler
2020-12-04 14:51   ` Brian King
2020-12-05  0:16     ` Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 07/18] ibmvfc: define Sub-CRQ interrupt handler routine Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 08/18] ibmvfc: map/request irq and register Sub-CRQ interrupt handler Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 09/18] ibmvfc: implement channel enquiry and setup commands Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 10/18] ibmvfc: advertise client support for using hardware channels Tyrel Datwyler
2020-12-03  2:07 ` [PATCH v3 11/18] ibmvfc: set and track hw queue in ibmvfc_event struct Tyrel Datwyler
2020-12-03  2:08 ` [PATCH v3 12/18] ibmvfc: send commands down HW Sub-CRQ when channelized Tyrel Datwyler
2020-12-03  2:08 ` [PATCH v3 13/18] ibmvfc: register Sub-CRQ handles with VIOS during channel setup Tyrel Datwyler
2020-12-03  2:08 ` [PATCH v3 14/18] ibmvfc: add cancel mad initialization helper Tyrel Datwyler
2020-12-03  2:08 ` Tyrel Datwyler [this message]
2020-12-04 21:26   ` [PATCH v3 15/18] ibmvfc: send Cancel MAD down each hw scsi channel Brian King
2020-12-03  2:08 ` [PATCH v3 16/18] ibmvfc: enable MQ and set reasonable defaults Tyrel Datwyler
2020-12-03  2:08 ` [PATCH v3 17/18] ibmvfc: provide modules parameters for MQ settings Tyrel Datwyler
2020-12-04 21:28   ` Brian King
2020-12-03  2:08 ` [PATCH v3 18/18] ibmvfc: drop host lock when completing commands in CRQ Tyrel Datwyler
2020-12-04 21:35   ` Brian King
2020-12-05  0:20     ` Tyrel Datwyler
