From: Avri Altman <avri.altman@wdc.com>
To: "James E . J . Bottomley" <jejb@linux.vnet.ibm.com>,
	"Martin K . Petersen" <martin.petersen@oracle.com>,
	linux-scsi@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Bart Van Assche <bvanassche@acm.org>,
	alim.akhtar@samsung.com, asutoshd@codeaurora.org,
	Zang Leigang <zangleigang@hisilicon.com>,
	Avi Shchislowski <avi.shchislowski@wdc.com>,
	Bean Huo <beanhuo@micron.com>,
	cang@codeaurora.org, stanley.chu@mediatek.com,
	MOHAMMED RAFIQ KAMAL BASHA <md.rafiq@samsung.com>,
	Sang-yoon Oh <sangyoon.oh@samsung.com>,
	yongmyung lee <ymhungry.lee@samsung.com>,
	Jinyoung CHOI <j-young.choi@samsung.com>,
	Avri Altman <avri.altman@wdc.com>
Subject: [RFC PATCH 04/13] scsi: ufs: ufshpb: Init part II - Attach scsi device
Date: Fri, 15 May 2020 13:30:05 +0300	[thread overview]
Message-ID: <1589538614-24048-5-git-send-email-avri.altman@wdc.com> (raw)
In-Reply-To: <1589538614-24048-1-git-send-email-avri.altman@wdc.com>

The UFS boot process essentially comprises two parts: first a handshake
with the device, and then a SCSI scan that assigns a scsi device to each
LUN.  The latter, although running asynchronously, happens right after
the device configuration has been read - LUN by LUN.

By now we have read the device's HPB configuration and are ready to
attach a scsi device to our HPB LUNs.  A natural point to do so is while
SCSI performs its .slave_alloc() or .slave_configure() callback.
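
Purely as an illustration, here is a minimal sketch of what that attach
step boils down to (hpb_bind_handler is a made-up name for this sketch;
scsi_device_get()/scsi_device_put() and scsi_dh_attach()/scsi_dh_activate()
are the existing scsi core and scsi_dh APIs - see the hunks below for the
actual code):

	static void hpb_bind_handler(struct ufs_hba *hba, struct scsi_device *sdev)
	{
		int ret;

		/* pin the sdev while we attach a handler to its request queue */
		if (scsi_device_get(sdev))
			return;

		/* bind the "ufshpb" device handler to this lun ... */
		ret = scsi_dh_attach(sdev->request_queue, "ufshpb");
		if (!ret)
			/* ... then activate it; no completion callback is needed */
			ret = scsi_dh_activate(sdev->request_queue, NULL, NULL);

		scsi_device_put(sdev);
		if (ret)
			dev_err(hba->dev, "HPB handler attach failed for lun %llu\n",
				sdev->lun);
	}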

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/scsi/ufs/ufshcd.c |   3 ++
 drivers/scsi/ufs/ufshpb.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/ufs/ufshpb.h |   3 ++
 3 files changed, 109 insertions(+)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index bffe699..c2011bf 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4628,6 +4628,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
 
 	ufshcd_get_lu_power_on_wp_status(hba, sdev);
 
+	if (ufshcd_is_hpb_supported(hba))
+		ufshpb_attach_sdev(hba, sdev);
+
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index e94e699..4a10e7b 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -7,7 +7,9 @@
  */
 
 #include <asm/unaligned.h>
+#include <scsi/scsi_dh.h>
 #include <scsi/scsi_dh_ufshpb.h>
+#include "../scsi_priv.h"
 #include "ufshcd.h"
 #include "ufs.h"
 #include "ufshpb.h"
@@ -25,6 +27,7 @@ enum ufshpb_control_modes {
 
 struct ufshpb_lun {
 	u8 lun;
+	struct scsi_device *sdev;
 };
 
 
@@ -34,6 +37,91 @@ struct ufshpb_lun *ufshpb_luns;
 static unsigned long ufshpb_lun_map[BITS_TO_LONGS(UFSHPB_MAX_LUNS)];
 static u8 ufshpb_lun_lookup[UFSHPB_MAX_LUNS];
 
+void ufshpb_remove_lun(u8 lun)
+{
+	struct ufshpb_lun *hpb;
+
+	if (!ufshpb_luns)
+		return;
+
+	hpb = ufshpb_luns + lun;
+	if (hpb->sdev && hpb->sdev->handler_data) {
+		if (scsi_device_get(hpb->sdev))
+			return;
+
+		scsi_dh_release_device(hpb->sdev);
+		scsi_device_put(hpb->sdev);
+	}
+}
+
+/**
+ * ufshpb_attach_sdev - attach and activate a hpb device handler
+ * @hba: per adapter object
+ * @sdev: scsi device that owns that handler
+ *
+ * Called during .slave_alloc(), and after ufshpb_probe() read the device hpb
+ * configuration.
+ */
+void ufshpb_attach_sdev(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+	struct ufshpb_lun *hpb_lun;
+	struct ufshpb_dh_data {
+		struct ufshpb_config *c;
+		struct ufshpb_lun_config *lc;
+	} h;
+	int ret = -EINVAL;
+	u8 lun = sdev->lun;
+	u8 i;
+
+	/* ignore w-luns as those can't be hpb luns */
+	if (lun >= UFSHPB_MAX_LUNS)
+		return;
+
+	/* ignore non-hpb luns */
+	if (!test_bit(lun, ufshpb_lun_map))
+		return;
+
+	i = ufshpb_lun_lookup[lun];
+
+	if (sdev->handler && sdev->handler_data) {
+		dev_err(hba->dev, "trying to re-attach lun %d ?\n", lun);
+		goto out;
+	}
+
+	if (!ufshpb_luns) {
+		dev_err(hba->dev, "HPB was already removed\n");
+		goto out;
+	}
+
+	if (scsi_device_get(sdev)) {
+		dev_err(hba->dev, "failed to get sdev for lun %d\n", lun);
+		goto out;
+	}
+
+	hpb_lun = ufshpb_luns + i;
+	hpb_lun->sdev = sdev;
+
+	ret = scsi_dh_attach(sdev->request_queue, "ufshpb");
+	if (ret || !sdev->handler || !sdev->handler_data)
+		goto put_scsi_dev;
+
+	h.c = ufshpb_conf;
+	h.lc = ufshpb_luns_conf + i;
+
+	memcpy(sdev->handler_data, &h, sizeof(h));
+
+	ret = scsi_dh_activate(sdev->request_queue, NULL, NULL);
+
+put_scsi_dev:
+	scsi_device_put(sdev);
+
+out:
+	if (ret) {
+		dev_err(hba->dev, "attach sdev to HPB lun %d failed\n", lun);
+		ufshpb_remove_lun(i);
+	}
+}
+
 /**
  * ufshpb_remove - ufshpb cleanup
  *
@@ -41,9 +129,24 @@ static u8 ufshpb_lun_lookup[UFSHPB_MAX_LUNS];
  */
 void ufshpb_remove(struct ufs_hba *hba)
 {
+	if (!ufshpb_conf)
+		goto remove_hpb;
+
+	if (ufshpb_luns) {
+		unsigned int num_hpb_luns = ufshpb_conf->num_hpb_luns;
+		int i;
+
+		spin_lock(hba->host->host_lock);
+		for (i = 0; i < num_hpb_luns; i++)
+			ufshpb_remove_lun(i);
+		spin_unlock(hba->host->host_lock);
+	}
+
 	kfree(ufshpb_conf);
 	kfree(ufshpb_luns_conf);
 	kfree(ufshpb_luns);
+
+remove_hpb:
 	ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 			  QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
 }
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index ee990f4..276a749 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -14,9 +14,12 @@ struct ufs_hba;
 #ifdef CONFIG_SCSI_UFS_HPB
 void ufshpb_remove(struct ufs_hba *hba);
 int ufshpb_probe(struct ufs_hba *hba);
+void ufshpb_attach_sdev(struct ufs_hba *hba, struct scsi_device *sdev);
 #else
 static inline void ufshpb_remove(struct ufs_hba *hba) {}
 static inline int ufshpb_probe(struct ufs_hba *hba) { return 0; }
+static inline void
+ufshpb_attach_sdev(struct ufs_hba *hba, struct scsi_device *sdev) {}
 #endif
 
 #endif /* _UFSHPB_H */
-- 
2.7.4


Thread overview: 39+ messages
2020-05-15 10:30 [RFC PATCH 00/13] scsi: ufs: Add HPB Support Avri Altman
2020-05-15 10:30 ` [RFC PATCH 01/13] scsi: ufs: Add HPB parameters Avri Altman
2020-05-15 10:30 ` [RFC PATCH 02/13] scsi: ufshpb: Init part I - Read HPB config Avri Altman
2020-05-15 15:33   ` Randy Dunlap
2020-05-16  1:46   ` Bart Van Assche
2020-05-16  1:57   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 03/13] scsi: scsi_dh: Introduce scsi_dh_ufshpb Avri Altman
2020-05-16  1:48   ` Bart Van Assche
2020-05-15 10:30 ` Avri Altman [this message]
2020-05-16  1:52   ` [RFC PATCH 04/13] scsi: ufs: ufshpb: Init part II - Attach scsi device Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 05/13] scsi: ufs: ufshpb: Disable HPB if no HPB-enabled luns Avri Altman
2020-05-16  2:02   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 06/13] scsi: scsi_dh: ufshpb: Prepare for L2P cache management Avri Altman
2020-05-16  2:13   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 07/13] scsi: scsi_dh: ufshpb: Add ufshpb state machine Avri Altman
2020-05-16  2:44   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 08/13] scsi: dh: ufshpb: Activate pinned regions Avri Altman
2020-05-15 10:30 ` [RFC PATCH 09/13] scsi: ufshpb: Add response API Avri Altman
2020-05-16  3:06   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 10/13] scsi: dh: ufshpb: Add ufshpb_set_params Avri Altman
2020-05-15 10:30 ` [RFC PATCH 11/13] scsi: Allow device handler set their own CDB Avri Altman
2020-05-16  3:19   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 12/13] scsi: dh: ufshpb: Add prep_fn handler Avri Altman
2020-05-16  3:40   ` Bart Van Assche
2020-05-15 10:30 ` [RFC PATCH 13/13] scsi: scsi_dh: ufshpb: Add "Cold" subregions timer Avri Altman
2020-05-16  3:50 ` [RFC PATCH 00/13] scsi: ufs: Add HPB Support Bart Van Assche
2020-05-16  9:14   ` Avri Altman
2020-05-16 17:14     ` Bart Van Assche
     [not found]     ` <CGME20200516171420epcas2p108c570904c5117c3654d71e0a2842faa@epcms2p7>
2020-05-19 22:31       ` Another approach of UFSHPB yongmyung lee
2020-05-20 17:55         ` Christoph Hellwig
2020-05-20 21:19           ` Bart Van Assche
2020-05-22 16:35             ` Bart Van Assche
2020-05-22 16:49         ` Bart Van Assche
     [not found]         ` <CGME20200516171420epcas2p108c570904c5117c3654d71e0a2842faa@epcms2p4>
2020-05-25  5:40           ` Daejun Park
2020-05-25 14:56             ` Bart Van Assche
2020-05-26  6:15               ` Avri Altman
2020-05-26 17:03                 ` Bart Van Assche
     [not found]                 ` <CGME20200516171420epcas2p108c570904c5117c3654d71e0a2842faa@epcms2p3>
2020-05-27  9:11                   ` Daejun Park
2020-05-27 11:46                     ` Bean Huo
