linux-kernel.vger.kernel.org archive mirror
* [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling
       [not found] <1607877104-8916-1-git-send-email-cang@codeaurora.org>
@ 2020-12-13 16:31 ` Can Guo
  2020-12-15  5:43   ` Stanley Chu
  2020-12-16 13:31   ` Bean Huo
  2020-12-13 16:31 ` [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating() Can Guo
  2020-12-13 16:31 ` [PATCH v4 3/3] scsi: ufs: Revert "Make sure clk scaling happens only when HBA is runtime ACTIVE" Can Guo
  2 siblings, 2 replies; 9+ messages in thread
From: Can Guo @ 2020-12-13 16:31 UTC (permalink / raw)
  To: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, cang
  Cc: Alim Akhtar, Avri Altman, James E.J. Bottomley,
	Martin K. Petersen, Stanley Chu, Bean Huo, Bart Van Assche,
	Satya Tangirala, open list

In contexts like suspend, shutdown and error handling, we need to suspend
devfreq to make sure these contexts won't be disturbed by clock scaling.
However, suspending devfreq is not enough since users can still trigger
clock scaling by manipulating the clkscale_enable sysfs node and devfreq
sysfs nodes like min/max_freq and governor. Add one more flag in struct
clk_scaling so that these contexts can prevent clock scaling from being
invoked through the above sysfs nodes.

Signed-off-by: Can Guo <cang@codeaurora.org>
---
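Note for reviewers: the protection scheme added here boils down to the
sketch below (condensed from the diff that follows, not a literal excerpt;
the unblock/error paths are omitted). The existing clkscale_enable sysfs
knob now drives clk_scaling.is_enabled, while the new clk_scaling.is_allowed
flag is owned by the protected contexts and is checked under
clk_scaling_lock before any scaling starts:

  /* all scaling paths (devfreq and clkscale_enable) funnel through here */
  static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
  {
          int ret = 0;

          ufshcd_scsi_block_requests(hba);
          down_write(&hba->clk_scaling_lock);

          if (!hba->clk_scaling.is_allowed)       /* suspend/shutdown/eh in progress */
                  ret = -EAGAIN;
          else if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US))
                  ret = -EBUSY;
          ...
  }

  /* contexts that must not be disturbed (suspend, shutdown, error handling) */
          down_write(&hba->clk_scaling_lock);
          hba->clk_scaling.is_allowed = false;
          up_write(&hba->clk_scaling_lock);
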
 drivers/scsi/ufs/ufshcd.c | 83 +++++++++++++++++++++++++++++++----------------
 drivers/scsi/ufs/ufshcd.h |  2 ++
 2 files changed, 57 insertions(+), 28 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c148fc..4ccdd2b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1147,12 +1147,22 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 */
 	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->clk_scaling_lock);
-	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+	if (!hba->clk_scaling.is_allowed)
+		ret = -EAGAIN;
+	else if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US))
 		ret = -EBUSY;
+
+	if (ret) {
 		up_write(&hba->clk_scaling_lock);
 		ufshcd_scsi_unblock_requests(hba);
+		goto out;
 	}
 
+	/* let's not get into low power until clock scaling is completed */
+	ufshcd_hold(hba, false);
+
+out:
 	return ret;
 }
 
@@ -1160,6 +1170,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
 {
 	up_write(&hba->clk_scaling_lock);
 	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release(hba);
 }
 
 /**
@@ -1175,12 +1186,9 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
 	int ret = 0;
 
-	/* let's not get into low power until clock scaling is completed */
-	ufshcd_hold(hba, false);
-
 	ret = ufshcd_clock_scaling_prepare(hba);
 	if (ret)
-		goto out;
+		return ret;
 
 	/* scale down the gear before scaling down clocks */
 	if (!scale_up) {
@@ -1212,8 +1220,6 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 
 out_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
-out:
-	ufshcd_release(hba);
 	return ret;
 }
 
@@ -1487,7 +1493,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
 }
 
 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1496,12 +1502,20 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	u32 value;
 	int err;
+	unsigned long flags;
+	bool update = true;
 
 	if (kstrtou32(buf, 0, &value))
 		return -EINVAL;
 
 	value = !!value;
-	if (value == hba->clk_scaling.is_allowed)
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (value == hba->clk_scaling.is_enabled)
+		update = false;
+	else
+		hba->clk_scaling.is_enabled = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!update)
 		goto out;
 
 	pm_runtime_get_sync(hba->dev);
@@ -1510,8 +1524,6 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
-	hba->clk_scaling.is_allowed = value;
-
 	if (value) {
 		ufshcd_resume_clkscaling(hba);
 	} else {
@@ -1845,8 +1857,6 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
 		 hba->host->host_no);
 	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
-	ufshcd_clkscaling_init_sysfs(hba);
 }
 
 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
@@ -1854,6 +1864,8 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
+	if (hba->devfreq)
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 	destroy_workqueue(hba->clk_scaling.workq);
 	ufshcd_devfreq_remove(hba);
 }
@@ -1918,7 +1930,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 	if (!hba->clk_scaling.active_reqs++)
 		queue_resume_work = true;
 
-	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
 		return;
 
 	if (queue_resume_work)
@@ -4987,7 +4999,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 				complete(hba->dev_cmd.complete);
 			}
 		}
-		if (ufshcd_is_clkscaling_supported(hba))
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.active_reqs > 0)
 			hba->clk_scaling.active_reqs--;
 	}
 
@@ -5650,18 +5663,25 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_allowed) {
+		if (hba->clk_scaling.is_enabled) {
 			cancel_work_sync(&hba->clk_scaling.suspend_work);
 			cancel_work_sync(&hba->clk_scaling.resume_work);
 			ufshcd_suspend_clkscaling(hba);
 		}
 	}
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = false;
+	up_write(&hba->clk_scaling_lock);
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
-	if (hba->clk_scaling.is_allowed)
+
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = true;
+	up_write(&hba->clk_scaling_lock);
+	if (hba->clk_scaling.is_enabled)
 		ufshcd_resume_clkscaling(hba);
 	pm_runtime_put(hba->dev);
 }
@@ -7620,12 +7640,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 			sizeof(struct ufs_pa_layer_attr));
 		hba->clk_scaling.saved_pwr_info.is_valid = true;
 		if (!hba->devfreq) {
+			hba->clk_scaling.is_allowed = true;
 			ret = ufshcd_devfreq_init(hba);
 			if (ret)
 				goto out;
-		}
 
-		hba->clk_scaling.is_allowed = true;
+			hba->clk_scaling.is_enabled = true;
+			ufshcd_clkscaling_init_sysfs(hba);
+		}
 	}
 
 	ufs_bsg_probe(hba);
@@ -8491,11 +8513,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_allowed) {
+	if (hba->clk_scaling.is_enabled) {
 		cancel_work_sync(&hba->clk_scaling.suspend_work);
 		cancel_work_sync(&hba->clk_scaling.resume_work);
 		ufshcd_suspend_clkscaling(hba);
 	}
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = false;
+	up_write(&hba->clk_scaling_lock);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8592,8 +8617,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	goto out;
 
 set_link_active:
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
 	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 		ufshcd_set_link_active(hba);
@@ -8603,7 +8626,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	if (hba->clk_scaling.is_allowed)
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = true;
+	up_write(&hba->clk_scaling_lock);
+	if (hba->clk_scaling.is_enabled)
 		ufshcd_resume_clkscaling(hba);
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
@@ -8701,7 +8727,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.is_suspended = false;
 
-	if (hba->clk_scaling.is_allowed)
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = true;
+	up_write(&hba->clk_scaling_lock);
+	if (hba->clk_scaling.is_enabled)
 		ufshcd_resume_clkscaling(hba);
 
 	/* Enable Auto-Hibernate if configured */
@@ -8725,8 +8754,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_suspend_clkscaling(hba);
 	ufshcd_setup_clocks(hba, false);
 	if (ufshcd_is_clkgating_allowed(hba)) {
 		hba->clk_gating.state = CLKS_OFF;
@@ -8915,6 +8942,8 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 
 	pm_runtime_get_sync(hba->dev);
 
+	ufshcd_exit_clk_scaling(hba);
+
 	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 out:
 	if (ret)
@@ -8944,8 +8973,6 @@ void ufshcd_remove(struct ufs_hba *hba)
 
 	ufshcd_exit_clk_scaling(hba);
 	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_supported(hba))
-		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e0f00a4..9fcecba 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -382,6 +382,7 @@ struct ufs_saved_pwr_info {
  * @workq: workqueue to schedule devfreq suspend/resume work
  * @suspend_work: worker to suspend devfreq
  * @resume_work: worker to resume devfreq
+ * @is_enabled: tracks if scaling is currently enabled or not
  * @is_allowed: tracks if scaling is currently allowed or not
  * @is_busy_started: tracks if busy period has started or not
  * @is_suspended: tracks if devfreq is suspended or not
@@ -396,6 +397,7 @@ struct ufs_clk_scaling {
 	struct workqueue_struct *workq;
 	struct work_struct suspend_work;
 	struct work_struct resume_work;
+	bool is_enabled;
 	bool is_allowed;
 	bool is_busy_started;
 	bool is_suspended;
-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.



* [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating()
       [not found] <1607877104-8916-1-git-send-email-cang@codeaurora.org>
  2020-12-13 16:31 ` [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling Can Guo
@ 2020-12-13 16:31 ` Can Guo
  2020-12-15  5:55   ` Stanley Chu
  2020-12-16 14:18   ` Bean Huo
  2020-12-13 16:31 ` [PATCH v4 3/3] scsi: ufs: Revert "Make sure clk scaling happens only when HBA is runtime ACTIVE" Can Guo
  2 siblings, 2 replies; 9+ messages in thread
From: Can Guo @ 2020-12-13 16:31 UTC (permalink / raw)
  To: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, cang
  Cc: Alim Akhtar, Avri Altman, James E.J. Bottomley,
	Martin K. Petersen, Stanley Chu, Bean Huo, Bart Van Assche,
	Satya Tangirala, open list

ufshcd_hba_exit() is always called after ufshcd_exit_clk_scaling() and
ufshcd_exit_clk_gating(), so move ufshcd_exit_clk_scaling/gating() to
ufshcd_hba_exit().

Signed-off-by: Can Guo <cang@codeaurora.org>
---
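Note for reviewers: after this patch all tear-down funnels through
ufshcd_hba_exit(), and the new is_initialized flags make the clk
scaling/gating exit helpers safe to call from any error or removal path.
For reference, the resulting helper (condensed from the diff below) looks
like:

  static void ufshcd_hba_exit(struct ufs_hba *hba)
  {
          if (hba->is_powered) {
                  ufshcd_exit_clk_scaling(hba);   /* no-op unless clk_scaling.is_initialized */
                  ufshcd_exit_clk_gating(hba);    /* no-op unless clk_gating.is_initialized */
                  if (hba->eh_wq)
                          destroy_workqueue(hba->eh_wq);
                  ufshcd_variant_hba_exit(hba);
                  ufshcd_setup_vreg(hba, false);
                  ufshcd_setup_clocks(hba, false);
                  ufshcd_setup_hba_vreg(hba, false);
                  hba->is_powered = false;
          }
  }
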
 drivers/scsi/ufs/ufshcd.c | 32 +++++++++++++++-----------------
 drivers/scsi/ufs/ufshcd.h |  4 ++++
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4ccdd2b..80f82cb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1857,11 +1857,14 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
 		 hba->host->host_no);
 	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+	hba->clk_scaling.is_initialized = true;
 }
 
 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkscaling_supported(hba))
+	if (!ufshcd_is_clkscaling_supported(hba) ||
+	    !hba->clk_scaling.is_initialized)
 		return;
 
 	if (hba->devfreq)
@@ -1905,12 +1908,16 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	hba->clk_gating.enable_attr.attr.mode = 0644;
 	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
 		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+
+	hba->clk_gating.is_initialized = true;
 }
 
 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkgating_allowed(hba))
+	if (!ufshcd_is_clkgating_allowed(hba) ||
+	    !hba->clk_gating.is_initialized)
 		return;
+
 	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
@@ -7775,7 +7782,6 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 	 */
 	if (ret) {
 		pm_runtime_put_sync(hba->dev);
-		ufshcd_exit_clk_scaling(hba);
 		ufshcd_hba_exit(hba);
 	}
 }
@@ -8212,12 +8218,12 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
 static void ufshcd_hba_exit(struct ufs_hba *hba)
 {
 	if (hba->is_powered) {
+		ufshcd_exit_clk_scaling(hba);
+		ufshcd_exit_clk_gating(hba);
+		if (hba->eh_wq)
+			destroy_workqueue(hba->eh_wq);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
-		ufshcd_suspend_clkscaling(hba);
-		if (ufshcd_is_clkscaling_supported(hba))
-			if (hba->devfreq)
-				ufshcd_suspend_clkscaling(hba);
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -8966,13 +8972,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 	blk_mq_free_tag_set(&hba->tmf_tag_set);
 	blk_cleanup_queue(hba->cmd_queue);
 	scsi_remove_host(hba->host);
-	destroy_workqueue(hba->eh_wq);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba);
-
-	ufshcd_exit_clk_scaling(hba);
-	ufshcd_exit_clk_gating(hba);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -9171,7 +9173,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
-		goto exit_gating;
+		goto out_disable;
 	} else {
 		hba->is_irq_enabled = true;
 	}
@@ -9179,7 +9181,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	err = scsi_add_host(host, hba->dev);
 	if (err) {
 		dev_err(hba->dev, "scsi_add_host failed\n");
-		goto exit_gating;
+		goto out_disable;
 	}
 
 	hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
@@ -9262,10 +9264,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	blk_cleanup_queue(hba->cmd_queue);
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
-exit_gating:
-	ufshcd_exit_clk_scaling(hba);
-	ufshcd_exit_clk_gating(hba);
-	destroy_workqueue(hba->eh_wq);
 out_disable:
 	hba->is_irq_enabled = false;
 	ufshcd_hba_exit(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 9fcecba..3c57847 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -347,6 +347,7 @@ enum clk_gating_state {
  * @delay_attr: sysfs attribute to control delay_attr
  * @enable_attr: sysfs attribute to enable/disable clock gating
  * @is_enabled: Indicates the current status of clock gating
+ * @is_initialized: Indicates whether clock gating is initialized or not
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
  */
@@ -359,6 +360,7 @@ struct ufs_clk_gating {
 	struct device_attribute delay_attr;
 	struct device_attribute enable_attr;
 	bool is_enabled;
+	bool is_initialized;
 	int active_reqs;
 	struct workqueue_struct *clk_gating_workq;
 };
@@ -384,6 +386,7 @@ struct ufs_saved_pwr_info {
  * @resume_work: worker to resume devfreq
  * @is_enabled: tracks if scaling is currently enabled or not
  * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_initialized: Indicates whether clock scaling is initialized or not
  * @is_busy_started: tracks if busy period has started or not
  * @is_suspended: tracks if devfreq is suspended or not
  */
@@ -399,6 +402,7 @@ struct ufs_clk_scaling {
 	struct work_struct resume_work;
 	bool is_enabled;
 	bool is_allowed;
+	bool is_initialized;
 	bool is_busy_started;
 	bool is_suspended;
 };
-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.



* [PATCH v4 3/3] scsi: ufs: Revert "Make sure clk scaling happens only when HBA is runtime ACTIVE"
       [not found] <1607877104-8916-1-git-send-email-cang@codeaurora.org>
  2020-12-13 16:31 ` [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling Can Guo
  2020-12-13 16:31 ` [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating() Can Guo
@ 2020-12-13 16:31 ` Can Guo
  2020-12-15  6:15   ` Stanley Chu
  2 siblings, 1 reply; 9+ messages in thread
From: Can Guo @ 2020-12-13 16:31 UTC (permalink / raw)
  To: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, cang
  Cc: Alim Akhtar, Avri Altman, James E.J. Bottomley,
	Martin K. Petersen, Stanley Chu, Bean Huo, Bart Van Assche,
	open list

Commit 73cc291c27024 ("Make sure clk scaling happens only when HBA is
runtime ACTIVE") is no longer needed since commit f7a42540928a8 ("scsi:
ufs: Protect some contexts from unexpected clock scaling") is a more
mature fix that protects UFS LLD stability against clock scaling invoked
by users through sysfs nodes.

Signed-off-by: Can Guo <cang@codeaurora.org>
---
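Note for reviewers: with patch 1/3 of this series applied, the call chain
below already bails out whenever the HBA is suspending, suspended or in
error handling, which is what makes the pm_runtime check removed here
redundant (condensed sketch, not a literal excerpt):

  ufshcd_devfreq_target()
    -> ufshcd_devfreq_scale()
         -> ufshcd_clock_scaling_prepare()
                down_write(&hba->clk_scaling_lock);
                if (!hba->clk_scaling.is_allowed)
                        /* cleared by suspend/shutdown/eh and kept false
                         * while the HBA stays suspended */
                        ret = -EAGAIN;  /* unblock requests and bail out */
                ...
                /* otherwise hold the clocks and proceed with scaling */
                ufshcd_hold(hba, false);
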
 drivers/scsi/ufs/ufshcd.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 80f82cb..052479a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1300,15 +1300,8 @@ static int ufshcd_devfreq_target(struct device *dev,
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
-	pm_runtime_get_noresume(hba->dev);
-	if (!pm_runtime_active(hba->dev)) {
-		pm_runtime_put_noidle(hba->dev);
-		ret = -EAGAIN;
-		goto out;
-	}
 	start = ktime_get();
 	ret = ufshcd_devfreq_scale(hba, scale_up);
-	pm_runtime_put(hba->dev);
 
 	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 		(scale_up ? "up" : "down"),
-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.



* Re: [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling
  2020-12-13 16:31 ` [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling Can Guo
@ 2020-12-15  5:43   ` Stanley Chu
  2020-12-18  6:26     ` Can Guo
  2020-12-16 13:31   ` Bean Huo
  1 sibling, 1 reply; 9+ messages in thread
From: Stanley Chu @ 2020-12-15  5:43 UTC (permalink / raw)
  To: Can Guo
  Cc: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, Alim Akhtar, Avri Altman,
	James E.J. Bottomley, Martin K. Petersen, Bean Huo,
	Bart Van Assche, Satya Tangirala, open list

Hi Can,

On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
> In contexts like suspend, shutdown and error handling, we need to suspend
> devfreq to make sure these contexts won't be disturbed by clock scaling.
> However, suspending devfreq is not enough since users can still trigger a
> clock scaling by manipulating the sysfs node clkscale_enable and devfreq
> sysfs nodes like min/max_freq and governor. Add one more flag in struct
> clk_scaling such that these contexts can prevent clock scaling from being
> invoked through above sysfs nodes.
> 
> Signed-off-by: Can Guo <cang@codeaurora.org>
> ---
>  drivers/scsi/ufs/ufshcd.c | 83 +++++++++++++++++++++++++++++++----------------
>  drivers/scsi/ufs/ufshcd.h |  2 ++
>  2 files changed, 57 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
> index 0c148fc..4ccdd2b 100644
> --- a/drivers/scsi/ufs/ufshcd.c
> +++ b/drivers/scsi/ufs/ufshcd.c
> @@ -1147,12 +1147,22 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
>  	 */
>  	ufshcd_scsi_block_requests(hba);
>  	down_write(&hba->clk_scaling_lock);
> -	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
> +
> +	if (!hba->clk_scaling.is_allowed)
> +		ret = -EAGAIN;
> +	else if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US))
>  		ret = -EBUSY;
> +
> +	if (ret) {
>  		up_write(&hba->clk_scaling_lock);
>  		ufshcd_scsi_unblock_requests(hba);
> +		goto out;
>  	}
>  
> +	/* let's not get into low power until clock scaling is completed */
> +	ufshcd_hold(hba, false);
> +
> +out:
>  	return ret;
>  }
>  
> @@ -1160,6 +1170,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
>  {
>  	up_write(&hba->clk_scaling_lock);
>  	ufshcd_scsi_unblock_requests(hba);
> +	ufshcd_release(hba);
>  }
>  
>  /**
> @@ -1175,12 +1186,9 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
>  {
>  	int ret = 0;
>  
> -	/* let's not get into low power until clock scaling is completed */
> -	ufshcd_hold(hba, false);
> -
>  	ret = ufshcd_clock_scaling_prepare(hba);
>  	if (ret)
> -		goto out;
> +		return ret;
>  
>  	/* scale down the gear before scaling down clocks */
>  	if (!scale_up) {
> @@ -1212,8 +1220,6 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
>  
>  out_unprepare:
>  	ufshcd_clock_scaling_unprepare(hba);
> -out:
> -	ufshcd_release(hba);
>  	return ret;
>  }
>  
> @@ -1487,7 +1493,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
>  {
>  	struct ufs_hba *hba = dev_get_drvdata(dev);
>  
> -	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
> +	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
>  }
>  
>  static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
> @@ -1496,12 +1502,20 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
>  	struct ufs_hba *hba = dev_get_drvdata(dev);
>  	u32 value;
>  	int err;
> +	unsigned long flags;
> +	bool update = true;
>  
>  	if (kstrtou32(buf, 0, &value))
>  		return -EINVAL;
>  
>  	value = !!value;
> -	if (value == hba->clk_scaling.is_allowed)
> +	spin_lock_irqsave(hba->host->host_lock, flags);
> +	if (value == hba->clk_scaling.is_enabled)
> +		update = false;
> +	else
> +		hba->clk_scaling.is_enabled = value;
> +	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	if (!update)
>  		goto out;
>  
>  	pm_runtime_get_sync(hba->dev);
> @@ -1510,8 +1524,6 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
>  	cancel_work_sync(&hba->clk_scaling.suspend_work);
>  	cancel_work_sync(&hba->clk_scaling.resume_work);
>  
> -	hba->clk_scaling.is_allowed = value;
> -
>  	if (value) {
>  		ufshcd_resume_clkscaling(hba);
>  	} else {
> @@ -1845,8 +1857,6 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
>  	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
>  		 hba->host->host_no);
>  	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
> -
> -	ufshcd_clkscaling_init_sysfs(hba);
>  }
>  
>  static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
> @@ -1854,6 +1864,8 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
>  	if (!ufshcd_is_clkscaling_supported(hba))
>  		return;
>  
> +	if (hba->devfreq)
> +		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
>  	destroy_workqueue(hba->clk_scaling.workq);
>  	ufshcd_devfreq_remove(hba);
>  }
> @@ -1918,7 +1930,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
>  	if (!hba->clk_scaling.active_reqs++)
>  		queue_resume_work = true;
>  
> -	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
> +	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
>  		return;
>  
>  	if (queue_resume_work)
> @@ -4987,7 +4999,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
>  				complete(hba->dev_cmd.complete);
>  			}
>  		}
> -		if (ufshcd_is_clkscaling_supported(hba))
> +		if (ufshcd_is_clkscaling_supported(hba) &&
> +		    hba->clk_scaling.active_reqs > 0)
>  			hba->clk_scaling.active_reqs--;
>  	}
>  
> @@ -5650,18 +5663,25 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
>  		ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
>  	} else {
>  		ufshcd_hold(hba, false);
> -		if (hba->clk_scaling.is_allowed) {
> +		if (hba->clk_scaling.is_enabled) {
>  			cancel_work_sync(&hba->clk_scaling.suspend_work);
>  			cancel_work_sync(&hba->clk_scaling.resume_work);
>  			ufshcd_suspend_clkscaling(hba);
>  		}
>  	}
> +	down_write(&hba->clk_scaling_lock);
> +	hba->clk_scaling.is_allowed = false;
> +	up_write(&hba->clk_scaling_lock);
>  }
>  
>  static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
>  {
>  	ufshcd_release(hba);
> -	if (hba->clk_scaling.is_allowed)
> +
> +	down_write(&hba->clk_scaling_lock);
> +	hba->clk_scaling.is_allowed = true;
> +	up_write(&hba->clk_scaling_lock);
> +	if (hba->clk_scaling.is_enabled)
>  		ufshcd_resume_clkscaling(hba);
>  	pm_runtime_put(hba->dev);
>  }
> @@ -7620,12 +7640,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
>  			sizeof(struct ufs_pa_layer_attr));
>  		hba->clk_scaling.saved_pwr_info.is_valid = true;
>  		if (!hba->devfreq) {
> +			hba->clk_scaling.is_allowed = true;

Perhaps moving this line after ufshcd_devfreq_init() is successful?

>  			ret = ufshcd_devfreq_init(hba);
>  			if (ret)
>  				goto out;
> -		}
>  
> -		hba->clk_scaling.is_allowed = true;
> +			hba->clk_scaling.is_enabled = true;
> +			ufshcd_clkscaling_init_sysfs(hba);
> +		}
>  	}
>  
>  	ufs_bsg_probe(hba);
> @@ -8491,11 +8513,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
>  	ufshcd_hold(hba, false);
>  	hba->clk_gating.is_suspended = true;
>  
> -	if (hba->clk_scaling.is_allowed) {
> +	if (hba->clk_scaling.is_enabled) {
>  		cancel_work_sync(&hba->clk_scaling.suspend_work);
>  		cancel_work_sync(&hba->clk_scaling.resume_work);
>  		ufshcd_suspend_clkscaling(hba);
>  	}
> +	down_write(&hba->clk_scaling_lock);
> +	hba->clk_scaling.is_allowed = false;
> +	up_write(&hba->clk_scaling_lock);
>  
>  	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
>  			req_link_state == UIC_LINK_ACTIVE_STATE) {
> @@ -8592,8 +8617,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
>  	goto out;
>  
>  set_link_active:
> -	if (hba->clk_scaling.is_allowed)
> -		ufshcd_resume_clkscaling(hba);
>  	ufshcd_vreg_set_hpm(hba);
>  	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
>  		ufshcd_set_link_active(hba);
> @@ -8603,7 +8626,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
>  	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
>  		ufshcd_disable_auto_bkops(hba);
>  enable_gating:
> -	if (hba->clk_scaling.is_allowed)
> +	down_write(&hba->clk_scaling_lock);
> +	hba->clk_scaling.is_allowed = true;
> +	up_write(&hba->clk_scaling_lock);
> +	if (hba->clk_scaling.is_enabled)
>  		ufshcd_resume_clkscaling(hba);
>  	hba->clk_gating.is_suspended = false;
>  	hba->dev_info.b_rpm_dev_flush_capable = false;
> @@ -8701,7 +8727,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
>  
>  	hba->clk_gating.is_suspended = false;
>  
> -	if (hba->clk_scaling.is_allowed)
> +	down_write(&hba->clk_scaling_lock);
> +	hba->clk_scaling.is_allowed = true;
> +	up_write(&hba->clk_scaling_lock);
> +	if (hba->clk_scaling.is_enabled)
>  		ufshcd_resume_clkscaling(hba);
>  
>  	/* Enable Auto-Hibernate if configured */
> @@ -8725,8 +8754,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
>  	ufshcd_vreg_set_lpm(hba);
>  disable_irq_and_vops_clks:
>  	ufshcd_disable_irq(hba);
> -	if (hba->clk_scaling.is_allowed)
> -		ufshcd_suspend_clkscaling(hba);
>  	ufshcd_setup_clocks(hba, false);
>  	if (ufshcd_is_clkgating_allowed(hba)) {
>  		hba->clk_gating.state = CLKS_OFF;
> @@ -8915,6 +8942,8 @@ int ufshcd_shutdown(struct ufs_hba *hba)
>  
>  	pm_runtime_get_sync(hba->dev);
>  
> +	ufshcd_exit_clk_scaling(hba);
> +
>  	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
>  out:
>  	if (ret)
> @@ -8944,8 +8973,6 @@ void ufshcd_remove(struct ufs_hba *hba)
>  
>  	ufshcd_exit_clk_scaling(hba);
>  	ufshcd_exit_clk_gating(hba);
> -	if (ufshcd_is_clkscaling_supported(hba))
> -		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
>  	ufshcd_hba_exit(hba);
>  }
>  EXPORT_SYMBOL_GPL(ufshcd_remove);
> diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
> index e0f00a4..9fcecba 100644
> --- a/drivers/scsi/ufs/ufshcd.h
> +++ b/drivers/scsi/ufs/ufshcd.h
> @@ -382,6 +382,7 @@ struct ufs_saved_pwr_info {
>   * @workq: workqueue to schedule devfreq suspend/resume work
>   * @suspend_work: worker to suspend devfreq
>   * @resume_work: worker to resume devfreq
> + * @is_enabled: tracks if scaling is currently enabled or not
>   * @is_allowed: tracks if scaling is currently allowed or not
>   * @is_busy_started: tracks if busy period has started or not
>   * @is_suspended: tracks if devfreq is suspended or not
> @@ -396,6 +397,7 @@ struct ufs_clk_scaling {
>  	struct workqueue_struct *workq;
>  	struct work_struct suspend_work;
>  	struct work_struct resume_work;
> +	bool is_enabled;
>  	bool is_allowed;
>  	bool is_busy_started;
>  	bool is_suspended;

Now there are more and more "similar boolean attributes" regarding
clk-scaling control, maybe add more comprehensive comments to describe
them?

Otherwise this patch looks good to me.

Feel free to add
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>





* Re: [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating()
  2020-12-13 16:31 ` [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating() Can Guo
@ 2020-12-15  5:55   ` Stanley Chu
  2020-12-16 14:18   ` Bean Huo
  1 sibling, 0 replies; 9+ messages in thread
From: Stanley Chu @ 2020-12-15  5:55 UTC (permalink / raw)
  To: Can Guo
  Cc: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, Alim Akhtar, Avri Altman,
	James E.J. Bottomley, Martin K. Petersen, Bean Huo,
	Bart Van Assche, Satya Tangirala, open list

On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
> ufshcd_hba_exit() is always called after ufshcd_exit_clk_scaling() and
> ufshcd_exit_clk_gating(), so move ufshcd_exit_clk_scaling/gating() to
> ufshcd_hba_exit().
> 
> Signed-off-by: Can Guo <cang@codeaurora.org>

Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>




* Re: [PATCH v4 3/3] scsi: ufs: Revert "Make sure clk scaling happens only when HBA is runtime ACTIVE"
  2020-12-13 16:31 ` [PATCH v4 3/3] scsi: ufs: Revert "Make sure clk scaling happens only when HBA is runtime ACTIVE" Can Guo
@ 2020-12-15  6:15   ` Stanley Chu
  0 siblings, 0 replies; 9+ messages in thread
From: Stanley Chu @ 2020-12-15  6:15 UTC (permalink / raw)
  To: Can Guo
  Cc: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, Alim Akhtar, Avri Altman,
	James E.J. Bottomley, Martin K. Petersen, Bean Huo,
	Bart Van Assche, open list

On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
> Commit 73cc291c27024 ("Make sure clk scaling happens only when HBA is
> runtime ACTIVE") is no longer needed since commit f7a42540928a8 ("scsi:
> ufs: Protect some contexts from unexpected clock scaling") is a more
> mature fix to protect UFS LLD stability from clock scaling invoked through
> sysfs nodes by users.
> 
> Signed-off-by: Can Guo <cang@codeaurora.org>

Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>



* Re: [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling
  2020-12-13 16:31 ` [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling Can Guo
  2020-12-15  5:43   ` Stanley Chu
@ 2020-12-16 13:31   ` Bean Huo
  1 sibling, 0 replies; 9+ messages in thread
From: Bean Huo @ 2020-12-16 13:31 UTC (permalink / raw)
  To: Can Guo, asutoshd, nguyenb, hongwus, rnayak, linux-scsi,
	kernel-team, saravanak, salyzyn
  Cc: Alim Akhtar, Avri Altman, James E.J. Bottomley,
	Martin K. Petersen, Stanley Chu, Bean Huo, Bart Van Assche,
	Satya Tangirala, open list

On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
> In contexts like suspend, shutdown and error handling, we need to suspend
> devfreq to make sure these contexts won't be disturbed by clock scaling.
> However, suspending devfreq is not enough since users can still trigger a
> clock scaling by manipulating the sysfs node clkscale_enable and devfreq
> sysfs nodes like min/max_freq and governor. Add one more flag in struct
> clk_scaling such that these contexts can prevent clock scaling from being
> invoked through above sysfs nodes.
> 
> Signed-off-by: Can Guo <cang@codeaurora.org>
Looks good to me.

Reviewed-by: Bean Huo <beanhuo@micron.com>



* Re: [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating()
  2020-12-13 16:31 ` [PATCH v4 2/3] scsi: ufs: Clean up ufshcd_exit_clk_scaling/gating() Can Guo
  2020-12-15  5:55   ` Stanley Chu
@ 2020-12-16 14:18   ` Bean Huo
  1 sibling, 0 replies; 9+ messages in thread
From: Bean Huo @ 2020-12-16 14:18 UTC (permalink / raw)
  To: Can Guo, asutoshd, nguyenb, hongwus, rnayak, linux-scsi,
	kernel-team, saravanak, salyzyn
  Cc: Alim Akhtar, Avri Altman, James E.J. Bottomley,
	Martin K. Petersen, Stanley Chu, Bean Huo, Bart Van Assche,
	Satya Tangirala, open list

On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
> ufshcd_hba_exit() is always called after ufshcd_exit_clk_scaling()
> and
> ufshcd_exit_clk_gating(), so move ufshcd_exit_clk_scaling/gating() to
> ufshcd_hba_exit().
> 
> Signed-off-by: Can Guo <cang@codeaurora.org>
Reviewed-by: Bean Huo <beanhuo@micron.com>



* Re: [PATCH v4 1/3] scsi: ufs: Protect some contexts from unexpected clock scaling
  2020-12-15  5:43   ` Stanley Chu
@ 2020-12-18  6:26     ` Can Guo
  0 siblings, 0 replies; 9+ messages in thread
From: Can Guo @ 2020-12-18  6:26 UTC (permalink / raw)
  To: Stanley Chu
  Cc: asutoshd, nguyenb, hongwus, rnayak, linux-scsi, kernel-team,
	saravanak, salyzyn, Alim Akhtar, Avri Altman,
	James E.J. Bottomley, Martin K. Petersen, Bean Huo,
	Bart Van Assche, Satya Tangirala, open list

On 2020-12-15 13:43, Stanley Chu wrote:
> Hi Can,
> 
> On Sun, 2020-12-13 at 08:31 -0800, Can Guo wrote:
>> In contexts like suspend, shutdown and error handling, we need to 
>> suspend
>> devfreq to make sure these contexts won't be disturbed by clock 
>> scaling.
>> However, suspending devfreq is not enough since users can still 
>> trigger a
>> clock scaling by manipulating the sysfs node clkscale_enable and 
>> devfreq
>> sysfs nodes like min/max_freq and governor. Add one more flag in 
>> struct
>> clk_scaling such that these contexts can prevent clock scaling from 
>> being
>> invoked through above sysfs nodes.
>> 
>> Signed-off-by: Can Guo <cang@codeaurora.org>
>> ---
>>  drivers/scsi/ufs/ufshcd.c | 83 
>> +++++++++++++++++++++++++++++++----------------
>>  drivers/scsi/ufs/ufshcd.h |  2 ++
>>  2 files changed, 57 insertions(+), 28 deletions(-)
>> 
>> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
>> index 0c148fc..4ccdd2b 100644
>> --- a/drivers/scsi/ufs/ufshcd.c
>> +++ b/drivers/scsi/ufs/ufshcd.c
>> @@ -1147,12 +1147,22 @@ static int ufshcd_clock_scaling_prepare(struct 
>> ufs_hba *hba)
>>  	 */
>>  	ufshcd_scsi_block_requests(hba);
>>  	down_write(&hba->clk_scaling_lock);
>> -	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
>> +
>> +	if (!hba->clk_scaling.is_allowed)
>> +		ret = -EAGAIN;
>> +	else if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US))
>>  		ret = -EBUSY;
>> +
>> +	if (ret) {
>>  		up_write(&hba->clk_scaling_lock);
>>  		ufshcd_scsi_unblock_requests(hba);
>> +		goto out;
>>  	}
>> 
>> +	/* let's not get into low power until clock scaling is completed */
>> +	ufshcd_hold(hba, false);
>> +
>> +out:
>>  	return ret;
>>  }
>> 
>> @@ -1160,6 +1170,7 @@ static void 
>> ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
>>  {
>>  	up_write(&hba->clk_scaling_lock);
>>  	ufshcd_scsi_unblock_requests(hba);
>> +	ufshcd_release(hba);
>>  }
>> 
>>  /**
>> @@ -1175,12 +1186,9 @@ static int ufshcd_devfreq_scale(struct ufs_hba 
>> *hba, bool scale_up)
>>  {
>>  	int ret = 0;
>> 
>> -	/* let's not get into low power until clock scaling is completed */
>> -	ufshcd_hold(hba, false);
>> -
>>  	ret = ufshcd_clock_scaling_prepare(hba);
>>  	if (ret)
>> -		goto out;
>> +		return ret;
>> 
>>  	/* scale down the gear before scaling down clocks */
>>  	if (!scale_up) {
>> @@ -1212,8 +1220,6 @@ static int ufshcd_devfreq_scale(struct ufs_hba 
>> *hba, bool scale_up)
>> 
>>  out_unprepare:
>>  	ufshcd_clock_scaling_unprepare(hba);
>> -out:
>> -	ufshcd_release(hba);
>>  	return ret;
>>  }
>> 
>> @@ -1487,7 +1493,7 @@ static ssize_t 
>> ufshcd_clkscale_enable_show(struct device *dev,
>>  {
>>  	struct ufs_hba *hba = dev_get_drvdata(dev);
>> 
>> -	return snprintf(buf, PAGE_SIZE, "%d\n", 
>> hba->clk_scaling.is_allowed);
>> +	return snprintf(buf, PAGE_SIZE, "%d\n", 
>> hba->clk_scaling.is_enabled);
>>  }
>> 
>>  static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
>> @@ -1496,12 +1502,20 @@ static ssize_t 
>> ufshcd_clkscale_enable_store(struct device *dev,
>>  	struct ufs_hba *hba = dev_get_drvdata(dev);
>>  	u32 value;
>>  	int err;
>> +	unsigned long flags;
>> +	bool update = true;
>> 
>>  	if (kstrtou32(buf, 0, &value))
>>  		return -EINVAL;
>> 
>>  	value = !!value;
>> -	if (value == hba->clk_scaling.is_allowed)
>> +	spin_lock_irqsave(hba->host->host_lock, flags);
>> +	if (value == hba->clk_scaling.is_enabled)
>> +		update = false;
>> +	else
>> +		hba->clk_scaling.is_enabled = value;
>> +	spin_unlock_irqrestore(hba->host->host_lock, flags);
>> +	if (!update)
>>  		goto out;
>> 
>>  	pm_runtime_get_sync(hba->dev);
>> @@ -1510,8 +1524,6 @@ static ssize_t 
>> ufshcd_clkscale_enable_store(struct device *dev,
>>  	cancel_work_sync(&hba->clk_scaling.suspend_work);
>>  	cancel_work_sync(&hba->clk_scaling.resume_work);
>> 
>> -	hba->clk_scaling.is_allowed = value;
>> -
>>  	if (value) {
>>  		ufshcd_resume_clkscaling(hba);
>>  	} else {
>> @@ -1845,8 +1857,6 @@ static void ufshcd_init_clk_scaling(struct 
>> ufs_hba *hba)
>>  	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
>>  		 hba->host->host_no);
>>  	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
>> -
>> -	ufshcd_clkscaling_init_sysfs(hba);
>>  }
>> 
>>  static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
>> @@ -1854,6 +1864,8 @@ static void ufshcd_exit_clk_scaling(struct 
>> ufs_hba *hba)
>>  	if (!ufshcd_is_clkscaling_supported(hba))
>>  		return;
>> 
>> +	if (hba->devfreq)
>> +		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
>>  	destroy_workqueue(hba->clk_scaling.workq);
>>  	ufshcd_devfreq_remove(hba);
>>  }
>> @@ -1918,7 +1930,7 @@ static void ufshcd_clk_scaling_start_busy(struct 
>> ufs_hba *hba)
>>  	if (!hba->clk_scaling.active_reqs++)
>>  		queue_resume_work = true;
>> 
>> -	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
>> +	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
>>  		return;
>> 
>>  	if (queue_resume_work)
>> @@ -4987,7 +4999,8 @@ static void __ufshcd_transfer_req_compl(struct 
>> ufs_hba *hba,
>>  				complete(hba->dev_cmd.complete);
>>  			}
>>  		}
>> -		if (ufshcd_is_clkscaling_supported(hba))
>> +		if (ufshcd_is_clkscaling_supported(hba) &&
>> +		    hba->clk_scaling.active_reqs > 0)
>>  			hba->clk_scaling.active_reqs--;
>>  	}
>> 
>> @@ -5650,18 +5663,25 @@ static void ufshcd_err_handling_prepare(struct 
>> ufs_hba *hba)
>>  		ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
>>  	} else {
>>  		ufshcd_hold(hba, false);
>> -		if (hba->clk_scaling.is_allowed) {
>> +		if (hba->clk_scaling.is_enabled) {
>>  			cancel_work_sync(&hba->clk_scaling.suspend_work);
>>  			cancel_work_sync(&hba->clk_scaling.resume_work);
>>  			ufshcd_suspend_clkscaling(hba);
>>  		}
>>  	}
>> +	down_write(&hba->clk_scaling_lock);
>> +	hba->clk_scaling.is_allowed = false;
>> +	up_write(&hba->clk_scaling_lock);
>>  }
>> 
>>  static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
>>  {
>>  	ufshcd_release(hba);
>> -	if (hba->clk_scaling.is_allowed)
>> +
>> +	down_write(&hba->clk_scaling_lock);
>> +	hba->clk_scaling.is_allowed = true;
>> +	up_write(&hba->clk_scaling_lock);
>> +	if (hba->clk_scaling.is_enabled)
>>  		ufshcd_resume_clkscaling(hba);
>>  	pm_runtime_put(hba->dev);
>>  }
>> @@ -7620,12 +7640,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
>>  			sizeof(struct ufs_pa_layer_attr));
>>  		hba->clk_scaling.saved_pwr_info.is_valid = true;
>>  		if (!hba->devfreq) {
>> +			hba->clk_scaling.is_allowed = true;
> 
> Perhaps moving this line after ufshcd_devfreq_init() is successful?

devfreq starts to work even before ufshcd_devfreq_init() returns, and
I don't want to block it, hence is_allowed is set before calling
ufshcd_devfreq_init(). If devfreq fails to initialize, clk_scaling and
its related sysfs nodes won't work anyway and clk_scaling->is_enabled
stays false, so we are safe.
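
In other words, the ordering in the hunk under discussion ends up as below
(same code as in the patch; comments added here only to illustrate the
point):

  		if (!hba->devfreq) {
  			/* allow scaling before devfreq registration, since
  			 * devfreq may call back into ufshcd_devfreq_target()
  			 * before ufshcd_devfreq_init() even returns */
  			hba->clk_scaling.is_allowed = true;
  			ret = ufshcd_devfreq_init(hba);
  			if (ret)
  				goto out;
  			/* only enable scaling and expose the sysfs knob once
  			 * devfreq actually exists */
  			hba->clk_scaling.is_enabled = true;
  			ufshcd_clkscaling_init_sysfs(hba);
  		}
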

> 
>>  			ret = ufshcd_devfreq_init(hba);
>>  			if (ret)
>>  				goto out;
>> -		}
>> 
>> -		hba->clk_scaling.is_allowed = true;
>> +			hba->clk_scaling.is_enabled = true;
>> +			ufshcd_clkscaling_init_sysfs(hba);
>> +		}
>>  	}
>> 
>>  	ufs_bsg_probe(hba);
>> @@ -8491,11 +8513,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, 
>> enum ufs_pm_op pm_op)
>>  	ufshcd_hold(hba, false);
>>  	hba->clk_gating.is_suspended = true;
>> 
>> -	if (hba->clk_scaling.is_allowed) {
>> +	if (hba->clk_scaling.is_enabled) {
>>  		cancel_work_sync(&hba->clk_scaling.suspend_work);
>>  		cancel_work_sync(&hba->clk_scaling.resume_work);
>>  		ufshcd_suspend_clkscaling(hba);
>>  	}
>> +	down_write(&hba->clk_scaling_lock);
>> +	hba->clk_scaling.is_allowed = false;
>> +	up_write(&hba->clk_scaling_lock);
>> 
>>  	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
>>  			req_link_state == UIC_LINK_ACTIVE_STATE) {
>> @@ -8592,8 +8617,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, 
>> enum ufs_pm_op pm_op)
>>  	goto out;
>> 
>>  set_link_active:
>> -	if (hba->clk_scaling.is_allowed)
>> -		ufshcd_resume_clkscaling(hba);
>>  	ufshcd_vreg_set_hpm(hba);
>>  	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
>>  		ufshcd_set_link_active(hba);
>> @@ -8603,7 +8626,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, 
>> enum ufs_pm_op pm_op)
>>  	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
>>  		ufshcd_disable_auto_bkops(hba);
>>  enable_gating:
>> -	if (hba->clk_scaling.is_allowed)
>> +	down_write(&hba->clk_scaling_lock);
>> +	hba->clk_scaling.is_allowed = true;
>> +	up_write(&hba->clk_scaling_lock);
>> +	if (hba->clk_scaling.is_enabled)
>>  		ufshcd_resume_clkscaling(hba);
>>  	hba->clk_gating.is_suspended = false;
>>  	hba->dev_info.b_rpm_dev_flush_capable = false;
>> @@ -8701,7 +8727,10 @@ static int ufshcd_resume(struct ufs_hba *hba, 
>> enum ufs_pm_op pm_op)
>> 
>>  	hba->clk_gating.is_suspended = false;
>> 
>> -	if (hba->clk_scaling.is_allowed)
>> +	down_write(&hba->clk_scaling_lock);
>> +	hba->clk_scaling.is_allowed = true;
>> +	up_write(&hba->clk_scaling_lock);
>> +	if (hba->clk_scaling.is_enabled)
>>  		ufshcd_resume_clkscaling(hba);
>> 
>>  	/* Enable Auto-Hibernate if configured */
>> @@ -8725,8 +8754,6 @@ static int ufshcd_resume(struct ufs_hba *hba, 
>> enum ufs_pm_op pm_op)
>>  	ufshcd_vreg_set_lpm(hba);
>>  disable_irq_and_vops_clks:
>>  	ufshcd_disable_irq(hba);
>> -	if (hba->clk_scaling.is_allowed)
>> -		ufshcd_suspend_clkscaling(hba);
>>  	ufshcd_setup_clocks(hba, false);
>>  	if (ufshcd_is_clkgating_allowed(hba)) {
>>  		hba->clk_gating.state = CLKS_OFF;
>> @@ -8915,6 +8942,8 @@ int ufshcd_shutdown(struct ufs_hba *hba)
>> 
>>  	pm_runtime_get_sync(hba->dev);
>> 
>> +	ufshcd_exit_clk_scaling(hba);
>> +
>>  	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
>>  out:
>>  	if (ret)
>> @@ -8944,8 +8973,6 @@ void ufshcd_remove(struct ufs_hba *hba)
>> 
>>  	ufshcd_exit_clk_scaling(hba);
>>  	ufshcd_exit_clk_gating(hba);
>> -	if (ufshcd_is_clkscaling_supported(hba))
>> -		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
>>  	ufshcd_hba_exit(hba);
>>  }
>>  EXPORT_SYMBOL_GPL(ufshcd_remove);
>> diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
>> index e0f00a4..9fcecba 100644
>> --- a/drivers/scsi/ufs/ufshcd.h
>> +++ b/drivers/scsi/ufs/ufshcd.h
>> @@ -382,6 +382,7 @@ struct ufs_saved_pwr_info {
>>   * @workq: workqueue to schedule devfreq suspend/resume work
>>   * @suspend_work: worker to suspend devfreq
>>   * @resume_work: worker to resume devfreq
>> + * @is_enabled: tracks if scaling is currently enabled or not
>>   * @is_allowed: tracks if scaling is currently allowed or not
>>   * @is_busy_started: tracks if busy period has started or not
>>   * @is_suspended: tracks if devfreq is suspended or not
>> @@ -396,6 +397,7 @@ struct ufs_clk_scaling {
>>  	struct workqueue_struct *workq;
>>  	struct work_struct suspend_work;
>>  	struct work_struct resume_work;
>> +	bool is_enabled;
>>  	bool is_allowed;
>>  	bool is_busy_started;
>>  	bool is_suspended;
> 
> Now there are more and more "similar boolean attributes" regarding
> clk-scaling control, maybe add more comprehensive comments to describe
> them?
> 
> Otherwise this patch looks good to me.

Sure, I will give more words to the new flag.

Thanks,

Can Guo.

> 
> Feel free to add
> Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>


