* [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler
@ 2022-03-10 15:17 Mario Limonciello
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
` (3 more replies)
0 siblings, 4 replies; 9+ messages in thread
From: Mario Limonciello @ 2022-03-10 15:17 UTC (permalink / raw)
To: Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi, Mario Limonciello
Currently the latest thing run during a suspend to idle attempt is
the LPS0 `prepare_late` callback and the earliest thing is the
`restore_early` callback.
There is a desire for the `amd-pmc` driver to suspend later in the
suspend process (ideally the very last thing), so create a callback
that it or any other driver can hook into to do this.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/acpi/x86/s2idle.c | 76 ++++++++++++++++++++++++++++++++++++++-
include/linux/acpi.h | 9 ++++-
2 files changed, 83 insertions(+), 2 deletions(-)
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..652dc2d75458 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -86,6 +86,16 @@ struct lpi_device_constraint_amd {
int min_dstate;
};
+struct lps0_callback_handler {
+ struct list_head list_node;
+ int (*prepare_late_callback)(void *context);
+ void (*restore_early_callback)(void *context);
+ void *context;
+};
+
+static LIST_HEAD(lps0_callback_handler_head);
+static DEFINE_MUTEX(lps0_callback_handler_mutex);
+
static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;
static int rev_id;
@@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = {
int acpi_s2idle_prepare_late(void)
{
+ struct lps0_callback_handler *handler;
+ int rc = 0;
+
if (!lps0_device_handle || sleep_no_lps0)
return 0;
@@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
- return 0;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+ rc = handler->prepare_late_callback(handler->context);
+ if (rc)
+ goto out;
+ }
+out:
+ mutex_unlock(&lps0_callback_handler_mutex);
+
+ return rc;
}
void acpi_s2idle_restore_early(void)
{
+ struct lps0_callback_handler *handler;
+
if (!lps0_device_handle || sleep_no_lps0)
return;
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
+ handler->restore_early_callback(handler->context);
+ mutex_unlock(&lps0_callback_handler_mutex);
+
/* Modern standby exit */
if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
@@ -524,4 +554,48 @@ void acpi_s2idle_setup(void)
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+ void (*restore_early)(void *context),
+ void *context)
+{
+ struct lps0_callback_handler *handler;
+
+ if (!lps0_device_handle || sleep_no_lps0)
+ return -ENODEV;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+ handler->prepare_late_callback = prepare_late;
+ handler->restore_early_callback = restore_early;
+ handler->context = context;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_add(&handler->list_node, &lps0_callback_handler_head);
+ mutex_unlock(&lps0_callback_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
+
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+ void (*restore_early)(void *context),
+ void *context)
+{
+ struct lps0_callback_handler *handler;
+
+ mutex_lock(&lps0_callback_handler_mutex);
+ list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+ if (handler->prepare_late_callback == prepare_late &&
+ handler->restore_early_callback == restore_early &&
+ handler->context == context) {
+ list_del(&handler->list_node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&lps0_callback_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
+
#endif /* CONFIG_SUSPEND */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6274758648e3..cae0fde309f2 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
+#ifdef CONFIG_X86
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+ void (*restore_early)(void *context),
+ void *context);
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+ void (*restore_early)(void *context),
+ void *context);
+#endif /* CONFIG_X86 */
#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
@ 2022-03-10 15:17 ` Mario Limonciello
2022-03-10 16:26 ` David E. Box
2022-03-10 15:17 ` [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process Mario Limonciello
` (2 subsequent siblings)
3 siblings, 1 reply; 9+ messages in thread
From: Mario Limonciello @ 2022-03-10 15:17 UTC (permalink / raw)
To: Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi, Mario Limonciello
If constraints checking has been enabled by the LPS0 code, it may
also be useful for drivers using the callback to make a decision what
to do.
For example this may in the future allow a failing constraints check
preventing another driver from notifying firmware that all required
devices have entered the deepest state.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/acpi/x86/s2idle.c | 17 ++++++++++-------
include/linux/acpi.h | 4 ++--
2 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 652dc2d75458..c737a8e5d5a5 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -88,7 +88,7 @@ struct lpi_device_constraint_amd {
struct lps0_callback_handler {
struct list_head list_node;
- int (*prepare_late_callback)(void *context);
+ int (*prepare_late_callback)(void *context, bool constraints);
void (*restore_early_callback)(void *context);
void *context;
};
@@ -297,7 +297,7 @@ static void lpi_device_get_constraints(void)
ACPI_FREE(out_obj);
}
-static void lpi_check_constraints(void)
+static void lpi_check_constraints(bool *met)
{
int i;
@@ -319,11 +319,13 @@ static void lpi_check_constraints(void)
continue;
}
- if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ if (adev->power.state < lpi_constraints_table[i].min_dstate) {
acpi_handle_info(handle,
"LPI: Constraint not met; min power state:%s current power state:%s\n",
acpi_power_state_string(lpi_constraints_table[i].min_dstate),
acpi_power_state_string(adev->power.state));
+ *met = false;
+ }
}
}
@@ -455,13 +457,14 @@ static struct acpi_scan_handler lps0_handler = {
int acpi_s2idle_prepare_late(void)
{
struct lps0_callback_handler *handler;
+ bool constraints_met = true;
int rc = 0;
if (!lps0_device_handle || sleep_no_lps0)
return 0;
if (pm_debug_messages_on)
- lpi_check_constraints();
+ lpi_check_constraints(&constraints_met);
/* Screen off */
if (lps0_dsm_func_mask > 0)
@@ -490,7 +493,7 @@ int acpi_s2idle_prepare_late(void)
mutex_lock(&lps0_callback_handler_mutex);
list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
- rc = handler->prepare_late_callback(handler->context);
+ rc = handler->prepare_late_callback(handler->context, constraints_met);
if (rc)
goto out;
}
@@ -554,7 +557,7 @@ void acpi_s2idle_setup(void)
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
-int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool constraints),
void (*restore_early)(void *context),
void *context)
{
@@ -578,7 +581,7 @@ int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
}
EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
-void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context, bool constraints),
void (*restore_early)(void *context),
void *context)
{
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index cae0fde309f2..5aae774670dc 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1024,10 +1024,10 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
#ifdef CONFIG_X86
-int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool constraints),
void (*restore_early)(void *context),
void *context);
-void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context, bool constraints),
void (*restore_early)(void *context),
void *context);
#endif /* CONFIG_X86 */
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
@ 2022-03-10 15:17 ` Mario Limonciello
2022-03-10 16:35 ` David E. Box
2022-03-10 15:17 ` [PATCH 4/4] platform/x86: amd-pmc: Drop CPU QoS workaround Mario Limonciello
2022-03-10 15:56 ` [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler David E. Box
3 siblings, 1 reply; 9+ messages in thread
From: Mario Limonciello @ 2022-03-10 15:17 UTC (permalink / raw)
To: Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi, Mario Limonciello
The `OS_HINT` message is supposed to indicate that everything else
that is supposed to go into the deepest state has done so.
This assumption is invalid as:
1) The CPUs will still go in and out of the deepest state
2) Other devices may still run their `noirq` suspend routines
3) The LPS0 ACPI device will still run
To more closely mirror how this works on other operating systems,
move the `amd-pmc` suspend to the very last thing before the s2idle
loop via an lps0 callback.
Fixes: 8d89835b0467 ("PM: suspend: Do not pause cpuidle in the suspend-to-idle path")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/platform/x86/amd-pmc.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 971aaabaa9c8..c13fd93f2662 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -639,9 +639,9 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
return rc;
}
-static int __maybe_unused amd_pmc_suspend(struct device *dev)
+static int amd_pmc_suspend(void *context, bool constraints_met)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
u32 arg = 1;
@@ -658,7 +658,7 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
}
/* Dump the IdleMask before we send hint to SMU */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc) {
@@ -681,28 +681,28 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
return rc;
}
-static int __maybe_unused amd_pmc_resume(struct device *dev)
+static void amd_pmc_resume(void *context)
{
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
int rc;
u8 msg;
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
- dev_err(pdev->dev, "resume failed\n");
+ dev_err(pdev->dev, "resume failed: %d\n", rc);
/* Let SMU know that we are looking for stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
/* Dump the IdleMask to see the blockers */
- amd_pmc_idlemask_read(pdev, dev, NULL);
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
if (rc)
- dev_err(pdev->dev, "error writing to STB\n");
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
/* Restore the QoS request back to defaults if it was set */
if (pdev->cpu_id == AMD_CPU_ID_CZN)
@@ -711,15 +711,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
-
- return rc;
}
-static const struct dev_pm_ops amd_pmc_pm_ops = {
- .suspend_noirq = amd_pmc_suspend,
- .resume_noirq = amd_pmc_resume,
-};
-
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
@@ -884,6 +877,12 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+ err = acpi_register_lps0_callbacks(amd_pmc_suspend,
+ amd_pmc_resume,
+ &pdev->dev);
+ if (err)
+ goto err_pci_dev_put;
+
amd_pmc_dbgfs_register(dev);
cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
@@ -897,6 +896,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+ acpi_unregister_lps0_callbacks(amd_pmc_suspend,
+ amd_pmc_resume,
+ &pdev->dev);
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
@@ -917,7 +919,6 @@ static struct platform_driver amd_pmc_driver = {
.driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
- .pm = &amd_pmc_pm_ops,
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 4/4] platform/x86: amd-pmc: Drop CPU QoS workaround
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
2022-03-10 15:17 ` [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process Mario Limonciello
@ 2022-03-10 15:17 ` Mario Limonciello
2022-03-10 15:56 ` [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler David E. Box
3 siblings, 0 replies; 9+ messages in thread
From: Mario Limonciello @ 2022-03-10 15:17 UTC (permalink / raw)
To: Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi, Mario Limonciello
A workaround was previously introduced as part of commit 646f429ec2de
("platform/x86: amd-pmc: Set QOS during suspend on CZN w/ timer wakeup")
to prevent CPUs from going into the deepest state during the execution
of the `noirq` stage of `amd_pmc`. As `amd_pmc` has been pushed to the
last step for suspend on AMD platforms, this workaround is no longer
necessary.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/platform/x86/amd-pmc.c | 22 ----------------------
1 file changed, 22 deletions(-)
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index c13fd93f2662..b636fbe90407 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
@@ -96,9 +95,6 @@
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
-/* QoS request for letting CPUs in idle states, but not the deepest */
-#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3
-
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
@@ -153,7 +149,6 @@ struct amd_pmc_dev {
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
- struct pm_qos_request amd_pmc_pm_qos_req;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
@@ -628,14 +623,6 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
rc = rtc_alarm_irq_enable(rtc_device, 0);
dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
- /*
- * Prevent CPUs from getting into deep idle states while sending OS_HINT
- * which is otherwise generally safe to send when at least one of the CPUs
- * is not in deep idle states.
- */
- cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY);
- wake_up_all_idle_cpus();
-
return rc;
}
@@ -675,9 +662,6 @@ static int amd_pmc_suspend(void *context, bool constraints_met)
return 0;
fail:
- if (pdev->cpu_id == AMD_CPU_ID_CZN)
- cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
return rc;
}
@@ -704,11 +688,6 @@ static void amd_pmc_resume(void *context)
if (rc)
dev_err(pdev->dev, "error writing to STB: %d\n", rc);
- /* Restore the QoS request back to defaults if it was set */
- if (pdev->cpu_id == AMD_CPU_ID_CZN)
- cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
-
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
}
@@ -884,7 +863,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
goto err_pci_dev_put;
amd_pmc_dbgfs_register(dev);
- cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
err_pci_dev_put:
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
` (2 preceding siblings ...)
2022-03-10 15:17 ` [PATCH 4/4] platform/x86: amd-pmc: Drop CPU QoS workaround Mario Limonciello
@ 2022-03-10 15:56 ` David E. Box
2022-03-10 16:13 ` Limonciello, Mario
3 siblings, 1 reply; 9+ messages in thread
From: David E. Box @ 2022-03-10 15:56 UTC (permalink / raw)
To: Mario Limonciello, Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi
Hi Mario,
On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote:
> Currently the latest thing run during a suspend to idle attempt is
> the LPS0 `prepare_late` callback and the earliest thing is the
> `resume_early` callback.
>
> There is a desire for the `amd-pmc` driver to suspend later in the
> suspend process (ideally the very last thing), so create a callback
> that it or any other driver can hook into to do this.
>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
> drivers/acpi/x86/s2idle.c | 76 ++++++++++++++++++++++++++++++++++++++-
> include/linux/acpi.h | 9 ++++-
> 2 files changed, 83 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
> index abc06e7f89d8..652dc2d75458 100644
> --- a/drivers/acpi/x86/s2idle.c
> +++ b/drivers/acpi/x86/s2idle.c
> @@ -86,6 +86,16 @@ struct lpi_device_constraint_amd {
> int min_dstate;
> };
>
> +struct lps0_callback_handler {
> + struct list_head list_node;
> + int (*prepare_late_callback)(void *context);
> + void (*restore_early_callback)(void *context);
> + void *context;
> +};
Maybe put this in acpi.h
...
> +
> +static LIST_HEAD(lps0_callback_handler_head);
> +static DEFINE_MUTEX(lps0_callback_handler_mutex);
> +
> static struct lpi_constraints *lpi_constraints_table;
> static int lpi_constraints_table_size;
> static int rev_id;
> @@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = {
>
> int acpi_s2idle_prepare_late(void)
> {
> + struct lps0_callback_handler *handler;
> + int rc = 0;
> +
> if (!lps0_device_handle || sleep_no_lps0)
> return 0;
>
> @@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void)
> acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
> lps0_dsm_func_mask_microsoft,
> lps0_dsm_guid_microsoft);
> }
> - return 0;
> +
> + mutex_lock(&lps0_callback_handler_mutex);
> + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
> + rc = handler->prepare_late_callback(handler->context);
> + if (rc)
> + goto out;
> + }
> +out:
> + mutex_unlock(&lps0_callback_handler_mutex);
> +
> + return rc;
> }
>
> void acpi_s2idle_restore_early(void)
> {
> + struct lps0_callback_handler *handler;
> +
> if (!lps0_device_handle || sleep_no_lps0)
> return;
>
> + mutex_lock(&lps0_callback_handler_mutex);
> + list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
> + handler->restore_early_callback(handler->context);
> + mutex_unlock(&lps0_callback_handler_mutex);
> +
> /* Modern standby exit */
> if (lps0_dsm_func_mask_microsoft > 0)
> acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
> @@ -524,4 +554,48 @@ void acpi_s2idle_setup(void)
> s2idle_set_ops(&acpi_s2idle_ops_lps0);
> }
>
> +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> + void (*restore_early)(void *context),
> + void *context)
... and just have "struct lps0_callback_handler *handler" be the argument here.
David
> +{
> + struct lps0_callback_handler *handler;
> +
> + if (!lps0_device_handle || sleep_no_lps0)
> + return -ENODEV;
> +
> + handler = kmalloc(sizeof(*handler), GFP_KERNEL);
> + if (!handler)
> + return -ENOMEM;
> + handler->prepare_late_callback = prepare_late;
> + handler->restore_early_callback = restore_early;
> + handler->context = context;
> +
> + mutex_lock(&lps0_callback_handler_mutex);
> + list_add(&handler->list_node, &lps0_callback_handler_head);
> + mutex_unlock(&lps0_callback_handler_mutex);
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
> +
> +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> + void (*restore_early)(void *context),
> + void *context)
> +{
> + struct lps0_callback_handler *handler;
> +
> + mutex_lock(&lps0_callback_handler_mutex);
> + list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
> + if (handler->prepare_late_callback == prepare_late &&
> + handler->restore_early_callback == restore_early &&
> + handler->context == context) {
> + list_del(&handler->list_node);
> + kfree(handler);
> + break;
> + }
> + }
> + mutex_unlock(&lps0_callback_handler_mutex);
> +}
> +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
> +
> #endif /* CONFIG_SUSPEND */
> diff --git a/include/linux/acpi.h b/include/linux/acpi.h
> index 6274758648e3..cae0fde309f2 100644
> --- a/include/linux/acpi.h
> +++ b/include/linux/acpi.h
> @@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8
> sleep_state,
>
> acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
> u32 val_a, u32 val_b);
> -
> +#ifdef CONFIG_X86
> +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> + void (*restore_early)(void *context),
> + void *context);
> +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> + void (*restore_early)(void *context),
> + void *context);
> +#endif /* CONFIG_X86 */
> #ifndef CONFIG_IA64
> void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
> #else
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler
2022-03-10 15:56 ` [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler David E. Box
@ 2022-03-10 16:13 ` Limonciello, Mario
0 siblings, 0 replies; 9+ messages in thread
From: Limonciello, Mario @ 2022-03-10 16:13 UTC (permalink / raw)
To: david.e.box, Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi
[Public]
> On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote:
> > Currently the latest thing run during a suspend to idle attempt is
> > the LPS0 `prepare_late` callback and the earliest thing is the
> > `resume_early` callback.
> >
> > There is a desire for the `amd-pmc` driver to suspend later in the
> > suspend process (ideally the very last thing), so create a callback
> > that it or any other driver can hook into to do this.
> >
> > Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> > ---
> > drivers/acpi/x86/s2idle.c | 76
> ++++++++++++++++++++++++++++++++++++++-
> > include/linux/acpi.h | 9 ++++-
> > 2 files changed, 83 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
> > index abc06e7f89d8..652dc2d75458 100644
> > --- a/drivers/acpi/x86/s2idle.c
> > +++ b/drivers/acpi/x86/s2idle.c
> > @@ -86,6 +86,16 @@ struct lpi_device_constraint_amd {
> > int min_dstate;
> > };
> >
> > +struct lps0_callback_handler {
> > + struct list_head list_node;
> > + int (*prepare_late_callback)(void *context);
> > + void (*restore_early_callback)(void *context);
> > + void *context;
> > +};
>
> Maybe put this in acpi.h
Wonderful suggestion, thanks!
I'll adopt this for v2 after some other feedback comes in on the approach.
>
> ...
>
>
> > +
> > +static LIST_HEAD(lps0_callback_handler_head);
> > +static DEFINE_MUTEX(lps0_callback_handler_mutex);
> > +
> > static struct lpi_constraints *lpi_constraints_table;
> > static int lpi_constraints_table_size;
> > static int rev_id;
> > @@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = {
> >
> > int acpi_s2idle_prepare_late(void)
> > {
> > + struct lps0_callback_handler *handler;
> > + int rc = 0;
> > +
> > if (!lps0_device_handle || sleep_no_lps0)
> > return 0;
> >
> > @@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void)
> > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
> > lps0_dsm_func_mask_microsoft,
> > lps0_dsm_guid_microsoft);
> > }
> > - return 0;
> > +
> > + mutex_lock(&lps0_callback_handler_mutex);
> > + list_for_each_entry(handler, &lps0_callback_handler_head,
> list_node) {
> > + rc = handler->prepare_late_callback(handler->context);
> > + if (rc)
> > + goto out;
> > + }
> > +out:
> > + mutex_unlock(&lps0_callback_handler_mutex);
> > +
> > + return rc;
> > }
> >
> > void acpi_s2idle_restore_early(void)
> > {
> > + struct lps0_callback_handler *handler;
> > +
> > if (!lps0_device_handle || sleep_no_lps0)
> > return;
> >
> > + mutex_lock(&lps0_callback_handler_mutex);
> > + list_for_each_entry(handler, &lps0_callback_handler_head,
> list_node)
> > + handler->restore_early_callback(handler->context);
> > + mutex_unlock(&lps0_callback_handler_mutex);
> > +
> > /* Modern standby exit */
> > if (lps0_dsm_func_mask_microsoft > 0)
> > acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
> > @@ -524,4 +554,48 @@ void acpi_s2idle_setup(void)
> > s2idle_set_ops(&acpi_s2idle_ops_lps0);
> > }
> >
> > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> > + void (*restore_early)(void *context),
> > + void *context)
>
> ... and just have "struct lps0_callback_handler *handler" be the argument
> here.
>
> David
>
> > +{
> > + struct lps0_callback_handler *handler;
> > +
> > + if (!lps0_device_handle || sleep_no_lps0)
> > + return -ENODEV;
> > +
> > + handler = kmalloc(sizeof(*handler), GFP_KERNEL);
> > + if (!handler)
> > + return -ENOMEM;
> > + handler->prepare_late_callback = prepare_late;
> > + handler->restore_early_callback = restore_early;
> > + handler->context = context;
> > +
> > + mutex_lock(&lps0_callback_handler_mutex);
> > + list_add(&handler->list_node, &lps0_callback_handler_head);
> > + mutex_unlock(&lps0_callback_handler_mutex);
> > +
> > + return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
> > +
> > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> > + void (*restore_early)(void *context),
> > + void *context)
> > +{
> > + struct lps0_callback_handler *handler;
> > +
> > + mutex_lock(&lps0_callback_handler_mutex);
> > + list_for_each_entry(handler, &lps0_callback_handler_head,
> list_node) {
> > + if (handler->prepare_late_callback == prepare_late &&
> > + handler->restore_early_callback == restore_early &&
> > + handler->context == context) {
> > + list_del(&handler->list_node);
> > + kfree(handler);
> > + break;
> > + }
> > + }
> > + mutex_unlock(&lps0_callback_handler_mutex);
> > +}
> > +EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
> > +
> > #endif /* CONFIG_SUSPEND */
> > diff --git a/include/linux/acpi.h b/include/linux/acpi.h
> > index 6274758648e3..cae0fde309f2 100644
> > --- a/include/linux/acpi.h
> > +++ b/include/linux/acpi.h
> > @@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int
> (*func)(u8
> > sleep_state,
> >
> > acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
> > u32 val_a, u32 val_b);
> > -
> > +#ifdef CONFIG_X86
> > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> > + void (*restore_early)(void *context),
> > + void *context);
> > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> > + void (*restore_early)(void *context),
> > + void *context);
> > +#endif /* CONFIG_X86 */
> > #ifndef CONFIG_IA64
> > void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
> > #else
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
@ 2022-03-10 16:26 ` David E. Box
2022-03-10 16:29 ` Limonciello, Mario
0 siblings, 1 reply; 9+ messages in thread
From: David E. Box @ 2022-03-10 16:26 UTC (permalink / raw)
To: Mario Limonciello, Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi
On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote:
> If constraints checking has been enabled by the LPS0 code, it may
> also be useful for drivers using the callback to make a decision what
> to do.
>
> For example this may in the future allow a failing constraints check
> preventing another driver from notifying firmware that all required
> devices have entered the deepest state.
>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
> drivers/acpi/x86/s2idle.c | 17 ++++++++++-------
> include/linux/acpi.h | 4 ++--
> 2 files changed, 12 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
> index 652dc2d75458..c737a8e5d5a5 100644
> --- a/drivers/acpi/x86/s2idle.c
> +++ b/drivers/acpi/x86/s2idle.c
> @@ -88,7 +88,7 @@ struct lpi_device_constraint_amd {
>
> struct lps0_callback_handler {
> struct list_head list_node;
> - int (*prepare_late_callback)(void *context);
> + int (*prepare_late_callback)(void *context, bool constraints);
> void (*restore_early_callback)(void *context);
> void *context;
> };
> @@ -297,7 +297,7 @@ static void lpi_device_get_constraints(void)
> ACPI_FREE(out_obj);
> }
>
> -static void lpi_check_constraints(void)
> +static void lpi_check_constraints(bool *met)
> {
> int i;
>
> @@ -319,11 +319,13 @@ static void lpi_check_constraints(void)
> continue;
> }
>
> - if (adev->power.state < lpi_constraints_table[i].min_dstate)
> + if (adev->power.state < lpi_constraints_table[i].min_dstate) {
> acpi_handle_info(handle,
> "LPI: Constraint not met; min power state:%s
> current power state:%s\n",
> acpi_power_state_string(lpi_constraints_table[i]
> .min_dstate),
> acpi_power_state_string(adev->power.state));
> + *met = false;
> + }
> }
> }
>
> @@ -455,13 +457,14 @@ static struct acpi_scan_handler lps0_handler = {
> int acpi_s2idle_prepare_late(void)
> {
> struct lps0_callback_handler *handler;
> + bool constraints_met = true;
> int rc = 0;
>
> if (!lps0_device_handle || sleep_no_lps0)
> return 0;
>
> if (pm_debug_messages_on)
> - lpi_check_constraints();
> + lpi_check_constraints(&constraints_met);
lpi_check_constraints() was only used for dumping information to dmesg. If you
want to make a decision based on whether a constraint was met then you probably
want to run it all the time. You could use pm_debug_messages_on inside of
lpi_check_constraints() to decide whether to print to the log.
>
> /* Screen off */
> if (lps0_dsm_func_mask > 0)
> @@ -490,7 +493,7 @@ int acpi_s2idle_prepare_late(void)
>
> mutex_lock(&lps0_callback_handler_mutex);
> list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
> - rc = handler->prepare_late_callback(handler->context);
> + rc = handler->prepare_late_callback(handler->context,
> constraints_met);
Otherwise, is it okay that constraints_met will always be true if
pm_debug_messages_on isn't?
David
> if (rc)
> goto out;
> }
> @@ -554,7 +557,7 @@ void acpi_s2idle_setup(void)
> s2idle_set_ops(&acpi_s2idle_ops_lps0);
> }
>
> -int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool
> constraints),
> void (*restore_early)(void *context),
> void *context)
> {
> @@ -578,7 +581,7 @@ int acpi_register_lps0_callbacks(int (*prepare_late)(void
> *context),
> }
> EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
>
> -void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context, bool
> constraints),
> void (*restore_early)(void *context),
> void *context)
> {
> diff --git a/include/linux/acpi.h b/include/linux/acpi.h
> index cae0fde309f2..5aae774670dc 100644
> --- a/include/linux/acpi.h
> +++ b/include/linux/acpi.h
> @@ -1024,10 +1024,10 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8
> sleep_state,
> acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
> u32 val_a, u32 val_b);
> #ifdef CONFIG_X86
> -int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool
> constraints),
> void (*restore_early)(void *context),
> void *context);
> -void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context, bool
> constraints),
> void (*restore_early)(void *context),
> void *context);
> #endif /* CONFIG_X86 */
^ permalink raw reply [flat|nested] 9+ messages in thread
* RE: [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback
2022-03-10 16:26 ` David E. Box
@ 2022-03-10 16:29 ` Limonciello, Mario
0 siblings, 0 replies; 9+ messages in thread
From: Limonciello, Mario @ 2022-03-10 16:29 UTC (permalink / raw)
To: david.e.box, Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi
[Public]
> -----Original Message-----
> From: David E. Box <david.e.box@linux.intel.com>
> Sent: Thursday, March 10, 2022 10:26
> To: Limonciello, Mario <Mario.Limonciello@amd.com>; Hans de Goede
> <hdegoede@redhat.com>; Mark Gross <mgross@linux.intel.com>; Rafael J .
> Wysocki <rjw@rjwysocki.net>
> Cc: open list:X86 PLATFORM DRIVERS <platform-driver-
> x86@vger.kernel.org>; linux-acpi@vger.kernel.org
> Subject: Re: [PATCH 2/4] ACPI / x86: Pass the constraints checking result to
> LPS0 callback
>
> On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote:
> > If constraints checking has been enabled by the LPS0 code, it may
> > also be useful for drivers using the callback to make a decision what
> > to do.
> >
> > For example this may in the future allow a failing constraints check
> > preventing another driver from notifying firmware that all required
> > devices have entered the deepest state.
> >
> > Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> > ---
> > drivers/acpi/x86/s2idle.c | 17 ++++++++++-------
> > include/linux/acpi.h | 4 ++--
> > 2 files changed, 12 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
> > index 652dc2d75458..c737a8e5d5a5 100644
> > --- a/drivers/acpi/x86/s2idle.c
> > +++ b/drivers/acpi/x86/s2idle.c
> > @@ -88,7 +88,7 @@ struct lpi_device_constraint_amd {
> >
> > struct lps0_callback_handler {
> > struct list_head list_node;
> > - int (*prepare_late_callback)(void *context);
> > + int (*prepare_late_callback)(void *context, bool constraints);
> > void (*restore_early_callback)(void *context);
> > void *context;
> > };
> > @@ -297,7 +297,7 @@ static void lpi_device_get_constraints(void)
> > ACPI_FREE(out_obj);
> > }
> >
> > -static void lpi_check_constraints(void)
> > +static void lpi_check_constraints(bool *met)
> > {
> > int i;
> >
> > @@ -319,11 +319,13 @@ static void lpi_check_constraints(void)
> > continue;
> > }
> >
> > - if (adev->power.state < lpi_constraints_table[i].min_dstate)
> > + if (adev->power.state < lpi_constraints_table[i].min_dstate)
> {
> > acpi_handle_info(handle,
> > "LPI: Constraint not met; min power state:%s
> > current power state:%s\n",
> >
> acpi_power_state_string(lpi_constraints_table[i]
> > .min_dstate),
> > acpi_power_state_string(adev-
> >power.state));
> > + *met = false;
> > + }
> > }
> > }
> >
> > @@ -455,13 +457,14 @@ static struct acpi_scan_handler lps0_handler = {
> > int acpi_s2idle_prepare_late(void)
> > {
> > struct lps0_callback_handler *handler;
> > + bool constraints_met = true;
> > int rc = 0;
> >
> > if (!lps0_device_handle || sleep_no_lps0)
> > return 0;
> >
> > if (pm_debug_messages_on)
> > - lpi_check_constraints();
> > + lpi_check_constraints(&constraints_met);
>
> lpi_check_constraints() was only used for dumping information to dmesg. If
> you
> want to make a decision based on whether a constraint was met then you
> probably
> want to run it all the time. You could use pm_debug_messages_on inside of
> lpi_check_constraints() to decide whether to print to the log.
That's a good idea.
>
> >
> > /* Screen off */
> > if (lps0_dsm_func_mask > 0)
> > @@ -490,7 +493,7 @@ int acpi_s2idle_prepare_late(void)
> >
> > mutex_lock(&lps0_callback_handler_mutex);
> > list_for_each_entry(handler, &lps0_callback_handler_head,
> list_node) {
> > - rc = handler->prepare_late_callback(handler->context);
> > + rc = handler->prepare_late_callback(handler->context,
> > constraints_met);
>
> Otherwise, is it okay that constraints_met will always be true if
> pm_debug_messages_on isn't?
I wasn't ready to make decisions based on it, but you're right at least
running it all the time and showing the impactful messages when debugging
on is a good start.
Thanks!
>
> David
>
> > if (rc)
> > goto out;
> > }
> > @@ -554,7 +557,7 @@ void acpi_s2idle_setup(void)
> > s2idle_set_ops(&acpi_s2idle_ops_lps0);
> > }
> >
> > -int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool
> > constraints),
> > void (*restore_early)(void *context),
> > void *context)
> > {
> > @@ -578,7 +581,7 @@ int acpi_register_lps0_callbacks(int
> (*prepare_late)(void
> > *context),
> > }
> > EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
> >
> > -void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context,
> bool
> > constraints),
> > void (*restore_early)(void *context),
> > void *context)
> > {
> > diff --git a/include/linux/acpi.h b/include/linux/acpi.h
> > index cae0fde309f2..5aae774670dc 100644
> > --- a/include/linux/acpi.h
> > +++ b/include/linux/acpi.h
> > @@ -1024,10 +1024,10 @@ void acpi_os_set_prepare_extended_sleep(int
> (*func)(u8
> > sleep_state,
> > acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
> > u32 val_a, u32 val_b);
> > #ifdef CONFIG_X86
> > -int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
> > +int acpi_register_lps0_callbacks(int (*prepare_late)(void *context, bool
> > constraints),
> > void (*restore_early)(void *context),
> > void *context);
> > -void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
> > +void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context,
> bool
> > constraints),
> > void (*restore_early)(void *context),
> > void *context);
> > #endif /* CONFIG_X86 */
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process
2022-03-10 15:17 ` [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process Mario Limonciello
@ 2022-03-10 16:35 ` David E. Box
0 siblings, 0 replies; 9+ messages in thread
From: David E. Box @ 2022-03-10 16:35 UTC (permalink / raw)
To: Mario Limonciello, Hans de Goede, Mark Gross, Rafael J . Wysocki
Cc: open list:X86 PLATFORM DRIVERS, linux-acpi
On Thu, 2022-03-10 at 09:17 -0600, Mario Limonciello wrote:
> The `OS_HINT` message is supposed to indicate that everything else
> that is supposed to go into the deepest state has done so.
>
> This assumption is invalid as:
> 1) The CPUs will still go in and out of the deepest state
> 2) Other devices may still run their `noirq` suspend routines
> 3) The LPS0 ACPI device will still run
Yep. We had looked at adding a notifier to address this.
David
>
> To more closely mirror how this works on other operating systems,
> move the `amd-pmc` suspend to the very last thing before the s2idle
> loop via an lps0 callback.
>
> Fixes: 8d89835b0467 ("PM: suspend: Do not pause cpuidle in the suspend-to-idle
> path")
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
> drivers/platform/x86/amd-pmc.c | 33 +++++++++++++++++----------------
> 1 file changed, 17 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
> index 971aaabaa9c8..c13fd93f2662 100644
> --- a/drivers/platform/x86/amd-pmc.c
> +++ b/drivers/platform/x86/amd-pmc.c
> @@ -639,9 +639,9 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev
> *pdev, u32 *arg)
> return rc;
> }
>
> -static int __maybe_unused amd_pmc_suspend(struct device *dev)
> +static int amd_pmc_suspend(void *context, bool constraints_met)
> {
> - struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
> + struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
> int rc;
> u8 msg;
> u32 arg = 1;
> @@ -658,7 +658,7 @@ static int __maybe_unused amd_pmc_suspend(struct device
> *dev)
> }
>
> /* Dump the IdleMask before we send hint to SMU */
> - amd_pmc_idlemask_read(pdev, dev, NULL);
> + amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
> msg = amd_pmc_get_os_hint(pdev);
> rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
> if (rc) {
> @@ -681,28 +681,28 @@ static int __maybe_unused amd_pmc_suspend(struct device
> *dev)
> return rc;
> }
>
> -static int __maybe_unused amd_pmc_resume(struct device *dev)
> +static void amd_pmc_resume(void *context)
> {
> - struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
> + struct amd_pmc_dev *pdev = dev_get_drvdata((struct device *)context);
> int rc;
> u8 msg;
>
> msg = amd_pmc_get_os_hint(pdev);
> rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
> if (rc)
> - dev_err(pdev->dev, "resume failed\n");
> + dev_err(pdev->dev, "resume failed: %d\n", rc);
>
> /* Let SMU know that we are looking for stats */
> amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
>
> /* Dump the IdleMask to see the blockers */
> - amd_pmc_idlemask_read(pdev, dev, NULL);
> + amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
>
> /* Write data incremented by 1 to distinguish in stb_read */
> if (enable_stb)
> rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
> if (rc)
> - dev_err(pdev->dev, "error writing to STB\n");
> + dev_err(pdev->dev, "error writing to STB: %d\n", rc);
>
> /* Restore the QoS request back to defaults if it was set */
> if (pdev->cpu_id == AMD_CPU_ID_CZN)
> @@ -711,15 +711,8 @@ static int __maybe_unused amd_pmc_resume(struct device
> *dev)
>
> /* Notify on failed entry */
> amd_pmc_validate_deepest(pdev);
> -
> - return rc;
> }
>
> -static const struct dev_pm_ops amd_pmc_pm_ops = {
> - .suspend_noirq = amd_pmc_suspend,
> - .resume_noirq = amd_pmc_resume,
> -};
> -
> static const struct pci_device_id pmc_pci_ids[] = {
> { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
> { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
> @@ -884,6 +877,12 @@ static int amd_pmc_probe(struct platform_device *pdev)
>
> amd_pmc_get_smu_version(dev);
> platform_set_drvdata(pdev, dev);
> + err = acpi_register_lps0_callbacks(amd_pmc_suspend,
> + amd_pmc_resume,
> + &pdev->dev);
> + if (err)
> + goto err_pci_dev_put;
> +
> amd_pmc_dbgfs_register(dev);
> cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req,
> PM_QOS_DEFAULT_VALUE);
> return 0;
> @@ -897,6 +896,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
> {
> struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
>
> + acpi_unregister_lps0_callbacks(amd_pmc_suspend,
> + amd_pmc_resume,
> + &pdev->dev);
> amd_pmc_dbgfs_unregister(dev);
> pci_dev_put(dev->rdev);
> mutex_destroy(&dev->lock);
> @@ -917,7 +919,6 @@ static struct platform_driver amd_pmc_driver = {
> .driver = {
> .name = "amd_pmc",
> .acpi_match_table = amd_pmc_acpi_ids,
> - .pm = &amd_pmc_pm_ops,
> },
> .probe = amd_pmc_probe,
> .remove = amd_pmc_remove,
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2022-03-10 16:36 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
2022-03-10 16:26 ` David E. Box
2022-03-10 16:29 ` Limonciello, Mario
2022-03-10 15:17 ` [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process Mario Limonciello
2022-03-10 16:35 ` David E. Box
2022-03-10 15:17 ` [PATCH 4/4] platform/x86: amd-pmc: Drop CPU QoS workaround Mario Limonciello
2022-03-10 15:56 ` [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler David E. Box
2022-03-10 16:13 ` Limonciello, Mario
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).