* [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler
@ 2022-03-10 15:17 Mario Limonciello
From: Mario Limonciello @ 2022-03-10 15:17 UTC (permalink / raw)
  To: Hans de Goede, Mark Gross, Rafael J . Wysocki
  Cc: open list:X86 PLATFORM DRIVERS, linux-acpi, Mario Limonciello

Currently the last thing run during a suspend-to-idle attempt is the
LPS0 `prepare_late` callback, and the earliest thing run on resume is
the `restore_early` callback.

There is a desire for the `amd-pmc` driver to suspend later in the
suspend process (ideally as the very last step), so create a callback
mechanism that it, or any other driver, can hook into to do this.
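
As an illustration only (not part of this patch), a consumer such as the
amd-pmc driver could hook in roughly as sketched below. The demo_* names
and the context structure are hypothetical; only
acpi_register_lps0_callbacks()/acpi_unregister_lps0_callbacks() come
from this patch:

  #include <linux/acpi.h>
  #include <linux/module.h>

  /* Hypothetical driver-private state handed back to the callbacks. */
  struct demo_ctx {
          bool os_hint_sent;
  };

  static struct demo_ctx demo;

  /*
   * Called after the LPS0 _DSM entry calls, i.e. as late as possible
   * in the suspend-to-idle sequence.
   */
  static int demo_prepare_late(void *context)
  {
          struct demo_ctx *ctx = context;

          ctx->os_hint_sent = true;
          return 0;
  }

  /*
   * Called before the LPS0 _DSM exit calls, i.e. as early as possible
   * on resume.
   */
  static void demo_restore_early(void *context)
  {
          struct demo_ctx *ctx = context;

          ctx->os_hint_sent = false;
  }

  static int __init demo_init(void)
  {
          /* Returns -ENODEV when no usable LPS0 device is present. */
          return acpi_register_lps0_callbacks(demo_prepare_late,
                                              demo_restore_early, &demo);
  }

  static void __exit demo_exit(void)
  {
          acpi_unregister_lps0_callbacks(demo_prepare_late,
                                         demo_restore_early, &demo);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");

Registration and removal take lps0_callback_handler_mutex, the same lock
held while the suspend and resume paths walk the handler list, so a
handler cannot be freed while acpi_s2idle_prepare_late() or
acpi_s2idle_restore_early() is still invoking it.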

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/acpi/x86/s2idle.c | 76 ++++++++++++++++++++++++++++++++++++++-
 include/linux/acpi.h      |  9 ++++-
 2 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..652dc2d75458 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -86,6 +86,16 @@ struct lpi_device_constraint_amd {
 	int min_dstate;
 };
 
+struct lps0_callback_handler {
+	struct list_head list_node;
+	int (*prepare_late_callback)(void *context);
+	void (*restore_early_callback)(void *context);
+	void *context;
+};
+
+static LIST_HEAD(lps0_callback_handler_head);
+static DEFINE_MUTEX(lps0_callback_handler_mutex);
+
 static struct lpi_constraints *lpi_constraints_table;
 static int lpi_constraints_table_size;
 static int rev_id;
@@ -444,6 +454,9 @@ static struct acpi_scan_handler lps0_handler = {
 
 int acpi_s2idle_prepare_late(void)
 {
+	struct lps0_callback_handler *handler;
+	int rc = 0;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return 0;
 
@@ -474,14 +487,31 @@ int acpi_s2idle_prepare_late(void)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
 				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
 	}
-	return 0;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+		rc = handler->prepare_late_callback(handler->context);
+		if (rc)
+			goto out;
+	}
+out:
+	mutex_unlock(&lps0_callback_handler_mutex);
+
+	return rc;
 }
 
 void acpi_s2idle_restore_early(void)
 {
+	struct lps0_callback_handler *handler;
+
 	if (!lps0_device_handle || sleep_no_lps0)
 		return;
 
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node)
+		handler->restore_early_callback(handler->context);
+	mutex_unlock(&lps0_callback_handler_mutex);
+
 	/* Modern standby exit */
 	if (lps0_dsm_func_mask_microsoft > 0)
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
@@ -524,4 +554,48 @@ void acpi_s2idle_setup(void)
 	s2idle_set_ops(&acpi_s2idle_ops_lps0);
 }
 
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+				 void (*restore_early)(void *context),
+				 void *context)
+{
+	struct lps0_callback_handler *handler;
+
+	if (!lps0_device_handle || sleep_no_lps0)
+		return -ENODEV;
+
+	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (!handler)
+		return -ENOMEM;
+	handler->prepare_late_callback = prepare_late;
+	handler->restore_early_callback = restore_early;
+	handler->context = context;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_add(&handler->list_node, &lps0_callback_handler_head);
+	mutex_unlock(&lps0_callback_handler_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_lps0_callbacks);
+
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+				    void (*restore_early)(void *context),
+				    void *context)
+{
+	struct lps0_callback_handler *handler;
+
+	mutex_lock(&lps0_callback_handler_mutex);
+	list_for_each_entry(handler, &lps0_callback_handler_head, list_node) {
+		if (handler->prepare_late_callback == prepare_late &&
+		    handler->restore_early_callback == restore_early &&
+		    handler->context == context) {
+			list_del(&handler->list_node);
+			kfree(handler);
+			break;
+		}
+	}
+	mutex_unlock(&lps0_callback_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_callbacks);
+
 #endif /* CONFIG_SUSPEND */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6274758648e3..cae0fde309f2 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1023,7 +1023,14 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 
 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
 					   u32 val_a, u32 val_b);
-
+#ifdef CONFIG_X86
+int acpi_register_lps0_callbacks(int (*prepare_late)(void *context),
+				 void (*restore_early)(void *context),
+				 void *context);
+void acpi_unregister_lps0_callbacks(int (*prepare_late)(void *context),
+				    void (*restore_early)(void *context),
+				    void *context);
+#endif /* CONFIG_X86 */
 #ifndef CONFIG_IA64
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else
-- 
2.34.1



Thread overview: 9+ messages
2022-03-10 15:17 [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler Mario Limonciello
2022-03-10 15:17 ` [PATCH 2/4] ACPI / x86: Pass the constraints checking result to LPS0 callback Mario Limonciello
2022-03-10 16:26   ` David E. Box
2022-03-10 16:29     ` Limonciello, Mario
2022-03-10 15:17 ` [PATCH 3/4] platform/x86: amd-pmc: Move to later in the suspend process Mario Limonciello
2022-03-10 16:35   ` David E. Box
2022-03-10 15:17 ` [PATCH 4/4] platform/x86: amd-pmc: Drop CPU QoS workaround Mario Limonciello
2022-03-10 15:56 ` [PATCH 1/4] ACPI / x86: Add support for LPS0 callback handler David E. Box
2022-03-10 16:13   ` Limonciello, Mario
