path: root/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch
Diffstat (limited to 'sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch')
-rw-r--r--  sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch  1308
1 file changed, 1308 insertions, 0 deletions
diff --git a/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch b/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch
new file mode 100644
index 000000000000..882d4af32dd4
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch
@@ -0,0 +1,1308 @@
+From 42b2e2ffb3a9e421050893c8a3786210fb541ae1 Mon Sep 17 00:00:00 2001
+From: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu, 8 Jul 2021 03:24:48 -0700
+Subject: [PATCH] Squashed commit of the following:
+
+all mainline s0ix support up to 2021-06-30:
+ 11 s0ix patches, including EC GPE patch
+ 7 amd_pmc v5 patchset diagnostics
+
+Does not include AMD XHCI D3hot Quirk
+
+commit 2b513555a462fc9233355d44789833a9be300a2f
+Merge: ebf1d8bcad36 b072ea5d76db
+Author: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu Jul 8 03:16:14 2021 -0700
+
+ Merge branch 'patchwork-amd-pmc-logging-v5' into TEMP-s0ix-on-5.13
+
+commit ebf1d8bcad366f451d54bda9e15943f173003551
+Merge: 62fb9874f5da 35f01a4422b1
+Author: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu Jul 8 03:15:32 2021 -0700
+
+ Merge branch 'DO-NOT-USE-backport-from-5.14-s0ix-no-d3hot' into HEAD
+
+commit 35f01a4422b180806c6106b5696ab9626b5dac41
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 30 14:46:06 2021 -0500
+
+ ACPI: PM: Only mark EC GPE for wakeup on Intel systems
+
+ A variety of AMD notebook systems using s2idle experience spurious
+ events where the EC or SMU ends up in the wrong state, leading to
+ difficulty waking up or higher than expected power consumption.
+
+ These events only occur when the EC GPE is inadvertently set as a wakeup
+ source. Originally the EC GPE was only set as a wakeup source when using
+ the intel-vbtn or intel-hid drivers in commit 10a08fd65ec1 ("ACPI: PM:
+ Set up EC GPE for system wakeup from drivers that need it") but during
+ testing a reporter discovered that this was not enough for their ASUS
+ Zenbook UX430UNR/i7-8550U to wake up by lid event or keypress.
+ Marking the EC GPE for wakeup universally resolved this for that
+ reporter in commit b90ff3554aa3 ("ACPI: PM: s2idle: Always set up EC GPE
+ for system wakeup").
+
+ However, this behavior has led to a number of problems:
+
+ * On both the Lenovo T14 and P14s the keyboard wakeup doesn't work, and
+ sometimes the power button event doesn't work.
+ * On the HP 635 G7 detaching or attaching AC during suspend will cause
+ the system not to wake up.
+ * On the Asus Vivobook, detaching AC during suspend causes resume problems.
+ * On the Lenovo 14ARE05, detaching AC during suspend causes resume problems.
+ * On the HP ENVY x360, detaching AC during suspend causes resume problems.
+
+ As there may be other Intel systems besides the ASUS Zenbook
+ UX430UNR/i7-8550U that don't use intel-vbtn or intel-hid, avoid these
+ problems by universally marking the EC GPE as a wakeup source only on
+ non-AMD systems.
+
+ Link: https://patchwork.kernel.org/project/linux-pm/cover/5997740.FPbUVk04hV@kreacher/#22825489
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1629
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Alex Deucher <alexander.deucher@amd.com>
+
+commit 5032c30b7d44b5b9d68f7a8d3bbcb6238484b2a6
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Thu Jun 17 11:42:12 2021 -0500
+
+ ACPI: PM: Adjust behavior for field problems on AMD systems
+
+ Some AMD systems with uPEP _HID AMD0004/AMDI0005 have an off-by-one bug
+ in the function mask they return. This means that the entry call will be
+ made for matching functions but not the corresponding exit call.
+
+ Other AMD systems with these HIDs should use the Microsoft generic UUID.
+
+ AMD systems with uPEP HID AMDI0006 should be using the Microsoft method.
+
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+
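+ For illustration, a standalone userspace sketch of the mask handling
+ described above (not code from this patch; the mask value is made up):
+ bit N of the mask returned by _DSM function 0 marks function N as
+ supported, and the off-by-one firmware mask is compensated by shifting
+ it left and re-setting bit 0.
+
+ ```
+ #include <stdio.h>
+
+ /* Function N is supported when bit N of the _DSM function mask is set. */
+ static int dsm_func_supported(unsigned int mask, unsigned int func)
+ {
+         return (mask & (1u << func)) != 0;
+ }
+
+ int main(void)
+ {
+         unsigned int mask = 0x1f;  /* made-up mask reported by firmware */
+
+         /* off-by-one quirk: shift the mask up and keep function 0 valid */
+         if (mask > 0x3)
+                 mask = (mask << 1) | 0x1;
+
+         printf("adjusted mask: 0x%x\n", mask);
+         printf("screen-on (AMD function 5) supported: %d\n",
+                dsm_func_supported(mask, 5));
+         return 0;
+ }
+ ```
+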
+commit 6ad8efa21132a0c53372a615c91f5f24b6e2ed63
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:11 2021 -0500
+
+ ACPI: PM: s2idle: Add support for new Microsoft UUID
+
+ This adds support for _DSM notifications to the Microsoft UUID
+ described in Microsoft's documentation for s2idle.
+
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/modern-standby-firmware-notifications
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit 1c22e7dec26ea42a98b0c87d97a150a415978ec1
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:10 2021 -0500
+
+ ACPI: PM: s2idle: Add support for multiple func mask
+
+ Required for the follow-up patch that adds a new UUID
+ needing its own function mask.
+
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit aa430a0319ce9ac4629735ac6a5b86bd48517dff
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:09 2021 -0500
+
+ ACPI: PM: s2idle: Refactor common code
+
+ Refactor common code to prepare for upcoming changes.
+ * Remove an unused struct.
+ * Print the error before returning.
+ * Free the ACPI object if the _DSM type is not as expected.
+ * Treat lps0_dsm_func_mask as an integer rather than a character.
+ * Remove the extra out_obj.
+ * Move rev_id.
+
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit a71e52e9ea22c0b976014a6f7eb2ed93f65aa4ca
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:08 2021 -0500
+
+ ACPI: PM: s2idle: Use correct revision id
+
+ The AMD spec mentions only revision 0. With this change, the
+ device constraint list is populated properly.
+
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit e5b15e24f169d15b231581cccb2bfa9820225cff
+Author: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed May 5 09:20:32 2021 -0400
+
+ ACPI: PM: s2idle: Add missing LPS0 functions for AMD
+
+ These are supposedly not required for AMD platforms,
+ but at least some HP laptops seem to require them to
+ properly turn off the keyboard backlight.
+
+ Based on a patch from Marcin Bachry <hegel666@gmail.com>.
+
+ Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+ Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 526bf2ffedece1257d3742a2ad633e00bb1778a7
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:18 2021 -0500
+
+ ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3 hint
+
+ AMD systems from Renoir and Lucienne require that the NVME controller
+ is put into D3 over a Modern Standby / suspend-to-idle
+ cycle. This is "typically" accomplished using the `StorageD3Enable`
+ property in the _DSD, but this property was introduced after many
+ of these systems launched and most OEM systems don't have it in
+ their BIOS.
+
+ On AMD Renoir, if these drives do not go into D3 over suspend-to-idle,
+ resume will fail with the NVME controller being reset and a trace
+ like this in the kernel logs:
+ ```
+ [ 83.556118] nvme nvme0: I/O 161 QID 2 timeout, aborting
+ [ 83.556178] nvme nvme0: I/O 162 QID 2 timeout, aborting
+ [ 83.556187] nvme nvme0: I/O 163 QID 2 timeout, aborting
+ [ 83.556196] nvme nvme0: I/O 164 QID 2 timeout, aborting
+ [ 95.332114] nvme nvme0: I/O 25 QID 0 timeout, reset controller
+ [ 95.332843] nvme nvme0: Abort status: 0x371
+ [ 95.332852] nvme nvme0: Abort status: 0x371
+ [ 95.332856] nvme nvme0: Abort status: 0x371
+ [ 95.332859] nvme nvme0: Abort status: 0x371
+ [ 95.332909] PM: dpm_run_callback(): pci_pm_resume+0x0/0xe0 returns -16
+ [ 95.332936] nvme 0000:03:00.0: PM: failed to resume async: error -16
+ ```
+
+ The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ a hardcoded allowlist for D3 support, which was used for these platforms.
+ Introduce quirks to hardcode them for Linux as well.
+
+ As this property is now "standardized", OEM systems using AMD Cezanne and
+ newer APUs have adopted it, and quirks like this should not be
+ necessary for them.
+
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Prike Liang <prike.liang@amd.com>
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Tested-by: Julian Sikorski <belegdol@gmail.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
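+ For a rough feel of how the allowlist above maps to CPU identification,
+ here is a standalone userspace sketch (not part of this patch) that
+ derives the x86 family/model from CPUID leaf 1 and compares it against
+ the Renoir (family 23, model 96) and Lucienne (family 23, model 104)
+ IDs used by the quirk; the kernel itself does this with x86_match_cpu().
+
+ ```
+ #include <cpuid.h>
+ #include <stdio.h>
+
+ int main(void)
+ {
+         unsigned int eax, ebx, ecx, edx, family, model;
+
+         if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+                 return 1;
+
+         family = (eax >> 8) & 0xf;
+         model = (eax >> 4) & 0xf;
+         if (family == 0xf) {                        /* AMD Zen: base family 0xf */
+                 family += (eax >> 20) & 0xff;       /* extended family */
+                 model |= ((eax >> 16) & 0xf) << 4;  /* extended model */
+         }
+
+         if (family == 23 && (model == 96 || model == 104))
+                 printf("Renoir/Lucienne: NVME would be forced into D3 over s2idle\n");
+         else
+                 printf("family %u model %u: no StorageD3Enable quirk\n", family, model);
+
+         return 0;
+ }
+ ```
+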
+commit 8b763d23606ce256a5ec9db81d8177ad5e90bbf9
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:17 2021 -0500
+
+ ACPI: Check StorageD3Enable _DSD property in ACPI code
+
+ Although first implemented for NVME, this check may be usable by
+ other drivers as well. Microsoft's specification explicitly mentions
+ that it may be usable by SATA and AHCI devices. Google also indicates
+ that they have used this with SDHCI in a downstream kernel tree for
+ slots that a user can plug a storage device into.
+
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Suggested-by: Keith Busch <kbusch@kernel.org>
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Rafael J. Wysocki <rjw@rjwysocki.net>
+ CC: Prike Liang <prike.liang@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+commit b12b48eb29c8a392f4093898877a02f271b93355
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri May 28 11:02:34 2021 -0500
+
+ nvme-pci: look for StorageD3Enable on companion ACPI device instead
+
+ The documentation around the StorageD3Enable property hints that it
+ should be set on the PCI device itself. This is where newer AMD systems
+ set the property and it's required for S0i3 support.
+
+ So rather than look for nodes of the root port, which are only present
+ on Intel systems, switch to the companion ACPI device for all systems.
+ David Box from Intel indicated this should work on Intel as well.
+
+ Link: https://lore.kernel.org/linux-nvme/YK6gmAWqaRmvpJXb@google.com/T/#m900552229fa455867ee29c33b854845fce80ba70
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Fixes: df4f9bc4fb9c ("nvme-pci: add support for ACPI StorageD3Enable property")
+ Suggested-by: Liang Prike <Prike.Liang@amd.com>
+ Acked-by: Raul E Rangel <rrangel@chromium.org>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Reviewed-by: David E. Box <david.e.box@linux.intel.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+commit e54f80e62f9d3412a7add7f3753c7c2604cbfe78
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed May 12 17:15:14 2021 -0500
+
+ ACPI: processor idle: Fix up C-state latency if not ordered
+
+ Generally, the C-state latency is provided by the _CST method or
+ FADT, but some OEM platforms using AMD Picasso, Renoir, Van Gogh,
+ and Cezanne set the C2 latency greater than C3's which causes the
+ C2 state to be skipped.
+
+ That will block the core entering PC6, which prevents S0ix working
+ properly on Linux systems.
+
+ Other operating systems do not validate the latency values, so they do
+ not skip states and this issue does not show up there.
+
+ To avoid this issue on Linux, detect when the latencies are not in
+ increasing order and sort them.
+
+ Link: https://gitlab.freedesktop.org/agd5f/linux/-/commit/026d186e4592c1ee9c1cb44295912d0294508725
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230#note_712174
+ Suggested-by: Prike Liang <Prike.Liang@amd.com>
+ Suggested-by: Alex Deucher <alexander.deucher@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ [ rjw: Subject and changelog edits ]
+ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
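+ The workaround boils down to a detect-and-sort pass over the C-state
+ table. A standalone sketch of that logic (not the kernel code; the
+ sample latencies are invented to mimic the affected platforms):
+
+ ```
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ struct cstate { int type; unsigned int latency; };
+
+ static int latency_cmp(const void *a, const void *b)
+ {
+         const struct cstate *x = a, *y = b;
+
+         return (x->latency > y->latency) - (x->latency < y->latency);
+ }
+
+ int main(void)
+ {
+         /* C2 reports a larger latency than C3, as on the buggy firmware */
+         struct cstate states[] = { {1, 1}, {2, 400}, {3, 350} };
+         size_t i, n = sizeof(states) / sizeof(states[0]);
+         int buggy = 0;
+
+         for (i = 1; i < n; i++)
+                 if (states[i].type >= states[i - 1].type &&
+                     states[i].latency < states[i - 1].latency)
+                         buggy = 1;
+
+         if (buggy)
+                 qsort(states, n, sizeof(states[0]), latency_cmp);
+
+         for (i = 0; i < n; i++)
+                 printf("C%d latency %u us\n", states[i].type, states[i].latency);
+         return 0;
+ }
+ ```
+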
+commit b072ea5d76db90c1316aa04a15f77744ac1bafa1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:03 2021 +0530
+
+ platform/x86: amd-pmc: Add new acpi id for future PMC controllers
+
+ The upcoming PMC controller has a newer ACPI ID; add it to
+ the list of supported ACPI devices.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 4f025bcec084eb0624b3a34a03b0ff00a14a2fa0
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:02 2021 +0530
+
+ platform/x86: amd-pmc: Add support for ACPI ID AMDI0006
+
+ Some newer BIOSes have added another ACPI ID for the uPEP device.
+ SMU statistics behave identically on this device.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit a3ccb3605aac3f650732a55adea7e666d9df24c5
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:01 2021 +0530
+
+ amd-pmc: Add support for logging s0ix counters
+
+ The FCH SSC registers also provide a certain level of information
+ about the s0ix entry and exit times, which comes in handy when the SMU
+ fails to report the statistics via the mailbox communication.
+
+ This information is exposed via a new debugfs file, "s0ix_stats".
+ A non-zero value in these counters means that the system entered
+ the s0ix state.
+
+ If the s0ix entry and exit times don't change across a suspend-to-idle
+ cycle, the silicon has not entered the deepest state.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
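+ The arithmetic behind the new file is simple: the FCH exposes 64-bit
+ entry/exit timestamps as low/high 32-bit words, and the counters tick
+ at 48 MHz, so dividing the delta by 48 gives microseconds. A standalone
+ sketch with invented register values (not code from this patch):
+
+ ```
+ #include <stdint.h>
+ #include <stdio.h>
+
+ static uint64_t combine(uint32_t hi, uint32_t lo)
+ {
+         return ((uint64_t)hi << 32) | lo;
+ }
+
+ int main(void)
+ {
+         uint64_t entry = combine(0x0, 0x02faf080);  /* pretend entry timestamp */
+         uint64_t exit  = combine(0x0, 0x11e1a300);  /* pretend exit timestamp */
+
+         if (entry == exit) {
+                 puts("entry == exit: the deepest state was not reached");
+                 return 0;
+         }
+
+         /* 48 MHz tick -> microseconds */
+         printf("S0ix residency: %llu us\n",
+                (unsigned long long)((exit - entry) / 48));
+         return 0;
+ }
+ ```
+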
+commit 826b61ec6a7785614435fcdeb951290981eff6f1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:00 2021 +0530
+
+ platform/x86: amd-pmc: Add support for logging SMU metrics
+
+ The SMU provides a way to dump the s0ix debug statistics, in the form of
+ a metrics table, via a set of special mailbox commands.
+
+ Add support to the driver to send these commands to the SMU and expose
+ the information received via debugfs. The information contains the s0ix
+ entry/exit times, the active time of each IP block, etc.
+
+ As a side note, SMU subsystem logging is not supported on Picasso-based
+ SoCs.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
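+ Once the driver is loaded, the output can be inspected from userspace.
+ A minimal sketch that dumps the two debugfs files added by this series
+ (assuming debugfs is mounted at the usual /sys/kernel/debug and the
+ reader has the required privileges):
+
+ ```
+ #include <stdio.h>
+
+ static void dump(const char *path)
+ {
+         char line[256];
+         FILE *f = fopen(path, "r");
+
+         if (!f) {
+                 perror(path);
+                 return;
+         }
+         while (fgets(line, sizeof(line), f))
+                 fputs(line, stdout);
+         fclose(f);
+ }
+
+ int main(void)
+ {
+         dump("/sys/kernel/debug/amd_pmc/smu_fw_info");
+         dump("/sys/kernel/debug/amd_pmc/s0ix_stats");
+         return 0;
+ }
+ ```
+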
+commit dd120f506eea9bee584cf0f7d737631554ba17a3
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:59 2021 +0530
+
+ platform/x86: amd-pmc: call dump registers only once
+
+ Currently the amd_pmc_dump_registers() routine is called in
+ multiple places. The best place to call it is after command submission
+ to the SMU.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 87390ff9f263e0aa586d9a33198e79b47e5f85d1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:58 2021 +0530
+
+ platform/x86: amd-pmc: Fix SMU firmware reporting mechanism
+
+ It was recently understood that the current mechanism available in the
+ driver to get SMU firmware info works only on internal SMU builds, and
+ there is a separate way to get all the SMU logging counters (addressed
+ in the next patch). Hence remove the SMU info shown via debugfs, as it
+ is no longer useful.
+
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 65581db92e5e4d5fa941cc6bb91e702e3ed97b74
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:57 2021 +0530
+
+ platform/x86: amd-pmc: Fix command completion code
+
+ The protocol to submit a job request to the SMU is to wait for
+ AMD_PMC_REGISTER_RESPONSE to return 1, meaning the SMU is ready to take
+ requests. The PMC driver has to make sure that the response code is
+ always AMD_PMC_RESULT_OK before making any command submission.
+
+ When we submit a message to the SMU, we have to wait until it processes
+ the request. Add a read_poll_timeout() check, as this was missing in
+ the existing code.
+
+ Also, add a mutex to protect amd_pmc_send_cmd() calls to the SMU.
+
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Acked-by: Raul E Rangel <rrangel@chromium.org>
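+
+ The submission protocol amounts to a poll/write/poll handshake. A
+ simplified standalone model (not the driver code; the "registers" are
+ plain variables standing in for MMIO, the result value is illustrative,
+ and a fake helper completes the request synchronously):
+
+ ```
+ #include <stdio.h>
+
+ #define RESULT_OK 0x01  /* illustrative stand-in for AMD_PMC_RESULT_OK */
+
+ /* Fake MMIO registers; the real driver uses ioread32()/iowrite32(). */
+ static unsigned int response_reg = RESULT_OK;
+ static unsigned int argument_reg;
+ static unsigned int message_reg;
+
+ /* Stand-in for the SMU finishing the request. */
+ static void fake_smu_complete(void)
+ {
+         response_reg = RESULT_OK;
+ }
+
+ static int send_cmd(unsigned int msg, unsigned int arg)
+ {
+         /* 1. The SMU must report non-zero before a new request is submitted. */
+         if (!response_reg)
+                 return -1;
+
+         /* 2. Clear the response, write the argument and message registers. */
+         response_reg = 0;
+         argument_reg = arg;
+         message_reg = msg;
+         fake_smu_complete();
+
+         /* 3. Check the result code once the SMU signals completion. */
+         return response_reg == RESULT_OK ? 0 : -1;
+ }
+
+ int main(void)
+ {
+         printf("OS hint submit: %s\n", send_cmd(0x03, 1) ? "failed" : "ok");
+         return 0;
+ }
+ ```
+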
+---
+ drivers/acpi/device_pm.c | 32 +++++
+ drivers/acpi/internal.h | 9 ++
+ drivers/acpi/processor_idle.c | 40 ++++++
+ drivers/acpi/x86/s2idle.c | 157 ++++++++++++++-------
+ drivers/acpi/x86/utils.c | 25 ++++
+ drivers/nvme/host/pci.c | 50 +------
+ drivers/platform/x86/amd-pmc.c | 244 ++++++++++++++++++++++++++++++---
+ include/linux/acpi.h | 5 +
+ 8 files changed, 443 insertions(+), 119 deletions(-)
+
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index d260bc1f3e6e..6dd9bd64903e 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1340,4 +1340,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ return 1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
++
++/**
++ * acpi_storage_d3 - Check if D3 should be used in the suspend path
++ * @dev: Device to check
++ *
++ * Return %true if the platform firmware wants @dev to be programmed
++ * into D3hot or D3cold (if supported) in the suspend path, or %false
++ * when there is no specific preference. On some platforms, if this
++ * hint is ignored, @dev may remain unresponsive after suspending the
++ * platform as a whole.
++ *
++ * Although the property has storage in the name it actually is
++ * applied to the PCIe slot and plugging in a non-storage device the
++ * same platform restrictions will likely apply.
++ */
++bool acpi_storage_d3(struct device *dev)
++{
++ struct acpi_device *adev = ACPI_COMPANION(dev);
++ u8 val;
++
++ if (force_storage_d3())
++ return true;
++
++ if (!adev)
++ return false;
++ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
++ &val))
++ return false;
++ return val == 1;
++}
++EXPORT_SYMBOL_GPL(acpi_storage_d3);
++
+ #endif /* CONFIG_PM */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index e21611c9a170..7ac01b03ba67 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
+ static inline void suspend_nvs_restore(void) {}
+ #endif
+
++#ifdef CONFIG_X86
++bool force_storage_d3(void);
++#else
++static inline bool force_storage_d3(void)
++{
++ return false;
++}
++#endif
++
+ /*--------------------------------------------------------------------------
+ Device properties
+ -------------------------------------------------------------------------- */
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 45a019619e4a..095c8aca141e 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,6 +16,7 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h> /* need_resched() */
++#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ return;
+ }
+
++static int acpi_cst_latency_cmp(const void *a, const void *b)
++{
++ const struct acpi_processor_cx *x = a, *y = b;
++
++ if (!(x->valid && y->valid))
++ return 0;
++ if (x->latency > y->latency)
++ return 1;
++ if (x->latency < y->latency)
++ return -1;
++ return 0;
++}
++static void acpi_cst_latency_swap(void *a, void *b, int n)
++{
++ struct acpi_processor_cx *x = a, *y = b;
++ u32 tmp;
++
++ if (!(x->valid && y->valid))
++ return;
++ tmp = x->latency;
++ x->latency = y->latency;
++ y->latency = tmp;
++}
++
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ {
+ unsigned int i;
+ unsigned int working = 0;
++ unsigned int last_latency = 0;
++ unsigned int last_type = 0;
++ bool buggy_latency = false;
+
+ pr->power.timer_broadcast_on_state = INT_MAX;
+
+@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ }
+ if (!cx->valid)
+ continue;
++ if (cx->type >= last_type && cx->latency < last_latency)
++ buggy_latency = true;
++ last_latency = cx->latency;
++ last_type = cx->type;
+
+ lapic_timer_check_state(i, pr, cx);
+ tsc_check_state(cx->type);
+ working++;
+ }
+
++ if (buggy_latency) {
++ pr_notice("FW issue: working around C-state latencies out of order\n");
++ sort(&pr->power.states[1], max_cstate,
++ sizeof(struct acpi_processor_cx),
++ acpi_cst_latency_cmp,
++ acpi_cst_latency_swap);
++ }
++
+ lapic_timer_propagate_broadcast(pr);
+
+ return (working);
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 2b69536cdccb..1c507804fb10 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ {"", },
+ };
+
++/* Microsoft platform agnostic UUID */
++#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
++
+ #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
+ #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
+@@ -39,15 +42,22 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ #define ACPI_LPS0_SCREEN_ON 4
+ #define ACPI_LPS0_ENTRY 5
+ #define ACPI_LPS0_EXIT 6
++#define ACPI_LPS0_MS_ENTRY 7
++#define ACPI_LPS0_MS_EXIT 8
+
+ /* AMD */
+ #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
++#define ACPI_LPS0_ENTRY_AMD 2
++#define ACPI_LPS0_EXIT_AMD 3
+ #define ACPI_LPS0_SCREEN_OFF_AMD 4
+ #define ACPI_LPS0_SCREEN_ON_AMD 5
+
+ static acpi_handle lps0_device_handle;
+ static guid_t lps0_dsm_guid;
+-static char lps0_dsm_func_mask;
++static int lps0_dsm_func_mask;
++
++static guid_t lps0_dsm_guid_microsoft;
++static int lps0_dsm_func_mask_microsoft;
+
+ /* Device constraint entry structure */
+ struct lpi_device_info {
+@@ -68,15 +78,7 @@ struct lpi_constraints {
+ int min_dstate;
+ };
+
+-/* AMD */
+-/* Device constraint entry structure */
+-struct lpi_device_info_amd {
+- int revision;
+- int count;
+- union acpi_object *package;
+-};
+-
+-/* Constraint package structure */
++/* AMD Constraint package structure */
+ struct lpi_device_constraint_amd {
+ char *name;
+ int enabled;
+@@ -94,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
+ int i, j, k;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
++ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+- if (!out_obj)
+- return;
+-
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
++ if (!out_obj)
++ return;
++
+ for (i = 0; i < out_obj->package.count; i++) {
+ union acpi_object *package = &out_obj->package.elements[i];
+
+@@ -315,14 +317,15 @@ static void lpi_check_constraints(void)
+ }
+ }
+
+-static void acpi_sleep_run_lps0_dsm(unsigned int func)
++static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
+ {
+ union acpi_object *out_obj;
+
+- if (!(lps0_dsm_func_mask & (1 << func)))
++ if (!(func_mask & (1 << func)))
+ return;
+
+- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
++ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
++ rev_id, func, NULL);
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+@@ -334,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+ }
+
++static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
++{
++ union acpi_object *obj;
++ int ret = -EINVAL;
++
++ guid_parse(uuid, dsm_guid);
++ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
++
++ /* Check if the _DSM is present and as expected. */
++ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
++ obj->buffer.length > sizeof(u32)) {
++ acpi_handle_debug(handle,
++ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
++ goto out;
++ }
++
++ ret = *(int *)obj->buffer.pointer;
++ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
++
++out:
++ ACPI_FREE(obj);
++ return ret;
++}
++
+ static int lps0_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+ {
+- union acpi_object *out_obj;
+-
+ if (lps0_device_handle)
+ return 0;
+
+@@ -346,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
+ return 0;
+
+ if (acpi_s2idle_vendor_amd()) {
+- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
++ /* AMD0004, AMDI0005:
++ * - Should use rev_id 0x0
++ * - function mask > 0x3: Should use AMD method, but has off by one bug
++ * - function mask = 0x3: Should use Microsoft method
++ * AMDI0006:
++ * - should use rev_id 0x0
++ * - function mask = 0x3: Should use Microsoft method
++ */
++ const char *hid = acpi_device_hid(adev);
+ rev_id = 0;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
++ &lps0_dsm_guid_microsoft);
++ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
++ !strcmp(hid, "AMDI0005"))) {
++ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
++ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
++ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
++ }
+ } else {
+- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+ rev_id = 1;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = -EINVAL;
+ }
+
+- /* Check if the _DSM is present and as expected. */
+- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
+- acpi_handle_debug(adev->handle,
+- "_DSM function 0 evaluation failed\n");
+- return 0;
+- }
+-
+- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+-
+- ACPI_FREE(out_obj);
+-
+- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+- lps0_dsm_func_mask);
++ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
++ return 0; //function evaluation failed
+
+ lps0_device_handle = adev->handle;
+
+@@ -384,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+- * EC GPE to be enabled while suspended for certain wakeup devices to
+- * work, so mark it as wakeup-capable.
++ * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
++ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
++ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
++ *
++ * Only enable on !AMD as enabling this universally causes problems for a number
++ * of AMD based systems.
+ */
+- acpi_ec_mark_gpe_for_wake();
++ if (!acpi_s2idle_vendor_amd())
++ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+ }
+@@ -406,11 +443,23 @@ int acpi_s2idle_prepare_late(void)
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+
+ return 0;
+@@ -421,11 +470,23 @@ void acpi_s2idle_restore_early(void)
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+ }
+
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index bdc1ba00aee9..f22f23933063 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
+
+ return ret;
+ }
++
++/*
++ * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
++ *
++ * This is "typically" accomplished using the `StorageD3Enable`
++ * property in the _DSD that is checked via the `acpi_storage_d3` function
++ * but this property was introduced after many of these systems launched
++ * and most OEM systems don't have it in their BIOS.
++ *
++ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
++ * a hardcoded allowlist for D3 support, which was used for these platforms.
++ *
++ * This allows quirking on Linux in a similar fashion.
++ */
++static const struct x86_cpu_id storage_d3_cpu_ids[] = {
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
++ {}
++};
++
++bool force_storage_d3(void)
++{
++ return x86_match_cpu(storage_d3_cpu_ids);
++}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index a29b170701fc..8fbc4c87a0d8 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2828,54 +2828,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ return 0;
+ }
+
+-#ifdef CONFIG_ACPI
+-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- struct acpi_device *adev;
+- struct pci_dev *root;
+- acpi_handle handle;
+- acpi_status status;
+- u8 val;
+-
+- /*
+- * Look for _DSD property specifying that the storage device on the port
+- * must use D3 to support deep platform power savings during
+- * suspend-to-idle.
+- */
+- root = pcie_find_root_port(dev);
+- if (!root)
+- return false;
+-
+- adev = ACPI_COMPANION(&root->dev);
+- if (!adev)
+- return false;
+-
+- /*
+- * The property is defined in the PXSX device for South complex ports
+- * and in the PEGP device for North complex ports.
+- */
+- status = acpi_get_handle(adev->handle, "PXSX", &handle);
+- if (ACPI_FAILURE(status)) {
+- status = acpi_get_handle(adev->handle, "PEGP", &handle);
+- if (ACPI_FAILURE(status))
+- return false;
+- }
+-
+- if (acpi_bus_get_device(handle, &adev))
+- return false;
+-
+- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+- &val))
+- return false;
+- return val == 1;
+-}
+-#else
+-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- return false;
+-}
+-#endif /* CONFIG_ACPI */
+-
+ static void nvme_async_probe(void *data, async_cookie_t cookie)
+ {
+ struct nvme_dev *dev = data;
+@@ -2925,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ quirks |= check_vendor_combination_bug(pdev);
+
+- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
++ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a bios work around to ask for D3 on
+ * platforms that support kernel managed suspend.
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index b9da58ee9b1e..d2f9a62e1166 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -46,34 +46,79 @@
+ #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+ #define AMD_PMC_RESULT_FAILED 0xFF
+
++/* FCH SSC Registers */
++#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
++#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
++#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
++#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
++#define FCH_SSC_MAPPING_SIZE 0x800
++#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
++#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
++
++/* SMU Message Definations */
++#define SMU_MSG_GETSMUVERSION 0x02
++#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
++#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
++#define SMU_MSG_LOG_START 0x06
++#define SMU_MSG_LOG_RESET 0x07
++#define SMU_MSG_LOG_DUMP_DATA 0x08
++#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+ /* List of supported CPU ids */
+ #define AMD_CPU_ID_RV 0x15D0
+ #define AMD_CPU_ID_RN 0x1630
+ #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
++#define AMD_CPU_ID_YC 0x14B5
+
+-#define AMD_SMU_FW_VERSION 0x0
+ #define PMC_MSG_DELAY_MIN_US 100
+ #define RESPONSE_REGISTER_LOOP_MAX 200
+
++#define SOC_SUBSYSTEM_IP_MAX 12
++#define DELAY_MIN_US 2000
++#define DELAY_MAX_US 3000
+ enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+ };
+
++struct amd_pmc_bit_map {
++ const char *name;
++ u32 bit_mask;
++};
++
++static const struct amd_pmc_bit_map soc15_ip_blk[] = {
++ {"DISPLAY", BIT(0)},
++ {"CPU", BIT(1)},
++ {"GFX", BIT(2)},
++ {"VDD", BIT(3)},
++ {"ACP", BIT(4)},
++ {"VCN", BIT(5)},
++ {"ISP", BIT(6)},
++ {"NBIO", BIT(7)},
++ {"DF", BIT(8)},
++ {"USB0", BIT(9)},
++ {"USB1", BIT(10)},
++ {"LAPIC", BIT(11)},
++ {}
++};
++
+ struct amd_pmc_dev {
+ void __iomem *regbase;
+- void __iomem *smu_base;
++ void __iomem *smu_virt_addr;
++ void __iomem *fch_virt_addr;
+ u32 base_addr;
+ u32 cpu_id;
++ u32 active_ips;
+ struct device *dev;
++ struct mutex lock; /* generic mutex lock */
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+ #endif /* CONFIG_DEBUG_FS */
+ };
+
+ static struct amd_pmc_dev pmc;
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
+
+ static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
+ {
+@@ -85,18 +130,76 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
+ iowrite32(val, dev->regbase + reg_offset);
+ }
+
++struct smu_metrics {
++ u32 table_version;
++ u32 hint_count;
++ u32 s0i3_cyclecount;
++ u32 timein_s0i2;
++ u64 timeentering_s0i3_lastcapture;
++ u64 timeentering_s0i3_totaltime;
++ u64 timeto_resume_to_os_lastcapture;
++ u64 timeto_resume_to_os_totaltime;
++ u64 timein_s0i3_lastcapture;
++ u64 timein_s0i3_totaltime;
++ u64 timein_swdrips_lastcapture;
++ u64 timein_swdrips_totaltime;
++ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
++ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
++} __packed;
++
+ #ifdef CONFIG_DEBUG_FS
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+ struct amd_pmc_dev *dev = s->private;
+- u32 value;
++ struct smu_metrics table;
++ int idx;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
++
++ seq_puts(s, "\n=== SMU Statistics ===\n");
++ seq_printf(s, "Table Version: %d\n", table.table_version);
++ seq_printf(s, "Hint Count: %d\n", table.hint_count);
++ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
++ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
++ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
++
++ seq_puts(s, "\n=== Active time (in us) ===\n");
++ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
++ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
++ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
++ table.timecondition_notmet_lastcapture[idx]);
++ }
+
+- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
+- seq_printf(s, "SMU FW Info: %x\n", value);
+ return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+
++static int s0ix_stats_show(struct seq_file *s, void *unused)
++{
++ struct amd_pmc_dev *dev = s->private;
++ u64 entry_time, exit_time, residency;
++
++ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
++ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
++
++ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
++ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
++
++ /* It's in 48MHz. We need to convert it */
++ residency = (exit_time - entry_time) / 48;
++
++ seq_puts(s, "=== S0ix statistics ===\n");
++ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
++ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
++ seq_printf(s, "Residency Time: %lld\n", residency);
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
++
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ {
+ debugfs_remove_recursive(dev->dbgfs_dir);
+@@ -107,6 +210,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
+ debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
+ &smu_fw_info_fops);
++ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
++ &s0ix_stats_fops);
+ }
+ #else
+ static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+@@ -118,6 +223,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ }
+ #endif /* CONFIG_DEBUG_FS */
+
++static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
++{
++ u32 phys_addr_low, phys_addr_hi;
++ u64 smu_phys_addr;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ /* Get Active devices list from SMU */
++ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
++
++ /* Get dram address */
++ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
++ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
++ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
++
++ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
++ if (!dev->smu_virt_addr)
++ return -ENOMEM;
++
++ /* Start the logging */
++ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ return 0;
++}
++
+ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ {
+ u32 value;
+@@ -132,15 +263,15 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
+ }
+
+-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
+ {
+ int rc;
+- u8 msg;
+ u32 val;
+
++ mutex_lock(&dev->lock);
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+- val, val > 0, PMC_MSG_DELAY_MIN_US,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+@@ -154,21 +285,74 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
+
+ /* Write message ID to message ID register */
+- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
+- return 0;
++
++ /* Wait until we get a valid response */
++ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
++ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
++ if (rc) {
++ dev_err(dev->dev, "SMU response timed out\n");
++ goto out_unlock;
++ }
++
++ switch (val) {
++ case AMD_PMC_RESULT_OK:
++ if (ret) {
++ /* PMFW may take longer time to return back the data */
++ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
++ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
++ }
++ break;
++ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
++ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
++ rc = -EBUSY;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_UNKNOWN:
++ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
++ rc = -EINVAL;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
++ case AMD_PMC_RESULT_FAILED:
++ default:
++ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
++ rc = -EIO;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&dev->lock);
++ amd_pmc_dump_registers(dev);
++ return rc;
++}
++
++static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
++{
++ switch (dev->cpu_id) {
++ case AMD_CPU_ID_PCO:
++ return MSG_OS_HINT_PCO;
++ case AMD_CPU_ID_RN:
++ case AMD_CPU_ID_YC:
++ return MSG_OS_HINT_RN;
++ }
++ return -EINVAL;
+ }
+
+ static int __maybe_unused amd_pmc_suspend(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 1);
++ /* Reset and Start SMU logging - to monitor the s0i3 stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "suspend failed\n");
+
+- amd_pmc_dump_registers(pdev);
+ return 0;
+ }
+
+@@ -176,12 +360,16 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 0);
++ /* Let SMU know that we are looking for stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "resume failed\n");
+
+- amd_pmc_dump_registers(pdev);
+ return 0;
+ }
+
+@@ -190,6 +378,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
+ };
+
+ static const struct pci_device_id pmc_pci_ids[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
+@@ -201,9 +390,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ {
+ struct amd_pmc_dev *dev = &pmc;
+ struct pci_dev *rdev;
+- u32 base_addr_lo;
+- u32 base_addr_hi;
+- u64 base_addr;
++ u32 base_addr_lo, base_addr_hi;
++ u64 base_addr, fch_phys_addr;
+ int err;
+ u32 val;
+
+@@ -248,16 +436,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
+- if (!dev->smu_base)
+- return -ENOMEM;
+-
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
+ AMD_PMC_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+- amd_pmc_dump_registers(dev);
++ mutex_init(&dev->lock);
++
++ /* Use FCH registers to get the S0ix stats */
++ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
++ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
++ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
++ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
++ if (!dev->fch_virt_addr)
++ return -ENOMEM;
++
++ /* Use SMU to get the s0i3 debug stats */
++ err = amd_pmc_setup_smu_logging(dev);
++ if (err)
++ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
+
+ platform_set_drvdata(pdev, dev);
+ amd_pmc_dbgfs_register(dev);
+@@ -269,11 +466,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
+ struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmc_dbgfs_unregister(dev);
++ mutex_destroy(&dev->lock);
+ return 0;
+ }
+
+ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
+ {"AMDI0005", 0},
++ {"AMDI0006", 0},
++ {"AMDI0007", 0},
+ {"AMD0004", 0},
+ { }
+ };
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index c60745f657e9..dd0dafd21e33 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
+ int acpi_subsys_runtime_suspend(struct device *dev);
+ int acpi_subsys_runtime_resume(struct device *dev);
+ int acpi_dev_pm_attach(struct device *dev, bool power_on);
++bool acpi_storage_d3(struct device *dev);
+ #else
+ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
+ static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
+@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ {
+ return 0;
+ }
++static inline bool acpi_storage_d3(struct device *dev)
++{
++ return false;
++}
+ #endif
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
+--
+2.32.0
+