author     dragonn  2021-07-20 20:33:28 +0200
committer  dragonn  2021-07-20 20:33:28 +0200
commit     b59bf3acd35fd937c8cc162243b70724e643294f (patch)
tree       c42e496dd99a5a83a7279086f4203fe3d9c5bc42
parent     3e86e006ef476d294fe1d93ec691a65dd144502c (diff)
download   aur-b59bf3acd35fd937c8cc162243b70724e643294f.tar.gz

5.13.4
-rw-r--r--  .SRCINFO  |  78
-rw-r--r--  .gitignore  |  7
-rw-r--r--  5.12-acpi-1of2-turn-off-unused.patch  |  88
-rw-r--r--  5.12-acpi-2of2-turn-off-unconditionally.patch  |  51
-rw-r--r--  PKGBUILD  |  101
-rw-r--r--  README  |  4
-rw-r--r--  amdgpu-backlight.patch  |  21
-rw-r--r--  amdgpu-drm-next.patch  |  138
-rw-r--r--  sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch  |  357
-rw-r--r--  sys-kernel_arch-sources-g14_files-0002-acpi_unused.patch  |  87
-rw-r--r--  sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch  |  12
-rw-r--r--  sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch  |  682
-rw-r--r--  sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch  |  3458
-rw-r--r--  sys-kernel_arch-sources-g14_files-0006-ACPI-PM-s2idle-Add-missing-LPS0-functions.patch  |  51
-rw-r--r--  sys-kernel_arch-sources-g14_files-0007-ACPI-processor-idle-Fix-up-C-state-latency.patch  |  108
-rw-r--r--  sys-kernel_arch-sources-g14_files-0008-NVMe-set-some-AMD-PCIe-downstream-storage-device-to-D3-for-s2idle.patch  |  71
-rw-r--r--  sys-kernel_arch-sources-g14_files-0009-PCI-quirks-Quirk-PCI-d3hot-delay.patch  |  30
-rw-r--r--  sys-kernel_arch-sources-g14_files-0010-platform-x86-force-LPS0-functions-for-AMD.patch  |  41
-rw-r--r--  sys-kernel_arch-sources-g14_files-0011-USB-pci-quirks-disable-D3cold-on-s2idle-Renoire.patch  |  68
-rw-r--r--  sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch  |  209
-rw-r--r--  sys-kernel_arch-sources-g14_files-0015-revert-4cbbe34807938e6e494e535a68d5ff64edac3f20.patch  |  40
-rw-r--r--  sys-kernel_arch-sources-g14_files-0016-revert-1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.patch  |  40
-rw-r--r--  sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch  |  108
-rw-r--r--  sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch  |  74
-rw-r--r--  sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch  |  136
-rw-r--r--  sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch  |  122
-rw-r--r--  sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch  |  51
-rw-r--r--  sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch  |  144
-rw-r--r--  sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch  |  258
-rw-r--r--  sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch  |  196
-rw-r--r--  sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch  |  223
-rw-r--r--  sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch  |  168
-rw-r--r--  sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch  |  52
-rw-r--r--  sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch  |  89
-rw-r--r--  sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch  |  278
-rw-r--r--  sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch  |  123
-rw-r--r--  sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch  |  28
-rw-r--r--  sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch  |  53
-rw-r--r--  sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch  |  74
-rw-r--r--  sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch  |  1308
-rw-r--r--  sys-kernel_arch-sources-g14_files-0036-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch (renamed from sys-kernel_arch-sources-g14_files-0018-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch)  |  0
-rw-r--r--  sys-kernel_arch-sources-g14_files-0037-ACPI-PM-Only-mark-EC-GPE-for-wakeup-on-Intel-systems.patch  |  171
-rw-r--r--  sys-kernel_arch-sources-g14_files-0037-platform-x86-amd-pmc-Use-return-code-on-suspend.patch  |  135
-rw-r--r--  sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch  |  166
-rw-r--r--  sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch  |  182
-rw-r--r--  sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch  |  169
-rw-r--r--  sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch  |  30
-rw-r--r--  sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch  |  25
-rw-r--r--  sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch  |  59
-rw-r--r--  sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch  |  37
-rw-r--r--  sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch  |  74
-rw-r--r--  sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch  |  41
-rw-r--r--  sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch  |  41
-rw-r--r--  sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch  |  221
-rw-r--r--  sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch  |  1164
-rw-r--r--  sys-kernel_arch-sources-g14_files_0001-HID-asus-Filter-keyboard-EC-for-old-ROG-keyboard.patch  |  32
56 files changed, 6293 insertions, 5481 deletions
diff --git a/.SRCINFO b/.SRCINFO
index a9ff4b60ddb4..688c738db8a0 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = linux-g14
pkgdesc = Linux
- pkgver = 5.12.14.arch1
- pkgrel = 2
+ pkgver = 5.13.4.arch1
+ pkgrel = 1
url = https://lab.retarded.farm/zappel/asus-rog-zephyrus-g14/
arch = x86_64
license = GPL2
@@ -16,65 +16,45 @@ pkgbase = linux-g14
makedepends = git
makedepends = gcc>=11.0
options = !strip
- source = archlinux-linux::git+https://github.com/archlinux/linux?signed#tag=v5.12.14-arch1
+ source = archlinux-linux::git+https://github.com/archlinux/linux?signed#tag=v5.13.4-arch1
source = config
source = choose-gcc-optimization.sh
- source = sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch
- source = sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch
source = sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch::https://raw.githubusercontent.com/graysky2/kernel_compiler_patch/a8d200f422f4b2abeaa6cfcfa37136b308e6e33e/more-uarches-for-kernel-5.8%2B.patch
source = sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch
- source = https://gitlab.com/asus-linux/fedora-kernel/-/archive/91f97d88231152006764d3c50cc52ddbb508529f/fedora-kernel-91f97d88231152006764d3c50cc52ddbb508529f.zip
- source = sys-kernel_arch-sources-g14_files-0012-acpi-1of2-turn-off-unused.patch::https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git/patch/?id=4b9ee772eaa82188b0eb8e05bdd1707c2a992004
- source = sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch
- source = sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch
- source = sys-kernel_arch-sources-g14_files-0018-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch
- source = sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch
- source = sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch
- source = sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch
- source = sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch
- source = sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch
- source = sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch
- source = sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch
- source = sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch
- source = sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch
- source = sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch
- source = sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch
- source = sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch
- source = sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch
- source = sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch
- source = sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch
source = sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch
+ source = sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch
+ source = sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch
+ source = sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch
+ source = sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch
+ source = sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch
+ source = sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch
+ source = sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch
+ source = sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch
+ source = sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch
+ source = sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch
+ source = sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch
+ source = sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
validpgpkeys = A2FF3A36AAA56654109064AB19802F8B0D70FC30
sha256sums = SKIP
sha256sums = 761427cf8dbeed10fd3149023bc83d0a2319e70a5cbfdcdda50e7a49e8d2b198
sha256sums = 1ac18cad2578df4a70f9346f7c6fccbb62f042a0ee0594817fdef9f2704904ee
- sha256sums = 05f47255831028b9e3a49e335323169d0156201d5e9b2bf8e09093440ab9e56e
- sha256sums = 4a9e44dfbc7e9574ae86cf53a896b6c67f03b224e90e18982dfb0e4ba02a6c1b
sha256sums = fa6cee9527d8e963d3398085d1862edc509a52e4540baec463edb8a9dd95bee0
- sha256sums = b9e4b11f6ca413fa7fcd1d810215bf3a36e69eedc4570f4209f7c1957083b2f3
- sha256sums = f94b12f56e99ebfc87014f9570a987bca7b50400c412ddbbb7035d73c5d8c668
- sha256sums = 5af4796400245fec2e84d6e3f847b8896600558aa85f5e9c4706dd50994a9802
- sha256sums = 4c1e9ec4402161ac93bb88595840ea5c5ac0c2cb75d06b01170a3ee4fc1f8907
- sha256sums = b4a563ef30f86b9af0932c00bb3422b95eedbda1ff40a1a725c22a0ae9ab7084
- sha256sums = dab4db308ede1aa35166f31671572eeccf0e7637b3218ce3ae519c2705934f79
- sha256sums = 9e83c46bed9059ba78df6c17a2f7c80a1cdb6efbdf64ec643f68573ede891b95
- sha256sums = 6c5538dc21a139a4475af6c1acc5d2761923173992568f7c159db971ff3167cd
- sha256sums = 84119c2d2beb6d7dc56389f2d1be8052b4fd23022e15edd86ee59130adcd9ab7
- sha256sums = 478e908f89ae413c650116681710aed3e974384a2ed5e97be3755189688e5415
- sha256sums = 1c58e4fd62cb7034e4fe834b55ffd8e183926571d4056b150bab5725f0ac5e29
- sha256sums = 50f6e6a3371eaedd3b649a25c5044e6359853c2e3836a6af683a906abb973fba
- sha256sums = 23ada5c29c415c0bb8d14cff213c697c649b438d7427a67a15b0b3f65c66aa6f
- sha256sums = 9ea5d38eea3809e85c6f3165f4b410ee53f0fdb813cbbf229e18a87e79c13ad5
- sha256sums = d6113df716cb81f78abc58405648d90f8497e29d79f5fd403eda36af867b50f3
- sha256sums = bc783b22ab5ab75dc28ae10519a9d6da23d80ee291812115555945acd280edc5
- sha256sums = dce87ca35886d075554fe6d8831075237d80526e078431165d2ec0d1a9630c7b
- sha256sums = ad9f485bb262bb1156da57698ccab5a6b8d8ca34b6ae8a185dcd014a34c69557
- sha256sums = 3e8c51aff84b6f12e6bc61057982befd82415626fe379e83271ddeb1a9628734
- sha256sums = bd975ab32d6490a4231d6ce4fab0343698b28407799bdaec133671e9fd778eb5
- sha256sums = ae66bbed96b5946b5a20d902bc0282c7dd172650812114b24429f40d5ba225bb
- sha256sums = f4185ae572190227161d6f0e7d502138f2aaa60130d8d99b2c44edaefd5e91af
+ sha256sums = 9327ac3edacbc60a023928147f9439789527fad62cef66945f35a9165108e30d
+ sha256sums = 0c515951db1c3dfc847e9b4777249c09be520ac140feb015a39c29e0531a89e6
+ sha256sums = 1ab75535772c63567384eb2ac74753e4d5db2f3317cb265aedf6151b9f18c6c2
+ sha256sums = 8cc771f37ee08ad5796e6db64f180c1415a5f6e03eb3045272dade30ca754b53
+ sha256sums = f3461e7cc759fd4cef2ec5c4fa15b80fa6d37e16008db223f77ed88a65aa938e
+ sha256sums = 96bf4c0fb920a876d7ec1ed25123bab8a0a43db5f363823e83e14707083d8501
+ sha256sums = 32bbcde83406810f41c9ed61206a7596eb43707a912ec9d870fd94f160d247c1
+ sha256sums = ed28a8051514f8c228717a5cdd13191b1c58181e0228d972fbe2af5ee1d013d7
+ sha256sums = de8c9747637768c4356c06aa65c3f157c526aa420f21fdd5edd0ed06f720a62e
+ sha256sums = 67ebf477b2ecbf367ea3fee1568eeb3de59de7185ef5ed66b81ae73108f6693c
+ sha256sums = 13f1c3a15fb1418b4aee0594e1f7871151303ca4f7eaab3c6f2ea21af965d85b
+ sha256sums = 2163cb2e394a013042a40cd3b00dae788603284b20d71e262995366c5534e480
+ sha256sums = a01cf700d79b983807e2285be1b30df6e02db6adfd9c9027fe2dfa8ca5a74bc9
+ sha256sums = e5d1bfe9d309f292d41bb06b98b94df168e0004f6e8ace45b310c6829a803d03
pkgname = linux-g14
pkgdesc = The Linux kernel and modules
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..0dc162744a9c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+src
+pkg
+archlinux-linux
+*.tar.xz
+.vscode
+xanmod-rog-PKGBUILD
+linux
diff --git a/5.12-acpi-1of2-turn-off-unused.patch b/5.12-acpi-1of2-turn-off-unused.patch
deleted file mode 100644
index d5488cdd4352..000000000000
--- a/5.12-acpi-1of2-turn-off-unused.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From 4b9ee772eaa82188b0eb8e05bdd1707c2a992004 Mon Sep 17 00:00:00 2001
-From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-Date: Thu, 18 Mar 2021 19:25:12 +0100
-Subject: ACPI: scan: Turn off unused power resources during initialization
-
-It is reported that on certain platforms there are power resources
-that are not associated with any devices physically present in the
-platform. Those power resources are expected to be turned off by
-the OS in accordance with the ACPI specification (section 7.3 of
-ACPI 6.4) which currently is not done by Linux and that may lead
-to obscure issues.
-
-For instance, leaving those power resources in the "on" state may
-prevent the platform from reaching the lowest power state in
-suspend-to-idle which leads to excessive power draw.
-
-For this reason, turn all of the unused ACPI power resources off
-at the end of the initial namespace scan for devices in analogy with
-resume from suspend-to-RAM.
-
-Link: https://uefi.org/specs/ACPI/6.4/07_Power_and_Performance_Mgmt/device-power-management-objects.html
-Reported-by: David Box <david.e.box@linux.intel.com>
-Tested-by: Wendy Wang <wendy.wang@intel.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/internal.h | 1 +
- drivers/acpi/power.c | 2 +-
- drivers/acpi/scan.c | 2 ++
- drivers/acpi/sleep.h | 1 -
- 4 files changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
-index e6a5d997241c4..9fcefcdc1dbe0 100644
---- a/drivers/acpi/internal.h
-+++ b/drivers/acpi/internal.h
-@@ -139,6 +139,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
- int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
- int acpi_power_on_resources(struct acpi_device *device, int state);
- int acpi_power_transition(struct acpi_device *device, int state);
-+void acpi_turn_off_unused_power_resources(void);
-
- /* --------------------------------------------------------------------------
- Device Power Management
-diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
-index 9b608b55d2b29..46c38627adddb 100644
---- a/drivers/acpi/power.c
-+++ b/drivers/acpi/power.c
-@@ -996,6 +996,7 @@ void acpi_resume_power_resources(void)
-
- mutex_unlock(&power_resource_list_lock);
- }
-+#endif
-
- void acpi_turn_off_unused_power_resources(void)
- {
-@@ -1025,4 +1026,3 @@ void acpi_turn_off_unused_power_resources(void)
-
- mutex_unlock(&power_resource_list_lock);
- }
--#endif
-diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
-index a184529d8fa40..1584c9e463bdf 100644
---- a/drivers/acpi/scan.c
-+++ b/drivers/acpi/scan.c
-@@ -2360,6 +2360,8 @@ int __init acpi_scan_init(void)
- }
- }
-
-+ acpi_turn_off_unused_power_resources();
-+
- acpi_scan_initialized = true;
-
- out:
-diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
-index 1856f76ac83f7..7fe41ee489d61 100644
---- a/drivers/acpi/sleep.h
-+++ b/drivers/acpi/sleep.h
-@@ -8,7 +8,6 @@ extern struct list_head acpi_wakeup_device_list;
- extern struct mutex acpi_device_lock;
-
- extern void acpi_resume_power_resources(void);
--extern void acpi_turn_off_unused_power_resources(void);
-
- static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
- {
---
-cgit 1.2.3-1.el7
-
diff --git a/5.12-acpi-2of2-turn-off-unconditionally.patch b/5.12-acpi-2of2-turn-off-unconditionally.patch
deleted file mode 100644
index 4098b0ebcade..000000000000
--- a/5.12-acpi-2of2-turn-off-unconditionally.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 7e4fdeafa61f2b653fcf9678f09935e55756aed2 Mon Sep 17 00:00:00 2001
-From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-Date: Thu, 18 Mar 2021 19:28:28 +0100
-Subject: ACPI: power: Turn off unused power resources unconditionally
-
-According to the ACPI specification (section 7.2.2 in ACPI 6.4), the
-OS may evaluate the _OFF method of a power resource that is "off"
-already [1], and in particular that can be done in the case of unused
-power resources.
-
-Accordingly, modify acpi_turn_off_unused_power_resources() to
-evaluate the _OFF method for each of the unused power resources
-unconditionally which may help to work around BIOS issues where the
-return value of _STA for a power resource does not reflect the
-actual state of the power resource [2].
-
-Link: https://uefi.org/specs/ACPI/6.4/07_Power_and_Performance_Mgmt/declaring-a-power-resource-object.html#off # [1]
-Link: https://lore.kernel.org/lkml/20210314000439.3138941-1-luzmaximilian@gmail.com/ # [2]
-Tested-by: Wendy Wang <wendy.wang@intel.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/power.c | 11 +----------
- 1 file changed, 1 insertion(+), 10 deletions(-)
-
-diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
-index 46c38627adddb..bacae6d178ff5 100644
---- a/drivers/acpi/power.c
-+++ b/drivers/acpi/power.c
-@@ -1005,18 +1005,9 @@ void acpi_turn_off_unused_power_resources(void)
- mutex_lock(&power_resource_list_lock);
-
- list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
-- int result, state;
--
- mutex_lock(&resource->resource_lock);
-
-- result = acpi_power_get_state(resource->device.handle, &state);
-- if (result) {
-- mutex_unlock(&resource->resource_lock);
-- continue;
-- }
--
-- if (state == ACPI_POWER_RESOURCE_STATE_ON
-- && !resource->ref_count) {
-+ if (!resource->ref_count) {
- dev_info(&resource->device.dev, "Turning OFF\n");
- __acpi_power_off(resource);
- }
---
-cgit 1.2.3-1.el7
-
diff --git a/PKGBUILD b/PKGBUILD
index dfb0045d514e..423acf39283f 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,9 +1,9 @@
# Maintainer: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
pkgbase=linux-g14
-pkgver=5.12.14.arch1
-_tagver=5.12.14.arch1
-pkgrel=2
+pkgver=5.13.4.arch1
+_tagver=5.13.4.arch1
+pkgrel=1
pkgdesc='Linux'
#_srctag=v${pkgver%.*}-${pkgver##*.}
_srctag=v${_tagver%.*}-${_tagver##*.}
@@ -24,40 +24,33 @@ source=(
config # the main kernel config file
"choose-gcc-optimization.sh"
- #"sys-kernel_arch-sources-g14_files-0000-revert-arch1-to-upstream-arch0.patch"
- "sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch"
- "sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch"
"sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch"::"https://raw.githubusercontent.com/graysky2/kernel_compiler_patch/a8d200f422f4b2abeaa6cfcfa37136b308e6e33e/more-uarches-for-kernel-5.8%2B.patch"
"sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch"
- "https://gitlab.com/asus-linux/fedora-kernel/-/archive/$_fedora_kernel_commit_id/fedora-kernel-$_fedora_kernel_commit_id.zip"
- "sys-kernel_arch-sources-g14_files-0012-acpi-1of2-turn-off-unused.patch"::"https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git/patch/?id=4b9ee772eaa82188b0eb8e05bdd1707c2a992004"
- #the second patch in this sequence (2of2) was rejected upstream as it causes problems for some machines
- #"sys-kernel_arch-sources-g14_files-0013-acpi-2of2-turn-off-unconditionally.patch"::"https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git/patch/?id=7e4fdeafa61f2b653fcf9678f09935e55756aed2"
- "sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch"
-
- #"sys-kernel_arch-sources-g14_files-0015-revert-4cbbe34807938e6e494e535a68d5ff64edac3f20.patch"
- #"sys-kernel_arch-sources-g14_files-0016-revert-1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.patch"
-
- "sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch"
- "sys-kernel_arch-sources-g14_files-0018-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch"
- "sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch"
- "sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch"
- "sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch"
- "sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch"
- "sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch"
- "sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch"
- "sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch"
- "sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch"
- "sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch"
-
- "sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch"
- "sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch"
- "sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch"
- "sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch"
- "sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch"
- "sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch"
+ #"https://gitlab.com/asus-linux/fedora-kernel/-/archive/$_fedora_kernel_commit_id/fedora-kernel-$_fedora_kernel_commit_id.zip"
"sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch"
+
+ # for now let's just pull the 5 asus-linux patches we need directly and skip all of the git filtering
+ "sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch"
+ "sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch"
+ "sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch"
+ "sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch"
+ "sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch"
+
+
+ # k10temp support for Zen3 APUs
+ "sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch"
+ "sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch"
+
+
+ # mediatek mt7921 bt/wifi patches
+ "sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch"
+ "sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch"
+ "sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch"
+ "sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch"
+
+ # squashed s0ix enablement through 2021-07-14; all current patches
+ "sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch"
)
validpgpkeys=(
@@ -69,31 +62,21 @@ validpgpkeys=(
sha256sums=('SKIP'
'761427cf8dbeed10fd3149023bc83d0a2319e70a5cbfdcdda50e7a49e8d2b198'
'1ac18cad2578df4a70f9346f7c6fccbb62f042a0ee0594817fdef9f2704904ee'
- '05f47255831028b9e3a49e335323169d0156201d5e9b2bf8e09093440ab9e56e'
- '4a9e44dfbc7e9574ae86cf53a896b6c67f03b224e90e18982dfb0e4ba02a6c1b'
'fa6cee9527d8e963d3398085d1862edc509a52e4540baec463edb8a9dd95bee0'
- 'b9e4b11f6ca413fa7fcd1d810215bf3a36e69eedc4570f4209f7c1957083b2f3'
- 'f94b12f56e99ebfc87014f9570a987bca7b50400c412ddbbb7035d73c5d8c668'
- '5af4796400245fec2e84d6e3f847b8896600558aa85f5e9c4706dd50994a9802'
- '4c1e9ec4402161ac93bb88595840ea5c5ac0c2cb75d06b01170a3ee4fc1f8907'
- 'b4a563ef30f86b9af0932c00bb3422b95eedbda1ff40a1a725c22a0ae9ab7084'
- 'dab4db308ede1aa35166f31671572eeccf0e7637b3218ce3ae519c2705934f79'
- '9e83c46bed9059ba78df6c17a2f7c80a1cdb6efbdf64ec643f68573ede891b95'
- '6c5538dc21a139a4475af6c1acc5d2761923173992568f7c159db971ff3167cd'
- '84119c2d2beb6d7dc56389f2d1be8052b4fd23022e15edd86ee59130adcd9ab7'
- '478e908f89ae413c650116681710aed3e974384a2ed5e97be3755189688e5415'
- '1c58e4fd62cb7034e4fe834b55ffd8e183926571d4056b150bab5725f0ac5e29'
- '50f6e6a3371eaedd3b649a25c5044e6359853c2e3836a6af683a906abb973fba'
- '23ada5c29c415c0bb8d14cff213c697c649b438d7427a67a15b0b3f65c66aa6f'
- '9ea5d38eea3809e85c6f3165f4b410ee53f0fdb813cbbf229e18a87e79c13ad5'
- 'd6113df716cb81f78abc58405648d90f8497e29d79f5fd403eda36af867b50f3'
- 'bc783b22ab5ab75dc28ae10519a9d6da23d80ee291812115555945acd280edc5'
- 'dce87ca35886d075554fe6d8831075237d80526e078431165d2ec0d1a9630c7b'
- 'ad9f485bb262bb1156da57698ccab5a6b8d8ca34b6ae8a185dcd014a34c69557'
- '3e8c51aff84b6f12e6bc61057982befd82415626fe379e83271ddeb1a9628734'
- 'bd975ab32d6490a4231d6ce4fab0343698b28407799bdaec133671e9fd778eb5'
- 'ae66bbed96b5946b5a20d902bc0282c7dd172650812114b24429f40d5ba225bb'
- 'f4185ae572190227161d6f0e7d502138f2aaa60130d8d99b2c44edaefd5e91af')
+ '9327ac3edacbc60a023928147f9439789527fad62cef66945f35a9165108e30d'
+ '0c515951db1c3dfc847e9b4777249c09be520ac140feb015a39c29e0531a89e6'
+ '1ab75535772c63567384eb2ac74753e4d5db2f3317cb265aedf6151b9f18c6c2'
+ '8cc771f37ee08ad5796e6db64f180c1415a5f6e03eb3045272dade30ca754b53'
+ 'f3461e7cc759fd4cef2ec5c4fa15b80fa6d37e16008db223f77ed88a65aa938e'
+ '96bf4c0fb920a876d7ec1ed25123bab8a0a43db5f363823e83e14707083d8501'
+ '32bbcde83406810f41c9ed61206a7596eb43707a912ec9d870fd94f160d247c1'
+ 'ed28a8051514f8c228717a5cdd13191b1c58181e0228d972fbe2af5ee1d013d7'
+ 'de8c9747637768c4356c06aa65c3f157c526aa420f21fdd5edd0ed06f720a62e'
+ '67ebf477b2ecbf367ea3fee1568eeb3de59de7185ef5ed66b81ae73108f6693c'
+ '13f1c3a15fb1418b4aee0594e1f7871151303ca4f7eaab3c6f2ea21af965d85b'
+ '2163cb2e394a013042a40cd3b00dae788603284b20d71e262995366c5534e480'
+ 'a01cf700d79b983807e2285be1b30df6e02db6adfd9c9027fe2dfa8ca5a74bc9'
+ 'e5d1bfe9d309f292d41bb06b98b94df168e0004f6e8ace45b310c6829a803d03')
# notable microarch levels:
#
@@ -124,6 +107,10 @@ _fedora_kernel_patch_skip_list=(
"0001-ALSA-hda-realtek-GA503-use-same-quirks-as-GA401.patch"
"0001-Add-jack-toggle-support-for-headphones-on-Asus-ROG-Z.patch"
"0001-HID-asus-filter-G713-G733-key-event-to-prevent-shutd.patch"
+ "0001-ACPI-video-use-native-backlight-for-GA401-GA502-GA50.patch"
+ "0002-Revert-platform-x86-asus-nb-wmi-Drop-duplicate-DMI-q.patch"
+ "0003-Revert-platform-x86-asus-nb-wmi-add-support-for-ASUS.patch"
+
# filter out suspend patches, we'll use upstream directly
"0001-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch"
diff --git a/README b/README
new file mode 100644
index 000000000000..13e6e2ad1f80
--- /dev/null
+++ b/README
@@ -0,0 +1,4 @@
+5.8.14.arch1-1 - no audio patches
+5.8.14.arch1-2 - v1 audio patch
+5.8.14.arch1-3 - v2 audio patch
+Remember to always do a full system shutdown with the power supply and all cables unplugged, then boot clean without connecting anything except power.
diff --git a/amdgpu-backlight.patch b/amdgpu-backlight.patch
deleted file mode 100644
index 07159c8b8d09..000000000000
--- a/amdgpu-backlight.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-index ddc979e3eebe..acd4874e0743 100644
---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
-@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
- */
- params.min_abm_backlight = 0x28F;
-
-- /* todo will enable for navi10 */
-- if (adev->asic_type <= CHIP_RAVEN) {
-- ret = dmcu_load_iram(dmcu, params);
-+ ret = dmcu_load_iram(dmcu, params);
-
-- if (!ret)
-- return -EINVAL;
-- }
-+ if (!ret)
-+ return -EINVAL;
-
- return detect_mst_link_for_all_connectors(adev->ddev);
- }
diff --git a/amdgpu-drm-next.patch b/amdgpu-drm-next.patch
deleted file mode 100644
index 46f340e39d91..000000000000
--- a/amdgpu-drm-next.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
-index b73b10bce0df..e8e444eeb1cd 100644
---- a/drivers/pci/p2pdma.c
-+++ b/drivers/pci/p2pdma.c
-@@ -282,6 +282,8 @@ static const struct pci_p2pdma_whitelist_entry {
- } pci_p2pdma_whitelist[] = {
- /* AMD ZEN */
- {PCI_VENDOR_ID_AMD, 0x1450, 0},
-+ {PCI_VENDOR_ID_AMD, 0x15d0, 0},
-+ {PCI_VENDOR_ID_AMD, 0x1630, 0},
-
- /* Intel Xeon E5/Core i7 */
- {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-index 22943773ae31..6b94587df407 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-@@ -2856,8 +2856,8 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
- /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
- data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
--
-- pwr_10_0_gfxip_control_over_cgpg(adev, true);
-+ if (adev->asic_type != CHIP_RENOIR)
-+ pwr_10_0_gfxip_control_over_cgpg(adev, true);
- }
- }
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
-index 623745b2d8b3..3e406eeeaff6 100644
---- a/drivers/gpu/drm/amd/amdgpu/soc15.c
-+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
-@@ -415,7 +415,8 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
- *value = 0;
- for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
- en = &soc15_allowed_read_registers[i];
-- if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
-+ if (adev->reg_offset[en->hwip][en->inst] &&
-+ reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
- + en->reg_offset))
- continue;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-index 33501c6c7189..899610fe2411 100644
---- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-@@ -2202,6 +2202,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
-
- switch (adev->asic_type) {
- case CHIP_RAVEN:
-+ case CHIP_RENOIR:
- sdma_v4_1_update_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
- break;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-index a027a8f7b281..e036c868e354 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-@@ -1742,8 +1742,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
-- if (adev->asic_type == CHIP_RAVEN ||
-- adev->asic_type == CHIP_RENOIR)
-+ if (adev->flags & AMD_IS_APU)
- adev->family = AMDGPU_FAMILY_RV;
- else
- adev->family = AMDGPU_FAMILY_AI;
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-index 711e9dd19705..22943773ae31 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-@@ -1890,7 +1890,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
- return r;
- }
-
-- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
-+ if (adev->flags & AMD_IS_APU) {
- /* TODO: double check the cp_table_size for RV */
- adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_gfx_rlc_init_cpt(adev);
-@@ -2384,7 +2384,7 @@ static int gfx_v9_0_sw_fini(void *handle)
-
- gfx_v9_0_mec_fini(adev);
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
-- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
-+ if (adev->flags & AMD_IS_APU) {
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
-diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-index 899610fe2411..3278debe8cee 100644
---- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
-@@ -1774,7 +1774,7 @@ static int sdma_v4_0_early_init(void *handle)
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
-
-- if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR)
-+ if (adev->flags & AMD_IS_APU)
- adev->sdma.num_instances = 1;
- else if (adev->asic_type == CHIP_ARCTURUS)
- adev->sdma.num_instances = 8;
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
-index c7c9e07962b9..623745b2d8b3 100644
---- a/drivers/gpu/drm/amd/amdgpu/soc15.c
-+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
-@@ -670,14 +670,25 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
-
- int soc15_set_ip_blocks(struct amdgpu_device *adev)
- {
-+ int r;
-+
- /* Set IP register base before any HW register access */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
-- case CHIP_RENOIR:
- vega10_reg_base_init(adev);
- break;
-+ case CHIP_RENOIR:
-+ if (amdgpu_discovery) {
-+ r = amdgpu_discovery_reg_base_init(adev);
-+ if (r) {
-+ DRM_WARN("failed to init reg base from ip discovery table, "
-+ "fallback to legacy init method\n");
-+ vega10_reg_base_init(adev);
-+ }
-+ }
-+ break;
- case CHIP_VEGA20:
- vega20_reg_base_init(adev);
- break;
-
diff --git a/sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch b/sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch
deleted file mode 100644
index 265077683dd2..000000000000
--- a/sys-kernel_arch-sources-g14_files-0001-revert-reserve-x86-low-memory.patch
+++ /dev/null
@@ -1,357 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 479cc44cc4e2..835f810f2f26 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -4623,6 +4623,11 @@
- Reserves a hole at the top of the kernel virtual
- address space.
-
-+ reservelow= [X86]
-+ Format: nn[K]
-+ Set the amount of memory to reserve for BIOS at
-+ the bottom of the address space.
-+
- reset_devices [KNL] Force drivers to reset the underlying device
- during initialization.
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index fc91be3b1bd1..861b1b794697 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1688,6 +1688,35 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
- Set whether the default state of memory_corruption_check is
- on or off.
-
-+config X86_RESERVE_LOW
-+ int "Amount of low memory, in kilobytes, to reserve for the BIOS"
-+ default 64
-+ range 4 640
-+ help
-+ Specify the amount of low memory to reserve for the BIOS.
-+
-+ The first page contains BIOS data structures that the kernel
-+ must not use, so that page must always be reserved.
-+
-+ By default we reserve the first 64K of physical RAM, as a
-+ number of BIOSes are known to corrupt that memory range
-+ during events such as suspend/resume or monitor cable
-+ insertion, so it must not be used by the kernel.
-+
-+ You can set this to 4 if you are absolutely sure that you
-+ trust the BIOS to get all its memory reservations and usages
-+ right. If you know your BIOS have problems beyond the
-+ default 64K area, you can set this to 640 to avoid using the
-+ entire low memory range.
-+
-+ If you have doubts about the BIOS (e.g. suspend/resume does
-+ not work or there's kernel crashes after certain hardware
-+ hotplug events) then you might want to enable
-+ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
-+ typical corruption patterns.
-+
-+ Leave this to the default value of 64 if you are unsure.
-+
- config MATH_EMULATION
- bool
- depends on MODIFY_LDT_SYSCALL
-diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
-index 8b6bd63530dc..f58de66091e5 100644
---- a/arch/x86/include/asm/crash.h
-+++ b/arch/x86/include/asm/crash.h
-@@ -9,4 +9,10 @@ int crash_setup_memmap_entries(struct kimage *image,
- struct boot_params *params);
- void crash_smp_send_stop(void);
-
-+#ifdef CONFIG_KEXEC_CORE
-+void __init crash_reserve_low_1M(void);
-+#else
-+static inline void __init crash_reserve_low_1M(void) { }
-+#endif
-+
- #endif /* _ASM_X86_CRASH_H */
-diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
-index e0b8d9662da5..b1deacbeb266 100644
---- a/arch/x86/kernel/crash.c
-+++ b/arch/x86/kernel/crash.c
-@@ -70,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
- rcu_read_unlock();
- }
-
-+/*
-+ * When the crashkernel option is specified, only use the low
-+ * 1M for the real mode trampoline.
-+ */
-+void __init crash_reserve_low_1M(void)
-+{
-+ if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
-+ return;
-+
-+ memblock_reserve(0, 1<<20);
-+ pr_info("Reserving the low 1M of memory for crashkernel\n");
-+}
-+
- #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
-
- static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
-diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index fbda4bbf75c1..e79f21d13a0d 100644
---- a/arch/x86/kernel/setup.c
-+++ b/arch/x86/kernel/setup.c
-@@ -634,16 +634,11 @@ static void __init trim_snb_memory(void)
- printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
-
- /*
-- * SandyBridge integrated graphics devices have a bug that prevents
-- * them from accessing certain memory ranges, namely anything below
-- * 1M and in the pages listed in bad_pages[] above.
-- *
-- * To avoid these pages being ever accessed by SNB gfx devices reserve
-- * bad_pages that have not already been reserved at boot time.
-- * All memory below the 1 MB mark is anyway reserved later during
-- * setup_arch(), so there is no need to reserve it here.
-+ * Reserve all memory below the 1 MB mark that has not
-+ * already been reserved.
- */
--
-+ memblock_reserve(0, 1<<20);
-+
- for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
- if (memblock_reserve(bad_pages[i], PAGE_SIZE))
- printk(KERN_WARNING "failed to reserve 0x%08lx\n",
-@@ -651,6 +646,18 @@ static void __init trim_snb_memory(void)
- }
- }
-
-+/*
-+ * Here we put platform-specific memory range workarounds, i.e.
-+ * memory known to be corrupt or otherwise in need to be reserved on
-+ * specific platforms.
-+ *
-+ * If this gets used more widely it could use a real dispatch mechanism.
-+ */
-+static void __init trim_platform_memory_ranges(void)
-+{
-+ trim_snb_memory();
-+}
-+
- static void __init trim_bios_range(void)
- {
- /*
-@@ -695,42 +702,35 @@ static void __init e820_add_kernel_range(void)
- e820__range_add(start, size, E820_TYPE_RAM);
- }
-
--static void __init early_reserve_memory(void)
-+static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-+
-+static int __init parse_reservelow(char *p)
- {
-- /*
-- * Reserve the memory occupied by the kernel between _text and
-- * __end_of_kernel_reserve symbols. Any kernel sections after the
-- * __end_of_kernel_reserve symbol must be explicitly reserved with a
-- * separate memblock_reserve() or they will be discarded.
-- */
-- memblock_reserve(__pa_symbol(_text),
-- (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
-+ unsigned long long size;
-
-- /*
-- * The first 4Kb of memory is a BIOS owned area, but generally it is
-- * not listed as such in the E820 table.
-- *
-- * Reserve the first 64K of memory since some BIOSes are known to
-- * corrupt low memory. After the real mode trampoline is allocated the
-- * rest of the memory below 640k is reserved.
-- *
-- * In addition, make sure page 0 is always reserved because on
-- * systems with L1TF its contents can be leaked to user processes.
-- */
-- memblock_reserve(0, SZ_64K);
-+ if (!p)
-+ return -EINVAL;
-
-- early_reserve_initrd();
-+ size = memparse(p, &p);
-
-- if (efi_enabled(EFI_BOOT))
-- efi_memblock_x86_reserve_range();
-+ if (size < 4096)
-+ size = 4096;
-
-- memblock_x86_reserve_range_setup_data();
-+ if (size > 640*1024)
-+ size = 640*1024;
-
-- reserve_ibft_region();
-- reserve_bios_regions();
-- trim_snb_memory();
-+ reserve_low = size;
-+
-+ return 0;
- }
-
-+early_param("reservelow", parse_reservelow);
-+
-+static void __init trim_low_memory_range(void)
-+{
-+ memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
-+}
-+
- /*
- * Dump out kernel offset information on panic.
- */
-@@ -765,6 +765,29 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
-
- void __init setup_arch(char **cmdline_p)
- {
-+ /*
-+ * Reserve the memory occupied by the kernel between _text and
-+ * __end_of_kernel_reserve symbols. Any kernel sections after the
-+ * __end_of_kernel_reserve symbol must be explicitly reserved with a
-+ * separate memblock_reserve() or they will be discarded.
-+ */
-+ memblock_reserve(__pa_symbol(_text),
-+ (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
-+
-+ /*
-+ * Make sure page 0 is always reserved because on systems with
-+ * L1TF its contents can be leaked to user processes.
-+ */
-+ memblock_reserve(0, PAGE_SIZE);
-+
-+ early_reserve_initrd();
-+
-+ /*
-+ * At this point everything still needed from the boot loader
-+ * or BIOS or kernel text should be early reserved or marked not
-+ * RAM in e820. All other memory is free game.
-+ */
-+
- #ifdef CONFIG_X86_32
- memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-
-@@ -888,18 +911,8 @@ void __init setup_arch(char **cmdline_p)
-
- parse_early_param();
-
-- /*
-- * Do some memory reservations *before* memory is added to
-- * memblock, so memblock allocations won't overwrite it.
-- * Do it after early param, so we could get (unlikely) panic from
-- * serial.
-- *
-- * After this point everything still needed from the boot loader or
-- * firmware or kernel text should be early reserved or marked not
-- * RAM in e820. All other memory is free game.
-- */
-- early_reserve_memory();
--
-+ if (efi_enabled(EFI_BOOT))
-+ efi_memblock_x86_reserve_range();
- #ifdef CONFIG_MEMORY_HOTPLUG
- /*
- * Memory used by the kernel cannot be hot-removed because Linux
-@@ -926,6 +939,9 @@ void __init setup_arch(char **cmdline_p)
-
- x86_report_nx();
-
-+ /* after early param, so could get panic from serial */
-+ memblock_x86_reserve_range_setup_data();
-+
- if (acpi_mps_check()) {
- #ifdef CONFIG_X86_LOCAL_APIC
- disable_apic = 1;
-@@ -1017,6 +1033,8 @@ void __init setup_arch(char **cmdline_p)
- */
- find_smp_config();
-
-+ reserve_ibft_region();
-+
- early_alloc_pgt_buf();
-
- /*
-@@ -1037,6 +1055,8 @@ void __init setup_arch(char **cmdline_p)
- */
- sev_setup_arch();
-
-+ reserve_bios_regions();
-+
- efi_fake_memmap();
- efi_find_mirror();
- efi_esrt_init();
-@@ -1060,21 +1080,11 @@ void __init setup_arch(char **cmdline_p)
- (max_pfn_mapped<<PAGE_SHIFT) - 1);
- #endif
-
-- /*
-- * Find free memory for the real mode trampoline and place it
-- * there.
-- * If there is not enough free memory under 1M, on EFI-enabled
-- * systems there will be additional attempt to reclaim the memory
-- * for the real mode trampoline at efi_free_boot_services().
-- *
-- * Unconditionally reserve the entire first 1M of RAM because
-- * BIOSes are know to corrupt low memory and several
-- * hundred kilobytes are not worth complex detection what memory gets
-- * clobbered. Moreover, on machines with SandyBridge graphics or in
-- * setups that use crashkernel the entire 1M is anyway reserved.
-- */
- reserve_real_mode();
-
-+ trim_platform_memory_ranges();
-+ trim_low_memory_range();
-+
- init_mem_mapping();
-
- idt_setup_early_pf();
-diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
-index 27561b56a821..67d93a243c35 100644
---- a/arch/x86/platform/efi/quirks.c
-+++ b/arch/x86/platform/efi/quirks.c
-@@ -450,18 +450,6 @@ void __init efi_free_boot_services(void)
- size -= rm_size;
- }
-
-- /*
-- * Don't free memory under 1M for two reasons:
-- * - BIOS might clobber it
-- * - Crash kernel needs it to be reserved
-- */
-- if (start + size < SZ_1M)
-- continue;
-- if (start < SZ_1M) {
-- size -= (SZ_1M - start);
-- start = SZ_1M;
-- }
--
- memblock_free_late(start, size);
- }
-
-diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
-index ea42630d4e2e..22fda7d99159 100644
---- a/arch/x86/realmode/init.c
-+++ b/arch/x86/realmode/init.c
-@@ -29,16 +29,14 @@ void __init reserve_real_mode(void)
-
- /* Has to be under 1M so we can execute real-mode AP code. */
- mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-- if (!mem)
-+ if (!mem) {
- pr_info("No sub-1M memory is available for the trampoline\n");
-- else
-- set_real_mode_mem(mem);
-+ return;
-+ }
-
-- /*
-- * Unconditionally reserve the entire fisrt 1M, see comment in
-- * setup_arch()
-- */
-- memblock_reserve(0, SZ_1M);
-+ memblock_reserve(mem, size);
-+ set_real_mode_mem(mem);
-+ crash_reserve_low_1M();
- }
-
- static void sme_sev_setup_real_mode(struct trampoline_header *th)
diff --git a/sys-kernel_arch-sources-g14_files-0002-acpi_unused.patch b/sys-kernel_arch-sources-g14_files-0002-acpi_unused.patch
deleted file mode 100644
index 5d7d83c2600c..000000000000
--- a/sys-kernel_arch-sources-g14_files-0002-acpi_unused.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 4b9ee772eaa82188b0eb8e05bdd1707c2a992004 Mon Sep 17 00:00:00 2001
-From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-Date: Thu, 18 Mar 2021 19:25:12 +0100
-Subject: ACPI: scan: Turn off unused power resources during initialization
-
-It is reported that on certain platforms there are power resources
-that are not associated with any devices physically present in the
-platform. Those power resources are expected to be turned off by
-the OS in accordance with the ACPI specification (section 7.3 of
-ACPI 6.4) which currently is not done by Linux and that may lead
-to obscure issues.
-
-For instance, leaving those power resources in the "on" state may
-prevent the platform from reaching the lowest power state in
-suspend-to-idle which leads to excessive power draw.
-
-For this reason, turn all of the unused ACPI power resources off
-at the end of the initial namespace scan for devices in analogy with
-resume from suspend-to-RAM.
-
-Link: https://uefi.org/specs/ACPI/6.4/07_Power_and_Performance_Mgmt/device-power-management-objects.html
-Reported-by: David Box <david.e.box@linux.intel.com>
-Tested-by: Wendy Wang <wendy.wang@intel.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/internal.h | 1 +
- drivers/acpi/power.c | 2 +-
- drivers/acpi/scan.c | 2 ++
- drivers/acpi/sleep.h | 1 -
- 4 files changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
-index e6a5d997241c4..9fcefcdc1dbe0 100644
---- a/drivers/acpi/internal.h
-+++ b/drivers/acpi/internal.h
-@@ -139,6 +139,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
- int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
- int acpi_power_on_resources(struct acpi_device *device, int state);
- int acpi_power_transition(struct acpi_device *device, int state);
-+void acpi_turn_off_unused_power_resources(void);
-
- /* --------------------------------------------------------------------------
- Device Power Management
-diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
-index 9b608b55d2b29..46c38627adddb 100644
---- a/drivers/acpi/power.c
-+++ b/drivers/acpi/power.c
-@@ -996,6 +996,7 @@ void acpi_resume_power_resources(void)
-
- mutex_unlock(&power_resource_list_lock);
- }
-+#endif
-
- void acpi_turn_off_unused_power_resources(void)
- {
-@@ -1025,4 +1026,3 @@ void acpi_turn_off_unused_power_resources(void)
-
- mutex_unlock(&power_resource_list_lock);
- }
--#endif
-diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
-index a184529d8fa40..1584c9e463bdf 100644
---- a/drivers/acpi/scan.c
-+++ b/drivers/acpi/scan.c
-@@ -2360,6 +2360,8 @@ int __init acpi_scan_init(void)
- }
- }
-
-+ acpi_turn_off_unused_power_resources();
-+
- acpi_scan_initialized = true;
-
- out:
-diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
-index 1856f76ac83f7..7fe41ee489d61 100644
---- a/drivers/acpi/sleep.h
-+++ b/drivers/acpi/sleep.h
-@@ -8,7 +8,6 @@ extern struct list_head acpi_wakeup_device_list;
- extern struct mutex acpi_device_lock;
-
- extern void acpi_resume_power_resources(void);
--extern void acpi_turn_off_unused_power_resources(void);
-
- static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
- {
---
-cgit 1.2.3-1.el7
diff --git a/sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch b/sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch
deleted file mode 100644
index a57e74ce8921..000000000000
--- a/sys-kernel_arch-sources-g14_files-0003-flow-x13-sound.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 43a63db4ab6a..d7b59b846082 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -8297,6 +8297,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
-+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
- SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
- SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
- SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
diff --git a/sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch b/sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch
new file mode 100644
index 000000000000..c45d13bf417a
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0004-5.8+--more-uarches-for-kernel.patch
@@ -0,0 +1,682 @@
+From 4af44fbc97bc51eb742f0d6555bde23cf580d4e3 Mon Sep 17 00:00:00 2001
+From: graysky <graysky@archlinux.us>
+Date: Sun, 6 Jun 2021 09:41:36 -0400
+Subject: [PATCH] more uarches for kernel 5.8+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features --->
+ Processor family --->
+
+With the release of gcc 11.1 and clang 12.0, several generic 64-bit levels are
+offered which are good for supported Intel or AMD CPUs:
+• x86-64-v2
+• x86-64-v3
+• x86-64-v4
+
+Users of glibc 2.33 and above can see which level is supported by current
+hardware by running:
+ /lib/ld-linux-x86-64.so.2 --help | grep supported
+
+Alternatively, compare the flags from /proc/cpuinfo to this list.[1]
+
+CPU-specific microarchitectures include:
+• AMD Improved K8-family
+• AMD K10-family
+• AMD Family 10h (Barcelona)
+• AMD Family 14h (Bobcat)
+• AMD Family 16h (Jaguar)
+• AMD Family 15h (Bulldozer)
+• AMD Family 15h (Piledriver)
+• AMD Family 15h (Steamroller)
+• AMD Family 15h (Excavator)
+• AMD Family 17h (Zen)
+• AMD Family 17h (Zen 2)
+• AMD Family 19h (Zen 3)†
+• Intel Silvermont low-power processors
+• Intel Goldmont low-power processors (Apollo Lake and Denverton)
+• Intel Goldmont Plus low-power processors (Gemini Lake)
+• Intel 1st Gen Core i3/i5/i7 (Nehalem)
+• Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+• Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+• Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+• Intel 4th Gen Core i3/i5/i7 (Haswell)
+• Intel 5th Gen Core i3/i5/i7 (Broadwell)
+• Intel 6th Gen Core i3/i5/i7 (Skylake)
+• Intel 6th Gen Core i7/i9 (Skylake X)
+• Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
+• Intel 10th Gen Core i7/i9 (Ice Lake)
+• Intel Xeon (Cascade Lake)
+• Intel Xeon (Cooper Lake)*
+• Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
+• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
+• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
+• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
+
+Notes: If not otherwise noted, gcc >=9.1 is required for support.
+ *Requires gcc >=10.1 or clang >=10.0
+ †Required gcc >=10.3 or clang >=12.0
+ ‡Required gcc >=11.1 or clang >=12.0
+
+It also offers to compile passing the 'native' option which, "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[2]
+
+Users of Intel CPUs should select the 'Intel-Native' option and users of AMD
+CPUs should select the 'AMD-Native' option.
+
+MINOR NOTES RELATING TO INTEL ATOM PROCESSORS
+This patch also changes -march=atom to -march=bonnell in accordance with the
+gcc v4.9 changes. Upstream is using the deprecated -match=atom flags when I
+believe it should use the newer -march=bonnell flag for atom processors.[3]
+
+It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
+recommendation is to use the 'atom' option instead.
+
+BENEFITS
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=5.8
+gcc version >=9.0 or clang version >=9.0
+
+ACKNOWLEDGMENTS
+This patch builds on the seminal work by Jeroen.[5]
+
+REFERENCES
+1. https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9
+2. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options
+3. https://bugzilla.kernel.org/show_bug.cgi?id=77461
+4. https://github.com/graysky2/kernel_gcc_patch/issues/15
+5. http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+Signed-off-by: graysky <graysky@archlinux.us>
+---
+ arch/x86/Kconfig.cpu | 332 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile | 47 ++++-
+ arch/x86/include/asm/vermagic.h | 66 +++++++
+ 3 files changed, 428 insertions(+), 17 deletions(-)
+
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 814fe0d349b0..8acf6519d279 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -157,7 +157,7 @@ config MPENTIUM4
+
+
+ config MK6
+- bool "K6/K6-II/K6-III"
++ bool "AMD K6/K6-II/K6-III"
+ depends on X86_32
+ help
+ Select this for an AMD K6-family processor. Enables use of
+@@ -165,7 +165,7 @@ config MK6
+ flags to GCC.
+
+ config MK7
+- bool "Athlon/Duron/K7"
++ bool "AMD Athlon/Duron/K7"
+ depends on X86_32
+ help
+ Select this for an AMD Athlon K7-family processor. Enables use of
+@@ -173,12 +173,98 @@ config MK7
+ flags to GCC.
+
+ config MK8
+- bool "Opteron/Athlon64/Hammer/K8"
++ bool "AMD Opteron/Athlon64/Hammer/K8"
+ help
+ Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ Enables use of some extended instructions, and passes appropriate
+ optimization flags to GCC.
+
++config MK8SSE3
++ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++ help
++ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++ Enables use of some extended instructions, and passes appropriate
++ optimization flags to GCC.
++
++config MK10
++ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++ help
++ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++ Enables use of some extended instructions, and passes appropriate
++ optimization flags to GCC.
++
++config MBARCELONA
++ bool "AMD Barcelona"
++ help
++ Select this for AMD Family 10h Barcelona processors.
++
++ Enables -march=barcelona
++
++config MBOBCAT
++ bool "AMD Bobcat"
++ help
++ Select this for AMD Family 14h Bobcat processors.
++
++ Enables -march=btver1
++
++config MJAGUAR
++ bool "AMD Jaguar"
++ help
++ Select this for AMD Family 16h Jaguar processors.
++
++ Enables -march=btver2
++
++config MBULLDOZER
++ bool "AMD Bulldozer"
++ help
++ Select this for AMD Family 15h Bulldozer processors.
++
++ Enables -march=bdver1
++
++config MPILEDRIVER
++ bool "AMD Piledriver"
++ help
++ Select this for AMD Family 15h Piledriver processors.
++
++ Enables -march=bdver2
++
++config MSTEAMROLLER
++ bool "AMD Steamroller"
++ help
++ Select this for AMD Family 15h Steamroller processors.
++
++ Enables -march=bdver3
++
++config MEXCAVATOR
++ bool "AMD Excavator"
++ help
++ Select this for AMD Family 15h Excavator processors.
++
++ Enables -march=bdver4
++
++config MZEN
++ bool "AMD Zen"
++ help
++ Select this for AMD Family 17h Zen processors.
++
++ Enables -march=znver1
++
++config MZEN2
++ bool "AMD Zen 2"
++ help
++ Select this for AMD Family 17h Zen 2 processors.
++
++ Enables -march=znver2
++
++config MZEN3
++ bool "AMD Zen 3"
++ depends on ( CC_IS_GCC && GCC_VERSION >= 100300 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ help
++ Select this for AMD Family 19h Zen 3 processors.
++
++ Enables -march=znver3
++
+ config MCRUSOE
+ bool "Crusoe"
+ depends on X86_32
+@@ -270,7 +356,7 @@ config MPSC
+ in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+
+ config MCORE2
+- bool "Core 2/newer Xeon"
++ bool "Intel Core 2"
+ help
+
+ Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -278,6 +364,8 @@ config MCORE2
+ family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ (not a typo)
+
++ Enables -march=core2
++
+ config MATOM
+ bool "Intel Atom"
+ help
+@@ -287,6 +375,182 @@ config MATOM
+ accordingly optimized code. Use a recent GCC with specific Atom
+ support in order to fully benefit from selecting this option.
+
++config MNEHALEM
++ bool "Intel Nehalem"
++ select X86_P6_NOP
++ help
++
++ Select this for 1st Gen Core processors in the Nehalem family.
++
++ Enables -march=nehalem
++
++config MWESTMERE
++ bool "Intel Westmere"
++ select X86_P6_NOP
++ help
++
++ Select this for the Intel Westmere (formerly Nehalem-C) family.
++
++ Enables -march=westmere
++
++config MSILVERMONT
++ bool "Intel Silvermont"
++ select X86_P6_NOP
++ help
++
++ Select this for the Intel Silvermont platform.
++
++ Enables -march=silvermont
++
++config MGOLDMONT
++ bool "Intel Goldmont"
++ select X86_P6_NOP
++ help
++
++ Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
++
++ Enables -march=goldmont
++
++config MGOLDMONTPLUS
++ bool "Intel Goldmont Plus"
++ select X86_P6_NOP
++ help
++
++ Select this for the Intel Goldmont Plus platform including Gemini Lake.
++
++ Enables -march=goldmont-plus
++
++config MSANDYBRIDGE
++ bool "Intel Sandy Bridge"
++ select X86_P6_NOP
++ help
++
++ Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++ Enables -march=sandybridge
++
++config MIVYBRIDGE
++ bool "Intel Ivy Bridge"
++ select X86_P6_NOP
++ help
++
++ Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++ Enables -march=ivybridge
++
++config MHASWELL
++ bool "Intel Haswell"
++ select X86_P6_NOP
++ help
++
++ Select this for 4th Gen Core processors in the Haswell family.
++
++ Enables -march=haswell
++
++config MBROADWELL
++ bool "Intel Broadwell"
++ select X86_P6_NOP
++ help
++
++ Select this for 5th Gen Core processors in the Broadwell family.
++
++ Enables -march=broadwell
++
++config MSKYLAKE
++ bool "Intel Skylake"
++ select X86_P6_NOP
++ help
++
++ Select this for 6th Gen Core processors in the Skylake family.
++
++ Enables -march=skylake
++
++config MSKYLAKEX
++ bool "Intel Skylake X"
++ select X86_P6_NOP
++ help
++
++ Select this for 6th Gen Core processors in the Skylake X family.
++
++ Enables -march=skylake-avx512
++
++config MCANNONLAKE
++ bool "Intel Cannon Lake"
++ select X86_P6_NOP
++ help
++
++ Select this for 8th Gen Core processors in the Cannon Lake family.
++
++ Enables -march=cannonlake
++
++config MICELAKE
++ bool "Intel Ice Lake"
++ select X86_P6_NOP
++ help
++
++ Select this for 10th Gen Core processors in the Ice Lake family.
++
++ Enables -march=icelake-client
++
++config MCASCADELAKE
++ bool "Intel Cascade Lake"
++ select X86_P6_NOP
++ help
++
++ Select this for Xeon processors in the Cascade Lake family.
++
++ Enables -march=cascadelake
++
++config MCOOPERLAKE
++ bool "Intel Cooper Lake"
++ depends on ( CC_IS_GCC && GCC_VERSION > 100100 ) || ( CC_IS_CLANG && CLANG_VERSION >= 100000 )
++ select X86_P6_NOP
++ help
++
++ Select this for Xeon processors in the Cooper Lake family.
++
++ Enables -march=cooperlake
++
++config MTIGERLAKE
++ bool "Intel Tiger Lake"
++ depends on ( CC_IS_GCC && GCC_VERSION > 100100 ) || ( CC_IS_CLANG && CLANG_VERSION >= 100000 )
++ select X86_P6_NOP
++ help
++
++ Select this for third-generation 10 nm process processors in the Tiger Lake family.
++
++ Enables -march=tigerlake
++
++config MSAPPHIRERAPIDS
++ bool "Intel Sapphire Rapids"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ select X86_P6_NOP
++ help
++
++ Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
++
++ Enables -march=sapphirerapids
++
++config MROCKETLAKE
++ bool "Intel Rocket Lake"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ select X86_P6_NOP
++ help
++
++ Select this for eleventh-generation processors in the Rocket Lake family.
++
++ Enables -march=rocketlake
++
++config MALDERLAKE
++ bool "Intel Alder Lake"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ select X86_P6_NOP
++ help
++
++ Select this for twelfth-generation processors in the Alder Lake family.
++
++ Enables -march=alderlake
++
+ config GENERIC_CPU
+ bool "Generic-x86-64"
+ depends on X86_64
+@@ -294,6 +558,50 @@ config GENERIC_CPU
+ Generic x86-64 CPU.
+ Run equally well on all x86-64 CPUs.
+
++config GENERIC_CPU2
++ bool "Generic-x86-64-v2"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ depends on X86_64
++ help
++ Generic x86-64 CPU with v2 instructions.
++ Run equally well on all x86-64 CPUs with a minimum of x86-64-v2 support.
++
++config GENERIC_CPU3
++ bool "Generic-x86-64-v3"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ depends on X86_64
++ help
++ Generic x86-64-v3 CPU with v3 instructions.
++ Run equally well on all x86-64 CPUs with a minimum of x86-64-v3 support.
++
++config GENERIC_CPU4
++ bool "Generic-x86-64-v4"
++ depends on ( CC_IS_GCC && GCC_VERSION > 110000 ) || ( CC_IS_CLANG && CLANG_VERSION >= 120000 )
++ depends on X86_64
++ help
++ Generic x86-64 CPU with v4 instructions.
++ Run equally well on all x86-64 CPUs with a minimum of x86-64-v4 support.
++
++config MNATIVE_INTEL
++ bool "Intel-Native optimizations autodetected by the compiler"
++ help
++
++ Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects
++ the optimum settings to use based on your processor. Do NOT use this
++ for AMD CPUs. Intel Only!
++
++ Enables -march=native
++
++config MNATIVE_AMD
++ bool "AMD-Native optimizations autodetected by the compiler"
++ help
++
++ Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects
++ the optimum settings to use based on your processor. Do NOT use this
++ for Intel CPUs. AMD Only!
++
++ Enables -march=native
++
+ endchoice
+
+ config X86_GENERIC
+@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ int
+ default "7" if MPENTIUM4 || MPSC
+- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
+ default "4" if MELAN || M486SX || M486 || MGEODEGX1
+ default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+
+@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
+
+ config X86_USE_PPRO_CHECKSUM
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
+
+ config X86_USE_3DNOW
+ def_bool y
+@@ -360,26 +668,26 @@ config X86_USE_3DNOW
+ config X86_P6_NOP
+ def_bool y
+ depends on X86_64
+- depends on (MCORE2 || MPENTIUM4 || MPSC)
++ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
+
+ config X86_TSC
+ def_bool y
+- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
+
+ config X86_CMPXCHG64
+ def_bool y
+- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
++ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
+
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+ default "64" if X86_64
+- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
+ default "5" if X86_32 && X86_CMPXCHG64
+ default "4"
+
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 78faf9c7e3ae..ee0cd507af8b 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -114,11 +114,48 @@ else
+ # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
+ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
+ cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+-
+- cflags-$(CONFIG_MCORE2) += \
+- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
++ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
++ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
++ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
++ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
++ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
++ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
++ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
++ cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
++ cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
++
++ cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
++ cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
++ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
++ cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
++ cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
++ cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
++ cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
++ cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
++ cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
++ cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
++ cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
++ cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
++ cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
++ cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
++ cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
++ cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
++ cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
++ cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
++ cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
++ cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
++ cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
++ cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
++ cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
++ cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
++ cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
++ cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
+ cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+ KBUILD_CFLAGS += $(cflags-y)
+
+diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
+index 75884d2cdec3..4e6a08d4c7e5 100644
+--- a/arch/x86/include/asm/vermagic.h
++++ b/arch/x86/include/asm/vermagic.h
+@@ -17,6 +17,48 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE_INTEL
++#define MODULE_PROC_FAMILY "NATIVE_INTEL "
++#elif defined CONFIG_MNATIVE_AMD
++#define MODULE_PROC_FAMILY "NATIVE_AMD "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MGOLDMONT
++#define MODULE_PROC_FAMILY "GOLDMONT "
++#elif defined CONFIG_MGOLDMONTPLUS
++#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
++#elif defined CONFIG_MSKYLAKEX
++#define MODULE_PROC_FAMILY "SKYLAKEX "
++#elif defined CONFIG_MCANNONLAKE
++#define MODULE_PROC_FAMILY "CANNONLAKE "
++#elif defined CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
++#elif defined CONFIG_MCOOPERLAKE
++#define MODULE_PROC_FAMILY "COOPERLAKE "
++#elif defined CONFIG_MTIGERLAKE
++#define MODULE_PROC_FAMILY "TIGERLAKE "
++#elif defined CONFIG_MSAPPHIRERAPIDS
++#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
++#elif defined CONFIG_MROCKETLAKE
++#define MODULE_PROC_FAMILY "ROCKETLAKE "
++#elif defined CONFIG_MALDERLAKE
++#define MODULE_PROC_FAMILY "ALDERLAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -35,6 +77,30 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
++#elif defined CONFIG_MZEN3
++#define MODULE_PROC_FAMILY "ZEN3 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--
+2.31.1
+
diff --git a/sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch b/sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch
index 16b30ccf94ad..b85a0f064684 100644
--- a/sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch
+++ b/sys-kernel_arch-sources-g14_files-0005-lru-multi-generational.patch
@@ -1,57 +1,165 @@
-diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
-index ac398e143c9a..89fe4e3592f9 100644
---- a/include/linux/nodemask.h
-+++ b/include/linux/nodemask.h
-@@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
- #define first_online_node 0
- #define first_memory_node 0
- #define next_online_node(nid) (MAX_NUMNODES)
-+#define next_memory_node(nid) (MAX_NUMNODES)
- #define nr_node_ids 1U
- #define nr_online_nodes 1U
-
-diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
-index 4f2f79de083e..bd5744360cfa 100644
---- a/include/linux/cgroup.h
-+++ b/include/linux/cgroup.h
-@@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
- css_put(&cgrp->self);
- }
+diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
+index eff5fbd492d0..c353b3f55924 100644
+--- a/Documentation/vm/index.rst
++++ b/Documentation/vm/index.rst
+@@ -17,6 +17,7 @@ various features of the Linux memory management
-+extern struct mutex cgroup_mutex;
+ swap_numa
+ zswap
++ multigen_lru
+
+ Kernel developers MM documentation
+ ==================================
+diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
+new file mode 100644
+index 000000000000..a18416ed7e92
+--- /dev/null
++++ b/Documentation/vm/multigen_lru.rst
+@@ -0,0 +1,143 @@
++.. SPDX-License-Identifier: GPL-2.0
+
-+static inline void cgroup_lock(void)
-+{
-+ mutex_lock(&cgroup_mutex);
-+}
++=====================
++Multigenerational LRU
++=====================
+
-+static inline void cgroup_unlock(void)
-+{
-+ mutex_unlock(&cgroup_mutex);
-+}
++Quick Start
++===========
++Build Options
++-------------
++:Required: Set ``CONFIG_LRU_GEN=y``.
+
- /**
- * task_css_set_check - obtain a task's css_set with extra access conditions
- * @task: the task to obtain css_set for
-@@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
- * as locks used during the cgroup_subsys::attach() methods.
- */
- #ifdef CONFIG_PROVE_RCU
--extern struct mutex cgroup_mutex;
- extern spinlock_t css_set_lock;
- #define task_css_set_check(task, __c) \
- rcu_dereference_check((task)->cgroups, \
-@@ -704,6 +715,8 @@ struct cgroup;
- static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
- static inline void css_get(struct cgroup_subsys_state *css) {}
- static inline void css_put(struct cgroup_subsys_state *css) {}
-+static inline void cgroup_lock(void) {}
-+static inline void cgroup_unlock(void) {}
- static inline int cgroup_attach_task_all(struct task_struct *from,
- struct task_struct *t) { return 0; }
- static inline int cgroupstats_build(struct cgroupstats *stats,
-
- diff --git a/arch/Kconfig b/arch/Kconfig
++:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
++ default.
++
++:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
++ a maximum of ``X`` generations.
++
++:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to
++ support a maximum of ``Y`` tiers per generation.
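+
++A minimal sketch of setting these options with the in-tree helper
++``scripts/config`` (run from a configured kernel source tree; the values
++shown are only examples)::
+
++    scripts/config --enable LRU_GEN --enable LRU_GEN_ENABLED
++    scripts/config --set-val NR_LRU_GENS 4 --set-val TIERS_PER_GEN 4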
++
++Runtime Options
++---------------
++:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enable`` if the
++ feature was not turned on by default.
++
++:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
++ to spread pages out across ``N+1`` generations. ``N`` should be less
++ than ``X``. Larger values make the background aging more aggressive.
++
++:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
++ This file has the following output:
++
++::
++
++ memcg memcg_id memcg_path
++ node node_id
++ min_gen birth_time anon_size file_size
++ ...
++ max_gen birth_time anon_size file_size
++
++Given a memcg and a node, ``min_gen`` is the oldest generation
++(number) and ``max_gen`` is the youngest. Birth time is in
++milliseconds. The sizes of anon and file types are in pages.
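+
++For example, turning the feature on and dumping the interface could look
++like this (paths exactly as documented above)::
+
++    echo 1 > /sys/kernel/mm/lru_gen/enable
++    cat /sys/kernel/debug/lru_gen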
++
++Recipes
++-------
++:Android on ARMv8.1+: ``X=4``, ``Y=3`` and ``N=0``.
++
++:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
++ ``ARM64_HW_AFDBM``.
++
++:Laptops and workstations running Chrome on x86_64: Use the default
++ values.
++
++:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
++ to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
++ generation ``max_gen`` and create the next generation ``max_gen+1``.
++ ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
++ ``swappiness`` are required to scan anon type. If swapping is not
++ ``swappiness`` are required to scan the anon type. If swapping is not
++
++:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
++ [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
++ generations less than or equal to ``gen``. ``gen`` should be less
++ than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
++ generations and therefore protected from eviction. Use
++ ``nr_to_reclaim`` to limit the number of pages to evict. Multiple
++ command lines are supported, as is concatenation with the delimiters
++ ``,`` and ``;``.
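+
++A hedged sketch of the two recipes above, using placeholder IDs (the real
++``memcg_id``, ``node_id`` and ``gen`` values must be read from
++``/sys/kernel/debug/lru_gen`` first)::
+
++    # working set estimation: age generation max_gen (here 7) of memcg 1, node 0
++    echo '+ 1 0 7' > /sys/kernel/debug/lru_gen
++    # proactive reclaim: evict up to 4096 pages from generations <= 5
++    echo '- 1 0 5 100 4096' > /sys/kernel/debug/lru_gen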
++
++Framework
++=========
++For each ``lruvec``, evictable pages are divided into multiple
++generations. The youngest generation number is stored in ``max_seq``
++for both anon and file types as they are aged on an equal footing. The
++oldest generation numbers are stored in ``min_seq[2]`` separately for
++anon and file types as clean file pages can be evicted regardless of
++swap and write-back constraints. These three variables are
++monotonically increasing. Generation numbers are truncated into
++``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
++``page->flags``. The sliding window technique is used to prevent
++truncated generation numbers from overlapping. Each truncated
++generation number is an index to an array of per-type and per-zone
++lists. Evictable pages are added to the per-zone lists indexed by
++``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
++depending on their types.
++
++Each generation is then divided into multiple tiers. Tiers represent
++levels of usage from file descriptors only. Pages accessed ``N`` times via
++file descriptors belong to tier ``order_base_2(N)``. Each generation
++contains at most ``CONFIG_TIERS_PER_GEN`` tiers, and they require
++additional ``CONFIG_TIERS_PER_GEN-2`` bits in ``page->flags``. In contrast to
++moving across generations which requires the lru lock for the list
++operations, moving across tiers only involves an atomic operation on
++``page->flags`` and therefore has a negligible cost. A feedback loop
++modeled after the PID controller monitors the refault rates across all
++tiers and decides when to activate pages from which tiers in the
++reclaim path.
++
++The framework comprises two conceptually independent components: the
++aging and the eviction, which can be invoked separately from user
++space for the purpose of working set estimation and proactive reclaim.
++
++Aging
++-----
++The aging produces young generations. Given an ``lruvec``, the aging
++scans page tables for referenced pages of this ``lruvec``. Upon
++finding one, the aging updates its generation number to ``max_seq``.
++After each round of scan, the aging increments ``max_seq``.
++
++The aging maintains either a system-wide ``mm_struct`` list or
++per-memcg ``mm_struct`` lists, and it only scans page tables of
++processes that have been scheduled since the last scan.
++
++The aging is due when both ``min_seq[2]`` values reach ``max_seq-1``,
++assuming both anon and file types are reclaimable.
++
++Eviction
++--------
++The eviction consumes old generations. Given an ``lruvec``, the
++eviction scans the pages on the per-zone lists indexed by either of
++``min_seq[2]``. It first tries to select a type based on the values of
++``min_seq[2]``. When anon and file types are both available from the
++same generation, it selects the one that has a lower refault rate.
++
++During a scan, the eviction sorts pages according to their new
++generation numbers, if the aging has found them referenced. It also
++moves pages from the tiers that have higher refault rates than tier 0
++to the next generation.
++
++When it finds all the per-zone lists of a selected type are empty, the
++eviction increments ``min_seq[2]`` indexed by this selected type.
++
++To-do List
++==========
++KVM Optimization
++----------------
++Support shadow page table scanning.
++
++NUMA Optimization
++-----------------
++Optimize page table scan for NUMA.
+diff --git a/arch/Kconfig b/arch/Kconfig
index c45b770d3579..e3812adc69f7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -119,362 +227,26 @@ index d27cf69e811d..b968d6bd28b6 100644
int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp)
{
-diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
-index 46b13780c2c8..94ecc1d277a2 100644
---- a/include/linux/pgtable.h
-+++ b/include/linux/pgtable.h
-@@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- #endif
-
- #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
--#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
- static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-@@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- BUILD_BUG();
- return 0;
- }
--#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
- #endif
-
- #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 5199b9696bab..2339459c97d4 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -2421,6 +2421,103 @@ enum scan_balance {
- SCAN_FILE,
- };
-
-+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
-+{
-+ unsigned long file;
-+ struct lruvec *target_lruvec;
-+
-+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
-+
-+ /*
-+ * Determine the scan balance between anon and file LRUs.
-+ */
-+ spin_lock_irq(&target_lruvec->lru_lock);
-+ sc->anon_cost = target_lruvec->anon_cost;
-+ sc->file_cost = target_lruvec->file_cost;
-+ spin_unlock_irq(&target_lruvec->lru_lock);
-+
-+ /*
-+ * Target desirable inactive:active list ratios for the anon
-+ * and file LRU lists.
-+ */
-+ if (!sc->force_deactivate) {
-+ unsigned long refaults;
-+
-+ refaults = lruvec_page_state(target_lruvec,
-+ WORKINGSET_ACTIVATE_ANON);
-+ if (refaults != target_lruvec->refaults[0] ||
-+ inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-+ sc->may_deactivate |= DEACTIVATE_ANON;
-+ else
-+ sc->may_deactivate &= ~DEACTIVATE_ANON;
-+
-+ /*
-+ * When refaults are being observed, it means a new
-+ * workingset is being established. Deactivate to get
-+ * rid of any stale active pages quickly.
-+ */
-+ refaults = lruvec_page_state(target_lruvec,
-+ WORKINGSET_ACTIVATE_FILE);
-+ if (refaults != target_lruvec->refaults[1] ||
-+ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-+ sc->may_deactivate |= DEACTIVATE_FILE;
-+ else
-+ sc->may_deactivate &= ~DEACTIVATE_FILE;
-+ } else
-+ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-+
-+ /*
-+ * If we have plenty of inactive file pages that aren't
-+ * thrashing, try to reclaim those first before touching
-+ * anonymous pages.
-+ */
-+ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-+ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-+ sc->cache_trim_mode = 1;
-+ else
-+ sc->cache_trim_mode = 0;
-+
-+ /*
-+ * Prevent the reclaimer from falling into the cache trap: as
-+ * cache pages start out inactive, every cache fault will tip
-+ * the scan balance towards the file LRU. And as the file LRU
-+ * shrinks, so does the window for rotation from references.
-+ * This means we have a runaway feedback loop where a tiny
-+ * thrashing file LRU becomes infinitely more attractive than
-+ * anon pages. Try to detect this based on file LRU size.
-+ */
-+ if (!cgroup_reclaim(sc)) {
-+ unsigned long total_high_wmark = 0;
-+ unsigned long free, anon;
-+ int z;
-+
-+ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-+ file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-+ node_page_state(pgdat, NR_INACTIVE_FILE);
-+
-+ for (z = 0; z < MAX_NR_ZONES; z++) {
-+ struct zone *zone = &pgdat->node_zones[z];
-+
-+ if (!managed_zone(zone))
-+ continue;
-+
-+ total_high_wmark += high_wmark_pages(zone);
-+ }
-+
-+ /*
-+ * Consider anon: if that's low too, this isn't a
-+ * runaway file reclaim problem, but rather just
-+ * extreme pressure. Reclaim as per usual then.
-+ */
-+ anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-+
-+ sc->file_is_tiny =
-+ file + free <= total_high_wmark &&
-+ !(sc->may_deactivate & DEACTIVATE_ANON) &&
-+ anon >> sc->priority;
-+ }
-+}
-+
- /*
- * Determine how aggressively the anon and file LRU lists should be
- * scanned. The relative value of each set of LRU lists is determined
-@@ -2866,7 +2963,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
- unsigned long nr_reclaimed, nr_scanned;
- struct lruvec *target_lruvec;
- bool reclaimable = false;
-- unsigned long file;
-
- target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
-
-@@ -2876,93 +2972,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
- nr_reclaimed = sc->nr_reclaimed;
- nr_scanned = sc->nr_scanned;
-
-- /*
-- * Determine the scan balance between anon and file LRUs.
-- */
-- spin_lock_irq(&target_lruvec->lru_lock);
-- sc->anon_cost = target_lruvec->anon_cost;
-- sc->file_cost = target_lruvec->file_cost;
-- spin_unlock_irq(&target_lruvec->lru_lock);
--
-- /*
-- * Target desirable inactive:active list ratios for the anon
-- * and file LRU lists.
-- */
-- if (!sc->force_deactivate) {
-- unsigned long refaults;
--
-- refaults = lruvec_page_state(target_lruvec,
-- WORKINGSET_ACTIVATE_ANON);
-- if (refaults != target_lruvec->refaults[0] ||
-- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-- sc->may_deactivate |= DEACTIVATE_ANON;
-- else
-- sc->may_deactivate &= ~DEACTIVATE_ANON;
--
-- /*
-- * When refaults are being observed, it means a new
-- * workingset is being established. Deactivate to get
-- * rid of any stale active pages quickly.
-- */
-- refaults = lruvec_page_state(target_lruvec,
-- WORKINGSET_ACTIVATE_FILE);
-- if (refaults != target_lruvec->refaults[1] ||
-- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-- sc->may_deactivate |= DEACTIVATE_FILE;
-- else
-- sc->may_deactivate &= ~DEACTIVATE_FILE;
-- } else
-- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
--
-- /*
-- * If we have plenty of inactive file pages that aren't
-- * thrashing, try to reclaim those first before touching
-- * anonymous pages.
-- */
-- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-- sc->cache_trim_mode = 1;
-- else
-- sc->cache_trim_mode = 0;
--
-- /*
-- * Prevent the reclaimer from falling into the cache trap: as
-- * cache pages start out inactive, every cache fault will tip
-- * the scan balance towards the file LRU. And as the file LRU
-- * shrinks, so does the window for rotation from references.
-- * This means we have a runaway feedback loop where a tiny
-- * thrashing file LRU becomes infinitely more attractive than
-- * anon pages. Try to detect this based on file LRU size.
-- */
-- if (!cgroup_reclaim(sc)) {
-- unsigned long total_high_wmark = 0;
-- unsigned long free, anon;
-- int z;
--
-- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-- file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-- node_page_state(pgdat, NR_INACTIVE_FILE);
--
-- for (z = 0; z < MAX_NR_ZONES; z++) {
-- struct zone *zone = &pgdat->node_zones[z];
-- if (!managed_zone(zone))
-- continue;
--
-- total_high_wmark += high_wmark_pages(zone);
-- }
--
-- /*
-- * Consider anon: if that's low too, this isn't a
-- * runaway file reclaim problem, but rather just
-- * extreme pressure. Reclaim as per usual then.
-- */
-- anon = node_page_state(pgdat, NR_INACTIVE_ANON);
--
-- sc->file_is_tiny =
-- file + free <= total_high_wmark &&
-- !(sc->may_deactivate & DEACTIVATE_ANON) &&
-- anon >> sc->priority;
-- }
-+ prepare_scan_count(pgdat, sc);
-
- shrink_node_memcgs(pgdat, sc);
-
-diff --git a/mm/workingset.c b/mm/workingset.c
-index b7cdeca5a76d..edb8aed2587e 100644
---- a/mm/workingset.c
-+++ b/mm/workingset.c
-@@ -168,9 +168,9 @@
- * refault distance will immediately activate the refaulting page.
- */
-
--#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
-- 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
--#define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
-+#define EVICTION_SHIFT (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
-+#define EVICTION_MASK (BIT(EVICTION_SHIFT) - 1)
-+#define WORKINGSET_WIDTH 1
-
- /*
- * Eviction timestamps need to be able to cover the full range of
-@@ -182,36 +182,23 @@
- */
- static unsigned int bucket_order __read_mostly;
-
--static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
-- bool workingset)
-+static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
- {
-- eviction >>= bucket_order;
-- eviction &= EVICTION_MASK;
-- eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
-- eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
-- eviction = (eviction << 1) | workingset;
-+ val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
-+ val = (val << NODES_SHIFT) | pgdat->node_id;
-
-- return xa_mk_value(eviction);
-+ return xa_mk_value(val);
- }
-
--static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
-- unsigned long *evictionp, bool *workingsetp)
-+static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
- {
-- unsigned long entry = xa_to_value(shadow);
-- int memcgid, nid;
-- bool workingset;
-+ unsigned long val = xa_to_value(shadow);
-
-- workingset = entry & 1;
-- entry >>= 1;
-- nid = entry & ((1UL << NODES_SHIFT) - 1);
-- entry >>= NODES_SHIFT;
-- memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
-- entry >>= MEM_CGROUP_ID_SHIFT;
-+ *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
-+ val >>= NODES_SHIFT;
-+ *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
-
-- *memcgidp = memcgid;
-- *pgdat = NODE_DATA(nid);
-- *evictionp = entry << bucket_order;
-- *workingsetp = workingset;
-+ return val >> MEM_CGROUP_ID_SHIFT;
- }
-
- /**
-@@ -266,8 +253,10 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
- /* XXX: target_memcg can be NULL, go through lruvec */
- memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
- eviction = atomic_long_read(&lruvec->nonresident_age);
-+ eviction >>= bucket_order;
-+ eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
- workingset_age_nonresident(lruvec, thp_nr_pages(page));
-- return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
-+ return pack_shadow(memcgid, pgdat, eviction);
- }
-
- /**
-@@ -294,7 +283,7 @@ void workingset_refault(struct page *page, void *shadow)
- bool workingset;
- int memcgid;
-
-- unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
-+ eviction = unpack_shadow(shadow, &memcgid, &pgdat);
-
- rcu_read_lock();
- /*
-@@ -318,6 +307,8 @@ void workingset_refault(struct page *page, void *shadow)
- goto out;
- eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
- refault = atomic_long_read(&eviction_lruvec->nonresident_age);
-+ workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
-+ eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
-
- /*
- * Calculate the refault distance
-@@ -335,7 +326,7 @@ void workingset_refault(struct page *page, void *shadow)
- * longest time, so the occasional inappropriate activation
- * leading to pressure on the active list is not a problem.
- */
-- refault_distance = (refault - eviction) & EVICTION_MASK;
-+ refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
-
- /*
- * The activation decision for this page is made at the level
-@@ -593,7 +584,7 @@ static int __init workingset_init(void)
- unsigned int max_order;
- int ret;
-
-- BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
-+ BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
+diff --git a/fs/exec.c b/fs/exec.c
+index 18594f11c31f..c691d4d7720c 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
+ active_mm = tsk->active_mm;
+ tsk->active_mm = mm;
+ tsk->mm = mm;
++ lru_gen_add_mm(mm);
/*
- * Calculate the eviction bucket size to cover the longest
- * actionable refault distance, which is currently half of
-@@ -601,7 +592,7 @@ static int __init workingset_init(void)
- * some more pages at runtime, so keep working with up to
- * double the initial memory by using totalram_pages as-is.
- */
-- timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
-+ timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
- max_order = fls_long(totalram_pages() - 1);
- if (max_order > timestamp_bits)
- bucket_order = max_order - timestamp_bits;
-
+ * This prevents preemption while active_mm is being loaded and
+ * it and mm are being updated, which could cause problems for
+@@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
+ if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+ local_irq_enable();
+ activate_mm(active_mm, mm);
++ lru_gen_switch_mm(active_mm, mm);
+ if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+ local_irq_enable();
+ tsk->mm->vmacache_seqnum = 0;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5ceccc5ef00..f784c118f00f 100644
--- a/fs/fuse/dev.c
@@ -489,8 +261,80 @@ index a5ceccc5ef00..f784c118f00f 100644
dump_page(page, "fuse: trying to steal weird page");
return 1;
}
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 6bc9c76680b2..e52e44af6810 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
+ css_put(&cgrp->self);
+ }
+
++extern struct mutex cgroup_mutex;
++
++static inline void cgroup_lock(void)
++{
++ mutex_lock(&cgroup_mutex);
++}
++
++static inline void cgroup_unlock(void)
++{
++ mutex_unlock(&cgroup_mutex);
++}
++
+ /**
+ * task_css_set_check - obtain a task's css_set with extra access conditions
+ * @task: the task to obtain css_set for
+@@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
+ * as locks used during the cgroup_subsys::attach() methods.
+ */
+ #ifdef CONFIG_PROVE_RCU
+-extern struct mutex cgroup_mutex;
+ extern spinlock_t css_set_lock;
+ #define task_css_set_check(task, __c) \
+ rcu_dereference_check((task)->cgroups, \
+@@ -704,6 +715,8 @@ struct cgroup;
+ static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
+ static inline void css_get(struct cgroup_subsys_state *css) {}
+ static inline void css_put(struct cgroup_subsys_state *css) {}
++static inline void cgroup_lock(void) {}
++static inline void cgroup_unlock(void) {}
+ static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t) { return 0; }
+ static inline int cgroupstats_build(struct cgroupstats *stats,
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index c193be760709..60601a997433 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -230,6 +230,8 @@ struct obj_cgroup {
+ };
+ };
+
++struct lru_gen_mm_list;
++
+ /*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+@@ -349,6 +351,10 @@ struct mem_cgroup {
+ struct deferred_split deferred_split_queue;
+ #endif
+
++#ifdef CONFIG_LRU_GEN
++ struct lru_gen_mm_list *mm_list;
++#endif
++
+ struct mem_cgroup_per_node *nodeinfo[0];
+ /* WARNING: nodeinfo must be the last member here */
+ };
+@@ -1131,7 +1137,6 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
+
+ static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+ {
+- WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+ }
+
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index c274f75efcf9..e0c19a02db9d 100644
+index 8ae31622deef..d335b1c13cc2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1089,6 +1089,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
@@ -503,10 +347,10 @@ index c274f75efcf9..e0c19a02db9d 100644
/*
* Define the bit shifts to access each section. For non-existent
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
-index 355ea1ee32bd..ae3e3826dd7f 100644
+index 355ea1ee32bd..f3b99f65a652 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -79,11 +79,199 @@ static __always_inline enum lru_list page_lru(struct page *page)
+@@ -79,11 +79,239 @@ static __always_inline enum lru_list page_lru(struct page *page)
return lru;
}
@@ -534,6 +378,12 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
+ return seq % MAX_NR_GENS;
+}
+
++/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
++static inline int lru_tier_from_usage(int usage)
++{
++ return order_base_2(usage + 1);
++}
++
+/* Return a proper index regardless whether we keep a full history of stats. */
+static inline int hist_from_seq_or_gen(int seq_or_gen)
+{
@@ -676,6 +526,36 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
+ return true;
+}
+
++/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
++static inline int page_tier_usage(struct page *page)
++{
++ unsigned long flags = READ_ONCE(page->flags);
++
++ return flags & BIT(PG_workingset) ?
++ ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
++}
++
++/* Increment the usage counter after a page is accessed via file descriptors. */
++static inline void page_inc_usage(struct page *page)
++{
++ unsigned long usage;
++ unsigned long old_flags, new_flags;
++
++ do {
++ old_flags = READ_ONCE(page->flags);
++
++ if (!(old_flags & BIT(PG_workingset))) {
++ new_flags = old_flags | BIT(PG_workingset);
++ continue;
++ }
++
++ usage = (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF);
++
++ new_flags = (old_flags & ~LRU_USAGE_MASK) | min(usage, LRU_USAGE_MASK);
++ } while (new_flags != old_flags &&
++ cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
++}
++
+#else /* CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
@@ -693,6 +573,10 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
+ return false;
+}
+
++static inline void page_inc_usage(struct page *page)
++{
++}
++
+#endif /* CONFIG_LRU_GEN */
+
static __always_inline void add_page_to_lru_list(struct page *page,
@@ -706,7 +590,7 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
-@@ -93,6 +281,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
+@@ -93,6 +321,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
{
enum lru_list lru = page_lru(page);
@@ -716,7 +600,7 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
-@@ -100,6 +291,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
+@@ -100,6 +331,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec)
{
@@ -726,15 +610,148 @@ index 355ea1ee32bd..ae3e3826dd7f 100644
list_del(&page->lru);
update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-thp_nr_pages(page));
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 8f0fb62e8975..602901a0b1d0 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -15,6 +15,8 @@
+ #include <linux/page-flags-layout.h>
+ #include <linux/workqueue.h>
+ #include <linux/seqlock.h>
++#include <linux/nodemask.h>
++#include <linux/mmdebug.h>
+
+ #include <asm/mmu.h>
+
+@@ -574,6 +576,22 @@ struct mm_struct {
+
+ #ifdef CONFIG_IOMMU_SUPPORT
+ u32 pasid;
++#endif
++#ifdef CONFIG_LRU_GEN
++ struct {
++ /* the node of a global or per-memcg mm_struct list */
++ struct list_head list;
++#ifdef CONFIG_MEMCG
++ /* points to the memcg of the owner task above */
++ struct mem_cgroup *memcg;
++#endif
++ /* whether this mm_struct has been used since the last walk */
++ nodemask_t nodes;
++#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ /* the number of CPUs using this mm_struct */
++ atomic_t nr_cpus;
++#endif
++ } lrugen;
+ #endif
+ } __randomize_layout;
+
+@@ -601,6 +619,95 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
+ return (struct cpumask *)&mm->cpu_bitmap;
+ }
+
++#ifdef CONFIG_LRU_GEN
++
++void lru_gen_init_mm(struct mm_struct *mm);
++void lru_gen_add_mm(struct mm_struct *mm);
++void lru_gen_del_mm(struct mm_struct *mm);
++#ifdef CONFIG_MEMCG
++int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
++void lru_gen_free_mm_list(struct mem_cgroup *memcg);
++void lru_gen_migrate_mm(struct mm_struct *mm);
++#endif
++
++/* Track the usage of each mm_struct so that we can skip inactive ones. */
++static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
++{
++ /* exclude init_mm, efi_mm, etc. */
++ if (!core_kernel_data((unsigned long)old)) {
++ VM_BUG_ON(old == &init_mm);
++
++ nodes_setall(old->lrugen.nodes);
++#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ atomic_dec(&old->lrugen.nr_cpus);
++ VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
++#endif
++ } else
++ VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
++ READ_ONCE(old->lrugen.list.next), old);
++
++ if (!core_kernel_data((unsigned long)new)) {
++ VM_BUG_ON(new == &init_mm);
++
++#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ atomic_inc(&new->lrugen.nr_cpus);
++ VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
++#endif
++ } else
++ VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
++ READ_ONCE(new->lrugen.list.next), new);
++}
++
++/* Return whether this mm_struct is being used on any CPUs. */
++static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
++{
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++ return !cpumask_empty(mm_cpumask(mm));
++#else
++ return atomic_read(&mm->lrugen.nr_cpus);
++#endif
++}
++
++#else /* CONFIG_LRU_GEN */
++
++static inline void lru_gen_init_mm(struct mm_struct *mm)
++{
++}
++
++static inline void lru_gen_add_mm(struct mm_struct *mm)
++{
++}
++
++static inline void lru_gen_del_mm(struct mm_struct *mm)
++{
++}
++
++#ifdef CONFIG_MEMCG
++static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
++{
++ return 0;
++}
++
++static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
++{
++}
++
++static inline void lru_gen_migrate_mm(struct mm_struct *mm)
++{
++}
++#endif
++
++static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
++{
++}
++
++static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
++{
++ return false;
++}
++
++#endif /* CONFIG_LRU_GEN */
++
+ struct mmu_gather;
+ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+ extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 0d53eba1c383..e5deec17b4bd 100644
+index 0d53eba1c383..ded72f44d7e7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
-@@ -293,6 +293,108 @@ enum lruvec_flags {
+@@ -293,6 +293,114 @@ enum lruvec_flags {
*/
};
+struct lruvec;
++struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_USAGE_MASK ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
@@ -823,6 +840,7 @@ index 0d53eba1c383..e5deec17b4bd 100644
+
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_set_state(bool enable, bool main, bool swap);
++void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
+
+#else /* CONFIG_LRU_GEN */
+
@@ -834,12 +852,16 @@ index 0d53eba1c383..e5deec17b4bd 100644
+{
+}
+
++static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
++{
++}
++
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
/* per lruvec lru_lock for memcg */
-@@ -310,6 +412,10 @@ struct lruvec {
+@@ -310,6 +418,10 @@ struct lruvec {
unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
@@ -850,6 +872,37 @@ index 0d53eba1c383..e5deec17b4bd 100644
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+@@ -751,6 +863,8 @@ struct deferred_split {
+ };
+ #endif
+
++struct mm_walk_args;
++
+ /*
+ * On NUMA machines, each NUMA node would have a pg_data_t to describe
+ * it's memory layout. On UMA machines there is a single pglist_data which
+@@ -856,6 +970,9 @@ typedef struct pglist_data {
+
+ unsigned long flags;
+
++#ifdef CONFIG_LRU_GEN
++ struct mm_walk_args *mm_walk_args;
++#endif
+ ZONE_PADDING(_pad2_)
+
+ /* Per-node vmstats */
+diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
+index ac398e143c9a..89fe4e3592f9 100644
+--- a/include/linux/nodemask.h
++++ b/include/linux/nodemask.h
+@@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
+ #define first_online_node 0
+ #define first_memory_node 0
+ #define next_online_node(nid) (MAX_NUMNODES)
++#define next_memory_node(nid) (MAX_NUMNODES)
+ #define nr_node_ids 1U
+ #define nr_online_nodes 1U
+
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index ef1e3e736e14..ce8d5732a3aa 100644
--- a/include/linux/page-flags-layout.h
@@ -873,7 +926,7 @@ index ef1e3e736e14..ce8d5732a3aa 100644
#define SECTIONS_WIDTH 0
#endif
--#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_USAGE_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
@@ -883,7 +936,7 @@ index ef1e3e736e14..ce8d5732a3aa 100644
#define LAST_CPUPID_SHIFT 0
#endif
--#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_USAGE_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
@@ -894,7 +947,7 @@ index ef1e3e736e14..ce8d5732a3aa 100644
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
--#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + LRU_USAGE_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
@@ -923,6 +976,43 @@ index 04a34c08e0a6..e58984fca32a 100644
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index a43047b1030d..47c2c39bafdf 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ #endif
+
+ #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
+ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+@@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ BUILD_BUG();
+ return 0;
+ }
+-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
+ #endif
+
+ #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 144727041e78..30b1f15f5c6e 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -365,8 +365,8 @@ extern void deactivate_page(struct page *page);
+ extern void mark_page_lazyfree(struct page *page);
+ extern void swap_setup(void);
+
+-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
+- struct vm_area_struct *vma);
++extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
++ bool faulting);
+
+ /* linux/mm/vmscan.c */
+ extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 9795d75b09b2..a8cbf2d0b11a 100644
--- a/kernel/bounds.c
@@ -940,501 +1030,177 @@ index 9795d75b09b2..a8cbf2d0b11a 100644
/* End of constants */
return 0;
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 63ed6b25deaa..8ac9093e5a0d 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -2410,7 +2410,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
- #ifdef CONFIG_64BIT
- (1L << PG_arch_2) |
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 6addc9780319..4e93e5602723 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ if (new_page) {
+ get_page(new_page);
+ page_add_new_anon_rmap(new_page, vma, addr, false);
+- lru_cache_add_inactive_or_unevictable(new_page, vma);
++ lru_cache_add_page_vma(new_page, vma, false);
+ } else
+ /* no new page, just dec_mm_counter for old_page */
+ dec_mm_counter(mm, MM_ANONPAGES);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 65809fac3038..6e6d95b0462c 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -422,6 +422,7 @@ void mm_update_next_owner(struct mm_struct *mm)
+ goto retry;
+ }
+ WRITE_ONCE(mm->owner, c);
++ lru_gen_migrate_mm(mm);
+ task_unlock(c);
+ put_task_struct(c);
+ }
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 03baafd70b98..7a72a9e17059 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -673,6 +673,7 @@ static void check_mm(struct mm_struct *mm)
+ #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+ VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
-- (1L << PG_dirty)));
-+ (1L << PG_dirty) |
-+ LRU_GEN_MASK | LRU_USAGE_MASK));
++ VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
+ }
- /* ->mapping in first tail page is compound_mapcount */
- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
-diff --git a/mm/mm_init.c b/mm/mm_init.c
-index 9ddaf0e1b0ab..ef0deadb90a7 100644
---- a/mm/mm_init.c
-+++ b/mm/mm_init.c
-@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layout(void)
+ #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
+@@ -1065,6 +1066,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ goto fail_nocontext;
- shift = 8 * sizeof(unsigned long);
- width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
-- - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
-+ - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_USAGE_WIDTH;
- mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
-- "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
-+ "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
- SECTIONS_WIDTH,
- NODES_WIDTH,
- ZONES_WIDTH,
- LAST_CPUPID_WIDTH,
- KASAN_TAG_WIDTH,
-+ LRU_GEN_WIDTH,
-+ LRU_USAGE_WIDTH,
- NR_PAGEFLAGS);
- mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
- "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
-diff --git a/mm/mmzone.c b/mm/mmzone.c
-index eb89d6e018e2..2ec0d7793424 100644
---- a/mm/mmzone.c
-+++ b/mm/mmzone.c
-@@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
+ mm->user_ns = get_user_ns(user_ns);
++ lru_gen_init_mm(mm);
+ return mm;
- for_each_lru(lru)
- INIT_LIST_HEAD(&lruvec->lists[lru]);
-+
-+ lru_gen_init_lruvec(lruvec);
+ fail_nocontext:
+@@ -1107,6 +1109,7 @@ static inline void __mmput(struct mm_struct *mm)
+ }
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
++ lru_gen_del_mm(mm);
+ mmdrop(mm);
}
- #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
-diff --git a/mm/swapfile.c b/mm/swapfile.c
-index 149e77454e3c..3598b668f533 100644
---- a/mm/swapfile.c
-+++ b/mm/swapfile.c
-@@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
- err = 0;
- atomic_inc(&proc_poll_event);
- wake_up_interruptible(&proc_poll_wait);
-+ /* stop tracking anon if the multigenerational lru is turned off */
-+ lru_gen_set_state(false, false, true);
+@@ -2531,6 +2534,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
+ get_task_struct(p);
+ }
- out_dput:
- filp_close(victim, NULL);
-@@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
- mutex_unlock(&swapon_mutex);
- atomic_inc(&proc_poll_event);
- wake_up_interruptible(&proc_poll_wait);
-+ /* start tracking anon if the multigenerational lru is turned on */
-+ lru_gen_set_state(true, false, true);
++ if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
++ /* lock the task to synchronize with memcg migration */
++ task_lock(p);
++ lru_gen_add_mm(p->mm);
++ task_unlock(p);
++ }
++
+ wake_up_new_task(p);
- error = 0;
- goto out;
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 2339459c97d4..f7bbfc0b1ebd 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -49,6 +49,7 @@
- #include <linux/printk.h>
- #include <linux/dax.h>
- #include <linux/psi.h>
-+#include <linux/memory.h>
+ /* forking complete and child started to run, tell ptracer */
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 0fccf7d0c6a1..42cea2a77273 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -1350,6 +1350,7 @@ void kthread_use_mm(struct mm_struct *mm)
+ tsk->mm = mm;
+ membarrier_update_current_mm(mm);
+ switch_mm_irqs_off(active_mm, mm, tsk);
++ lru_gen_switch_mm(active_mm, mm);
+ local_irq_enable();
+ task_unlock(tsk);
+ #ifdef finish_arch_post_lock_switch
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4ca80df205ce..68e6dc4ef643 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4323,6 +4323,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ * finish_task_switch()'s mmdrop().
+ */
+ switch_mm_irqs_off(prev->active_mm, next->mm, next);
++ lru_gen_switch_mm(prev->active_mm, next->mm);
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -2715,6 +2716,311 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ if (!prev->mm) { // from kernel
+ /* will mmdrop() in finish_task_switch(). */
+@@ -7602,6 +7603,7 @@ void idle_task_exit(void)
+
+ if (mm != &init_mm) {
+ switch_mm(mm, &init_mm, current);
++ lru_gen_switch_mm(mm, &init_mm);
+ finish_arch_post_lock_switch();
}
- }
-+#ifdef CONFIG_LRU_GEN
-+
-+/*
-+ * After pages are faulted in, the aging must scan them twice before the
-+ * eviction can consider them. The first scan clears the accessed bit set during
-+ * initial faults. And the second scan makes sure they haven't been used since
-+ * the first scan.
-+ */
-+#define MIN_NR_GENS 2
-+
-+#define MAX_BATCH_SIZE 8192
-+
-+/******************************************************************************
-+ * shorthand helpers
-+ ******************************************************************************/
-+
-+#define DEFINE_MAX_SEQ() \
-+ unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
-+
-+#define DEFINE_MIN_SEQ() \
-+ unsigned long min_seq[ANON_AND_FILE] = { \
-+ READ_ONCE(lruvec->evictable.min_seq[0]), \
-+ READ_ONCE(lruvec->evictable.min_seq[1]), \
-+ }
-+
-+#define for_each_type_zone(type, zone) \
-+ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
-+ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
-+
-+#define for_each_gen_type_zone(gen, type, zone) \
-+ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
-+ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
-+ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
-+
-+static int page_lru_gen(struct page *page)
-+{
-+ return ((page->flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
-+}
-+
-+static int get_nr_gens(struct lruvec *lruvec, int type)
-+{
-+ return lruvec->evictable.max_seq - lruvec->evictable.min_seq[type] + 1;
-+}
-+
-+static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
-+{
-+ return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
-+}
-+
-+static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
-+{
-+ return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
-+}
-+
-+static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
-+{
-+ lockdep_assert_held(&lruvec->lru_lock);
-+
-+ return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
-+ get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
-+ get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
-+ get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
-+}
-+
-+/******************************************************************************
-+ * state change
-+ ******************************************************************************/
-+
-+#ifdef CONFIG_LRU_GEN_ENABLED
-+DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
-+#else
-+DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
-+#endif
-+
-+static DEFINE_MUTEX(lru_gen_state_mutex);
-+static int lru_gen_nr_swapfiles __read_mostly;
-+
-+static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
-+{
-+ int gen, type, zone;
-+ enum lru_list lru;
-+ struct lrugen *lrugen = &lruvec->evictable;
-+
-+ for_each_evictable_lru(lru) {
-+ type = is_file_lru(lru);
-+
-+ if (lrugen->enabled[type] && !list_empty(&lruvec->lists[lru]))
-+ return false;
-+ }
-+
-+ for_each_gen_type_zone(gen, type, zone) {
-+ if (!lrugen->enabled[type] && !list_empty(&lrugen->lists[gen][type][zone]))
-+ return false;
-+
-+ VM_WARN_ON_ONCE(!lrugen->enabled[type] && lrugen->sizes[gen][type][zone]);
-+ }
-+
-+ return true;
-+}
-+
-+static bool fill_lru_gen_lists(struct lruvec *lruvec)
-+{
-+ enum lru_list lru;
-+ int batch_size = 0;
-+
-+ for_each_evictable_lru(lru) {
-+ int type = is_file_lru(lru);
-+ bool active = is_active_lru(lru);
-+ struct list_head *head = &lruvec->lists[lru];
-+
-+ if (!lruvec->evictable.enabled[type])
-+ continue;
-+
-+ while (!list_empty(head)) {
-+ bool success;
-+ struct page *page = lru_to_page(head);
-+
-+ VM_BUG_ON_PAGE(PageTail(page), page);
-+ VM_BUG_ON_PAGE(PageUnevictable(page), page);
-+ VM_BUG_ON_PAGE(PageActive(page) != active, page);
-+ VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
-+ VM_BUG_ON_PAGE(page_is_file_lru(page) != type, page);
-+
-+ prefetchw_prev_lru_page(page, head, flags);
-+
-+ del_page_from_lru_list(page, lruvec);
-+ success = lru_gen_addition(page, lruvec, true);
-+ VM_BUG_ON(!success);
-+
-+ if (++batch_size == MAX_BATCH_SIZE)
-+ return false;
-+ }
-+ }
-+
-+ return true;
-+}
-+
-+static bool drain_lru_gen_lists(struct lruvec *lruvec)
-+{
-+ int gen, type, zone;
-+ int batch_size = 0;
-+
-+ for_each_gen_type_zone(gen, type, zone) {
-+ struct list_head *head = &lruvec->evictable.lists[gen][type][zone];
-+
-+ if (lruvec->evictable.enabled[type])
-+ continue;
-+
-+ while (!list_empty(head)) {
-+ bool success;
-+ struct page *page = lru_to_page(head);
-+
-+ VM_BUG_ON_PAGE(PageTail(page), page);
-+ VM_BUG_ON_PAGE(PageUnevictable(page), page);
-+ VM_BUG_ON_PAGE(PageActive(page), page);
-+ VM_BUG_ON_PAGE(page_is_file_lru(page) != type, page);
-+ VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
-+
-+ prefetchw_prev_lru_page(page, head, flags);
-+
-+ success = lru_gen_deletion(page, lruvec);
-+ VM_BUG_ON(!success);
-+ add_page_to_lru_list(page, lruvec);
-+
-+ if (++batch_size == MAX_BATCH_SIZE)
-+ return false;
-+ }
-+ }
-+
-+ return true;
-+}
-+
-+/*
-+ * For file page tracking, we enable/disable it according to the main switch.
-+ * For anon page tracking, we only enabled it when the main switch is on and
-+ * there is at least one swapfile; we disable it when there are no swapfiles
-+ * regardless of the value of the main switch. Otherwise, we will eventually
-+ * reach the max size of the sliding window and have to call inc_min_seq(),
-+ * which brings an unnecessary overhead.
-+ */
-+void lru_gen_set_state(bool enable, bool main, bool swap)
-+{
-+ struct mem_cgroup *memcg;
-+
-+ mem_hotplug_begin();
-+ mutex_lock(&lru_gen_state_mutex);
-+ cgroup_lock();
-+
-+ main = main && enable != lru_gen_enabled();
-+ swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
-+ swap = swap && lru_gen_enabled();
-+ if (!main && !swap)
-+ goto unlock;
-+
-+ if (main) {
-+ if (enable)
-+ static_branch_enable(&lru_gen_static_key);
-+ else
-+ static_branch_disable(&lru_gen_static_key);
-+ }
-+
-+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
-+ do {
-+ int nid;
-+
-+ for_each_node_state(nid, N_MEMORY) {
-+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
-+ struct lrugen *lrugen = &lruvec->evictable;
-+
-+ spin_lock_irq(&lruvec->lru_lock);
-+
-+ VM_BUG_ON(!seq_is_valid(lruvec));
-+ VM_BUG_ON(!state_is_valid(lruvec));
-+
-+ WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
-+ WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
-+
-+ while (!(enable ? fill_lru_gen_lists(lruvec) :
-+ drain_lru_gen_lists(lruvec))) {
-+ spin_unlock_irq(&lruvec->lru_lock);
-+ cond_resched();
-+ spin_lock_irq(&lruvec->lru_lock);
-+ }
-+
-+ spin_unlock_irq(&lruvec->lru_lock);
-+ }
-+
-+ cond_resched();
-+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
-+unlock:
-+ cgroup_unlock();
-+ mutex_unlock(&lru_gen_state_mutex);
-+ mem_hotplug_done();
-+}
-+
-+static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
-+ unsigned long action, void *arg)
-+{
-+ struct mem_cgroup *memcg;
-+ struct memory_notify *mnb = arg;
-+ int nid = mnb->status_change_nid;
-+
-+ if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
-+ return NOTIFY_DONE;
-+
-+ mutex_lock(&lru_gen_state_mutex);
-+ cgroup_lock();
-+
-+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
-+ do {
-+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
-+ struct lrugen *lrugen = &lruvec->evictable;
-+
-+ VM_BUG_ON(!seq_is_valid(lruvec));
-+ VM_BUG_ON(!state_is_valid(lruvec));
-+
-+ WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
-+ WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
-+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
-+
-+ cgroup_unlock();
-+ mutex_unlock(&lru_gen_state_mutex);
-+
-+ return NOTIFY_DONE;
-+}
-+
-+/******************************************************************************
-+ * initialization
-+ ******************************************************************************/
-+
-+void lru_gen_init_lruvec(struct lruvec *lruvec)
-+{
-+ int i;
-+ int gen, type, zone;
-+ struct lrugen *lrugen = &lruvec->evictable;
-+
-+ lrugen->max_seq = MIN_NR_GENS + 1;
-+ lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
-+ lrugen->enabled[1] = lru_gen_enabled();
-+
-+ for (i = 0; i <= MIN_NR_GENS + 1; i++)
-+ lrugen->timestamps[i] = jiffies;
-+
-+ for_each_gen_type_zone(gen, type, zone)
-+ INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
-+}
-+
-+static int __init init_lru_gen(void)
-+{
-+ BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
-+ BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
-+
-+ if (hotplug_memory_notifier(lru_gen_online_mem, 0))
-+ pr_err("lru_gen: failed to subscribe hotplug notifications\n");
-+
-+ return 0;
-+};
-+/*
-+ * We want to run as early as possible because debug code may call mm_alloc()
-+ * and mmput(). Out only dependency mm_kobj is initialized one stage earlier.
-+ */
-+arch_initcall(init_lru_gen);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 02d44e3420f5..da125f145bc4 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -901,4 +901,62 @@ config KMAP_LOCAL
+ # struct io_mapping based helper. Selected by drivers that need them
+ config IO_MAPPING
+ bool
+
-+#endif /* CONFIG_LRU_GEN */
++# the multigenerational lru {
++config LRU_GEN
++ bool "Multigenerational LRU"
++ depends on MMU
++ help
++ A high performance LRU implementation to heavily overcommit workloads
++ that are not IO bound. See Documentation/vm/multigen_lru.rst for
++ details.
+
- static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
- {
- unsigned long nr[NR_LRU_LISTS];
-
-diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
-index ae3e3826dd7f..f3b99f65a652 100644
---- a/include/linux/mm_inline.h
-+++ b/include/linux/mm_inline.h
-@@ -103,6 +103,12 @@ static inline int lru_gen_from_seq(unsigned long seq)
- return seq % MAX_NR_GENS;
- }
-
-+/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
-+static inline int lru_tier_from_usage(int usage)
-+{
-+ return order_base_2(usage + 1);
-+}
++ Warning: do not enable this option unless you plan to use it because
++ it introduces a small per-process and per-memcg and per-node memory
++ overhead.
+
- /* Return a proper index regardless whether we keep a full history of stats. */
- static inline int hist_from_seq_or_gen(int seq_or_gen)
- {
-@@ -245,6 +251,36 @@ static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
- return true;
- }
-
-+/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
-+static inline int page_tier_usage(struct page *page)
-+{
-+ unsigned long flags = READ_ONCE(page->flags);
++config LRU_GEN_ENABLED
++ bool "Turn on by default"
++ depends on LRU_GEN
++ help
++ The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
++ changes it to 1.
+
-+ return flags & BIT(PG_workingset) ?
-+ ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
-+}
++ Warning: the default value is the fast path. See
++ Documentation/static-keys.txt for details.
+
-+/* Increment the usage counter after a page is accessed via file descriptors. */
-+static inline void page_inc_usage(struct page *page)
-+{
-+ unsigned long usage;
-+ unsigned long old_flags, new_flags;
++config LRU_GEN_STATS
++ bool "Full stats for debugging"
++ depends on LRU_GEN
++ help
++ This option keeps full stats for each generation, which can be read
++ from /sys/kernel/debug/lru_gen_full.
+
-+ do {
-+ old_flags = READ_ONCE(page->flags);
++ Warning: do not enable this option unless you plan to use it because
++ it introduces an additional small per-process and per-memcg and
++ per-node memory overhead.
+
-+ if (!(old_flags & BIT(PG_workingset))) {
-+ new_flags = old_flags | BIT(PG_workingset);
-+ continue;
-+ }
++config NR_LRU_GENS
++ int "Max number of generations"
++ depends on LRU_GEN
++ range 4 31
++ default 7
++ help
++ This will use order_base_2(N+1) spare bits from page flags.
+
-+ usage = (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF);
++ Warning: do not use numbers larger than necessary because each
++ generation introduces a small per-node and per-memcg memory overhead.
+
-+ new_flags = (old_flags & ~LRU_USAGE_MASK) | min(usage, LRU_USAGE_MASK);
-+ } while (new_flags != old_flags &&
-+ cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
-+}
++config TIERS_PER_GEN
++ int "Number of tiers per generation"
++ depends on LRU_GEN
++ range 2 5
++ default 4
++ help
++ This will use N-2 spare bits from page flags.
+
- #else /* CONFIG_LRU_GEN */
-
- static inline bool lru_gen_enabled(void)
-@@ -262,6 +298,10 @@ static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
- return false;
- }
-
-+static inline void page_inc_usage(struct page *page)
-+{
-+}
++ Larger values generally offer better protection to active pages under
++ heavy buffered I/O workloads.
++# }
+
- #endif /* CONFIG_LRU_GEN */
-
- static __always_inline void add_page_to_lru_list(struct page *page,
-diff --git a/include/linux/swap.h b/include/linux/swap.h
-index 144727041e78..30b1f15f5c6e 100644
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -365,8 +365,8 @@ extern void deactivate_page(struct page *page);
- extern void mark_page_lazyfree(struct page *page);
- extern void swap_setup(void);
-
--extern void lru_cache_add_inactive_or_unevictable(struct page *page,
-- struct vm_area_struct *vma);
-+extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
-+ bool faulting);
-
- /* linux/mm/vmscan.c */
- extern unsigned long zone_reclaimable_pages(struct zone *zone);
-diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
-index 6addc9780319..4e93e5602723 100644
---- a/kernel/events/uprobes.c
-+++ b/kernel/events/uprobes.c
-@@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
- if (new_page) {
- get_page(new_page);
- page_add_new_anon_rmap(new_page, vma, addr, false);
-- lru_cache_add_inactive_or_unevictable(new_page, vma);
-+ lru_cache_add_page_vma(new_page, vma, false);
- } else
- /* no new page, just dec_mm_counter for old_page */
- dec_mm_counter(mm, MM_ANONPAGES);
+ endmenu
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 8ac9093e5a0d..681da4a3cf61 100644
+index 6d2a0119fc58..64c70c322ac4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
-@@ -636,7 +636,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
+@@ -639,7 +639,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
entry = mk_huge_pmd(page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
page_add_new_anon_rmap(page, vma, haddr, true);
@@ -1443,6 +1209,16 @@ index 8ac9093e5a0d..681da4a3cf61 100644
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
+@@ -2422,7 +2422,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
+ #ifdef CONFIG_64BIT
+ (1L << PG_arch_2) |
+ #endif
+- (1L << PG_dirty)));
++ (1L << PG_dirty) |
++ LRU_GEN_MASK | LRU_USAGE_MASK));
+
+ /* ->mapping in first tail page is compound_mapcount */
+ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6c0185fdd815..09e5346c2754 100644
--- a/mm/khugepaged.c
@@ -1456,8 +1232,68 @@ index 6c0185fdd815..09e5346c2754 100644
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 64ada9e650a5..58b610ffa0e0 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4981,6 +4981,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
+ for_each_node(node)
+ free_mem_cgroup_per_node_info(memcg, node);
+ free_percpu(memcg->vmstats_percpu);
++ lru_gen_free_mm_list(memcg);
+ kfree(memcg);
+ }
+
+@@ -5030,6 +5031,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ if (alloc_mem_cgroup_per_node_info(memcg, node))
+ goto fail;
+
++ if (lru_gen_alloc_mm_list(memcg))
++ goto fail;
++
+ if (memcg_wb_domain_init(memcg, GFP_KERNEL))
+ goto fail;
+
+@@ -5991,6 +5995,29 @@ static void mem_cgroup_move_task(void)
+ }
+ #endif
+
++#ifdef CONFIG_LRU_GEN
++static void mem_cgroup_attach(struct cgroup_taskset *tset)
++{
++ struct cgroup_subsys_state *css;
++ struct task_struct *task = NULL;
++
++ cgroup_taskset_for_each_leader(task, css, tset)
++ ;
++
++ if (!task)
++ return;
++
++ task_lock(task);
++ if (task->mm && task->mm->owner == task)
++ lru_gen_migrate_mm(task->mm);
++ task_unlock(task);
++}
++#else
++static void mem_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++#endif
++
+ static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
+ {
+ if (value == PAGE_COUNTER_MAX)
+@@ -6332,6 +6359,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
+ .css_reset = mem_cgroup_css_reset,
+ .css_rstat_flush = mem_cgroup_css_rstat_flush,
+ .can_attach = mem_cgroup_can_attach,
++ .attach = mem_cgroup_attach,
+ .cancel_attach = mem_cgroup_cancel_attach,
+ .post_attach = mem_cgroup_move_task,
+ .dfl_cftypes = memory_files,
diff --git a/mm/memory.c b/mm/memory.c
-index 730daa00952b..a76196885f92 100644
+index 486f4a2874e7..c017bdac5fd1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -839,7 +839,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
@@ -1469,7 +1305,7 @@ index 730daa00952b..a76196885f92 100644
rss[mm_counter(new_page)]++;
/* All done, just insert the new page copy in the child */
-@@ -2950,7 +2950,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+@@ -2962,7 +2962,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
*/
ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
page_add_new_anon_rmap(new_page, vma, vmf->address, false);
@@ -1478,7 +1314,7 @@ index 730daa00952b..a76196885f92 100644
/*
* We call the notify macro here because, when using secondary
* mmu page tables (such as kvm shadow page tables), we want the
-@@ -3479,7 +3479,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+@@ -3521,7 +3521,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* ksm created a completely new copy */
if (unlikely(page != swapcache && swapcache)) {
page_add_new_anon_rmap(page, vma, vmf->address, false);
@@ -1487,7 +1323,7 @@ index 730daa00952b..a76196885f92 100644
} else {
do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
}
-@@ -3625,7 +3625,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
+@@ -3668,7 +3668,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, vmf->address, false);
@@ -1496,7 +1332,7 @@ index 730daa00952b..a76196885f92 100644
setpte:
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
-@@ -3793,7 +3793,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+@@ -3838,7 +3838,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
if (write && !(vma->vm_flags & VM_SHARED)) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, addr, false);
@@ -1506,10 +1342,10 @@ index 730daa00952b..a76196885f92 100644
inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
page_add_file_rmap(page, false);
diff --git a/mm/migrate.c b/mm/migrate.c
-index b234c3f3acb7..d3307c9eced4 100644
+index 41ff2c9896c4..e103ab266d97 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -2967,7 +2967,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
+@@ -2968,7 +2968,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
inc_mm_counter(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, addr, false);
if (!is_zone_device_page(page))
@@ -1518,6 +1354,66 @@ index b234c3f3acb7..d3307c9eced4 100644
get_page(page);
if (flush) {
+diff --git a/mm/mm_init.c b/mm/mm_init.c
+index 9ddaf0e1b0ab..ef0deadb90a7 100644
+--- a/mm/mm_init.c
++++ b/mm/mm_init.c
+@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layout(void)
+
+ shift = 8 * sizeof(unsigned long);
+ width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
+- - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
++ - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_USAGE_WIDTH;
+ mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
+- "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
++ "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
+ SECTIONS_WIDTH,
+ NODES_WIDTH,
+ ZONES_WIDTH,
+ LAST_CPUPID_WIDTH,
+ KASAN_TAG_WIDTH,
++ LRU_GEN_WIDTH,
++ LRU_USAGE_WIDTH,
+ NR_PAGEFLAGS);
+ mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
+ "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
+diff --git a/mm/mmzone.c b/mm/mmzone.c
+index eb89d6e018e2..2ec0d7793424 100644
+--- a/mm/mmzone.c
++++ b/mm/mmzone.c
+@@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
+
+ for_each_lru(lru)
+ INIT_LIST_HEAD(&lruvec->lists[lru]);
++
++ lru_gen_init_lruvec(lruvec);
+ }
+
+ #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+diff --git a/mm/rmap.c b/mm/rmap.c
+index e05c300048e6..1a33e394f516 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -72,6 +72,7 @@
+ #include <linux/page_idle.h>
+ #include <linux/memremap.h>
+ #include <linux/userfaultfd_k.h>
++#include <linux/mm_inline.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -789,6 +790,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
+ }
+
+ if (pvmw.pte) {
++ /* the multigenerational lru exploits the spatial locality */
++ if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
++ lru_gen_scan_around(&pvmw);
++ referenced++;
++ }
+ if (ptep_clear_flush_young_notify(vma, address,
+ pvmw.pte)) {
+ /*
diff --git a/mm/swap.c b/mm/swap.c
index dfb48cf9c2c9..96ce95eeb2c9 100644
--- a/mm/swap.c
@@ -1591,7 +1487,7 @@ index dfb48cf9c2c9..96ce95eeb2c9 100644
local_lock(&lru_pvecs.lock);
diff --git a/mm/swapfile.c b/mm/swapfile.c
-index 3598b668f533..549e94318b2f 100644
+index 996afa8131c8..8b5ca15df123 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1936,7 +1936,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
@@ -1603,8 +1499,26 @@ index 3598b668f533..549e94318b2f 100644
}
swap_free(entry);
out:
+@@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ err = 0;
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
++ /* stop tracking anon if the multigenerational lru is turned off */
++ lru_gen_set_state(false, false, true);
+
+ out_dput:
+ filp_close(victim, NULL);
+@@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ mutex_unlock(&swapon_mutex);
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
++ /* start tracking anon if the multigenerational lru is turned on */
++ lru_gen_set_state(true, false, true);
+
+ error = 0;
+ goto out;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
-index e14b3820c6a8..175d55b4f594 100644
+index 63a73e164d55..747a2d7eb5b6 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
@@ -1617,10 +1531,22 @@ index e14b3820c6a8..175d55b4f594 100644
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
diff --git a/mm/vmscan.c b/mm/vmscan.c
-index f7bbfc0b1ebd..84d25079092e 100644
+index 5199b9696bab..ff2deec24c64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -1094,9 +1094,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
+@@ -49,6 +49,11 @@
+ #include <linux/printk.h>
+ #include <linux/dax.h>
+ #include <linux/psi.h>
++#include <linux/memory.h>
++#include <linux/pagewalk.h>
++#include <linux/shmem_fs.h>
++#include <linux/ctype.h>
++#include <linux/debugfs.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -1093,9 +1098,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
if (PageSwapCache(page)) {
swp_entry_t swap = { .val = page_private(page) };
@@ -1633,10 +1559,193 @@ index f7bbfc0b1ebd..84d25079092e 100644
__delete_from_swap_cache(page, swap, shadow);
xa_unlock_irqrestore(&mapping->i_pages, flags);
put_swap_page(page, swap);
-@@ -2780,6 +2782,93 @@ static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
- get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
+@@ -1306,6 +1313,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
+ if (!sc->may_unmap && page_mapped(page))
+ goto keep_locked;
+
++ /* in case the page was found accessed by lru_gen_scan_around() */
++ if (lru_gen_enabled() && !ignore_references &&
++ page_mapped(page) && PageReferenced(page))
++ goto keep_locked;
++
+ may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+ (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+
+@@ -2421,6 +2433,106 @@ enum scan_balance {
+ SCAN_FILE,
+ };
+
++static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
++{
++ unsigned long file;
++ struct lruvec *target_lruvec;
++
++ if (lru_gen_enabled())
++ return;
++
++ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
++
++ /*
++ * Determine the scan balance between anon and file LRUs.
++ */
++ spin_lock_irq(&target_lruvec->lru_lock);
++ sc->anon_cost = target_lruvec->anon_cost;
++ sc->file_cost = target_lruvec->file_cost;
++ spin_unlock_irq(&target_lruvec->lru_lock);
++
++ /*
++ * Target desirable inactive:active list ratios for the anon
++ * and file LRU lists.
++ */
++ if (!sc->force_deactivate) {
++ unsigned long refaults;
++
++ refaults = lruvec_page_state(target_lruvec,
++ WORKINGSET_ACTIVATE_ANON);
++ if (refaults != target_lruvec->refaults[0] ||
++ inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
++ sc->may_deactivate |= DEACTIVATE_ANON;
++ else
++ sc->may_deactivate &= ~DEACTIVATE_ANON;
++
++ /*
++ * When refaults are being observed, it means a new
++ * workingset is being established. Deactivate to get
++ * rid of any stale active pages quickly.
++ */
++ refaults = lruvec_page_state(target_lruvec,
++ WORKINGSET_ACTIVATE_FILE);
++ if (refaults != target_lruvec->refaults[1] ||
++ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
++ sc->may_deactivate |= DEACTIVATE_FILE;
++ else
++ sc->may_deactivate &= ~DEACTIVATE_FILE;
++ } else
++ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
++
++ /*
++ * If we have plenty of inactive file pages that aren't
++ * thrashing, try to reclaim those first before touching
++ * anonymous pages.
++ */
++ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
++ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
++ sc->cache_trim_mode = 1;
++ else
++ sc->cache_trim_mode = 0;
++
++ /*
++ * Prevent the reclaimer from falling into the cache trap: as
++ * cache pages start out inactive, every cache fault will tip
++ * the scan balance towards the file LRU. And as the file LRU
++ * shrinks, so does the window for rotation from references.
++ * This means we have a runaway feedback loop where a tiny
++ * thrashing file LRU becomes infinitely more attractive than
++ * anon pages. Try to detect this based on file LRU size.
++ */
++ if (!cgroup_reclaim(sc)) {
++ unsigned long total_high_wmark = 0;
++ unsigned long free, anon;
++ int z;
++
++ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
++ file = node_page_state(pgdat, NR_ACTIVE_FILE) +
++ node_page_state(pgdat, NR_INACTIVE_FILE);
++
++ for (z = 0; z < MAX_NR_ZONES; z++) {
++ struct zone *zone = &pgdat->node_zones[z];
++
++ if (!managed_zone(zone))
++ continue;
++
++ total_high_wmark += high_wmark_pages(zone);
++ }
++
++ /*
++ * Consider anon: if that's low too, this isn't a
++ * runaway file reclaim problem, but rather just
++ * extreme pressure. Reclaim as per usual then.
++ */
++ anon = node_page_state(pgdat, NR_INACTIVE_ANON);
++
++ sc->file_is_tiny =
++ file + free <= total_high_wmark &&
++ !(sc->may_deactivate & DEACTIVATE_ANON) &&
++ anon >> sc->priority;
++ }
++}
++
+ /*
+ * Determine how aggressively the anon and file LRU lists should be
+ * scanned. The relative value of each set of LRU lists is determined
+@@ -2618,6 +2730,2425 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ }
}
++#ifdef CONFIG_LRU_GEN
++
++/*
++ * After pages are faulted in, the aging must scan them twice before the
++ * eviction can consider them. The first scan clears the accessed bit set during
++ * initial faults. And the second scan makes sure they haven't been used since
++ * the first scan.
++ */
++#define MIN_NR_GENS 2
++
++#define MAX_BATCH_SIZE 8192
++
++/******************************************************************************
++ * shorthand helpers
++ ******************************************************************************/
++
++#define DEFINE_MAX_SEQ() \
++ unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
++
++#define DEFINE_MIN_SEQ() \
++ unsigned long min_seq[ANON_AND_FILE] = { \
++ READ_ONCE(lruvec->evictable.min_seq[0]), \
++ READ_ONCE(lruvec->evictable.min_seq[1]), \
++ }
++
++#define for_each_type_zone(type, zone) \
++ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
++ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
++
++#define for_each_gen_type_zone(gen, type, zone) \
++ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
++ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
++ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
++
++static int page_lru_gen(struct page *page)
++{
++ return ((page->flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
++}
++
++static int get_nr_gens(struct lruvec *lruvec, int type)
++{
++ return lruvec->evictable.max_seq - lruvec->evictable.min_seq[type] + 1;
++}
++
++static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
++{
++ return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
++}
++
++static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
++{
++ return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
++}
++
++static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
++{
++ lockdep_assert_held(&lruvec->lru_lock);
++
++ return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
++ get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
++ get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
++ get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
++}
++
+/******************************************************************************
+ * refault feedback loop
+ ******************************************************************************/
@@ -1724,477 +1833,6 @@ index f7bbfc0b1ebd..84d25079092e 100644
+ sp->refaulted * max(pv->total, 1UL) * pv->gain;
+}
+
- /******************************************************************************
- * state change
- ******************************************************************************/
-diff --git a/mm/workingset.c b/mm/workingset.c
-index edb8aed2587e..3f3f03d51ea7 100644
---- a/mm/workingset.c
-+++ b/mm/workingset.c
-@@ -201,6 +201,110 @@ static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_da
- return val >> MEM_CGROUP_ID_SHIFT;
- }
-
-+#ifdef CONFIG_LRU_GEN
-+
-+#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
-+#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
-+#endif
-+
-+static void page_set_usage(struct page *page, int usage)
-+{
-+ unsigned long old_flags, new_flags;
-+
-+ VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
-+
-+ if (!usage)
-+ return;
-+
-+ do {
-+ old_flags = READ_ONCE(page->flags);
-+ new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
-+ ((usage - 1UL) << LRU_USAGE_PGOFF);
-+ } while (new_flags != old_flags &&
-+ cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
-+}
-+
-+/* Return a token to be stored in the shadow entry of a page being evicted. */
-+static void *lru_gen_eviction(struct page *page)
-+{
-+ int hist, tier;
-+ unsigned long token;
-+ unsigned long min_seq;
-+ struct lruvec *lruvec;
-+ struct lrugen *lrugen;
-+ int type = page_is_file_lru(page);
-+ int usage = page_tier_usage(page);
-+ struct mem_cgroup *memcg = page_memcg(page);
-+ struct pglist_data *pgdat = page_pgdat(page);
-+
-+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
-+ lrugen = &lruvec->evictable;
-+ min_seq = READ_ONCE(lrugen->min_seq[type]);
-+ token = (min_seq << LRU_USAGE_SHIFT) | usage;
-+
-+ hist = hist_from_seq_or_gen(min_seq);
-+ tier = lru_tier_from_usage(usage);
-+ atomic_long_add(thp_nr_pages(page), &lrugen->evicted[hist][type][tier]);
-+
-+ return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
-+}
-+
-+/* Account a refaulted page based on the token stored in its shadow entry. */
-+static void lru_gen_refault(struct page *page, void *shadow)
-+{
-+ int hist, tier, usage;
-+ int memcg_id;
-+ unsigned long token;
-+ unsigned long min_seq;
-+ struct lruvec *lruvec;
-+ struct lrugen *lrugen;
-+ struct pglist_data *pgdat;
-+ struct mem_cgroup *memcg;
-+ int type = page_is_file_lru(page);
-+
-+ token = unpack_shadow(shadow, &memcg_id, &pgdat);
-+ if (page_pgdat(page) != pgdat)
-+ return;
-+
-+ rcu_read_lock();
-+ memcg = page_memcg_rcu(page);
-+ if (mem_cgroup_id(memcg) != memcg_id)
-+ goto unlock;
-+
-+ usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
-+ token >>= LRU_USAGE_SHIFT;
-+
-+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
-+ lrugen = &lruvec->evictable;
-+ min_seq = READ_ONCE(lrugen->min_seq[type]);
-+ if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
-+ goto unlock;
-+
-+ page_set_usage(page, usage);
-+
-+ hist = hist_from_seq_or_gen(min_seq);
-+ tier = lru_tier_from_usage(usage);
-+ atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[hist][type][tier]);
-+ inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type);
-+ if (tier)
-+ inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type);
-+unlock:
-+ rcu_read_unlock();
-+}
-+
-+#else /* CONFIG_LRU_GEN */
-+
-+static void *lru_gen_eviction(struct page *page)
-+{
-+ return NULL;
-+}
-+
-+static void lru_gen_refault(struct page *page, void *shadow)
-+{
-+}
-+
-+#endif /* CONFIG_LRU_GEN */
-+
- /**
- * workingset_age_nonresident - age non-resident entries as LRU ages
- * @lruvec: the lruvec that was aged
-@@ -249,6 +353,9 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
- VM_BUG_ON_PAGE(page_count(page), page);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
-
-+ if (lru_gen_enabled())
-+ return lru_gen_eviction(page);
-+
- lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
- /* XXX: target_memcg can be NULL, go through lruvec */
- memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
-@@ -283,6 +390,11 @@ void workingset_refault(struct page *page, void *shadow)
- bool workingset;
- int memcgid;
-
-+ if (lru_gen_enabled()) {
-+ lru_gen_refault(page, shadow);
-+ return;
-+ }
-+
- eviction = unpack_shadow(shadow, &memcgid, &pgdat);
-
- rcu_read_lock();
-
-diff --git a/fs/exec.c b/fs/exec.c
-index 18594f11c31f..c691d4d7720c 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
- active_mm = tsk->active_mm;
- tsk->active_mm = mm;
- tsk->mm = mm;
-+ lru_gen_add_mm(mm);
- /*
- * This prevents preemption while active_mm is being loaded and
- * it and mm are being updated, which could cause problems for
-@@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
- if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
- local_irq_enable();
- activate_mm(active_mm, mm);
-+ lru_gen_switch_mm(active_mm, mm);
- if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
- local_irq_enable();
- tsk->mm->vmacache_seqnum = 0;
-diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
-index 6bcac3d91dd1..60601a997433 100644
---- a/include/linux/memcontrol.h
-+++ b/include/linux/memcontrol.h
-@@ -230,6 +230,8 @@ struct obj_cgroup {
- };
- };
-
-+struct lru_gen_mm_list;
-+
- /*
- * The memory controller data structure. The memory controller controls both
- * page cache and RSS per cgroup. We would eventually like to provide
-@@ -349,6 +351,10 @@ struct mem_cgroup {
- struct deferred_split deferred_split_queue;
- #endif
-
-+#ifdef CONFIG_LRU_GEN
-+ struct lru_gen_mm_list *mm_list;
-+#endif
-+
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
- };
-diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 5aacc1c10a45..b0f662555eae 100644
---- a/include/linux/mm_types.h
-+++ b/include/linux/mm_types.h
-@@ -15,6 +15,8 @@
- #include <linux/page-flags-layout.h>
- #include <linux/workqueue.h>
- #include <linux/seqlock.h>
-+#include <linux/nodemask.h>
-+#include <linux/mmdebug.h>
-
- #include <asm/mmu.h>
-
-@@ -561,6 +563,22 @@ struct mm_struct {
-
- #ifdef CONFIG_IOMMU_SUPPORT
- u32 pasid;
-+#endif
-+#ifdef CONFIG_LRU_GEN
-+ struct {
-+ /* the node of a global or per-memcg mm_struct list */
-+ struct list_head list;
-+#ifdef CONFIG_MEMCG
-+ /* points to the memcg of the owner task above */
-+ struct mem_cgroup *memcg;
-+#endif
-+ /* whether this mm_struct has been used since the last walk */
-+ nodemask_t nodes;
-+#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-+ /* the number of CPUs using this mm_struct */
-+ atomic_t nr_cpus;
-+#endif
-+ } lrugen;
- #endif
- } __randomize_layout;
-
-@@ -588,6 +606,95 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
- return (struct cpumask *)&mm->cpu_bitmap;
- }
-
-+#ifdef CONFIG_LRU_GEN
-+
-+void lru_gen_init_mm(struct mm_struct *mm);
-+void lru_gen_add_mm(struct mm_struct *mm);
-+void lru_gen_del_mm(struct mm_struct *mm);
-+#ifdef CONFIG_MEMCG
-+int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
-+void lru_gen_free_mm_list(struct mem_cgroup *memcg);
-+void lru_gen_migrate_mm(struct mm_struct *mm);
-+#endif
-+
-+/* Track the usage of each mm_struct so that we can skip inactive ones. */
-+static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
-+{
-+ /* exclude init_mm, efi_mm, etc. */
-+ if (!core_kernel_data((unsigned long)old)) {
-+ VM_BUG_ON(old == &init_mm);
-+
-+ nodes_setall(old->lrugen.nodes);
-+#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-+ atomic_dec(&old->lrugen.nr_cpus);
-+ VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
-+#endif
-+ } else
-+ VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
-+ READ_ONCE(old->lrugen.list.next), old);
-+
-+ if (!core_kernel_data((unsigned long)new)) {
-+ VM_BUG_ON(new == &init_mm);
-+
-+#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-+ atomic_inc(&new->lrugen.nr_cpus);
-+ VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
-+#endif
-+ } else
-+ VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
-+ READ_ONCE(new->lrugen.list.next), new);
-+}
-+
-+/* Return whether this mm_struct is being used on any CPUs. */
-+static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
-+{
-+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-+ return !cpumask_empty(mm_cpumask(mm));
-+#else
-+ return atomic_read(&mm->lrugen.nr_cpus);
-+#endif
-+}
-+
-+#else /* CONFIG_LRU_GEN */
-+
-+static inline void lru_gen_init_mm(struct mm_struct *mm)
-+{
-+}
-+
-+static inline void lru_gen_add_mm(struct mm_struct *mm)
-+{
-+}
-+
-+static inline void lru_gen_del_mm(struct mm_struct *mm)
-+{
-+}
-+
-+#ifdef CONFIG_MEMCG
-+static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
-+{
-+ return 0;
-+}
-+
-+static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
-+{
-+}
-+
-+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
-+{
-+}
-+#endif
-+
-+static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
-+{
-+}
-+
-+static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
-+{
-+ return false;
-+}
-+
-+#endif /* CONFIG_LRU_GEN */
-+
- struct mmu_gather;
- extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
- extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
-diff --git a/kernel/exit.c b/kernel/exit.c
-index fd1c04193e18..b362179852f1 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -423,6 +423,7 @@ void mm_update_next_owner(struct mm_struct *mm)
- goto retry;
- }
- WRITE_ONCE(mm->owner, c);
-+ lru_gen_migrate_mm(mm);
- task_unlock(c);
- put_task_struct(c);
- }
-diff --git a/kernel/fork.c b/kernel/fork.c
-index dc06afd725cb..2fd7dae9afcb 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -669,6 +669,7 @@ static void check_mm(struct mm_struct *mm)
- #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
- #endif
-+ VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
- }
-
- #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
-@@ -1061,6 +1062,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
- goto fail_nocontext;
-
- mm->user_ns = get_user_ns(user_ns);
-+ lru_gen_init_mm(mm);
- return mm;
-
- fail_nocontext:
-@@ -1103,6 +1105,7 @@ static inline void __mmput(struct mm_struct *mm)
- }
- if (mm->binfmt)
- module_put(mm->binfmt->module);
-+ lru_gen_del_mm(mm);
- mmdrop(mm);
- }
-
-@@ -2524,6 +2527,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
- get_task_struct(p);
- }
-
-+ if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
-+ /* lock the task to synchronize with memcg migration */
-+ task_lock(p);
-+ lru_gen_add_mm(p->mm);
-+ task_unlock(p);
-+ }
-+
- wake_up_new_task(p);
-
- /* forking complete and child started to run, tell ptracer */
-diff --git a/kernel/kthread.c b/kernel/kthread.c
-index fe3f2a40d61e..b81e49ed31a7 100644
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
-@@ -1325,6 +1325,7 @@ void kthread_use_mm(struct mm_struct *mm)
- tsk->mm = mm;
- membarrier_update_current_mm(mm);
- switch_mm_irqs_off(active_mm, mm, tsk);
-+ lru_gen_switch_mm(active_mm, mm);
- local_irq_enable();
- task_unlock(tsk);
- #ifdef finish_arch_post_lock_switch
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 5226cc26a095..2d4b77f173db 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -4323,6 +4323,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
- * finish_task_switch()'s mmdrop().
- */
- switch_mm_irqs_off(prev->active_mm, next->mm, next);
-+ lru_gen_switch_mm(prev->active_mm, next->mm);
-
- if (!prev->mm) { // from kernel
- /* will mmdrop() in finish_task_switch(). */
-@@ -7603,6 +7604,7 @@ void idle_task_exit(void)
-
- if (mm != &init_mm) {
- switch_mm(mm, &init_mm, current);
-+ lru_gen_switch_mm(mm, &init_mm);
- finish_arch_post_lock_switch();
- }
-
-diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 64ada9e650a5..58b610ffa0e0 100644
---- a/mm/memcontrol.c
-+++ b/mm/memcontrol.c
-@@ -5214,6 +5214,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
- free_mem_cgroup_per_node_info(memcg, node);
- free_percpu(memcg->vmstats_percpu);
- free_percpu(memcg->vmstats_local);
-+ lru_gen_free_mm_list(memcg);
- kfree(memcg);
- }
-
-@@ -5266,6 +5267,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
- if (alloc_mem_cgroup_per_node_info(memcg, node))
- goto fail;
-
-+ if (lru_gen_alloc_mm_list(memcg))
-+ goto fail;
-+
- if (memcg_wb_domain_init(memcg, GFP_KERNEL))
- goto fail;
-
-@@ -5991,6 +5995,29 @@ static void mem_cgroup_move_task(void)
- }
- #endif
-
-+#ifdef CONFIG_LRU_GEN
-+static void mem_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+ struct cgroup_subsys_state *css;
-+ struct task_struct *task = NULL;
-+
-+ cgroup_taskset_for_each_leader(task, css, tset)
-+ ;
-+
-+ if (!task)
-+ return;
-+
-+ task_lock(task);
-+ if (task->mm && task->mm->owner == task)
-+ lru_gen_migrate_mm(task->mm);
-+ task_unlock(task);
-+}
-+#else
-+static void mem_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+#endif
-+
- static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
- {
- if (value == PAGE_COUNTER_MAX)
-@@ -6332,6 +6359,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
- .css_reset = mem_cgroup_css_reset,
- .css_rstat_flush = mem_cgroup_css_rstat_flush,
- .can_attach = mem_cgroup_can_attach,
-+ .attach = mem_cgroup_attach,
- .cancel_attach = mem_cgroup_cancel_attach,
- .post_attach = mem_cgroup_move_task,
- .dfl_cftypes = memory_files,
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 84d25079092e..d93d2272e475 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -2869,6 +2869,323 @@ static bool positive_ctrl_err(struct controller_pos *sp, struct controller_pos *
- sp->refaulted * max(pv->total, 1UL) * pv->gain;
- }
-
+/******************************************************************************
+ * mm_struct list
+ ******************************************************************************/
@@ -2512,96 +2150,6 @@ index 84d25079092e..d93d2272e475 100644
+ return last;
+}
+
- /******************************************************************************
- * state change
- ******************************************************************************/
-@@ -3096,6 +3413,13 @@ static int __init init_lru_gen(void)
- {
- BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
- BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
-+ BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
-+
-+ if (mem_cgroup_disabled()) {
-+ global_mm_list = alloc_mm_list();
-+ if (WARN_ON_ONCE(!global_mm_list))
-+ return -ENOMEM;
-+ }
-
- if (hotplug_memory_notifier(lru_gen_online_mem, 0))
- pr_err("lru_gen: failed to subscribe hotplug notifications\n");
-
-diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index e5deec17b4bd..38de59fcbe54 100644
---- a/include/linux/mmzone.h
-+++ b/include/linux/mmzone.h
-@@ -294,6 +294,7 @@ enum lruvec_flags {
- };
-
- struct lruvec;
-+struct page_vma_mapped_walk;
-
- #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
- #define LRU_USAGE_MASK ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
-@@ -382,6 +383,7 @@ struct lrugen {
-
- void lru_gen_init_lruvec(struct lruvec *lruvec);
- void lru_gen_set_state(bool enable, bool main, bool swap);
-+void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
-
- #else /* CONFIG_LRU_GEN */
-
-@@ -393,6 +395,10 @@ static inline void lru_gen_set_state(bool enable, bool main, bool swap)
- {
- }
-
-+static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
-+{
-+}
-+
- #endif /* CONFIG_LRU_GEN */
-
- struct lruvec {
-diff --git a/mm/rmap.c b/mm/rmap.c
-index 693a610e181d..985cf4ebd03c 100644
---- a/mm/rmap.c
-+++ b/mm/rmap.c
-@@ -72,6 +72,7 @@
- #include <linux/page_idle.h>
- #include <linux/memremap.h>
- #include <linux/userfaultfd_k.h>
-+#include <linux/mm_inline.h>
-
- #include <asm/tlbflush.h>
-
-@@ -792,6 +793,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
- }
-
- if (pvmw.pte) {
-+ /* the multigenerational lru exploits the spatial locality */
-+ if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
-+ lru_gen_scan_around(&pvmw);
-+ referenced++;
-+ }
- if (ptep_clear_flush_young_notify(vma, address,
- pvmw.pte)) {
- /*
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index d93d2272e475..837d5e6a821e 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -50,6 +50,8 @@
- #include <linux/dax.h>
- #include <linux/psi.h>
- #include <linux/memory.h>
-+#include <linux/pagewalk.h>
-+#include <linux/shmem_fs.h>
-
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -3186,6 +3188,788 @@ static bool get_next_mm(struct mm_walk_args *args, struct mm_struct **iter)
- return last;
- }
-
+/******************************************************************************
+ * the aging
+ ******************************************************************************/
@@ -3384,74 +2932,6 @@ index d93d2272e475..837d5e6a821e 100644
+ set_page_dirty(pte_page(pte[i]));
+}
+
- /******************************************************************************
- * state change
- ******************************************************************************/
-@@ -3415,6 +4199,10 @@ static int __init init_lru_gen(void)
- BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
- BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
-
-+ VM_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
-+ VM_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
-+ VM_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
-+
- if (mem_cgroup_disabled()) {
- global_mm_list = alloc_mm_list();
- if (WARN_ON_ONCE(!global_mm_list))
-
-diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 38de59fcbe54..ded72f44d7e7 100644
---- a/include/linux/mmzone.h
-+++ b/include/linux/mmzone.h
-@@ -863,6 +863,8 @@ struct deferred_split {
- };
- #endif
-
-+struct mm_walk_args;
-+
- /*
- * On NUMA machines, each NUMA node would have a pg_data_t to describe
- * it's memory layout. On UMA machines there is a single pglist_data which
-@@ -968,6 +970,9 @@ typedef struct pglist_data {
-
- unsigned long flags;
-
-+#ifdef CONFIG_LRU_GEN
-+ struct mm_walk_args *mm_walk_args;
-+#endif
- ZONE_PADDING(_pad2_)
-
- /* Per-node vmstats */
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 837d5e6a821e..2f86dcc04c56 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -1311,6 +1311,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
- if (!sc->may_unmap && page_mapped(page))
- goto keep_locked;
-
-+ /* in case the page was found accessed by lru_gen_scan_around() */
-+ if (lru_gen_enabled() && !ignore_references &&
-+ page_mapped(page) && PageReferenced(page))
-+ goto keep_locked;
-+
- may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
- (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
-
-@@ -2431,6 +2436,9 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
- unsigned long file;
- struct lruvec *target_lruvec;
-
-+ if (lru_gen_enabled())
-+ return;
-+
- target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
-
- /*
-@@ -3970,6 +3978,489 @@ void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
- set_page_dirty(pte_page(pte[i]));
- }
-
+/******************************************************************************
+ * the eviction
+ ******************************************************************************/
@@ -3935,124 +3415,223 @@ index 837d5e6a821e..2f86dcc04c56 100644
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+}
+
- /******************************************************************************
- * state change
- ******************************************************************************/
-@@ -4172,6 +4663,21 @@ static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *se
- return NOTIFY_DONE;
- }
-
-+static void lru_gen_start_kswapd(int nid)
++/******************************************************************************
++ * state change
++ ******************************************************************************/
++
++#ifdef CONFIG_LRU_GEN_ENABLED
++DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
++#else
++DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
++#endif
++
++static DEFINE_MUTEX(lru_gen_state_mutex);
++static int lru_gen_nr_swapfiles __read_mostly;
++
++static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
+{
-+ struct pglist_data *pgdat = NODE_DATA(nid);
++ int gen, type, zone;
++ enum lru_list lru;
++ struct lrugen *lrugen = &lruvec->evictable;
+
-+ pgdat->mm_walk_args = kvzalloc_node(size_of_mm_walk_args(), GFP_KERNEL, nid);
-+ WARN_ON_ONCE(!pgdat->mm_walk_args);
++ for_each_evictable_lru(lru) {
++ type = is_file_lru(lru);
++
++ if (lrugen->enabled[type] && !list_empty(&lruvec->lists[lru]))
++ return false;
++ }
++
++ for_each_gen_type_zone(gen, type, zone) {
++ if (!lrugen->enabled[type] && !list_empty(&lrugen->lists[gen][type][zone]))
++ return false;
++
++ VM_WARN_ON_ONCE(!lrugen->enabled[type] && lrugen->sizes[gen][type][zone]);
++ }
++
++ return true;
+}
+
-+static void lru_gen_stop_kswapd(int nid)
++static bool fill_lru_gen_lists(struct lruvec *lruvec)
+{
-+ struct pglist_data *pgdat = NODE_DATA(nid);
++ enum lru_list lru;
++ int batch_size = 0;
+
-+ kvfree(pgdat->mm_walk_args);
-+}
++ for_each_evictable_lru(lru) {
++ int type = is_file_lru(lru);
++ bool active = is_active_lru(lru);
++ struct list_head *head = &lruvec->lists[lru];
+
- /******************************************************************************
- * initialization
- ******************************************************************************/
-@@ -4220,6 +4726,24 @@ static int __init init_lru_gen(void)
- */
- arch_initcall(init_lru_gen);
-
-+#else /* CONFIG_LRU_GEN */
++ if (!lruvec->evictable.enabled[type])
++ continue;
+
-+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-+{
++ while (!list_empty(head)) {
++ bool success;
++ struct page *page = lru_to_page(head);
++
++ VM_BUG_ON_PAGE(PageTail(page), page);
++ VM_BUG_ON_PAGE(PageUnevictable(page), page);
++ VM_BUG_ON_PAGE(PageActive(page) != active, page);
++ VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
++ VM_BUG_ON_PAGE(page_is_file_lru(page) != type, page);
++
++ prefetchw_prev_lru_page(page, head, flags);
++
++ del_page_from_lru_list(page, lruvec);
++ success = lru_gen_addition(page, lruvec, true);
++ VM_BUG_ON(!success);
++
++ if (++batch_size == MAX_BATCH_SIZE)
++ return false;
++ }
++ }
++
++ return true;
+}
+
-+static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
++static bool drain_lru_gen_lists(struct lruvec *lruvec)
+{
++ int gen, type, zone;
++ int batch_size = 0;
++
++ for_each_gen_type_zone(gen, type, zone) {
++ struct list_head *head = &lruvec->evictable.lists[gen][type][zone];
++
++ if (lruvec->evictable.enabled[type])
++ continue;
++
++ while (!list_empty(head)) {
++ bool success;
++ struct page *page = lru_to_page(head);
++
++ VM_BUG_ON_PAGE(PageTail(page), page);
++ VM_BUG_ON_PAGE(PageUnevictable(page), page);
++ VM_BUG_ON_PAGE(PageActive(page), page);
++ VM_BUG_ON_PAGE(page_is_file_lru(page) != type, page);
++ VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
++
++ prefetchw_prev_lru_page(page, head, flags);
++
++ success = lru_gen_deletion(page, lruvec);
++ VM_BUG_ON(!success);
++ add_page_to_lru_list(page, lruvec);
++
++ if (++batch_size == MAX_BATCH_SIZE)
++ return false;
++ }
++ }
++
++ return true;
+}
+
-+static void lru_gen_start_kswapd(int nid)
++/*
++ * For file page tracking, we enable/disable it according to the main switch.
++ * For anon page tracking, we only enable it when the main switch is on and
++ * there is at least one swapfile; we disable it when there are no swapfiles
++ * regardless of the value of the main switch. Otherwise, we will eventually
++ * reach the max size of the sliding window and have to call inc_min_seq(),
++ * which brings an unnecessary overhead.
++ */
++void lru_gen_set_state(bool enable, bool main, bool swap)
+{
++ struct mem_cgroup *memcg;
++
++ mem_hotplug_begin();
++ mutex_lock(&lru_gen_state_mutex);
++ cgroup_lock();
++
++ main = main && enable != lru_gen_enabled();
++ swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
++ swap = swap && lru_gen_enabled();
++ if (!main && !swap)
++ goto unlock;
++
++ if (main) {
++ if (enable)
++ static_branch_enable(&lru_gen_static_key);
++ else
++ static_branch_disable(&lru_gen_static_key);
++ }
++
++ memcg = mem_cgroup_iter(NULL, NULL, NULL);
++ do {
++ int nid;
++
++ for_each_node_state(nid, N_MEMORY) {
++ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
++ struct lrugen *lrugen = &lruvec->evictable;
++
++ spin_lock_irq(&lruvec->lru_lock);
++
++ VM_BUG_ON(!seq_is_valid(lruvec));
++ VM_BUG_ON(!state_is_valid(lruvec));
++
++ WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
++ WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
++
++ while (!(enable ? fill_lru_gen_lists(lruvec) :
++ drain_lru_gen_lists(lruvec))) {
++ spin_unlock_irq(&lruvec->lru_lock);
++ cond_resched();
++ spin_lock_irq(&lruvec->lru_lock);
++ }
++
++ spin_unlock_irq(&lruvec->lru_lock);
++ }
++
++ cond_resched();
++ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
++unlock:
++ cgroup_unlock();
++ mutex_unlock(&lru_gen_state_mutex);
++ mem_hotplug_done();
+}
+
-+static void lru_gen_stop_kswapd(int nid)
++static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
++ unsigned long action, void *arg)
+{
++ struct mem_cgroup *memcg;
++ struct memory_notify *mnb = arg;
++ int nid = mnb->status_change_nid;
++
++ if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
++ return NOTIFY_DONE;
++
++ mutex_lock(&lru_gen_state_mutex);
++ cgroup_lock();
++
++ memcg = mem_cgroup_iter(NULL, NULL, NULL);
++ do {
++ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
++ struct lrugen *lrugen = &lruvec->evictable;
++
++ VM_BUG_ON(!seq_is_valid(lruvec));
++ VM_BUG_ON(!state_is_valid(lruvec));
++
++ WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
++ WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
++ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
++
++ cgroup_unlock();
++ mutex_unlock(&lru_gen_state_mutex);
++
++ return NOTIFY_DONE;
+}
+
- #endif /* CONFIG_LRU_GEN */
-
- static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -4233,6 +4757,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
- struct blk_plug plug;
- bool scan_adjusted;
-
-+ if (lru_gen_enabled()) {
-+ lru_gen_shrink_lruvec(lruvec, sc);
-+ return;
-+ }
++static void lru_gen_start_kswapd(int nid)
++{
++ struct pglist_data *pgdat = NODE_DATA(nid);
+
- get_scan_count(lruvec, sc, nr);
-
- /* Record the original scan target for proportional adjustments later */
-@@ -4699,6 +5228,9 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
- struct lruvec *target_lruvec;
- unsigned long refaults;
-
-+ if (lru_gen_enabled())
-+ return;
++ pgdat->mm_walk_args = kvzalloc_node(size_of_mm_walk_args(), GFP_KERNEL, nid);
++ WARN_ON_ONCE(!pgdat->mm_walk_args);
++}
+
- target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
- refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
- target_lruvec->refaults[0] = refaults;
-@@ -5073,6 +5605,11 @@ static void age_active_anon(struct pglist_data *pgdat,
- struct mem_cgroup *memcg;
- struct lruvec *lruvec;
-
-+ if (lru_gen_enabled()) {
-+ lru_gen_age_node(pgdat, sc);
-+ return;
-+ }
++static void lru_gen_stop_kswapd(int nid)
++{
++ struct pglist_data *pgdat = NODE_DATA(nid);
+
- if (!total_swap_pages)
- return;
-
-@@ -5753,6 +6290,8 @@ int kswapd_run(int nid)
- if (pgdat->kswapd)
- return 0;
-
-+ lru_gen_start_kswapd(nid);
++ kvfree(pgdat->mm_walk_args);
++}
+
- pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
- if (IS_ERR(pgdat->kswapd)) {
- /* failure at boot is fatal */
-@@ -5775,6 +6314,7 @@ void kswapd_stop(int nid)
- if (kswapd) {
- kthread_stop(kswapd);
- NODE_DATA(nid)->kswapd = NULL;
-+ lru_gen_stop_kswapd(nid);
- }
- }
-
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 2f86dcc04c56..ff2deec24c64 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -52,6 +52,8 @@
- #include <linux/memory.h>
- #include <linux/pagewalk.h>
- #include <linux/shmem_fs.h>
-+#include <linux/ctype.h>
-+#include <linux/debugfs.h>
-
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -4678,6 +4680,401 @@ static void lru_gen_stop_kswapd(int nid)
- kvfree(pgdat->mm_walk_args);
- }
-
+/******************************************************************************
+ * sysfs interface
+ ******************************************************************************/
@@ -4448,250 +4027,473 @@ index 2f86dcc04c56..ff2deec24c64 100644
+ .release = seq_release,
+};
+
- /******************************************************************************
- * initialization
- ******************************************************************************/
-@@ -4718,6 +5115,12 @@ static int __init init_lru_gen(void)
- if (hotplug_memory_notifier(lru_gen_online_mem, 0))
- pr_err("lru_gen: failed to subscribe hotplug notifications\n");
-
++/******************************************************************************
++ * initialization
++ ******************************************************************************/
++
++void lru_gen_init_lruvec(struct lruvec *lruvec)
++{
++ int i;
++ int gen, type, zone;
++ struct lrugen *lrugen = &lruvec->evictable;
++
++ lrugen->max_seq = MIN_NR_GENS + 1;
++ lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
++ lrugen->enabled[1] = lru_gen_enabled();
++
++ for (i = 0; i <= MIN_NR_GENS + 1; i++)
++ lrugen->timestamps[i] = jiffies;
++
++ for_each_gen_type_zone(gen, type, zone)
++ INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
++}
++
++static int __init init_lru_gen(void)
++{
++ BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
++ BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
++ BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
++
++ VM_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
++ VM_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
++ VM_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
++
++ if (mem_cgroup_disabled()) {
++ global_mm_list = alloc_mm_list();
++ if (WARN_ON_ONCE(!global_mm_list))
++ return -ENOMEM;
++ }
++
++ if (hotplug_memory_notifier(lru_gen_online_mem, 0))
++ pr_err("lru_gen: failed to subscribe hotplug notifications\n");
++
+ if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
+ pr_err("lru_gen: failed to create sysfs group\n");
+
+ debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
+ debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+
- return 0;
- };
- /*
-
-diff --git a/mm/Kconfig b/mm/Kconfig
-index 24c045b24b95..e82e6b92820c 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -872,4 +872,61 @@ config MAPPING_DIRTY_HELPERS
- config KMAP_LOCAL
- bool
-
-+# the multigenerational lru {
-+config LRU_GEN
-+ bool "Multigenerational LRU"
-+ depends on MMU
-+ help
-+ A high performance LRU implementation to heavily overcommit workloads
-+ that are not IO bound. See Documentation/vm/multigen_lru.rst for
-+ details.
-+
-+ Warning: do not enable this option unless you plan to use it because
-+ it introduces a small per-process and per-memcg and per-node memory
-+ overhead.
-+
-+config LRU_GEN_ENABLED
-+ bool "Turn on by default"
-+ depends on LRU_GEN
-+ help
-+ The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
-+ changes it to 1.
++ return 0;
++};
++/*
++ * We want to run as early as possible because debug code may call mm_alloc()
++ * and mmput(). Our only dependency mm_kobj is initialized one stage earlier.
++ */
++arch_initcall(init_lru_gen);
+
-+ Warning: the default value is the fast path. See
-+ Documentation/static-keys.txt for details.
++#else /* CONFIG_LRU_GEN */
+
-+config LRU_GEN_STATS
-+ bool "Full stats for debugging"
-+ depends on LRU_GEN
-+ help
-+ This option keeps full stats for each generation, which can be read
-+ from /sys/kernel/debug/lru_gen_full.
++static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
++{
++}
+
-+ Warning: do not enable this option unless you plan to use it because
-+ it introduces an additional small per-process and per-memcg and
-+ per-node memory overhead.
++static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
++{
++}
+
-+config NR_LRU_GENS
-+ int "Max number of generations"
-+ depends on LRU_GEN
-+ range 4 31
-+ default 7
-+ help
-+ This will use order_base_2(N+1) spare bits from page flags.
++static void lru_gen_start_kswapd(int nid)
++{
++}
+
-+ Warning: do not use numbers larger than necessary because each
-+ generation introduces a small per-node and per-memcg memory overhead.
++static void lru_gen_stop_kswapd(int nid)
++{
++}
+
-+config TIERS_PER_GEN
-+ int "Number of tiers per generation"
-+ depends on LRU_GEN
-+ range 2 5
-+ default 4
-+ help
-+ This will use N-2 spare bits from page flags.
++#endif /* CONFIG_LRU_GEN */
+
-+ Larger values generally offer better protection to active pages under
-+ heavy buffered I/O workloads.
-+# }
+ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ {
+ unsigned long nr[NR_LRU_LISTS];
+@@ -2629,6 +5160,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+ struct blk_plug plug;
+ bool scan_adjusted;
+
++ if (lru_gen_enabled()) {
++ lru_gen_shrink_lruvec(lruvec, sc);
++ return;
++ }
+
- endmenu
-
-
-diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
-index eff5fbd492d0..c353b3f55924 100644
---- a/Documentation/vm/index.rst
-+++ b/Documentation/vm/index.rst
-@@ -17,6 +17,7 @@ various features of the Linux memory management
+ get_scan_count(lruvec, sc, nr);
- swap_numa
- zswap
-+ multigen_lru
+ /* Record the original scan target for proportional adjustments later */
+@@ -2866,7 +5402,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+ unsigned long nr_reclaimed, nr_scanned;
+ struct lruvec *target_lruvec;
+ bool reclaimable = false;
+- unsigned long file;
- Kernel developers MM documentation
- ==================================
-diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
-new file mode 100644
-index 000000000000..a18416ed7e92
---- /dev/null
-+++ b/Documentation/vm/multigen_lru.rst
-@@ -0,0 +1,143 @@
-+.. SPDX-License-Identifier: GPL-2.0
-+
-+=====================
-+Multigenerational LRU
-+=====================
+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+@@ -2876,93 +5411,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+ nr_reclaimed = sc->nr_reclaimed;
+ nr_scanned = sc->nr_scanned;
+
+- /*
+- * Determine the scan balance between anon and file LRUs.
+- */
+- spin_lock_irq(&target_lruvec->lru_lock);
+- sc->anon_cost = target_lruvec->anon_cost;
+- sc->file_cost = target_lruvec->file_cost;
+- spin_unlock_irq(&target_lruvec->lru_lock);
+-
+- /*
+- * Target desirable inactive:active list ratios for the anon
+- * and file LRU lists.
+- */
+- if (!sc->force_deactivate) {
+- unsigned long refaults;
+-
+- refaults = lruvec_page_state(target_lruvec,
+- WORKINGSET_ACTIVATE_ANON);
+- if (refaults != target_lruvec->refaults[0] ||
+- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+- sc->may_deactivate |= DEACTIVATE_ANON;
+- else
+- sc->may_deactivate &= ~DEACTIVATE_ANON;
+-
+- /*
+- * When refaults are being observed, it means a new
+- * workingset is being established. Deactivate to get
+- * rid of any stale active pages quickly.
+- */
+- refaults = lruvec_page_state(target_lruvec,
+- WORKINGSET_ACTIVATE_FILE);
+- if (refaults != target_lruvec->refaults[1] ||
+- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+- sc->may_deactivate |= DEACTIVATE_FILE;
+- else
+- sc->may_deactivate &= ~DEACTIVATE_FILE;
+- } else
+- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+-
+- /*
+- * If we have plenty of inactive file pages that aren't
+- * thrashing, try to reclaim those first before touching
+- * anonymous pages.
+- */
+- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+- sc->cache_trim_mode = 1;
+- else
+- sc->cache_trim_mode = 0;
+-
+- /*
+- * Prevent the reclaimer from falling into the cache trap: as
+- * cache pages start out inactive, every cache fault will tip
+- * the scan balance towards the file LRU. And as the file LRU
+- * shrinks, so does the window for rotation from references.
+- * This means we have a runaway feedback loop where a tiny
+- * thrashing file LRU becomes infinitely more attractive than
+- * anon pages. Try to detect this based on file LRU size.
+- */
+- if (!cgroup_reclaim(sc)) {
+- unsigned long total_high_wmark = 0;
+- unsigned long free, anon;
+- int z;
+-
+- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+- file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+- node_page_state(pgdat, NR_INACTIVE_FILE);
+-
+- for (z = 0; z < MAX_NR_ZONES; z++) {
+- struct zone *zone = &pgdat->node_zones[z];
+- if (!managed_zone(zone))
+- continue;
+-
+- total_high_wmark += high_wmark_pages(zone);
+- }
+-
+- /*
+- * Consider anon: if that's low too, this isn't a
+- * runaway file reclaim problem, but rather just
+- * extreme pressure. Reclaim as per usual then.
+- */
+- anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+-
+- sc->file_is_tiny =
+- file + free <= total_high_wmark &&
+- !(sc->may_deactivate & DEACTIVATE_ANON) &&
+- anon >> sc->priority;
+- }
++ prepare_scan_count(pgdat, sc);
+
+ shrink_node_memcgs(pgdat, sc);
+
+@@ -3182,6 +5631,9 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
+ struct lruvec *target_lruvec;
+ unsigned long refaults;
+
++ if (lru_gen_enabled())
++ return;
+
-+Quick Start
-+===========
-+Build Options
-+-------------
-+:Required: Set ``CONFIG_LRU_GEN=y``.
+ target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
+ target_lruvec->refaults[0] = refaults;
+@@ -3556,6 +6008,11 @@ static void age_active_anon(struct pglist_data *pgdat,
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
++ if (lru_gen_enabled()) {
++ lru_gen_age_node(pgdat, sc);
++ return;
++ }
+
-+:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
-+ default.
+ if (!total_swap_pages)
+ return;
+
+@@ -4236,6 +6693,8 @@ int kswapd_run(int nid)
+ if (pgdat->kswapd)
+ return 0;
+
++ lru_gen_start_kswapd(nid);
+
-+:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
-+ a maximum of ``X`` generations.
+ pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ if (IS_ERR(pgdat->kswapd)) {
+ /* failure at boot is fatal */
+@@ -4258,6 +6717,7 @@ void kswapd_stop(int nid)
+ if (kswapd) {
+ kthread_stop(kswapd);
+ NODE_DATA(nid)->kswapd = NULL;
++ lru_gen_stop_kswapd(nid);
+ }
+ }
+
+diff --git a/mm/workingset.c b/mm/workingset.c
+index b7cdeca5a76d..3f3f03d51ea7 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -168,9 +168,9 @@
+ * refault distance will immediately activate the refaulting page.
+ */
+
+-#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
+- 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
+-#define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
++#define EVICTION_SHIFT (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
++#define EVICTION_MASK (BIT(EVICTION_SHIFT) - 1)
++#define WORKINGSET_WIDTH 1
+
+ /*
+ * Eviction timestamps need to be able to cover the full range of
+@@ -182,38 +182,129 @@
+ */
+ static unsigned int bucket_order __read_mostly;
+
+-static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
+- bool workingset)
++static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
+ {
+- eviction >>= bucket_order;
+- eviction &= EVICTION_MASK;
+- eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
+- eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
+- eviction = (eviction << 1) | workingset;
++ val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
++ val = (val << NODES_SHIFT) | pgdat->node_id;
+
+- return xa_mk_value(eviction);
++ return xa_mk_value(val);
+ }
+
+-static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
+- unsigned long *evictionp, bool *workingsetp)
++static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
+ {
+- unsigned long entry = xa_to_value(shadow);
+- int memcgid, nid;
+- bool workingset;
++ unsigned long val = xa_to_value(shadow);
+
+- workingset = entry & 1;
+- entry >>= 1;
+- nid = entry & ((1UL << NODES_SHIFT) - 1);
+- entry >>= NODES_SHIFT;
+- memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
+- entry >>= MEM_CGROUP_ID_SHIFT;
++ *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
++ val >>= NODES_SHIFT;
++ *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
+
+- *memcgidp = memcgid;
+- *pgdat = NODE_DATA(nid);
+- *evictionp = entry << bucket_order;
+- *workingsetp = workingset;
++ return val >> MEM_CGROUP_ID_SHIFT;
+ }
+
++#ifdef CONFIG_LRU_GEN
+
-+:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to
-+ support a maximum of ``Y`` tiers per generation.
++#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
++#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
++#endif
+
-+Runtime Options
-+---------------
-+:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enabled`` if the
-+ feature was not turned on by default.
++static void page_set_usage(struct page *page, int usage)
++{
++ unsigned long old_flags, new_flags;
+
-+:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
-+ to spread pages out across ``N+1`` generations. ``N`` should be less
-+ than ``X``. Larger values make the background aging more aggressive.
++ VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
+
-+:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
-+ This file has the following output:
++ if (!usage)
++ return;
+
-+::
++ do {
++ old_flags = READ_ONCE(page->flags);
++ new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
++ ((usage - 1UL) << LRU_USAGE_PGOFF);
++ } while (new_flags != old_flags &&
++ cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
++}
+
-+ memcg memcg_id memcg_path
-+ node node_id
-+ min_gen birth_time anon_size file_size
-+ ...
-+ max_gen birth_time anon_size file_size
++/* Return a token to be stored in the shadow entry of a page being evicted. */
++static void *lru_gen_eviction(struct page *page)
++{
++ int hist, tier;
++ unsigned long token;
++ unsigned long min_seq;
++ struct lruvec *lruvec;
++ struct lrugen *lrugen;
++ int type = page_is_file_lru(page);
++ int usage = page_tier_usage(page);
++ struct mem_cgroup *memcg = page_memcg(page);
++ struct pglist_data *pgdat = page_pgdat(page);
+
-+Given a memcg and a node, ``min_gen`` is the oldest generation
-+(number) and ``max_gen`` is the youngest. Birth time is in
-+milliseconds. The sizes of anon and file types are in pages.
++ lruvec = mem_cgroup_lruvec(memcg, pgdat);
++ lrugen = &lruvec->evictable;
++ min_seq = READ_ONCE(lrugen->min_seq[type]);
++ token = (min_seq << LRU_USAGE_SHIFT) | usage;
+
-+Recipes
-+-------
-+:Android on ARMv8.1+: ``X=4``, ``Y=3`` and ``N=0``.
++ hist = hist_from_seq_or_gen(min_seq);
++ tier = lru_tier_from_usage(usage);
++ atomic_long_add(thp_nr_pages(page), &lrugen->evicted[hist][type][tier]);
+
-+:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
-+ ``ARM64_HW_AFDBM``.
++ return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
++}
+
-+:Laptops and workstations running Chrome on x86_64: Use the default
-+ values.
++/* Account a refaulted page based on the token stored in its shadow entry. */
++static void lru_gen_refault(struct page *page, void *shadow)
++{
++ int hist, tier, usage;
++ int memcg_id;
++ unsigned long token;
++ unsigned long min_seq;
++ struct lruvec *lruvec;
++ struct lrugen *lrugen;
++ struct pglist_data *pgdat;
++ struct mem_cgroup *memcg;
++ int type = page_is_file_lru(page);
+
-+:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
-+ to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
-+ generation ``max_gen`` and create the next generation ``max_gen+1``.
-+ ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
-+ ``swappiness`` are required to scan anon type. If swapping is not
-+ desired, set ``vm.swappiness`` to ``0``.
++ token = unpack_shadow(shadow, &memcg_id, &pgdat);
++ if (page_pgdat(page) != pgdat)
++ return;
+
-+:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
-+ [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
-+ generations less than or equal to ``gen``. ``gen`` should be less
-+ than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
-+ generations and therefore protected from the eviction. Use
-+ ``nr_to_reclaim`` to limit the number of pages to evict. Multiple
-+ command lines are supported, as is concatenation with the delimiters
-+ ``,`` and ``;``.
++ rcu_read_lock();
++ memcg = page_memcg_rcu(page);
++ if (mem_cgroup_id(memcg) != memcg_id)
++ goto unlock;
+
-+Framework
-+=========
-+For each ``lruvec``, evictable pages are divided into multiple
-+generations. The youngest generation number is stored in ``max_seq``
-+for both anon and file types as they are aged on an equal footing. The
-+oldest generation numbers are stored in ``min_seq[2]`` separately for
-+anon and file types as clean file pages can be evicted regardless of
-+swap and write-back constraints. These three variables are
-+monotonically increasing. Generation numbers are truncated into
-+``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
-+``page->flags``. The sliding window technique is used to prevent
-+truncated generation numbers from overlapping. Each truncated
-+generation number is an index to an array of per-type and per-zone
-+lists. Evictable pages are added to the per-zone lists indexed by
-+``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
-+depending on their types.
++ usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
++ token >>= LRU_USAGE_SHIFT;
+
-+Each generation is then divided into multiple tiers. Tiers represent
-+levels of usage from file descriptors only. Pages accessed N times via
-+file descriptors belong to tier order_base_2(N). Each generation
-+contains at most CONFIG_TIERS_PER_GEN tiers, and they require
-+additional CONFIG_TIERS_PER_GEN-2 bits in page->flags. In contrast to
-+moving across generations which requires the lru lock for the list
-+operations, moving across tiers only involves an atomic operation on
-+``page->flags`` and therefore has a negligible cost. A feedback loop
-+modeled after the PID controller monitors the refault rates across all
-+tiers and decides when to activate pages from which tiers in the
-+reclaim path.
++ lruvec = mem_cgroup_lruvec(memcg, pgdat);
++ lrugen = &lruvec->evictable;
++ min_seq = READ_ONCE(lrugen->min_seq[type]);
++ if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
++ goto unlock;
+
-+The framework comprises two conceptually independent components: the
-+aging and the eviction, which can be invoked separately from user
-+space for the purpose of working set estimation and proactive reclaim.
++ page_set_usage(page, usage);
+
-+Aging
-+-----
-+The aging produces young generations. Given an ``lruvec``, the aging
-+scans page tables for referenced pages of this ``lruvec``. Upon
-+finding one, the aging updates its generation number to ``max_seq``.
-+After each round of scan, the aging increments ``max_seq``.
++ hist = hist_from_seq_or_gen(min_seq);
++ tier = lru_tier_from_usage(usage);
++ atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[hist][type][tier]);
++ inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type);
++ if (tier)
++ inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type);
++unlock:
++ rcu_read_unlock();
++}
+
-+The aging maintains either a system-wide ``mm_struct`` list or
-+per-memcg ``mm_struct`` lists, and it only scans page tables of
-+processes that have been scheduled since the last scan.
++#else /* CONFIG_LRU_GEN */
+
-+The aging is due when both of ``min_seq[2]`` reach ``max_seq-1``,
-+assuming both anon and file types are reclaimable.
++static void *lru_gen_eviction(struct page *page)
++{
++ return NULL;
++}
+
-+Eviction
-+--------
-+The eviction consumes old generations. Given an ``lruvec``, the
-+eviction scans the pages on the per-zone lists indexed by either of
-+``min_seq[2]``. It first tries to select a type based on the values of
-+``min_seq[2]``. When anon and file types are both available from the
-+same generation, it selects the one that has a lower refault rate.
++static void lru_gen_refault(struct page *page, void *shadow)
++{
++}
+
-+During a scan, the eviction sorts pages according to their new
-+generation numbers, if the aging has found them referenced. It also
-+moves pages from the tiers that have higher refault rates than tier 0
-+to the next generation.
++#endif /* CONFIG_LRU_GEN */
+
-+When it finds all the per-zone lists of a selected type are empty, the
-+eviction increments ``min_seq[2]`` indexed by this selected type.
+ /**
+ * workingset_age_nonresident - age non-resident entries as LRU ages
+ * @lruvec: the lruvec that was aged
+@@ -262,12 +353,17 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
+ VM_BUG_ON_PAGE(page_count(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+
++ if (lru_gen_enabled())
++ return lru_gen_eviction(page);
+
-+To-do List
-+==========
-+KVM Optimization
-+----------------
-+Support shadow page table scanning.
+ lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ /* XXX: target_memcg can be NULL, go through lruvec */
+ memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
+ eviction = atomic_long_read(&lruvec->nonresident_age);
++ eviction >>= bucket_order;
++ eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
+ workingset_age_nonresident(lruvec, thp_nr_pages(page));
+- return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
++ return pack_shadow(memcgid, pgdat, eviction);
+ }
+
+ /**
+@@ -294,7 +390,12 @@ void workingset_refault(struct page *page, void *shadow)
+ bool workingset;
+ int memcgid;
+
+- unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
++ if (lru_gen_enabled()) {
++ lru_gen_refault(page, shadow);
++ return;
++ }
+
-+NUMA Optimization
-+-----------------
-+Optimize page table scan for NUMA.
--- \ No newline at end of file
++ eviction = unpack_shadow(shadow, &memcgid, &pgdat);
+
+ rcu_read_lock();
+ /*
+@@ -318,6 +419,8 @@ void workingset_refault(struct page *page, void *shadow)
+ goto out;
+ eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
+ refault = atomic_long_read(&eviction_lruvec->nonresident_age);
++ workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
++ eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
+
+ /*
+ * Calculate the refault distance
+@@ -335,7 +438,7 @@ void workingset_refault(struct page *page, void *shadow)
+ * longest time, so the occasional inappropriate activation
+ * leading to pressure on the active list is not a problem.
+ */
+- refault_distance = (refault - eviction) & EVICTION_MASK;
++ refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
+
+ /*
+ * The activation decision for this page is made at the level
+@@ -593,7 +696,7 @@ static int __init workingset_init(void)
+ unsigned int max_order;
+ int ret;
+
+- BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
++ BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
+ /*
+ * Calculate the eviction bucket size to cover the longest
+ * actionable refault distance, which is currently half of
+@@ -601,7 +704,7 @@ static int __init workingset_init(void)
+ * some more pages at runtime, so keep working with up to
+ * double the initial memory by using totalram_pages as-is.
+ */
+- timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
++ timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
+ max_order = fls_long(totalram_pages() - 1);
+ if (max_order > timestamp_bits)
+ bucket_order = max_order - timestamp_bits;
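
The workingset.c hunks above repack the shadow entry so that pack_shadow() only stores the memcg id and node id while the caller folds in the eviction counter and workingset bit. As a rough, user-space illustration of that bit-packing idea (the field widths below are made-up stand-ins for MEM_CGROUP_ID_SHIFT and NODES_SHIFT, not the kernel's values):

/* Toy model of pack_shadow()/unpack_shadow(); field widths are assumptions. */
#include <assert.h>
#include <stdio.h>

#define MEMCG_BITS 16	/* assumed stand-in for MEM_CGROUP_ID_SHIFT */
#define NODE_BITS   6	/* assumed stand-in for NODES_SHIFT */

static unsigned long pack(unsigned int memcg, unsigned int node, unsigned long eviction)
{
	unsigned long val = eviction;

	val = (val << MEMCG_BITS) | memcg;
	val = (val << NODE_BITS) | node;
	return val;
}

static unsigned long unpack(unsigned long val, unsigned int *memcg, unsigned int *node)
{
	*node = val & ((1UL << NODE_BITS) - 1);
	val >>= NODE_BITS;
	*memcg = val & ((1UL << MEMCG_BITS) - 1);
	return val >> MEMCG_BITS;
}

int main(void)
{
	unsigned int memcg, node;
	unsigned long eviction = unpack(pack(42, 3, 123456), &memcg, &node);

	assert(memcg == 42 && node == 3 && eviction == 123456);
	printf("memcg=%u node=%u eviction=%lu\n", memcg, node, eviction);
	return 0;
}

The refault distance computed later, (refault - eviction) masked to the remaining bits, relies on the same truncation: the subtraction wraps modulo the mask, so it stays meaningful as long as fewer than 2^bits evictions happened in between.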
diff --git a/sys-kernel_arch-sources-g14_files-0006-ACPI-PM-s2idle-Add-missing-LPS0-functions.patch b/sys-kernel_arch-sources-g14_files-0006-ACPI-PM-s2idle-Add-missing-LPS0-functions.patch
deleted file mode 100644
index ddb59718e8c8..000000000000
--- a/sys-kernel_arch-sources-g14_files-0006-ACPI-PM-s2idle-Add-missing-LPS0-functions.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 007ec62403d0a592c7f0efb3812eb4512993c388 Mon Sep 17 00:00:00 2001
-From: Alex Deucher <alexander.deucher@amd.com>
-Date: Wed, 5 May 2021 09:20:32 -0400
-Subject: [PATCH] ACPI: PM: s2idle: Add missing LPS0 functions for AMD
-
-These are supposedly not required for AMD platforms,
-but at least some HP laptops seem to require them to
-properly turn off the keyboard backlight.
-
-Based on a patch from Marcin Bachry <hegel666@gmail.com>.
-
-Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
-Reviewed-by: Hans de Goede <hdegoede@redhat.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/x86/s2idle.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 2b69536cdccb..2d7ddb8a8cb6 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -42,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
-
- /* AMD */
- #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
-+#define ACPI_LPS0_ENTRY_AMD 2
-+#define ACPI_LPS0_EXIT_AMD 3
- #define ACPI_LPS0_SCREEN_OFF_AMD 4
- #define ACPI_LPS0_SCREEN_ON_AMD 5
-
-@@ -408,6 +410,7 @@ int acpi_s2idle_prepare_late(void)
-
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-@@ -422,6 +425,7 @@ void acpi_s2idle_restore_early(void)
- return;
-
- if (acpi_s2idle_vendor_amd()) {
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
---
-GitLab
-
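
For reference, the ordering the patch above establishes on AMD systems is: screen-off followed by the LPS0 entry function when going into s2idle, and the LPS0 exit function followed by screen-on on the way back. A toy sketch of that sequence (run_lps0_dsm() is a stub standing in for the kernel's acpi_sleep_run_lps0_dsm(); the function indices come from the patch):

#include <stdio.h>

/* AMD LPS0 _DSM function indices, as listed in the patch above. */
enum { LPS0_ENTRY_AMD = 2, LPS0_EXIT_AMD = 3, LPS0_SCREEN_OFF_AMD = 4, LPS0_SCREEN_ON_AMD = 5 };

static void run_lps0_dsm(int func)	/* stub for acpi_sleep_run_lps0_dsm() */
{
	printf("evaluate LPS0 _DSM function %d\n", func);
}

static void amd_s2idle_prepare(void)	/* mirrors acpi_s2idle_prepare_late() */
{
	run_lps0_dsm(LPS0_SCREEN_OFF_AMD);
	run_lps0_dsm(LPS0_ENTRY_AMD);	/* added by this patch */
}

static void amd_s2idle_restore(void)	/* mirrors acpi_s2idle_restore_early() */
{
	run_lps0_dsm(LPS0_EXIT_AMD);	/* added by this patch */
	run_lps0_dsm(LPS0_SCREEN_ON_AMD);
}

int main(void)
{
	amd_s2idle_prepare();
	amd_s2idle_restore();
	return 0;
}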
diff --git a/sys-kernel_arch-sources-g14_files-0007-ACPI-processor-idle-Fix-up-C-state-latency.patch b/sys-kernel_arch-sources-g14_files-0007-ACPI-processor-idle-Fix-up-C-state-latency.patch
deleted file mode 100644
index 6cc65c8102d8..000000000000
--- a/sys-kernel_arch-sources-g14_files-0007-ACPI-processor-idle-Fix-up-C-state-latency.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From 7dc51b02a491b3fc79b6b4166b7c07d6d2efdb7d Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Wed, 12 May 2021 17:15:14 -0500
-Subject: [PATCH] ACPI: processor idle: Fix up C-state latency if not ordered
-
-Generally, the C-state latency is provided by the _CST method or
-FADT, but some OEM platforms using AMD Picasso, Renoir, Van Gogh,
-and Cezanne set the C2 latency greater than C3's which causes the
-C2 state to be skipped.
-
-That will block the core entering PC6, which prevents S0ix working
-properly on Linux systems.
-
-In other operating systems, the latency values are not validated and
-this does not cause problems by skipping states.
-
-To avoid this issue on Linux, detect when latencies are not an
-arithmetic progression and sort them.
-
-Link: https://gitlab.freedesktop.org/agd5f/linux/-/commit/026d186e4592c1ee9c1cb44295912d0294508725
-Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230#note_712174
-Suggested-by: Prike Liang <Prike.Liang@amd.com>
-Suggested-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-[ rjw: Subject and changelog edits ]
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/processor_idle.c | 40 +++++++++++++++++++++++++++++++++++
- 1 file changed, 40 insertions(+)
-
-diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 4e2d76b8b697..6790df5a2462 100644
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -16,6 +16,7 @@
- #include <linux/acpi.h>
- #include <linux/dmi.h>
- #include <linux/sched.h> /* need_resched() */
-+#include <linux/sort.h>
- #include <linux/tick.h>
- #include <linux/cpuidle.h>
- #include <linux/cpu.h>
-@@ -388,10 +389,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
- return;
- }
-
-+static int acpi_cst_latency_cmp(const void *a, const void *b)
-+{
-+ const struct acpi_processor_cx *x = a, *y = b;
-+
-+ if (!(x->valid && y->valid))
-+ return 0;
-+ if (x->latency > y->latency)
-+ return 1;
-+ if (x->latency < y->latency)
-+ return -1;
-+ return 0;
-+}
-+static void acpi_cst_latency_swap(void *a, void *b, int n)
-+{
-+ struct acpi_processor_cx *x = a, *y = b;
-+ u32 tmp;
-+
-+ if (!(x->valid && y->valid))
-+ return;
-+ tmp = x->latency;
-+ x->latency = y->latency;
-+ y->latency = tmp;
-+}
-+
- static int acpi_processor_power_verify(struct acpi_processor *pr)
- {
- unsigned int i;
- unsigned int working = 0;
-+ unsigned int last_latency = 0;
-+ unsigned int last_type = 0;
-+ bool buggy_latency = false;
-
- pr->power.timer_broadcast_on_state = INT_MAX;
-
-@@ -415,12 +443,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
- }
- if (!cx->valid)
- continue;
-+ if (cx->type >= last_type && cx->latency < last_latency)
-+ buggy_latency = true;
-+ last_latency = cx->latency;
-+ last_type = cx->type;
-
- lapic_timer_check_state(i, pr, cx);
- tsc_check_state(cx->type);
- working++;
- }
-
-+ if (buggy_latency) {
-+ pr_notice("FW issue: working around C-state latencies out of order\n");
-+ sort(&pr->power.states[1], max_cstate,
-+ sizeof(struct acpi_processor_cx),
-+ acpi_cst_latency_cmp,
-+ acpi_cst_latency_swap);
-+ }
-+
- lapic_timer_propagate_broadcast(pr);
-
- return (working);
---
-GitLab
-
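
To make the fix above concrete: the verify loop flags the table as buggy when a deeper C-state reports a smaller latency than a shallower one, and then re-sorts the states by latency. A small user-space sketch with made-up latency values (qsort() stands in for the kernel's sort(); this is an illustration, not the driver code):

#include <stdio.h>
#include <stdlib.h>

struct cx { int type; unsigned int latency; };	/* toy stand-in for acpi_processor_cx */

static int latency_cmp(const void *a, const void *b)
{
	const struct cx *x = a, *y = b;

	return (x->latency > y->latency) - (x->latency < y->latency);
}

int main(void)
{
	/* Made-up values mimicking the buggy firmware: C2 latency > C3 latency. */
	struct cx states[] = { {1, 1}, {2, 400}, {3, 350} };
	unsigned int last_latency = 0;
	int last_type = 0, buggy = 0;
	size_t i;

	for (i = 0; i < 3; i++) {
		if (states[i].type >= last_type && states[i].latency < last_latency)
			buggy = 1;
		last_latency = states[i].latency;
		last_type = states[i].type;
	}

	if (buggy)
		qsort(states, 3, sizeof(states[0]), latency_cmp);

	for (i = 0; i < 3; i++)
		printf("C%d latency %u us\n", states[i].type, states[i].latency);
	return 0;
}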
diff --git a/sys-kernel_arch-sources-g14_files-0008-NVMe-set-some-AMD-PCIe-downstream-storage-device-to-D3-for-s2idle.patch b/sys-kernel_arch-sources-g14_files-0008-NVMe-set-some-AMD-PCIe-downstream-storage-device-to-D3-for-s2idle.patch
deleted file mode 100644
index 424fde18a3f3..000000000000
--- a/sys-kernel_arch-sources-g14_files-0008-NVMe-set-some-AMD-PCIe-downstream-storage-device-to-D3-for-s2idle.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From a83315923e87a169ceb6c839a84ee0455868218f Mon Sep 17 00:00:00 2001
-From: Prike Liang <Prike.Liang@amd.com>
-Date: Tue, 25 May 2021 10:48:59 +0800
-Subject: [PATCH] nvme-pci: set some AMD PCIe downstream storage device to D3
- for s2idle
-
-In the NVMe controller's default suspend mode, APST handles the power-state
-transitions and the NVMe device remains in D0 during s2idle entry. The NVMe
-device is then shut down by firmware on s0ix entry, and the firmware does not
-restore the power context of third-party NVMe devices on s0ix resume. As a
-result, the NVMe device loses its power state during s2idle resume, which
-leads to request queue timeouts. So far this issue has only been found on the
-Renoir/Lucienne/Cezanne series, and it can be addressed by shutting down the
-NVMe device on s2idle entry.
-
-Link:https://lore.kernel.org/stable/20210416155653.GA31818@redsun51.ssa.fujisawa.hgst.com/T/
-
-Suggested-by: Mario Limonciello <mario.limonciello@amd.com>
-Signed-off-by: Prike Liang <Prike.Liang@amd.com>
----
- drivers/nvme/host/pci.c | 20 ++++++++++++++++++++
- 1 file changed, 20 insertions(+)
-
-diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
-index c92a15c3fbc5..07198b6d6066 100644
---- a/drivers/nvme/host/pci.c
-+++ b/drivers/nvme/host/pci.c
-@@ -26,6 +26,9 @@
- #include <linux/io-64-nonatomic-hi-lo.h>
- #include <linux/sed-opal.h>
- #include <linux/pci-p2pdma.h>
-+#ifdef CONFIG_X86
-+#include <asm/cpu_device_id.h>
-+#endif
-
- #include "trace.h"
- #include "nvme.h"
-@@ -2832,6 +2835,16 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
- }
-
- #ifdef CONFIG_ACPI
-+
-+#ifdef CONFIG_X86
-+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
-+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /*Cezanne*/
-+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /*Renoir*/
-+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),/*Lucienne*/
-+ {}
-+};
-+#endif
-+
- static bool nvme_acpi_storage_d3(struct pci_dev *dev)
- {
- struct acpi_device *adev;
-@@ -2840,6 +2853,13 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
- acpi_status status;
- u8 val;
-
-+#ifdef CONFIG_X86
-+ /*
-+ * Set the NVMe on the target platform to D3 directly by kernel power management.
-+ */
-+ if (x86_match_cpu(storage_d3_cpu_ids) && pm_suspend_default_s2idle())
-+ return true;
-+#endif
- /*
- * Look for _DSD property specifying that the storage device on the port
- * must use D3 to support deep platform power savings during
---
-GitLab
-
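
The x86_cpu_id table in the patch matches family 23, models 96 and 104 (Renoir, Lucienne) and family 25, model 80 (Cezanne). For illustration only, the sketch below derives the same family/model numbers from CPUID leaf 1 in user space with GCC's <cpuid.h>; the kernel instead uses x86_match_cpu(), and the decoding shown assumes the AMD convention of folding in the extended fields only when the base family is 0xF:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* AMD family/model decoding from CPUID leaf 1 EAX. */
	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	if (family == 0xf) {
		family += (eax >> 20) & 0xff;
		model |= ((eax >> 16) & 0xf) << 4;
	}

	if ((family == 23 && (model == 96 || model == 104)) ||
	    (family == 25 && model == 80))
		printf("Renoir/Lucienne/Cezanne: NVMe would be forced to D3 for s2idle\n");
	else
		printf("family %u model %u: default NVMe suspend path\n", family, model);
	return 0;
}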
diff --git a/sys-kernel_arch-sources-g14_files-0009-PCI-quirks-Quirk-PCI-d3hot-delay.patch b/sys-kernel_arch-sources-g14_files-0009-PCI-quirks-Quirk-PCI-d3hot-delay.patch
deleted file mode 100644
index 28e54a25fce4..000000000000
--- a/sys-kernel_arch-sources-g14_files-0009-PCI-quirks-Quirk-PCI-d3hot-delay.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 0e8cbc53ddd7f82c81bcf27af8bed6116f6baa64 Mon Sep 17 00:00:00 2001
-From: Marcin Bachry <hegel666@gmail.com>
-Date: Tue, 16 Mar 2021 15:28:51 -0400
-Subject: [PATCH] PCI: quirks: Quirk PCI d3hot delay for AMD xhci
-
-Renoir needs a similar delay.
-
-Signed-off-by: Marcin Bachry <hegel666@gmail.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
----
- drivers/pci/quirks.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 98851d00dc4d..8c929b7d335e 100644
---- a/drivers/pci/quirks.c
-+++ b/drivers/pci/quirks.c
-@@ -1904,6 +1904,9 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
- }
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
- DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
-+/* Renoir XHCI requires longer delay when transitioning from D0 to
-+ * D3hot */
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
-
- #ifdef CONFIG_X86_IO_APIC
- static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0010-platform-x86-force-LPS0-functions-for-AMD.patch b/sys-kernel_arch-sources-g14_files-0010-platform-x86-force-LPS0-functions-for-AMD.patch
deleted file mode 100644
index cbc10385bb35..000000000000
--- a/sys-kernel_arch-sources-g14_files-0010-platform-x86-force-LPS0-functions-for-AMD.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 90f10be7639e99688600792e878f6cad1157352a Mon Sep 17 00:00:00 2001
-From: Alex Deucher <alexander.deucher@amd.com>
-Date: Wed, 17 Mar 2021 10:38:42 -0400
-Subject: [PATCH] platform/x86: force LPS0 functions for AMD
-
-ACPI_LPS0_ENTRY_AMD/ACPI_LPS0_EXIT_AMD are supposedly not
-required for AMD platforms, and on some platforms they are
-not even listed in the function mask but at least some HP
-laptops seem to require them to properly support s0ix.
-
-Based on a patch from Marcin Bachry <hegel666@gmail.com>.
-
-Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Cc: Marcin Bachry <hegel666@gmail.com>
-Reviewed-by: Hans de Goede <hdegoede@redhat.com>
----
- drivers/acpi/x86/s2idle.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 2d7ddb8a8cb6..482e6b23b21a 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -368,6 +368,13 @@ static int lps0_device_attach(struct acpi_device *adev,
-
- ACPI_FREE(out_obj);
-
-+ /*
-+ * Some HP laptops require ACPI_LPS0_ENTRY_AMD/ACPI_LPS0_EXIT_AMD for proper
-+ * S0ix, but don't set the function mask correctly. Fix that up here.
-+ */
-+ if (acpi_s2idle_vendor_amd())
-+ lps0_dsm_func_mask |= (1 << ACPI_LPS0_ENTRY_AMD) | (1 << ACPI_LPS0_EXIT_AMD);
-+
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- lps0_dsm_func_mask);
-
---
-GitLab
-
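
The workaround above is a plain bitmask OR: each bit of lps0_dsm_func_mask says whether the corresponding _DSM function index may be invoked, and the patch forces the entry/exit bits on even when the firmware leaves them out of the reported mask. A minimal sketch of that idea, with a made-up initial mask value:

#include <stdio.h>

#define LPS0_ENTRY_AMD 2
#define LPS0_EXIT_AMD  3

int main(void)
{
	/* Assume firmware reported only screen-off/screen-on (bits 4 and 5). */
	unsigned int func_mask = (1u << 4) | (1u << 5);

	/* The workaround from the patch: force the entry/exit bits on. */
	func_mask |= (1u << LPS0_ENTRY_AMD) | (1u << LPS0_EXIT_AMD);

	printf("entry supported: %d, exit supported: %d\n",
	       !!(func_mask & (1u << LPS0_ENTRY_AMD)),
	       !!(func_mask & (1u << LPS0_EXIT_AMD)));
	return 0;
}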
diff --git a/sys-kernel_arch-sources-g14_files-0011-USB-pci-quirks-disable-D3cold-on-s2idle-Renoire.patch b/sys-kernel_arch-sources-g14_files-0011-USB-pci-quirks-disable-D3cold-on-s2idle-Renoire.patch
deleted file mode 100644
index 5646b10014a8..000000000000
--- a/sys-kernel_arch-sources-g14_files-0011-USB-pci-quirks-disable-D3cold-on-s2idle-Renoire.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From 0274e6962acf3a732390e9dc4bbccda8a467347c Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Wed, 5 May 2021 01:16:06 -0500
-Subject: [PATCH] usb: pci-quirks: disable D3cold on xhci suspend for s2idle on
- AMD Renoire
-
-The XHCI controller is required to enter D3hot rather than D3cold for AMD
-s2idle on this hardware generation.
-
-Otherwise, the 'Controller Not Ready' (CNR) bit is not cleared by the host
-on resume, which eventually results in xhci resume failures during the
-s2idle wakeup.
-
-Suggested-by: Prike Liang <Prike.Liang@amd.com>
-Link: https://lore.kernel.org/linux-usb/1612527609-7053-1-git-send-email-Prike.Liang@amd.com/
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
----
- drivers/usb/host/xhci-pci.c | 7 ++++++-
- drivers/usb/host/xhci.h | 1 +
- 2 files changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
-index 7bc18cf8042c..5142d74085b5 100644
---- a/drivers/usb/host/xhci-pci.c
-+++ b/drivers/usb/host/xhci-pci.c
-@@ -59,6 +59,7 @@
- #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
- #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
-
-+#define PCI_DEVICE_ID_AMD_RENOIRE_XHCI 0x1639
- #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
- #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
- #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
-@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
- xhci->quirks |= XHCI_U2_DISABLE_WAKE;
-
-+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
-+ pdev->device == PCI_DEVICE_ID_AMD_RENOIRE_XHCI)
-+ xhci->quirks |= XHCI_BROKEN_D3COLD;
-+
- if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
- xhci->quirks |= XHCI_LPM_SUPPORT;
- xhci->quirks |= XHCI_INTEL_HOST;
-@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
- * Systems with the TI redriver that loses port status change events
- * need to have the registers polled during D3, so avoid D3cold.
- */
-- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
-+ if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
- pci_d3cold_disable(pdev);
-
- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
-index ca822ad3b65b..4e171099d2cb 100644
---- a/drivers/usb/host/xhci.h
-+++ b/drivers/usb/host/xhci.h
-@@ -1892,6 +1892,7 @@ struct xhci_hcd {
- #define XHCI_DISABLE_SPARSE BIT_ULL(38)
- #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
- #define XHCI_NO_SOFT_RETRY BIT_ULL(40)
-+#define XHCI_BROKEN_D3COLD BIT_ULL(41)
-
- unsigned int num_active_eps;
- unsigned int limit_active_eps;
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch b/sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch
deleted file mode 100644
index c2ade498368f..000000000000
--- a/sys-kernel_arch-sources-g14_files-0014-acpi_unused-v2.patch
+++ /dev/null
@@ -1,209 +0,0 @@
-This is a backported version of the following patch from 5.13-rc4
-
-From 9b7ff25d129df7c4f61e08382993e1988d56f6a7 Mon Sep 17 00:00:00 2001
-From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-Date: Fri, 21 May 2021 15:13:11 +0200
-Subject: ACPI: power: Refine turning off unused power resources
-
-Commit 7e4fdeafa61f ("ACPI: power: Turn off unused power resources
-unconditionally") dropped the power resource state check from
-acpi_turn_off_unused_power_resources(), because according to the
-ACPI specification (e.g. ACPI 6.4, Section 7.2.2) the OS "may run
-the _OFF method repeatedly, even if the resource is already off".
-
-However, it turns out that some systems do not follow the
-specification in this particular respect and that commit introduced
-boot issues on them, so refine acpi_turn_off_unused_power_resources()
-to only turn off power resources without any users after device
-enumeration and restore its previous behavior in the system-wide
-resume path.
-
-Fixes: 7e4fdeafa61f ("ACPI: power: Turn off unused power resources unconditionally")
-Link: https://uefi.org/specs/ACPI/6.4/07_Power_and_Performance_Mgmt/declaring-a-power-resource-object.html#off
-BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=213019
-Reported-by: Zhang Rui <rui.zhang@intel.com>
-Tested-by: Zhang Rui <rui.zhang@intel.com>
-Reported-by: Dave Olsthoorn <dave@bewaar.me>
-Tested-by: Dave Olsthoorn <dave@bewaar.me>
-Reported-by: Shujun Wang <wsj20369@163.com>
-Tested-by: Shujun Wang <wsj20369@163.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/internal.h | 4 ++--
- drivers/acpi/power.c | 59 ++++++++++++++++++++++++++++++++++++++-----------
- drivers/acpi/scan.c | 2 +-
- drivers/acpi/sleep.c | 2 +-
- 4 files changed, 50 insertions(+), 17 deletions(-)
-
-
-
---- linux-5.12/drivers/acpi/internal.h.orig 2021-06-11 13:58:34.918396821 -0700
-+++ linux-5.12/drivers/acpi/internal.h 2021-06-11 13:59:36.571792727 -0700
-@@ -135,7 +135,7 @@
- void acpi_power_resources_list_free(struct list_head *list);
- int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
- struct list_head *list);
--int acpi_add_power_resource(acpi_handle handle);
-+struct acpi_device *acpi_add_power_resource(acpi_handle handle);
- void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
- int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
- int acpi_device_sleep_wake(struct acpi_device *dev,
-@@ -143,7 +143,7 @@
- int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
- int acpi_power_on_resources(struct acpi_device *device, int state);
- int acpi_power_transition(struct acpi_device *device, int state);
--void acpi_turn_off_unused_power_resources(void);
-+void acpi_turn_off_unused_power_resources(bool init);
-
- /* --------------------------------------------------------------------------
- Device Power Management
---- linux-5.12/drivers/acpi/power.c.orig 2021-06-11 13:58:34.918396821 -0700
-+++ linux-5.12/drivers/acpi/power.c 2021-06-11 14:00:09.121484875 -0700
-@@ -52,6 +52,7 @@
- u32 system_level;
- u32 order;
- unsigned int ref_count;
-+ unsigned int users;
- bool wakeup_enabled;
- struct mutex resource_lock;
- struct list_head dependents;
-@@ -147,6 +148,7 @@
-
- for (i = start; i < package->package.count; i++) {
- union acpi_object *element = &package->package.elements[i];
-+ struct acpi_device *rdev;
- acpi_handle rhandle;
-
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
-@@ -163,13 +165,16 @@
- if (acpi_power_resource_is_dup(package, start, i))
- continue;
-
-- err = acpi_add_power_resource(rhandle);
-- if (err)
-+ rdev = acpi_add_power_resource(rhandle);
-+ if (!rdev) {
-+ err = -ENODEV;
- break;
--
-+ }
- err = acpi_power_resources_list_add(rhandle, list);
- if (err)
- break;
-+
-+ to_power_resource(rdev)->users++;
- }
- if (err)
- acpi_power_resources_list_free(list);
-@@ -907,7 +912,7 @@
- mutex_unlock(&power_resource_list_lock);
- }
-
--int acpi_add_power_resource(acpi_handle handle)
-+struct acpi_device *acpi_add_power_resource(acpi_handle handle)
- {
- struct acpi_power_resource *resource;
- struct acpi_device *device = NULL;
-@@ -918,11 +923,11 @@
-
- acpi_bus_get_device(handle, &device);
- if (device)
-- return 0;
-+ return device;
-
- resource = kzalloc(sizeof(*resource), GFP_KERNEL);
- if (!resource)
-- return -ENOMEM;
-+ return NULL;
-
- device = &resource->device;
- acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
-@@ -960,11 +965,11 @@
-
- acpi_power_add_resource_to_list(resource);
- acpi_device_add_finalize(device);
-- return 0;
-+ return device;
-
- err:
- acpi_release_power_resource(&device->dev);
-- return result;
-+ return NULL;
- }
-
- #ifdef CONFIG_ACPI_SLEEP
-@@ -998,7 +1003,38 @@
- }
- #endif
-
--void acpi_turn_off_unused_power_resources(void)
-+static void acpi_power_turn_off_if_unused(struct acpi_power_resource *resource,
-+ bool init)
-+{
-+ if (resource->ref_count > 0)
-+ return;
-+
-+ if (init) {
-+ if (resource->users > 0)
-+ return;
-+ } else {
-+ int result, state;
-+
-+ result = acpi_power_get_state(resource->device.handle, &state);
-+ if (result || state == ACPI_POWER_RESOURCE_STATE_OFF)
-+ return;
-+ }
-+
-+ dev_info(&resource->device.dev, "Turning OFF\n");
-+ __acpi_power_off(resource);
-+}
-+
-+/**
-+ * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
-+ * @init: Control switch.
-+ *
-+ * If @ainit is set, unconditionally turn off all of the ACPI power resources
-+ * without any users.
-+ *
-+ * Otherwise, turn off all ACPI power resources without active references (that
-+ * is, the ones that should be "off" at the moment) that are "on".
-+ */
-+void acpi_turn_off_unused_power_resources(bool init)
- {
- struct acpi_power_resource *resource;
-
-@@ -1015,11 +1051,7 @@
- continue;
- }
-
-- if (state == ACPI_POWER_RESOURCE_STATE_ON
-- && !resource->ref_count) {
-- dev_info(&resource->device.dev, "Turning OFF\n");
-- __acpi_power_off(resource);
-- }
-+ acpi_power_turn_off_if_unused(resource, init);
-
- mutex_unlock(&resource->resource_lock);
- }
---- linux-5.12/drivers/acpi/scan.c.orig 2021-06-11 13:58:34.918396821 -0700
-+++ linux-5.12/drivers/acpi/scan.c 2021-06-11 13:59:36.573792709 -0700
-@@ -2394,7 +2394,7 @@
- }
- }
-
-- acpi_turn_off_unused_power_resources();
-+ acpi_turn_off_unused_power_resources(true);
-
- acpi_scan_initialized = true;
-
---- linux-5.12/drivers/acpi/sleep.c 2021-06-11 14:05:25.798799399 -0700
-+++ linux-5.12/drivers/acpi/sleep.c.new 2021-06-11 14:04:41.767145304 -0700
-@@ -504,7 +504,7 @@
- */
- static void acpi_pm_end(void)
- {
-- acpi_turn_off_unused_power_resources();
-+ acpi_turn_off_unused_power_resources(false);
- acpi_scan_lock_release();
- /*
- * This is necessary in case acpi_pm_finish() is not called during a \ No newline at end of file
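
The refined logic in this patch keys off two counters: users (how many devices reference the power resource from their _PRx lists) and ref_count (how many currently hold it on). At boot it only turns off resources that nobody references at all; on resume it only turns off resources that are still physically on. A toy sketch of that decision, with a stand-in struct and the ACPI state query reduced to a boolean (not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct power_resource {			/* toy stand-in for acpi_power_resource */
	const char *name;
	unsigned int ref_count;		/* active references: resource should stay on */
	unsigned int users;		/* devices whose _PRx lists this resource */
	bool is_on;			/* stands in for acpi_power_get_state() */
};

/* Mirrors acpi_power_turn_off_if_unused(): at init, skip resources that have
 * users; on resume, skip resources that are already off. */
static void turn_off_if_unused(struct power_resource *res, bool init)
{
	if (res->ref_count > 0)
		return;
	if (init ? res->users > 0 : !res->is_on)
		return;

	printf("%s: turning OFF\n", res->name);
	res->is_on = false;
}

int main(void)
{
	struct power_resource a = { "PWR0", 0, 1, true };	/* has users: left alone at init */
	struct power_resource b = { "PWR1", 0, 0, true };	/* no users: turned off at init */

	turn_off_if_unused(&a, true);
	turn_off_if_unused(&b, true);
	turn_off_if_unused(&a, false);	/* resume path: unreferenced and still on */
	return 0;
}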
diff --git a/sys-kernel_arch-sources-g14_files-0015-revert-4cbbe34807938e6e494e535a68d5ff64edac3f20.patch b/sys-kernel_arch-sources-g14_files-0015-revert-4cbbe34807938e6e494e535a68d5ff64edac3f20.patch
deleted file mode 100644
index 2dd5dd8a056b..000000000000
--- a/sys-kernel_arch-sources-g14_files-0015-revert-4cbbe34807938e6e494e535a68d5ff64edac3f20.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From ee5468b9f1d3bf48082eed351dace14598e8ca39 Mon Sep 17 00:00:00 2001
-From: Yifan Zhang <yifan1.zhang@amd.com>
-Date: Sat, 19 Jun 2021 11:40:54 +0800
-Subject: [PATCH] Revert "drm/amdgpu/gfx9: fix the doorbell missing when in
- CGPG issue."
-
-This reverts commit 4cbbe34807938e6e494e535a68d5ff64edac3f20.
-
-Reason for revert: side effect of enlarging CP_MEC_DOORBELL_RANGE may
-cause some APUs to fail to enter gfxoff in certain use cases.
-
-Signed-off-by: Yifan Zhang <yifan1.zhang@amd.com>
-Acked-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Cc: stable@vger.kernel.org
----
- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +-----
- 1 file changed, 1 insertion(+), 5 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-index c09225d065c2..516467e962b7 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
-@@ -3673,12 +3673,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
- if (ring->use_doorbell) {
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (adev->doorbell_index.kiq * 2) << 2);
-- /* If GC has entered CGPG, ringing doorbell > first page doesn't
-- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-- * this issue.
-- */
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-- (adev->doorbell.size - 4));
-+ (adev->doorbell_index.userqueue_end * 2) << 2);
- }
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0016-revert-1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.patch b/sys-kernel_arch-sources-g14_files-0016-revert-1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.patch
deleted file mode 100644
index af3eaf22a1b6..000000000000
--- a/sys-kernel_arch-sources-g14_files-0016-revert-1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From baacf52a473b24e10322b67757ddb92ab8d86717 Mon Sep 17 00:00:00 2001
-From: Yifan Zhang <yifan1.zhang@amd.com>
-Date: Sat, 19 Jun 2021 11:39:43 +0800
-Subject: [PATCH] Revert "drm/amdgpu/gfx10: enlarge CP_MEC_DOORBELL_RANGE_UPPER
- to cover full doorbell."
-
-This reverts commit 1c0b0efd148d5b24c4932ddb3fa03c8edd6097b3.
-
-Reason for revert: Side effect of enlarging CP_MEC_DOORBELL_RANGE may
-cause some APUs to fail to enter gfxoff in certain use cases.
-
-Signed-off-by: Yifan Zhang <yifan1.zhang@amd.com>
-Acked-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Cc: stable@vger.kernel.org
----
- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 +-----
- 1 file changed, 1 insertion(+), 5 deletions(-)
-
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-index 327b1f8213a8..0597aeb5f0e8 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
-@@ -6871,12 +6871,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
- if (ring->use_doorbell) {
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (adev->doorbell_index.kiq * 2) << 2);
-- /* If GC has entered CGPG, ringing doorbell > first page doesn't
-- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
-- * this issue.
-- */
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-- (adev->doorbell.size - 4));
-+ (adev->doorbell_index.userqueue_end * 2) << 2);
- }
-
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch b/sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch
deleted file mode 100644
index cde53bfc9175..000000000000
--- a/sys-kernel_arch-sources-g14_files-0017-5.14-ACPI-processor-idle-Fix-up-C-state-latency-if-not-ordered.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From a3c5cb660f9b75c581ed0b0616709e89a77f098a Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Wed, 12 May 2021 17:15:14 -0500
-Subject: [PATCH] ACPI: processor idle: Fix up C-state latency if not ordered
-
-Generally, the C-state latency is provided by the _CST method or
-FADT, but some OEM platforms using AMD Picasso, Renoir, Van Gogh,
-and Cezanne set the C2 latency greater than C3's which causes the
-C2 state to be skipped.
-
-That will block the core entering PC6, which prevents S0ix working
-properly on Linux systems.
-
-In other operating systems, the latency values are not validated and
-this does not cause problems by skipping states.
-
-To avoid this issue on Linux, detect when latencies are not an
-arithmetic progression and sort them.
-
-Link: https://gitlab.freedesktop.org/agd5f/linux/-/commit/026d186e4592c1ee9c1cb44295912d0294508725
-Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230#note_712174
-Suggested-by: Prike Liang <Prike.Liang@amd.com>
-Suggested-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-[ rjw: Subject and changelog edits ]
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/processor_idle.c | 40 +++++++++++++++++++++++++++++++++++
- 1 file changed, 40 insertions(+)
-
-diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 4e2d76b8b697..6790df5a2462 100644
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -16,6 +16,7 @@
- #include <linux/acpi.h>
- #include <linux/dmi.h>
- #include <linux/sched.h> /* need_resched() */
-+#include <linux/sort.h>
- #include <linux/tick.h>
- #include <linux/cpuidle.h>
- #include <linux/cpu.h>
-@@ -388,10 +389,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
- return;
- }
-
-+static int acpi_cst_latency_cmp(const void *a, const void *b)
-+{
-+ const struct acpi_processor_cx *x = a, *y = b;
-+
-+ if (!(x->valid && y->valid))
-+ return 0;
-+ if (x->latency > y->latency)
-+ return 1;
-+ if (x->latency < y->latency)
-+ return -1;
-+ return 0;
-+}
-+static void acpi_cst_latency_swap(void *a, void *b, int n)
-+{
-+ struct acpi_processor_cx *x = a, *y = b;
-+ u32 tmp;
-+
-+ if (!(x->valid && y->valid))
-+ return;
-+ tmp = x->latency;
-+ x->latency = y->latency;
-+ y->latency = tmp;
-+}
-+
- static int acpi_processor_power_verify(struct acpi_processor *pr)
- {
- unsigned int i;
- unsigned int working = 0;
-+ unsigned int last_latency = 0;
-+ unsigned int last_type = 0;
-+ bool buggy_latency = false;
-
- pr->power.timer_broadcast_on_state = INT_MAX;
-
-@@ -415,12 +443,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
- }
- if (!cx->valid)
- continue;
-+ if (cx->type >= last_type && cx->latency < last_latency)
-+ buggy_latency = true;
-+ last_latency = cx->latency;
-+ last_type = cx->type;
-
- lapic_timer_check_state(i, pr, cx);
- tsc_check_state(cx->type);
- working++;
- }
-
-+ if (buggy_latency) {
-+ pr_notice("FW issue: working around C-state latencies out of order\n");
-+ sort(&pr->power.states[1], max_cstate,
-+ sizeof(struct acpi_processor_cx),
-+ acpi_cst_latency_cmp,
-+ acpi_cst_latency_swap);
-+ }
-+
- lapic_timer_propagate_broadcast(pr);
-
- return (working);
---
-GitLab
-
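The patch above works in two steps: it first detects that the firmware-reported C-state latencies are not monotonically non-decreasing, and only then sorts the states by latency. Below is a minimal standalone C sketch of the same detect-then-sort idea; it is simplified (it ignores the C-state type check, uses qsort() instead of the kernel's sort()), and the latency values are invented for illustration only:

```
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct cstate {
	int type;              /* C1, C2, C3, ... */
	unsigned int latency;  /* exit latency in microseconds */
};

static int latency_cmp(const void *a, const void *b)
{
	const struct cstate *x = a, *y = b;

	/* ascending by latency, mirroring acpi_cst_latency_cmp() above */
	return (x->latency > y->latency) - (x->latency < y->latency);
}

int main(void)
{
	/* example of the buggy firmware case: C2 reports a larger latency than C3 */
	struct cstate states[] = { { 1, 1 }, { 2, 400 }, { 3, 350 } };
	size_t i, n = sizeof(states) / sizeof(states[0]);
	unsigned int last_latency = 0;
	bool buggy_latency = false;

	for (i = 0; i < n; i++) {
		if (states[i].latency < last_latency)
			buggy_latency = true;
		last_latency = states[i].latency;
	}

	if (buggy_latency)
		qsort(states, n, sizeof(states[0]), latency_cmp);

	for (i = 0; i < n; i++)
		printf("C%d: %u us\n", states[i].type, states[i].latency);

	return 0;
}
```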
diff --git a/sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch b/sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch
deleted file mode 100644
index 18f4baf9eb2c..000000000000
--- a/sys-kernel_arch-sources-g14_files-0019-5.14-nvme-pci-look-for-StorageD3Enable-on-companion-ACPI-device.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From 137bd7494abeceab04bf62eeec4647db5048353a Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Fri, 28 May 2021 11:02:34 -0500
-Subject: [PATCH] nvme-pci: look for StorageD3Enable on companion ACPI device
- instead
-
-The documentation around the StorageD3Enable property hints that it
-should be set on the PCI device. This is where newer AMD systems set
-the property, and it is required for S0i3 support.
-
-So rather than look for child nodes of the root port that are only
-present on Intel systems, switch to the companion ACPI device for all
-systems. David Box from Intel indicated this should work on Intel as well.
-
-Link: https://lore.kernel.org/linux-nvme/YK6gmAWqaRmvpJXb@google.com/T/#m900552229fa455867ee29c33b854845fce80ba70
-Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
-Fixes: df4f9bc4fb9c ("nvme-pci: add support for ACPI StorageD3Enable property")
-Suggested-by: Liang Prike <Prike.Liang@amd.com>
-Acked-by: Raul E Rangel <rrangel@chromium.org>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-Reviewed-by: David E. Box <david.e.box@linux.intel.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
----
- drivers/nvme/host/pci.c | 24 +-----------------------
- 1 file changed, 1 insertion(+), 23 deletions(-)
-
-diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
-index c92a15c3fbc5..60c1c83e03fa 100644
---- a/drivers/nvme/host/pci.c
-+++ b/drivers/nvme/host/pci.c
-@@ -2834,10 +2834,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
- #ifdef CONFIG_ACPI
- static bool nvme_acpi_storage_d3(struct pci_dev *dev)
- {
-- struct acpi_device *adev;
-- struct pci_dev *root;
-- acpi_handle handle;
-- acpi_status status;
-+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- u8 val;
-
- /*
-@@ -2845,28 +2842,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
- * must use D3 to support deep platform power savings during
- * suspend-to-idle.
- */
-- root = pcie_find_root_port(dev);
-- if (!root)
-- return false;
-
-- adev = ACPI_COMPANION(&root->dev);
- if (!adev)
- return false;
--
-- /*
-- * The property is defined in the PXSX device for South complex ports
-- * and in the PEGP device for North complex ports.
-- */
-- status = acpi_get_handle(adev->handle, "PXSX", &handle);
-- if (ACPI_FAILURE(status)) {
-- status = acpi_get_handle(adev->handle, "PEGP", &handle);
-- if (ACPI_FAILURE(status))
-- return false;
-- }
--
-- if (acpi_bus_get_device(handle, &adev))
-- return false;
--
- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
- &val))
- return false;
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch b/sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch
deleted file mode 100644
index 1526dad95c09..000000000000
--- a/sys-kernel_arch-sources-g14_files-0020-5.14-ACPI-Check-StorageD3Enable_DSD-property-in-AHCI-mode.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-From 183b978ab6434645aca18091e7c1458bad7590dc Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Wed, 9 Jun 2021 13:40:17 -0500
-Subject: [PATCH] ACPI: Check StorageD3Enable _DSD property in ACPI code
-
-Although first implemented for NVMe, this check may be usable by
-other drivers as well. Microsoft's specification explicitly mentions
-that it may be usable by SATA and AHCI devices. Google also indicates
-that they have used this with SDHCI in a downstream kernel tree into
-which a user can plug a storage device.
-
-Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
-Suggested-by: Keith Busch <kbusch@kernel.org>
-CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
-CC: Alexander Deucher <Alexander.Deucher@amd.com>
-CC: Rafael J. Wysocki <rjw@rjwysocki.net>
-CC: Prike Liang <prike.liang@amd.com>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
----
- drivers/acpi/device_pm.c | 29 +++++++++++++++++++++++++++++
- drivers/nvme/host/pci.c | 28 +---------------------------
- include/linux/acpi.h | 5 +++++
- 3 files changed, 35 insertions(+), 27 deletions(-)
-
-diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index 58876248b192..1e278785c7db 100644
---- a/drivers/acpi/device_pm.c
-+++ b/drivers/acpi/device_pm.c
-@@ -1337,4 +1337,33 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
- return 1;
- }
- EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
-+
-+/**
-+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
-+ * @dev: Device to check
-+ *
-+ * Return %true if the platform firmware wants @dev to be programmed
-+ * into D3hot or D3cold (if supported) in the suspend path, or %false
-+ * when there is no specific preference. On some platforms, if this
-+ * hint is ignored, @dev may remain unresponsive after suspending the
-+ * platform as a whole.
-+ *
-+ * Although the property has "storage" in its name, it is actually
-+ * applied to the PCIe slot, so if a non-storage device is plugged in,
-+ * the same platform restrictions will likely apply.
-+ */
-+bool acpi_storage_d3(struct device *dev)
-+{
-+ struct acpi_device *adev = ACPI_COMPANION(dev);
-+ u8 val;
-+
-+ if (!adev)
-+ return false;
-+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-+ &val))
-+ return false;
-+ return val == 1;
-+}
-+EXPORT_SYMBOL_GPL(acpi_storage_d3);
-+
- #endif /* CONFIG_PM */
-diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
-index 60c1c83e03fa..8593161d4da0 100644
---- a/drivers/nvme/host/pci.c
-+++ b/drivers/nvme/host/pci.c
-@@ -2831,32 +2831,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
- return 0;
- }
-
--#ifdef CONFIG_ACPI
--static bool nvme_acpi_storage_d3(struct pci_dev *dev)
--{
-- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
-- u8 val;
--
-- /*
-- * Look for _DSD property specifying that the storage device on the port
-- * must use D3 to support deep platform power savings during
-- * suspend-to-idle.
-- */
--
-- if (!adev)
-- return false;
-- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-- &val))
-- return false;
-- return val == 1;
--}
--#else
--static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
--{
-- return false;
--}
--#endif /* CONFIG_ACPI */
--
- static void nvme_async_probe(void *data, async_cookie_t cookie)
- {
- struct nvme_dev *dev = data;
-@@ -2906,7 +2880,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-
- quirks |= check_vendor_combination_bug(pdev);
-
-- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
-+ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
- /*
- * Some systems use a bios work around to ask for D3 on
- * platforms that support kernel managed suspend.
-diff --git a/include/linux/acpi.h b/include/linux/acpi.h
-index 3bdcfc4401b7..4dfe15c2933d 100644
---- a/include/linux/acpi.h
-+++ b/include/linux/acpi.h
-@@ -999,6 +999,7 @@ int acpi_dev_resume(struct device *dev);
- int acpi_subsys_runtime_suspend(struct device *dev);
- int acpi_subsys_runtime_resume(struct device *dev);
- int acpi_dev_pm_attach(struct device *dev, bool power_on);
-+bool acpi_storage_d3(struct device *dev);
- #else
- static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
- static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
-@@ -1006,6 +1007,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
- {
- return 0;
- }
-+static inline bool acpi_storage_d3(struct device *dev)
-+{
-+ return false;
-+}
- #endif
-
- #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
---
-GitLab
-
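The new acpi_storage_d3() helper above is intended to be reusable by storage drivers beyond NVMe (the commit message mentions SATA/AHCI and SDHCI). Below is a hedged sketch of how a hypothetical PCI storage driver could consult it at probe time; the driver and function names are invented for illustration, and only acpi_storage_d3() itself comes from the patch:

```
#include <linux/acpi.h>
#include <linux/pci.h>

/* Hypothetical example driver, not part of this patch set. */
static int example_storage_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	/*
	 * If the platform firmware requests D3 for suspend-to-idle via the
	 * StorageD3Enable _DSD property (or a CPU quirk, see the next patch),
	 * the driver should let the device go to D3 instead of relying on a
	 * lighter-weight driver-specific suspend path.
	 */
	if (acpi_storage_d3(&pdev->dev))
		dev_info(&pdev->dev, "platform requests D3 for suspend-to-idle\n");

	return 0;
}
```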
diff --git a/sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch b/sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch
deleted file mode 100644
index 14ef4d23ad19..000000000000
--- a/sys-kernel_arch-sources-g14_files-0021-5.14-ACPI-Add-quirks-for-AMD-Renoir+Lucienne-CPUs-to-force-the-D3-hint.patch
+++ /dev/null
@@ -1,122 +0,0 @@
-From d406ff9472c867a9777d1ae8ba7cb7a66a9a0b51 Mon Sep 17 00:00:00 2001
-From: Mario Limonciello <mario.limonciello@amd.com>
-Date: Wed, 9 Jun 2021 13:40:18 -0500
-Subject: [PATCH] ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3
- hint
-
-AMD systems from Renoir and Lucienne require that the NVME controller
-is put into D3 over a Modern Standby / suspend-to-idle
-cycle. This is "typically" accomplished using the `StorageD3Enable`
-property in the _DSD, but this property was introduced after many
-of these systems launched and most OEM systems don't have it in
-their BIOS.
-
-On AMD Renoir without these drives going into D3 over suspend-to-idle
-the resume will fail with the NVME controller being reset and a trace
-like this in the kernel logs:
-```
-[ 83.556118] nvme nvme0: I/O 161 QID 2 timeout, aborting
-[ 83.556178] nvme nvme0: I/O 162 QID 2 timeout, aborting
-[ 83.556187] nvme nvme0: I/O 163 QID 2 timeout, aborting
-[ 83.556196] nvme nvme0: I/O 164 QID 2 timeout, aborting
-[ 95.332114] nvme nvme0: I/O 25 QID 0 timeout, reset controller
-[ 95.332843] nvme nvme0: Abort status: 0x371
-[ 95.332852] nvme nvme0: Abort status: 0x371
-[ 95.332856] nvme nvme0: Abort status: 0x371
-[ 95.332859] nvme nvme0: Abort status: 0x371
-[ 95.332909] PM: dpm_run_callback(): pci_pm_resume+0x0/0xe0 returns -16
-[ 95.332936] nvme 0000:03:00.0: PM: failed to resume async: error -16
-```
-
-The Microsoft documentation for StorageD3Enable mentioned that Windows has
-a hardcoded allowlist for D3 support, which was used for these platforms.
-Introduce quirks to hardcode them for Linux as well.
-
-As this property is now "standardized", OEM systems using AMD Cezanne and
-newer APUs have adopted it, and quirks like this should not be
-necessary.
-
-CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
-CC: Alexander Deucher <Alexander.Deucher@amd.com>
-CC: Prike Liang <prike.liang@amd.com>
-Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-Tested-by: Julian Sikorski <belegdol@gmail.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
----
- drivers/acpi/device_pm.c | 3 +++
- drivers/acpi/internal.h | 9 +++++++++
- drivers/acpi/x86/utils.c | 25 +++++++++++++++++++++++++
- 3 files changed, 37 insertions(+)
-
-diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index 1e278785c7db..28f629a3d95c 100644
---- a/drivers/acpi/device_pm.c
-+++ b/drivers/acpi/device_pm.c
-@@ -1357,6 +1357,9 @@ bool acpi_storage_d3(struct device *dev)
- struct acpi_device *adev = ACPI_COMPANION(dev);
- u8 val;
-
-+ if (force_storage_d3())
-+ return true;
-+
- if (!adev)
- return false;
- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
-index cb8f70842249..96471be3f0c8 100644
---- a/drivers/acpi/internal.h
-+++ b/drivers/acpi/internal.h
-@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
- static inline void suspend_nvs_restore(void) {}
- #endif
-
-+#ifdef CONFIG_X86
-+bool force_storage_d3(void);
-+#else
-+static inline bool force_storage_d3(void)
-+{
-+ return false;
-+}
-+#endif
-+
- /*--------------------------------------------------------------------------
- Device properties
- -------------------------------------------------------------------------- */
-diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
-index bdc1ba00aee9..f22f23933063 100644
---- a/drivers/acpi/x86/utils.c
-+++ b/drivers/acpi/x86/utils.c
-@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
-
- return ret;
- }
-+
-+/*
-+ * AMD systems from Renoir and Lucienne *require* that the NVME controller
-+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
-+ *
-+ * This is "typically" accomplished using the `StorageD3Enable`
-+ * property in the _DSD that is checked via the `acpi_storage_d3` function
-+ * but this property was introduced after many of these systems launched
-+ * and most OEM systems don't have it in their BIOS.
-+ *
-+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
-+ * a hardcoded allowlist for D3 support, which was used for these platforms.
-+ *
-+ * This allows quirking on Linux in a similar fashion.
-+ */
-+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
-+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
-+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
-+ {}
-+};
-+
-+bool force_storage_d3(void)
-+{
-+ return x86_match_cpu(storage_d3_cpu_ids);
-+}
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch b/sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch
deleted file mode 100644
index b4616cbe47ce..000000000000
--- a/sys-kernel_arch-sources-g14_files-0022-5.14-ACPI-PM-s2idle-Add-missing-LPS0-functions-for-AMD.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 0873305b912e5039e4d9bf2f81de3bc9cf9eae07 Mon Sep 17 00:00:00 2001
-From: Alex Deucher <alexander.deucher@amd.com>
-Date: Wed, 5 May 2021 09:20:32 -0400
-Subject: [PATCH] ACPI: PM: s2idle: Add missing LPS0 functions for AMD
-
-These are supposedly not required for AMD platforms,
-but at least some HP laptops seem to require them to
-properly turn off the keyboard backlight.
-
-Based on a patch from Marcin Bachry <hegel666@gmail.com>.
-
-Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
-Reviewed-by: Hans de Goede <hdegoede@redhat.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
----
- drivers/acpi/x86/s2idle.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 2b69536cdccb..2d7ddb8a8cb6 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -42,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
-
- /* AMD */
- #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
-+#define ACPI_LPS0_ENTRY_AMD 2
-+#define ACPI_LPS0_EXIT_AMD 3
- #define ACPI_LPS0_SCREEN_OFF_AMD 4
- #define ACPI_LPS0_SCREEN_ON_AMD 5
-
-@@ -408,6 +410,7 @@ int acpi_s2idle_prepare_late(void)
-
- if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-@@ -422,6 +425,7 @@ void acpi_s2idle_restore_early(void)
- return;
-
- if (acpi_s2idle_vendor_amd()) {
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
- } else {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch b/sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch
deleted file mode 100644
index 907e56346d17..000000000000
--- a/sys-kernel_arch-sources-g14_files-0023-5.14-1of5-ACPI-PM-s2idle-Use-correct-revision-id.patch
+++ /dev/null
@@ -1,144 +0,0 @@
-From mboxrd@z Thu Jan 1 00:00:00 1970
-Return-Path: <linux-acpi-owner@kernel.org>
-From: Mario Limonciello <mario.limonciello@amd.com>
-To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
- Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
-Cc: Julian Sikorski <belegdol@gmail.com>, teohhanhui@gmail.com,
- Shyam-sundar.S-k@amd.com,
- Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-Subject: [PATCH 1/5] ACPI: PM: s2idle: Use correct revision id
-Date: Thu, 17 Jun 2021 11:42:08 -0500
-Message-Id: <20210617164212.584-1-mario.limonciello@amd.com>
-X-Mailer: git-send-email 2.25.1
-Content-Transfer-Encoding: 8bit
-Content-Type: text/plain
-MIME-Version: 1.0
-Precedence: bulk
-List-ID: <linux-acpi.vger.kernel.org>
-X-Mailing-List: linux-acpi@vger.kernel.org
-List-Archive: <https://lore.kernel.org/linux-acpi/>
-
-From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-
-The AMD spec mentions only revision 0. With this change, the
-device constraint list is populated properly.
-
-Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
----
- drivers/acpi/x86/s2idle.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 2d7ddb8a8cb6..da27c1c45c9f 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -96,7 +96,7 @@ static void lpi_device_get_constraints_amd(void)
- int i, j, k;
-
- out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
-- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
-+ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
- NULL, ACPI_TYPE_PACKAGE);
-
- if (!out_obj)
---
-2.25.1
-
-
diff --git a/sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch b/sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch
deleted file mode 100644
index 3b10381a8b49..000000000000
--- a/sys-kernel_arch-sources-g14_files-0024-5.14-2of5-ACPI-PM-s2idle-Refactor-common-code.patch
+++ /dev/null
@@ -1,258 +0,0 @@
-From mboxrd@z Thu Jan 1 00:00:00 1970
-Return-Path: <linux-acpi-owner@kernel.org>
-From: Mario Limonciello <mario.limonciello@amd.com>
-To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
- Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
-Cc: Julian Sikorski <belegdol@gmail.com>, teohhanhui@gmail.com,
- Shyam-sundar.S-k@amd.com,
- Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>,
- Mario Limonciello <mario.limonciello@amd.com>
-Subject: [PATCH 2/5] ACPI: PM: s2idle: Refactor common code
-Date: Thu, 17 Jun 2021 11:42:09 -0500
-Message-Id: <20210617164212.584-2-mario.limonciello@amd.com>
-X-Mailer: git-send-email 2.25.1
-In-Reply-To: <20210617164212.584-1-mario.limonciello@amd.com>
-References: <20210617164212.584-1-mario.limonciello@amd.com>
-Content-Transfer-Encoding: 8bit
-Content-Type: text/plain
-MIME-Version: 1.0
-Precedence: bulk
-List-ID: <linux-acpi.vger.kernel.org>
-X-Mailing-List: linux-acpi@vger.kernel.org
-List-Archive: <https://lore.kernel.org/linux-acpi/>
-
-From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-
-Refactor common code to prepare for upcoming changes.
-* Remove unused struct.
-* Print error before returning.
-* Free the ACPI object if the _DSM type is not as expected.
-* Treat lps0_dsm_func_mask as an integer rather than a character.
-* Remove the extra out_obj variable.
-* Move rev_id.
-
-Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
----
- drivers/acpi/x86/s2idle.c | 67 ++++++++++++++++++++-------------------
- 1 file changed, 35 insertions(+), 32 deletions(-)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index da27c1c45c9f..c0cba025072f 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -49,7 +49,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
-
- static acpi_handle lps0_device_handle;
- static guid_t lps0_dsm_guid;
--static char lps0_dsm_func_mask;
-+static int lps0_dsm_func_mask;
-
- /* Device constraint entry structure */
- struct lpi_device_info {
-@@ -70,15 +70,7 @@ struct lpi_constraints {
- int min_dstate;
- };
-
--/* AMD */
--/* Device constraint entry structure */
--struct lpi_device_info_amd {
-- int revision;
-- int count;
-- union acpi_object *package;
--};
--
--/* Constraint package structure */
-+/* AMD Constraint package structure */
- struct lpi_device_constraint_amd {
- char *name;
- int enabled;
-@@ -99,12 +91,12 @@ static void lpi_device_get_constraints_amd(void)
- rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
- NULL, ACPI_TYPE_PACKAGE);
-
-- if (!out_obj)
-- return;
--
- acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
- out_obj ? "successful" : "failed");
-
-+ if (!out_obj)
-+ return;
-+
- for (i = 0; i < out_obj->package.count; i++) {
- union acpi_object *package = &out_obj->package.elements[i];
-
-@@ -336,11 +328,33 @@ static bool acpi_s2idle_vendor_amd(void)
- return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
- }
-
-+static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
-+{
-+ union acpi_object *obj;
-+ int ret = -EINVAL;
-+
-+ guid_parse(uuid, dsm_guid);
-+ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
-+
-+ /* Check if the _DSM is present and as expected. */
-+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
-+ obj->buffer.length > sizeof(u32)) {
-+ acpi_handle_debug(handle,
-+ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
-+ goto out;
-+ }
-+
-+ ret = *(int *)obj->buffer.pointer;
-+ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
-+
-+out:
-+ ACPI_FREE(obj);
-+ return ret;
-+}
-+
- static int lps0_device_attach(struct acpi_device *adev,
- const struct acpi_device_id *not_used)
- {
-- union acpi_object *out_obj;
--
- if (lps0_device_handle)
- return 0;
-
-@@ -348,28 +362,17 @@ static int lps0_device_attach(struct acpi_device *adev,
- return 0;
-
- if (acpi_s2idle_vendor_amd()) {
-- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
-- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
- rev_id = 0;
-+ lps0_dsm_func_mask = validate_dsm(adev->handle,
-+ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
- } else {
-- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
-- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
- rev_id = 1;
-+ lps0_dsm_func_mask = validate_dsm(adev->handle,
-+ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- }
-
-- /* Check if the _DSM is present and as expected. */
-- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
-- acpi_handle_debug(adev->handle,
-- "_DSM function 0 evaluation failed\n");
-- return 0;
-- }
--
-- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
--
-- ACPI_FREE(out_obj);
--
-- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
-- lps0_dsm_func_mask);
-+ if (lps0_dsm_func_mask < 0)
-+ return 0; /* function eval failed */
-
- lps0_device_handle = adev->handle;
-
---
-2.25.1
-
-
diff --git a/sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch b/sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch
deleted file mode 100644
index 9476b6f0ebd7..000000000000
--- a/sys-kernel_arch-sources-g14_files-0025-5.14-3of5-ACPI-PM-s2idle-Add-support-for-multiple-func-mask.patch
+++ /dev/null
@@ -1,196 +0,0 @@
-From mboxrd@z Thu Jan 1 00:00:00 1970
-Return-Path: <linux-acpi-owner@kernel.org>
-From: Mario Limonciello <mario.limonciello@amd.com>
-To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
- Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
-Cc: Julian Sikorski <belegdol@gmail.com>, teohhanhui@gmail.com,
- Shyam-sundar.S-k@amd.com,
- Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-Subject: [PATCH 3/5] ACPI: PM: s2idle: Add support for multiple func mask
-Date: Thu, 17 Jun 2021 11:42:10 -0500
-Message-Id: <20210617164212.584-3-mario.limonciello@amd.com>
-X-Mailer: git-send-email 2.25.1
-In-Reply-To: <20210617164212.584-1-mario.limonciello@amd.com>
-References: <20210617164212.584-1-mario.limonciello@amd.com>
-Content-Transfer-Encoding: 8bit
-Content-Type: text/plain
-MIME-Version: 1.0
-Precedence: bulk
-List-ID: <linux-acpi.vger.kernel.org>
-X-Mailing-List: linux-acpi@vger.kernel.org
-List-Archive: <https://lore.kernel.org/linux-acpi/>
-
-From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-
-Required for a follow-up patch that adds a new UUID
-needing a new function mask.
-
-Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
----
- drivers/acpi/x86/s2idle.c | 31 ++++++++++++++++++++-----------
- 1 file changed, 20 insertions(+), 11 deletions(-)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index c0cba025072f..0d19669ac7ad 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -309,14 +309,15 @@ static void lpi_check_constraints(void)
- }
- }
-
--static void acpi_sleep_run_lps0_dsm(unsigned int func)
-+static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
- {
- union acpi_object *out_obj;
-
-- if (!(lps0_dsm_func_mask & (1 << func)))
-+ if (!(func_mask & (1 << func)))
- return;
-
-- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
-+ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
-+ rev_id, func, NULL);
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
-@@ -412,11 +413,15 @@ int acpi_s2idle_prepare_late(void)
- lpi_check_constraints();
-
- if (acpi_s2idle_vendor_amd()) {
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
- }
-
- return 0;
-@@ -428,11 +433,15 @@ void acpi_s2idle_restore_early(void)
- return;
-
- if (acpi_s2idle_vendor_amd()) {
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
- } else {
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
-- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
-+ lps0_dsm_func_mask, lps0_dsm_guid);
- }
- }
-
---
-2.25.1
-
-
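The change above (re-applied later in this commit via the 5.14 backport patch) boils down to passing the _DSM function mask and GUID into acpi_sleep_run_lps0_dsm() explicitly instead of reading the globals, so that a second (Microsoft) mask can later drive the same helper. A minimal, illustrative sketch of the bitmask gating; the helper name and mask value below are invented for the example, not the kernel's:

```c
#include <stdio.h>

/* A _DSM function is only evaluated when its bit is set in the mask
 * that firmware advertised for the chosen UUID. */
static void run_lps0_func(unsigned int func, unsigned int func_mask)
{
	if (!(func_mask & (1U << func))) {
		printf("func %u not advertised, skipping\n", func);
		return;
	}
	printf("evaluating _DSM func %u\n", func);
}

int main(void)
{
	unsigned int mask = 0x7a;	/* example: bits 1,3,4,5,6 set */

	run_lps0_func(5, mask);		/* advertised -> evaluated */
	run_lps0_func(2, mask);		/* not advertised -> skipped */
	return 0;
}
```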
diff --git a/sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch b/sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch
deleted file mode 100644
index cfbbc8a25587..000000000000
--- a/sys-kernel_arch-sources-g14_files-0026-5.14-4of5-ACPI-PM-s2idle-Add-support-for-new-Microsoft-UUID.patch
+++ /dev/null
@@ -1,223 +0,0 @@
-From mboxrd@z Thu Jan 1 00:00:00 1970
-From: Mario Limonciello <mario.limonciello@amd.com>
-To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
- Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
-Cc: Julian Sikorski <belegdol@gmail.com>, teohhanhui@gmail.com,
- Shyam-sundar.S-k@amd.com,
- Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>,
- Mario Limonciello <mario.limonciello@amd.com>
-Subject: [PATCH 4/5] ACPI: PM: s2idle: Add support for new Microsoft UUID
-Date: Thu, 17 Jun 2021 11:42:11 -0500
-Message-Id: <20210617164212.584-4-mario.limonciello@amd.com>
-X-Mailer: git-send-email 2.25.1
-In-Reply-To: <20210617164212.584-1-mario.limonciello@amd.com>
-References: <20210617164212.584-1-mario.limonciello@amd.com>
-
-From: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
-
-This adds support for _DSM notifications to the Microsoft UUID
-described by Microsoft documentation for s2idle.
-
-Link: https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/modern-standby-firmware-notifications
-Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
-Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
----
- drivers/acpi/x86/s2idle.c | 34 ++++++++++++++++++++++++++++++----
- 1 file changed, 30 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 0d19669ac7ad..3f2a90648ec9 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
- {"", },
- };
-
-+/* Microsoft platform agnostic UUID */
-+#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
-+
- #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
-
- #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
-@@ -39,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
- #define ACPI_LPS0_SCREEN_ON 4
- #define ACPI_LPS0_ENTRY 5
- #define ACPI_LPS0_EXIT 6
-+#define ACPI_LPS0_MS_ENTRY 7
-+#define ACPI_LPS0_MS_EXIT 8
-
- /* AMD */
- #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
-@@ -51,6 +56,9 @@ static acpi_handle lps0_device_handle;
- static guid_t lps0_dsm_guid;
- static int lps0_dsm_func_mask;
-
-+static guid_t lps0_dsm_guid_microsoft;
-+static int lps0_dsm_func_mask_microsoft;
-+
- /* Device constraint entry structure */
- struct lpi_device_info {
- char *name;
-@@ -366,14 +374,18 @@ static int lps0_device_attach(struct acpi_device *adev,
- rev_id = 0;
- lps0_dsm_func_mask = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
-+ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
-+ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
-+ &lps0_dsm_guid_microsoft);
- } else {
- rev_id = 1;
- lps0_dsm_func_mask = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
-+ lps0_dsm_func_mask_microsoft = -EINVAL;
- }
-
-- if (lps0_dsm_func_mask < 0)
-- return 0;//function eval failed
-+ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
-+ return 0; //function evaluation failed
-
- lps0_device_handle = adev->handle;
-
-@@ -412,7 +424,14 @@ int acpi_s2idle_prepare_late(void)
- if (pm_debug_messages_on)
- lpi_check_constraints();
-
-- if (acpi_s2idle_vendor_amd()) {
-+ if (lps0_dsm_func_mask_microsoft > 0) {
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
-@@ -432,7 +451,14 @@ void acpi_s2idle_restore_early(void)
- if (!lps0_device_handle || sleep_no_lps0)
- return;
-
-- if (acpi_s2idle_vendor_amd()) {
-+ if (lps0_dsm_func_mask_microsoft > 0) {
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
-+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-+ } else if (acpi_s2idle_vendor_amd()) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
- lps0_dsm_func_mask, lps0_dsm_guid);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
---
-2.25.1
-
-
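The net effect of the deleted patch (kept via the backport added below) is a preference order when deciding which _DSM set to drive during suspend and resume: the Microsoft UUID if its mask validated, otherwise the AMD path, otherwise the legacy LPS0 functions. A purely illustrative sketch of that control flow, with both flags hard-coded:

```c
#include <stdio.h>

int main(void)
{
	int ms_func_mask = 0x3;	/* hypothetical: Microsoft UUID validated */
	int amd_platform = 1;	/* hypothetical: acpi_s2idle_vendor_amd() is true */

	if (ms_func_mask > 0)
		printf("use Microsoft UUID functions (screen off/on, MS entry/exit)\n");
	else if (amd_platform)
		printf("use AMD UUID functions\n");
	else
		printf("use legacy LPS0 functions\n");
	return 0;
}
```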
diff --git a/sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch b/sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch
deleted file mode 100644
index 4d3ae9343976..000000000000
--- a/sys-kernel_arch-sources-g14_files-0027-5.14-5of5-ACPI-PM-s2idle-Adjust-behavior-for-field-problems-on-AMD-systems.patch
+++ /dev/null
@@ -1,168 +0,0 @@
-From mboxrd@z Thu Jan 1 00:00:00 1970
-From: Mario Limonciello <mario.limonciello@amd.com>
-To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
- Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
-Cc: Julian Sikorski <belegdol@gmail.com>, teohhanhui@gmail.com,
- Shyam-sundar.S-k@amd.com,
- Mario Limonciello <mario.limonciello@amd.com>
-Subject: [PATCH 5/5] ACPI: PM: Adjust behavior for field problems on AMD systems
-Date: Thu, 17 Jun 2021 11:42:12 -0500
-Message-Id: <20210617164212.584-5-mario.limonciello@amd.com>
-X-Mailer: git-send-email 2.25.1
-In-Reply-To: <20210617164212.584-1-mario.limonciello@amd.com>
-References: <20210617164212.584-1-mario.limonciello@amd.com>
-
-Some AMD Systems with uPEP _HID AMD004/AMDI005 have an off by one bug
-in their function mask return. This means that they will call entrance
-but not exit for matching functions.
-
-Other AMD systems with this HID should use the Microsoft generic UUID.
-
-AMD systems with uPEP HID AMDI006 should be using the Microsoft method.
-
-Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
----
- drivers/acpi/x86/s2idle.c | 15 +++++++++++++++
- 1 file changed, 15 insertions(+)
-
-diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
-index 3f2a90648ec9..816bf2c34b7a 100644
---- a/drivers/acpi/x86/s2idle.c
-+++ b/drivers/acpi/x86/s2idle.c
-@@ -371,12 +371,27 @@ static int lps0_device_attach(struct acpi_device *adev,
- return 0;
-
- if (acpi_s2idle_vendor_amd()) {
-+ /* AMD0004, AMDI0005:
-+ * - Should use rev_id 0x0
-+ * - function mask > 0x3: Should use AMD method, but has off by one bug
-+ * - function mask = 0x3: Should use Microsoft method
-+ * AMDI0006:
-+ * - should use rev_id 0x0
-+ * - function mask = 0x3: Should use Microsoft method
-+ */
-+ const char *hid = acpi_device_hid(adev);
- rev_id = 0;
- lps0_dsm_func_mask = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
- &lps0_dsm_guid_microsoft);
-+ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
-+ !strcmp(hid, "AMDI0005"))) {
-+ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
-+ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
-+ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
-+ }
- } else {
- rev_id = 1;
- lps0_dsm_func_mask = validate_dsm(adev->handle,
---
-2.25.1
-
-
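The quirk being removed here widens the firmware-reported function mask by one bit on AMD0004/AMDI0005 systems whose BIOS reports it off by one. A worked example of the adjustment, using an invented firmware value:

```c
#include <stdio.h>

int main(void)
{
	unsigned int reported = 0x3f;			/* hypothetical BIOS-reported mask */
	unsigned int adjusted = (reported << 1) | 0x1;	/* same fixup as in the patch */

	/* prints: reported 0x3f -> adjusted 0x7f */
	printf("reported 0x%x -> adjusted 0x%x\n", reported, adjusted);
	return 0;
}
```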
diff --git a/sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch b/sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch
deleted file mode 100644
index fd71de16e19e..000000000000
--- a/sys-kernel_arch-sources-g14_files-0028-platform-x86-amd-pmc-Fix-command-completion-code.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 375675bfe045e2c3911d6cc5f13281e388ee3544 Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:35 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Fix command completion code
-
-The protocol to submit a job request to SMU is to wait for
-AMD_PMC_REGISTER_RESPONSE to return 1, meaning the SMU is ready to take
-requests. The PMC driver has to make sure that the response code is always
-AMD_PMC_RESULT_OK before making any command submissions.
-
-Also, when we submit a message to SMU, we have to wait until it processes
-the request. Adding a read_poll_timeout() check as this was missing in
-the existing code.
-
-Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Reviewed-by: Hans de Goede <hdegoede@redhat.com>
----
- drivers/platform/x86/amd-pmc.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index b9da58ee9b1e..9c8a53120767 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -140,7 +140,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
-
- /* Wait until we get a valid response */
- rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-- val, val > 0, PMC_MSG_DELAY_MIN_US,
-+ val, val == AMD_PMC_RESULT_OK, PMC_MSG_DELAY_MIN_US,
- PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
- if (rc) {
- dev_err(dev->dev, "failed to talk to SMU\n");
-@@ -156,6 +156,14 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
- /* Write message ID to message ID register */
- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
- amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-+ /* Wait until we get a valid response */
-+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-+ val, val == AMD_PMC_RESULT_OK, PMC_MSG_DELAY_MIN_US,
-+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
-+ if (rc) {
-+ dev_err(dev->dev, "SMU response timed out\n");
-+ return rc;
-+ }
- return 0;
- }
-
---
-GitLab
-
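The fix above enforces the SMU mailbox handshake: poll the response register for AMD_PMC_RESULT_OK before submitting, write the argument and message ID, then poll again for completion. A compact sketch of that sequence with the MMIO access simulated by a plain variable (the real driver uses ioread32()/readx_poll_timeout(); the message ID below is invented):

```c
#include <stdio.h>

#define AMD_PMC_RESULT_OK 0x01

static unsigned int response_reg = AMD_PMC_RESULT_OK;	/* stand-in for the MMIO register */

static int wait_for_ok(void)
{
	return (response_reg == AMD_PMC_RESULT_OK) ? 0 : -1;
}

static int send_cmd(unsigned int arg, unsigned int msg)
{
	if (wait_for_ok())
		return -1;	/* SMU not ready for a new request */
	printf("submit arg=0x%x msg=0x%x\n", arg, msg);	/* argument + message registers */
	return wait_for_ok();	/* the post-submit wait the patch adds */
}

int main(void)
{
	return send_cmd(1, 0x03 /* hypothetical OS hint message ID */);
}
```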
diff --git a/sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch b/sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch
deleted file mode 100644
index b5246110db6f..000000000000
--- a/sys-kernel_arch-sources-g14_files-0029-platform-x86-amd-pmc-Fix-SMU-firmware-reporting-mechanism.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From 5a1cb72ae0adb1249594de47f6a8e6ef77ce173a Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:36 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Fix SMU firmware reporting mechanism
-
-It was recently understood that the current mechanism available in the
-driver to get SMU firmware info works only on internal SMU builds, and
-there is a separate way to get all the SMU logging counters (addressed
-in the next patch). Hence remove all the SMU info shown via debugfs, as
-it is no longer useful.
-
-Also, use the dump-registers routine in only one place, i.e. after the
-command submission to SMU is done.
-
-Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
----
- drivers/platform/x86/amd-pmc.c | 15 +--------------
- 1 file changed, 1 insertion(+), 14 deletions(-)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index 9c8a53120767..ce0e2ad94d09 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -52,7 +52,6 @@
- #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
- #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
-
--#define AMD_SMU_FW_VERSION 0x0
- #define PMC_MSG_DELAY_MIN_US 100
- #define RESPONSE_REGISTER_LOOP_MAX 200
-
-@@ -88,11 +87,6 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
- #ifdef CONFIG_DEBUG_FS
- static int smu_fw_info_show(struct seq_file *s, void *unused)
- {
-- struct amd_pmc_dev *dev = s->private;
-- u32 value;
--
-- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-- seq_printf(s, "SMU FW Info: %x\n", value);
- return 0;
- }
- DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
-@@ -164,6 +158,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
- dev_err(dev->dev, "SMU response timed out\n");
- return rc;
- }
-+ amd_pmc_dump_registers(dev);
- return 0;
- }
-
-@@ -176,7 +171,6 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
- if (rc)
- dev_err(pdev->dev, "suspend failed\n");
-
-- amd_pmc_dump_registers(pdev);
- return 0;
- }
-
-@@ -189,7 +183,6 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
- if (rc)
- dev_err(pdev->dev, "resume failed\n");
-
-- amd_pmc_dump_registers(pdev);
- return 0;
- }
-
-@@ -256,17 +249,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
- pci_dev_put(rdev);
- base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
-
-- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-- if (!dev->smu_base)
-- return -ENOMEM;
--
- dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
- AMD_PMC_MAPPING_SIZE);
- if (!dev->regbase)
- return -ENOMEM;
-
-- amd_pmc_dump_registers(dev);
--
- platform_set_drvdata(pdev, dev);
- amd_pmc_dbgfs_register(dev);
- return 0;
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch b/sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch
deleted file mode 100644
index 6b2eaa3df76c..000000000000
--- a/sys-kernel_arch-sources-g14_files-0030-platform-x86-amd-pmc-Add-support-for-logging-SMU-metrics.patch
+++ /dev/null
@@ -1,278 +0,0 @@
-From 7852d1ee80a2a74278e6216a1196df734c5c1760 Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:37 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Add support for logging SMU metrics
-
-SMU provides a way to dump the s0ix debug statistics in the form of a
-metrics table via a set of special mailbox commands.
-
-Add support in the driver to send these commands to the SMU and expose
-the information received via debugfs. The information contains the s0ix
-entry/exit times, the active time of each IP block, etc.
-
-As a side note, SMU subsystem logging is not supported on Picasso based
-SoC's.
-
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
----
- drivers/platform/x86/amd-pmc.c | 148 +++++++++++++++++++++++++++++++--
- 1 file changed, 140 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index ce0e2ad94d09..bb067324644d 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -46,6 +46,14 @@
- #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
- #define AMD_PMC_RESULT_FAILED 0xFF
-
-+/* SMU Message Definations */
-+#define SMU_MSG_GETSMUVERSION 0x02
-+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
-+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
-+#define SMU_MSG_LOG_START 0x06
-+#define SMU_MSG_LOG_RESET 0x07
-+#define SMU_MSG_LOG_DUMP_DATA 0x08
-+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
- /* List of supported CPU ids */
- #define AMD_CPU_ID_RV 0x15D0
- #define AMD_CPU_ID_RN 0x1630
-@@ -55,17 +63,42 @@
- #define PMC_MSG_DELAY_MIN_US 100
- #define RESPONSE_REGISTER_LOOP_MAX 200
-
-+#define SOC_SUBSYSTEM_IP_MAX 12
-+#define DELAY_MIN_US 2000
-+#define DELAY_MAX_US 3000
- enum amd_pmc_def {
- MSG_TEST = 0x01,
- MSG_OS_HINT_PCO,
- MSG_OS_HINT_RN,
- };
-
-+struct amd_pmc_bit_map {
-+ const char *name;
-+ u32 bit_mask;
-+};
-+
-+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
-+ {"DISPLAY", BIT(0)},
-+ {"CPU", BIT(1)},
-+ {"GFX", BIT(2)},
-+ {"VDD", BIT(3)},
-+ {"ACP", BIT(4)},
-+ {"VCN", BIT(5)},
-+ {"ISP", BIT(6)},
-+ {"NBIO", BIT(7)},
-+ {"DF", BIT(8)},
-+ {"USB0", BIT(9)},
-+ {"USB1", BIT(10)},
-+ {"LAPIC", BIT(11)},
-+ {}
-+};
-+
- struct amd_pmc_dev {
- void __iomem *regbase;
-- void __iomem *smu_base;
-+ void __iomem *smu_virt_addr;
- u32 base_addr;
- u32 cpu_id;
-+ u32 active_ips;
- struct device *dev;
- #if IS_ENABLED(CONFIG_DEBUG_FS)
- struct dentry *dbgfs_dir;
-@@ -73,6 +106,7 @@ struct amd_pmc_dev {
- };
-
- static struct amd_pmc_dev pmc;
-+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
-
- static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
- {
-@@ -84,9 +118,50 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
- iowrite32(val, dev->regbase + reg_offset);
- }
-
-+struct smu_metrics {
-+ u32 table_version;
-+ u32 hint_count;
-+ u32 s0i3_cyclecount;
-+ u32 timein_s0i2;
-+ u64 timeentering_s0i3_lastcapture;
-+ u64 timeentering_s0i3_totaltime;
-+ u64 timeto_resume_to_os_lastcapture;
-+ u64 timeto_resume_to_os_totaltime;
-+ u64 timein_s0i3_lastcapture;
-+ u64 timein_s0i3_totaltime;
-+ u64 timein_swdrips_lastcapture;
-+ u64 timein_swdrips_totaltime;
-+ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
-+ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
-+} __packed;
-+
- #ifdef CONFIG_DEBUG_FS
- static int smu_fw_info_show(struct seq_file *s, void *unused)
- {
-+ struct amd_pmc_dev *dev = s->private;
-+ struct smu_metrics table;
-+ u32 value;
-+ int idx;
-+
-+ if (dev->cpu_id == AMD_CPU_ID_PCO)
-+ return -EINVAL;
-+
-+ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
-+
-+ seq_puts(s, "\n=== SMU Statistics ===\n");
-+ seq_printf(s, "Table Version: %d\n", table.table_version);
-+ seq_printf(s, "Hint Count: %d\n", table.hint_count);
-+ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
-+ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
-+ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
-+
-+ seq_puts(s, "\n=== Active time (in us) ===\n");
-+ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
-+ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
-+ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
-+ table.timecondition_notmet_lastcapture[idx]);
-+ }
-+
- return 0;
- }
- DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
-@@ -112,6 +187,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
- }
- #endif /* CONFIG_DEBUG_FS */
-
-+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
-+{
-+ u32 phys_addr_low, phys_addr_hi;
-+ u64 smu_phys_addr;
-+
-+ if (dev->cpu_id == AMD_CPU_ID_PCO)
-+ return -EINVAL;
-+
-+ /* Get Active devices list from SMU */
-+ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
-+
-+ /* Get dram address */
-+ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
-+ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
-+ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
-+
-+ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
-+ if (!dev->smu_virt_addr)
-+ return -ENOMEM;
-+
-+ /* Start the logging */
-+ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
-+
-+ return 0;
-+}
-+
- static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
- {
- u32 value;
-@@ -126,10 +227,9 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
- dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
- }
-
--static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
-+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
- {
- int rc;
-- u8 msg;
- u32 val;
-
- /* Wait until we get a valid response */
-@@ -148,8 +248,8 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
- amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
-
- /* Write message ID to message ID register */
-- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
- amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-+
- /* Wait until we get a valid response */
- rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
- val, val == AMD_PMC_RESULT_OK, PMC_MSG_DELAY_MIN_US,
-@@ -158,16 +258,40 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
- dev_err(dev->dev, "SMU response timed out\n");
- return rc;
- }
-+
-+ if (ret) {
-+ /* PMFW may take longer time to return back the data */
-+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
-+ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
-+ }
-+
- amd_pmc_dump_registers(dev);
- return 0;
- }
-
-+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
-+{
-+ switch (dev->cpu_id) {
-+ case AMD_CPU_ID_PCO:
-+ return MSG_OS_HINT_PCO;
-+ case AMD_CPU_ID_RN:
-+ return MSG_OS_HINT_RN;
-+ }
-+ return -EINVAL;
-+}
-+
- static int __maybe_unused amd_pmc_suspend(struct device *dev)
- {
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
- int rc;
-+ u8 msg;
-+
-+ /* Reset and Start SMU logging - to monitor the s0i3 stats */
-+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
-+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
-
-- rc = amd_pmc_send_cmd(pdev, 1);
-+ msg = amd_pmc_get_os_hint(pdev);
-+ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
- if (rc)
- dev_err(pdev->dev, "suspend failed\n");
-
-@@ -178,8 +302,13 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
- {
- struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
- int rc;
-+ u8 msg;
-+
-+ /* Let SMU know that we are looking for stats */
-+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
-
-- rc = amd_pmc_send_cmd(pdev, 0);
-+ msg = amd_pmc_get_os_hint(pdev);
-+ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
- if (rc)
- dev_err(pdev->dev, "resume failed\n");
-
-@@ -202,8 +331,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
- {
- struct amd_pmc_dev *dev = &pmc;
- struct pci_dev *rdev;
-- u32 base_addr_lo;
-- u32 base_addr_hi;
-+ u32 base_addr_lo, base_addr_hi;
- u64 base_addr;
- int err;
- u32 val;
-@@ -254,6 +382,10 @@ static int amd_pmc_probe(struct platform_device *pdev)
- if (!dev->regbase)
- return -ENOMEM;
-
-+ /* Use SMU to get the s0i3 debug stats */
-+ err = amd_pmc_setup_smu_logging(dev);
-+ if (err)
-+ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
- platform_set_drvdata(pdev, dev);
- amd_pmc_dbgfs_register(dev);
- return 0;
---
-GitLab
-
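For the SMU metrics logging removed here (and re-added by the backport below), the driver asks the SMU for the DRAM address of its metrics table in two 32-bit halves, combines them, ioremaps the result and copies out a struct smu_metrics for debugfs. A small sketch of the address assembly with invented reply values:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t phys_addr_low = 0x00400000;	/* hypothetical SMU_MSG_LOG_GETDRAM_ADDR_LO reply */
	uint32_t phys_addr_hi  = 0x0000000f;	/* hypothetical SMU_MSG_LOG_GETDRAM_ADDR_HI reply */
	uint64_t smu_phys_addr = ((uint64_t)phys_addr_hi << 32) | phys_addr_low;

	/* the driver then devm_ioremap()s this address and memcpy_fromio()s the table */
	printf("metrics table at 0x%llx\n", (unsigned long long)smu_phys_addr);
	return 0;
}
```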
diff --git a/sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch b/sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch
deleted file mode 100644
index 654b1cb98216..000000000000
--- a/sys-kernel_arch-sources-g14_files-0031-platform-x86-amd-pmc-Add-support-for-s0ix-counters.patch
+++ /dev/null
@@ -1,123 +0,0 @@
-From a09d88cec0a5dc088d7859433f2027aad4c68ff4 Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:38 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Add support for logging s0ix counters
-
-Even the FCH SSC registers provide a certain level of information
-about the s0ix entry and exit times, which comes in handy when the SMU
-fails to report the statistics via the mailbox communication.
-
-This information is captured via a new debugfs file "s0ix_stats".
-A non-zero entry in these counters means that the system entered
-the s0ix state.
-
-If s0ix entry time and exit time don't change during suspend to idle,
-the silicon has not entered the deepest state.
-
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
----
- drivers/platform/x86/amd-pmc.c | 46 ++++++++++++++++++++++++++++++++--
- 1 file changed, 44 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index bb067324644d..174f067f0756 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -46,6 +46,15 @@
- #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
- #define AMD_PMC_RESULT_FAILED 0xFF
-
-+/* FCH SSC Registers */
-+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
-+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
-+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
-+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
-+#define FCH_SSC_MAPPING_SIZE 0x800
-+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
-+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
-+
- /* SMU Message Definations */
- #define SMU_MSG_GETSMUVERSION 0x02
- #define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
-@@ -96,6 +105,7 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
- struct amd_pmc_dev {
- void __iomem *regbase;
- void __iomem *smu_virt_addr;
-+ void __iomem *fch_virt_addr;
- u32 base_addr;
- u32 cpu_id;
- u32 active_ips;
-@@ -140,7 +150,6 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
- {
- struct amd_pmc_dev *dev = s->private;
- struct smu_metrics table;
-- u32 value;
- int idx;
-
- if (dev->cpu_id == AMD_CPU_ID_PCO)
-@@ -166,6 +175,29 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
- }
- DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
-
-+static int s0ix_stats_show(struct seq_file *s, void *unused)
-+{
-+ struct amd_pmc_dev *dev = s->private;
-+ u64 entry_time, exit_time, residency;
-+
-+ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
-+ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
-+
-+ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
-+ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
-+
-+ /* It's in 48MHz. We need to convert it to unit of 100ns */
-+ residency = (exit_time - entry_time) * 10 / 48;
-+
-+ seq_puts(s, "=== S0ix statistics ===\n");
-+ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
-+ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
-+ seq_printf(s, "Residency Time: %lld\n", residency);
-+
-+ return 0;
-+}
-+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
-+
- static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
- {
- debugfs_remove_recursive(dev->dbgfs_dir);
-@@ -176,6 +208,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
- dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
- debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
- &smu_fw_info_fops);
-+ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
-+ &s0ix_stats_fops);
- }
- #else
- static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
-@@ -332,7 +366,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
- struct amd_pmc_dev *dev = &pmc;
- struct pci_dev *rdev;
- u32 base_addr_lo, base_addr_hi;
-- u64 base_addr;
-+ u64 base_addr, fch_phys_addr;
- int err;
- u32 val;
-
-@@ -382,6 +416,14 @@ static int amd_pmc_probe(struct platform_device *pdev)
- if (!dev->regbase)
- return -ENOMEM;
-
-+ /* Use FCH registers to get the S0ix stats */
-+ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
-+ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
-+ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
-+ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
-+ if (!dev->fch_virt_addr)
-+ return -ENOMEM;
-+
- /* Use SMU to get the s0i3 debug stats */
- err = amd_pmc_setup_smu_logging(dev);
- if (err)
---
-GitLab
-
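The s0ix_stats file reads 64-bit entry/exit timestamps from pairs of 32-bit FCH registers and converts the delta from 48 MHz ticks to 100 ns units (delta * 10 / 48). A worked example with invented counter values:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t entry_time = 480000000ULL;	/* hypothetical tick count at entry */
	uint64_t exit_time  = 480004800ULL;	/* hypothetical tick count at exit  */
	uint64_t residency  = (exit_time - entry_time) * 10 / 48;

	/* 4800 ticks at 48 MHz = 100 us, i.e. 1000 units of 100 ns */
	printf("residency: %llu (x 100 ns)\n", (unsigned long long)residency);
	return 0;
}
```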
diff --git a/sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch b/sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch
deleted file mode 100644
index 65412a2a35cd..000000000000
--- a/sys-kernel_arch-sources-g14_files-0032-platform-x86-amd-pmc-Add-support-for-ACPI-ID-AMDI0006.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 7f0b4e8abb8921faf082fbcff96600f54de669f7 Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:39 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Add support for ACPI ID AMDI0006
-
-Some newer BIOSes have added another ACPI ID for the uPEP device.
-SMU statistics behave identically on this device.
-
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
----
- drivers/platform/x86/amd-pmc.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index 174f067f0756..e024fd36bd26 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -443,6 +443,7 @@ static int amd_pmc_remove(struct platform_device *pdev)
-
- static const struct acpi_device_id amd_pmc_acpi_ids[] = {
- {"AMDI0005", 0},
-+ {"AMDI0006", 0},
- {"AMD0004", 0},
- { }
- };
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch b/sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch
deleted file mode 100644
index 0328872e9059..000000000000
--- a/sys-kernel_arch-sources-g14_files-0033-platform-x86-amd-pmc-Add-new-acpi-for-future-PMC.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From 6230d05e0540b47ae8334c1b3a1a2e8fa57db379 Mon Sep 17 00:00:00 2001
-From: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-Date: Thu, 17 Jun 2021 17:00:40 +0530
-Subject: [PATCH] platform/x86: amd-pmc: Add new acpi id for future PMC
- controllers
-
-The upcoming PMC controller would have a newer acpi id, add that to
-the supported acpid device list.
-
-Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
----
- drivers/platform/x86/amd-pmc.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
-index e024fd36bd26..c26ac561c0d4 100644
---- a/drivers/platform/x86/amd-pmc.c
-+++ b/drivers/platform/x86/amd-pmc.c
-@@ -68,6 +68,7 @@
- #define AMD_CPU_ID_RN 0x1630
- #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
- #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
-+#define AMD_CPU_ID_YC 0x14B5
-
- #define PMC_MSG_DELAY_MIN_US 100
- #define RESPONSE_REGISTER_LOOP_MAX 200
-@@ -309,6 +310,7 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
- case AMD_CPU_ID_PCO:
- return MSG_OS_HINT_PCO;
- case AMD_CPU_ID_RN:
-+ case AMD_CPU_ID_YC:
- return MSG_OS_HINT_RN;
- }
- return -EINVAL;
-@@ -354,6 +356,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
- };
-
- static const struct pci_device_id pmc_pci_ids[] = {
-+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
-@@ -444,6 +447,7 @@ static int amd_pmc_remove(struct platform_device *pdev)
- static const struct acpi_device_id amd_pmc_acpi_ids[] = {
- {"AMDI0005", 0},
- {"AMDI0006", 0},
-+ {"AMDI0007", 0},
- {"AMD0004", 0},
- { }
- };
---
-GitLab
-
diff --git a/sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch b/sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch
index e06344e3e625..e107052b62c0 100644
--- a/sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch
+++ b/sys-kernel_arch-sources-g14_files-0034-btusb-mediatek.patch
@@ -69,3 +69,77 @@ index 99fd88f7653d4f..cb18d63a948d89 100644
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
+
+
+From 0ea78370d7cad1efe7e014d7b7433073153c1ada Mon Sep 17 00:00:00 2001
+From: Wai Paulo Valerio Wang <waicool20@gmail.com>
+Date: Thu, 8 Jul 2021 03:28:56 +0800
+Subject: [PATCH] Bluetooth: btusb: Add support for IMC Networks Mediatek Chip
+
+This adds support for the IMC Networks Wireless_Device Media Chip
+which contains the MT7921 chipset.
+
+Bus 001 Device 004: ID 13d3:3563 IMC Networks Wireless_Device
+
+T: Bus=01 Lev=01 Prnt=01 Port=03 Cnt=03 Dev#= 4 Spd=480 MxCh= 0
+D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=13d3 ProdID=3563 Rev= 1.00
+S: Manufacturer=MediaTek Inc.
+S: Product=Wireless_Device
+S: SerialNumber=000000000
+C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA
+A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=125us
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none)
+E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us
+E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us
+I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none)
+E: Ad=8a(I) Atr=03(Int.) MxPS= 512 Ivl=125us
+E: Ad=0a(O) Atr=03(Int.) MxPS= 512 Ivl=125us
+
+Signed-off-by: Wai Paulo Valerio Wang <waicool20@gmail.com>
+---
+ drivers/bluetooth/btusb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a9855a2dd561..edfa5a31a529 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -414,6 +414,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
+
+ /* Additional Realtek 8723AE Bluetooth devices */
+ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch b/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch
new file mode 100644
index 000000000000..882d4af32dd4
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0035-backport-from-5.14-s0ix-enablement-no-d3hot-2021-06-30.patch
@@ -0,0 +1,1308 @@
+From 42b2e2ffb3a9e421050893c8a3786210fb541ae1 Mon Sep 17 00:00:00 2001
+From: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu, 8 Jul 2021 03:24:48 -0700
+Subject: [PATCH] Squashed commit of the following:
+
+all mainline s0ix support up to 2021-06-30:
+ 11 s0ix patches, including EC GPE patch
+ 7 amd_pmc v5 patchset diagnostics
+
+Does not include AMD XHCI D3hot Quirk
+
+commit 2b513555a462fc9233355d44789833a9be300a2f
+Merge: ebf1d8bcad36 b072ea5d76db
+Author: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu Jul 8 03:16:14 2021 -0700
+
+ Merge branch 'patchwork-amd-pmc-logging-v5' into TEMP-s0ix-on-5.13
+
+commit ebf1d8bcad366f451d54bda9e15943f173003551
+Merge: 62fb9874f5da 35f01a4422b1
+Author: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Thu Jul 8 03:15:32 2021 -0700
+
+ Merge branch 'DO-NOT-USE-backport-from-5.14-s0ix-no-d3hot' into HEAD
+
+commit 35f01a4422b180806c6106b5696ab9626b5dac41
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 30 14:46:06 2021 -0500
+
+ ACPI: PM: Only mark EC GPE for wakeup on Intel systems
+
+ When using s2idle on a variety of AMD notebook systems, they are
+ experiencing spurious events that the EC or SMU are in the wrong
+ state leading to a hard time waking up or higher than expected
+ power consumption.
+
+ These events only occur when the EC GPE is inadvertently set as a wakeup
+ source. Originally the EC GPE was only set as a wakeup source when using
+ the intel-vbtn or intel-hid drivers in commit 10a08fd65ec1 ("ACPI: PM:
+ Set up EC GPE for system wakeup from drivers that need it") but during
+ testing a reporter discovered that this was not enough for their ASUS
+ Zenbook UX430UNR/i7-8550U to wakeup by lid event or keypress.
+ Marking the EC GPE for wakeup universally resolved this for that
+ reporter in commit b90ff3554aa3 ("ACPI: PM: s2idle: Always set up EC GPE
+ for system wakeup").
+
+ However this behavior has lead to a number of problems:
+
+ * On both Lenovo T14 and P14s the keyboard wakeup doesn't work, and
+ sometimes the power button event doesn't work.
+ * On HP 635 G7 detaching or attaching AC during suspend will cause
+ the system not to wakeup
+ * On Asus vivobook to prevent detaching AC causing resume problems
+ * On Lenovo 14ARE05 to prevent detaching AC causing resume problems
+ * On HP ENVY x360 to prevent detaching AC causing resume problems
+
+ As there may be other Intel systems besides ASUS Zenbook UX430UNR/i7-8550U
+ that don't use intel-vbtn or intel-hid avoid these problems by only
+ universally marking the EC GPE wakesource on non-AMD systems.
+
+ Link: https://patchwork.kernel.org/project/linux-pm/cover/5997740.FPbUVk04hV@kreacher/#22825489
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1629
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Alex Deucher <alexander.deucher@amd.com>
+
+commit 5032c30b7d44b5b9d68f7a8d3bbcb6238484b2a6
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Thu Jun 17 11:42:12 2021 -0500
+
+ ACPI: PM: Adjust behavior for field problems on AMD systems
+
+ Some AMD Systems with uPEP _HID AMD004/AMDI005 have an off by one bug
+ in their function mask return. This means that they will call entrance
+ but not exit for matching functions.
+
+ Other AMD systems with this HID should use the Microsoft generic UUID.
+
+ AMD systems with uPEP HID AMDI006 should be using the Microsoft method.
+
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 6ad8efa21132a0c53372a615c91f5f24b6e2ed63
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:11 2021 -0500
+
+ ACPI: PM: s2idle: Add support for new Microsoft UUID
+
+ This adds support for _DSM notifications to the Microsoft UUID
+ described by Microsoft documentation for s2idle.
+
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/modern-standby-firmware-notifications
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit 1c22e7dec26ea42a98b0c87d97a150a415978ec1
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:10 2021 -0500
+
+ ACPI: PM: s2idle: Add support for multiple func mask
+
+ Required for a follow-up patch that adds a new UUID
+ needing a new function mask.
+
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit aa430a0319ce9ac4629735ac6a5b86bd48517dff
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:09 2021 -0500
+
+ ACPI: PM: s2idle: Refactor common code
+
+ Refactor common code to prepare for upcoming changes.
+ * Remove unused struct.
+ * Print error before returning.
+ * Free the ACPI object if the _DSM type is not as expected.
+ * Treat lps0_dsm_func_mask as an integer rather than a character.
+ * Remove the extra out_obj.
+ * Move rev_id.
+
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit a71e52e9ea22c0b976014a6f7eb2ed93f65aa4ca
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:08 2021 -0500
+
+ ACPI: PM: s2idle: Use correct revision id
+
+ The AMD spec mentions only revision 0. With this change, the
+ device constraint list is populated properly.
+
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit e5b15e24f169d15b231581cccb2bfa9820225cff
+Author: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed May 5 09:20:32 2021 -0400
+
+ ACPI: PM: s2idle: Add missing LPS0 functions for AMD
+
+ These are supposedly not required on AMD platforms,
+ but at least some HP laptops seem to require them to
+ properly turn off the keyboard backlight.
+
+ Based on a patch from Marcin Bachry <hegel666@gmail.com>.
+
+ Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+ Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 526bf2ffedece1257d3742a2ad633e00bb1778a7
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:18 2021 -0500
+
+ ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3 hint
+
+ AMD systems from Renoir and Lucienne require that the NVME controller
+ is put into D3 over a Modern Standby / suspend-to-idle
+ cycle. This is "typically" accomplished using the `StorageD3Enable`
+ property in the _DSD, but this property was introduced after many
+ of these systems launched and most OEM systems don't have it in
+ their BIOS.
+
+ On AMD Renoir, if these drives do not go into D3 over suspend-to-idle,
+ the resume will fail, with the NVME controller being reset and a trace
+ like this in the kernel logs:
+ ```
+ [ 83.556118] nvme nvme0: I/O 161 QID 2 timeout, aborting
+ [ 83.556178] nvme nvme0: I/O 162 QID 2 timeout, aborting
+ [ 83.556187] nvme nvme0: I/O 163 QID 2 timeout, aborting
+ [ 83.556196] nvme nvme0: I/O 164 QID 2 timeout, aborting
+ [ 95.332114] nvme nvme0: I/O 25 QID 0 timeout, reset controller
+ [ 95.332843] nvme nvme0: Abort status: 0x371
+ [ 95.332852] nvme nvme0: Abort status: 0x371
+ [ 95.332856] nvme nvme0: Abort status: 0x371
+ [ 95.332859] nvme nvme0: Abort status: 0x371
+ [ 95.332909] PM: dpm_run_callback(): pci_pm_resume+0x0/0xe0 returns -16
+ [ 95.332936] nvme 0000:03:00.0: PM: failed to resume async: error -16
+ ```
+
+ The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ a hardcoded allowlist for D3 support, which was used for these platforms.
+ Introduce quirks to hardcode them for Linux as well.
+
+ As this property is now "standardized", OEM systems using AMD Cezanne and
+ newer APUs have adopted it, and quirks like this should not be
+ necessary for those newer platforms.
+
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Prike Liang <prike.liang@amd.com>
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Tested-by: Julian Sikorski <belegdol@gmail.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+commit 8b763d23606ce256a5ec9db81d8177ad5e90bbf9
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:17 2021 -0500
+
+ ACPI: Check StorageD3Enable _DSD property in ACPI code
+
+ Although first implemented for NVMe, this check may be usable by
+ other drivers as well. Microsoft's specification explicitly mentions
+ that it may be usable by SATA and AHCI devices. Google also indicates
+ that they have used this in a downstream kernel tree with SDHCI slots
+ that a user can plug a storage device into.
+
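+ As a rough sketch only (not part of this change), another storage
+ driver could consume the helper the same way the NVMe driver does; the
+ function name example_want_d3() below is hypothetical:
+
+ ```c
+ #include <linux/acpi.h>
+ #include <linux/device.h>
+
+ /* Ask the platform whether this device should suspend into D3. */
+ static bool example_want_d3(struct device *dev)
+ {
+ 	/* true when firmware sets StorageD3Enable=1 (or a CPU quirk applies) */
+ 	return acpi_storage_d3(dev);
+ }
+ ```
+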
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Suggested-by: Keith Busch <kbusch@kernel.org>
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Rafael J. Wysocki <rjw@rjwysocki.net>
+ CC: Prike Liang <prike.liang@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+commit b12b48eb29c8a392f4093898877a02f271b93355
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri May 28 11:02:34 2021 -0500
+
+ nvme-pci: look for StorageD3Enable on companion ACPI device instead
+
+ The documentation around the StorageD3Enable property hints that it
+ should be set on the PCI device. This is where newer AMD systems set
+ the property and it's required for S0i3 support.
+
+ So rather than look for nodes of the root port that are only present on
+ Intel systems, switch to the companion ACPI device for all systems.
+ David Box from Intel indicated this should work on Intel as well.
+
+ Link: https://lore.kernel.org/linux-nvme/YK6gmAWqaRmvpJXb@google.com/T/#m900552229fa455867ee29c33b854845fce80ba70
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Fixes: df4f9bc4fb9c ("nvme-pci: add support for ACPI StorageD3Enable property")
+ Suggested-by: Liang Prike <Prike.Liang@amd.com>
+ Acked-by: Raul E Rangel <rrangel@chromium.org>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Reviewed-by: David E. Box <david.e.box@linux.intel.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
+commit e54f80e62f9d3412a7add7f3753c7c2604cbfe78
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed May 12 17:15:14 2021 -0500
+
+ ACPI: processor idle: Fix up C-state latency if not ordered
+
+ Generally, the C-state latency is provided by the _CST method or
+ FADT, but some OEM platforms using AMD Picasso, Renoir, Van Gogh,
+ and Cezanne set the C2 latency greater than C3's, which causes the
+ C2 state to be skipped.
+
+ That will block the core entering PC6, which prevents S0ix working
+ properly on Linux systems.
+
+ In other operating systems, the latency values are not validated, so
+ states are not skipped and this does not cause problems.
+
+ To avoid this issue on Linux, detect when the latencies are not in
+ ascending order and sort them.
+
+ Link: https://gitlab.freedesktop.org/agd5f/linux/-/commit/026d186e4592c1ee9c1cb44295912d0294508725
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230#note_712174
+ Suggested-by: Prike Liang <Prike.Liang@amd.com>
+ Suggested-by: Alex Deucher <alexander.deucher@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ [ rjw: Subject and changelog edits ]
+ Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit b072ea5d76db90c1316aa04a15f77744ac1bafa1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:03 2021 +0530
+
+ platform/x86: amd-pmc: Add new acpi id for future PMC controllers
+
+ The upcoming PMC controller has a newer ACPI ID; add it to the
+ supported ACPI device list.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 4f025bcec084eb0624b3a34a03b0ff00a14a2fa0
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:02 2021 +0530
+
+ platform/x86: amd-pmc: Add support for ACPI ID AMDI0006
+
+ Some newer BIOSes have added another ACPI ID for the uPEP device.
+ SMU statistics behave identically on this device.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit a3ccb3605aac3f650732a55adea7e666d9df24c5
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:01 2021 +0530
+
+ amd-pmc: Add support for logging s0ix counters
+
+ The FCH SSC registers also provide a certain level of information
+ about the s0ix entry and exit times, which comes in handy when the SMU
+ fails to report the statistics via the mailbox communication.
+
+ This information is captured via a new debugfs file "s0ix_stats".
+ A non-zero entry in these counters means that the system entered
+ the s0ix state.
+
+ If the s0ix entry time and exit time don't change during suspend-to-idle,
+ the silicon has not entered the deepest state.
+
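+ As a rough sketch (not part of this change), the arithmetic behind the
+ reported residency is just the counter delta divided by the 48 MHz tick
+ rate; example_s0ix_residency_us() below is a hypothetical helper:
+
+ ```c
+ /* FCH counters tick at 48 MHz, so the delta / 48 gives microseconds. */
+ static u64 example_s0ix_residency_us(u64 entry_ticks, u64 exit_ticks)
+ {
+ 	/* equal values mean the deepest state was never reached */
+ 	if (exit_ticks == entry_ticks)
+ 		return 0;
+ 	return (exit_ticks - entry_ticks) / 48;
+ }
+ ```
+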
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 826b61ec6a7785614435fcdeb951290981eff6f1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:00 2021 +0530
+
+ platform/x86: amd-pmc: Add support for logging SMU metrics
+
+ The SMU provides a way to dump the s0ix debug statistics in the form of a
+ metrics table via a set of special mailbox commands.
+
+ Add support to the driver to send these commands to the SMU and expose
+ the information received via debugfs. The information contains the s0ix
+ entry/exit times, the active time of each IP block, etc.
+
+ As a side note, SMU subsystem logging is not supported on Picasso-based
+ SoCs.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit dd120f506eea9bee584cf0f7d737631554ba17a3
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:59 2021 +0530
+
+ platform/x86: amd-pmc: call dump registers only once
+
+ Currently the amd_pmc_dump_registers() routine is being called in
+ multiple places. The best place to call it is after command submission
+ to the SMU.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 87390ff9f263e0aa586d9a33198e79b47e5f85d1
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:58 2021 +0530
+
+ platform/x86: amd-pmc: Fix SMU firmware reporting mechanism
+
+ It was recently understood that the current mechanism available in the
+ driver to get SMU firmware info works only on internal SMU builds, and
+ that there is a separate way to get all the SMU logging counters (addressed
+ in the next patch). Hence remove all the SMU info shown via debugfs, as it
+ is no longer useful.
+
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 65581db92e5e4d5fa941cc6bb91e702e3ed97b74
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:57 2021 +0530
+
+ platform/x86: amd-pmc: Fix command completion code
+
+ The protocol to submit a job request to the SMU is to wait for
+ AMD_PMC_REGISTER_RESPONSE to return 1, meaning the SMU is ready to take
+ requests. The PMC driver has to make sure that the response code is always
+ AMD_PMC_RESULT_OK before making any command submissions.
+
+ When we submit a message to the SMU, we have to wait until it processes
+ the request. Add a read_poll_timeout() check, as this was missing in
+ the existing code.
+
+ Also, add a mutex to protect amd_pmc_send_cmd() calls to the SMU.
+
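+ Condensed for illustration only (the real change below also maps the
+ individual SMU error codes and takes a mutex), the intended submission
+ sequence looks roughly like this; example_send() is hypothetical:
+
+ ```c
+ static int example_send(struct amd_pmc_dev *dev, u8 msg, u32 arg)
+ {
+ 	u32 val;
+ 	int rc;
+
+ 	/* 1. wait until the SMU has answered the previous command */
+ 	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+ 				val, val != 0, PMC_MSG_DELAY_MIN_US,
+ 				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ 	if (rc)
+ 		return rc;
+
+ 	/* 2. clear the response, then write argument and message id */
+ 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_RESPONSE, 0);
+ 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, arg);
+ 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
+
+ 	/* 3. wait for completion and insist on AMD_PMC_RESULT_OK */
+ 	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+ 				val, val != 0, PMC_MSG_DELAY_MIN_US,
+ 				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ 	if (rc)
+ 		return rc;
+
+ 	return val == AMD_PMC_RESULT_OK ? 0 : -EIO;
+ }
+ ```
+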
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Acked-by: Raul E Rangel <rrangel@chromium.org>
+---
+ drivers/acpi/device_pm.c | 32 +++++
+ drivers/acpi/internal.h | 9 ++
+ drivers/acpi/processor_idle.c | 40 ++++++
+ drivers/acpi/x86/s2idle.c | 157 ++++++++++++++-------
+ drivers/acpi/x86/utils.c | 25 ++++
+ drivers/nvme/host/pci.c | 50 +------
+ drivers/platform/x86/amd-pmc.c | 244 ++++++++++++++++++++++++++++++---
+ include/linux/acpi.h | 5 +
+ 8 files changed, 443 insertions(+), 119 deletions(-)
+
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index d260bc1f3e6e..6dd9bd64903e 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1340,4 +1340,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ return 1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
++
++/**
++ * acpi_storage_d3 - Check if D3 should be used in the suspend path
++ * @dev: Device to check
++ *
++ * Return %true if the platform firmware wants @dev to be programmed
++ * into D3hot or D3cold (if supported) in the suspend path, or %false
++ * when there is no specific preference. On some platforms, if this
++ * hint is ignored, @dev may remain unresponsive after suspending the
++ * platform as a whole.
++ *
++ * Although the property has storage in the name it actually is
++ * applied to the PCIe slot and plugging in a non-storage device the
++ * same platform restrictions will likely apply.
++ */
++bool acpi_storage_d3(struct device *dev)
++{
++ struct acpi_device *adev = ACPI_COMPANION(dev);
++ u8 val;
++
++ if (force_storage_d3())
++ return true;
++
++ if (!adev)
++ return false;
++ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
++ &val))
++ return false;
++ return val == 1;
++}
++EXPORT_SYMBOL_GPL(acpi_storage_d3);
++
+ #endif /* CONFIG_PM */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index e21611c9a170..7ac01b03ba67 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
+ static inline void suspend_nvs_restore(void) {}
+ #endif
+
++#ifdef CONFIG_X86
++bool force_storage_d3(void);
++#else
++static inline bool force_storage_d3(void)
++{
++ return false;
++}
++#endif
++
+ /*--------------------------------------------------------------------------
+ Device properties
+ -------------------------------------------------------------------------- */
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 45a019619e4a..095c8aca141e 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,6 +16,7 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h> /* need_resched() */
++#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ return;
+ }
+
++static int acpi_cst_latency_cmp(const void *a, const void *b)
++{
++ const struct acpi_processor_cx *x = a, *y = b;
++
++ if (!(x->valid && y->valid))
++ return 0;
++ if (x->latency > y->latency)
++ return 1;
++ if (x->latency < y->latency)
++ return -1;
++ return 0;
++}
++static void acpi_cst_latency_swap(void *a, void *b, int n)
++{
++ struct acpi_processor_cx *x = a, *y = b;
++ u32 tmp;
++
++ if (!(x->valid && y->valid))
++ return;
++ tmp = x->latency;
++ x->latency = y->latency;
++ y->latency = tmp;
++}
++
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ {
+ unsigned int i;
+ unsigned int working = 0;
++ unsigned int last_latency = 0;
++ unsigned int last_type = 0;
++ bool buggy_latency = false;
+
+ pr->power.timer_broadcast_on_state = INT_MAX;
+
+@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ }
+ if (!cx->valid)
+ continue;
++ if (cx->type >= last_type && cx->latency < last_latency)
++ buggy_latency = true;
++ last_latency = cx->latency;
++ last_type = cx->type;
+
+ lapic_timer_check_state(i, pr, cx);
+ tsc_check_state(cx->type);
+ working++;
+ }
+
++ if (buggy_latency) {
++ pr_notice("FW issue: working around C-state latencies out of order\n");
++ sort(&pr->power.states[1], max_cstate,
++ sizeof(struct acpi_processor_cx),
++ acpi_cst_latency_cmp,
++ acpi_cst_latency_swap);
++ }
++
+ lapic_timer_propagate_broadcast(pr);
+
+ return (working);
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 2b69536cdccb..1c507804fb10 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ {"", },
+ };
+
++/* Microsoft platform agnostic UUID */
++#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
++
+ #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
+ #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
+@@ -39,15 +42,22 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ #define ACPI_LPS0_SCREEN_ON 4
+ #define ACPI_LPS0_ENTRY 5
+ #define ACPI_LPS0_EXIT 6
++#define ACPI_LPS0_MS_ENTRY 7
++#define ACPI_LPS0_MS_EXIT 8
+
+ /* AMD */
+ #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
++#define ACPI_LPS0_ENTRY_AMD 2
++#define ACPI_LPS0_EXIT_AMD 3
+ #define ACPI_LPS0_SCREEN_OFF_AMD 4
+ #define ACPI_LPS0_SCREEN_ON_AMD 5
+
+ static acpi_handle lps0_device_handle;
+ static guid_t lps0_dsm_guid;
+-static char lps0_dsm_func_mask;
++static int lps0_dsm_func_mask;
++
++static guid_t lps0_dsm_guid_microsoft;
++static int lps0_dsm_func_mask_microsoft;
+
+ /* Device constraint entry structure */
+ struct lpi_device_info {
+@@ -68,15 +78,7 @@ struct lpi_constraints {
+ int min_dstate;
+ };
+
+-/* AMD */
+-/* Device constraint entry structure */
+-struct lpi_device_info_amd {
+- int revision;
+- int count;
+- union acpi_object *package;
+-};
+-
+-/* Constraint package structure */
++/* AMD Constraint package structure */
+ struct lpi_device_constraint_amd {
+ char *name;
+ int enabled;
+@@ -94,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
+ int i, j, k;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
++ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+- if (!out_obj)
+- return;
+-
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
++ if (!out_obj)
++ return;
++
+ for (i = 0; i < out_obj->package.count; i++) {
+ union acpi_object *package = &out_obj->package.elements[i];
+
+@@ -315,14 +317,15 @@ static void lpi_check_constraints(void)
+ }
+ }
+
+-static void acpi_sleep_run_lps0_dsm(unsigned int func)
++static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
+ {
+ union acpi_object *out_obj;
+
+- if (!(lps0_dsm_func_mask & (1 << func)))
++ if (!(func_mask & (1 << func)))
+ return;
+
+- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
++ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
++ rev_id, func, NULL);
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+@@ -334,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+ }
+
++static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
++{
++ union acpi_object *obj;
++ int ret = -EINVAL;
++
++ guid_parse(uuid, dsm_guid);
++ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
++
++ /* Check if the _DSM is present and as expected. */
++ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
++ obj->buffer.length > sizeof(u32)) {
++ acpi_handle_debug(handle,
++ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
++ goto out;
++ }
++
++ ret = *(int *)obj->buffer.pointer;
++ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
++
++out:
++ ACPI_FREE(obj);
++ return ret;
++}
++
+ static int lps0_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+ {
+- union acpi_object *out_obj;
+-
+ if (lps0_device_handle)
+ return 0;
+
+@@ -346,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
+ return 0;
+
+ if (acpi_s2idle_vendor_amd()) {
+- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
++ /* AMD0004, AMDI0005:
++ * - Should use rev_id 0x0
++ * - function mask > 0x3: Should use AMD method, but has off by one bug
++ * - function mask = 0x3: Should use Microsoft method
++ * AMDI0006:
++ * - should use rev_id 0x0
++ * - function mask = 0x3: Should use Microsoft method
++ */
++ const char *hid = acpi_device_hid(adev);
+ rev_id = 0;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
++ &lps0_dsm_guid_microsoft);
++ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
++ !strcmp(hid, "AMDI0005"))) {
++ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
++ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
++ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
++ }
+ } else {
+- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+ rev_id = 1;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = -EINVAL;
+ }
+
+- /* Check if the _DSM is present and as expected. */
+- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
+- acpi_handle_debug(adev->handle,
+- "_DSM function 0 evaluation failed\n");
+- return 0;
+- }
+-
+- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+-
+- ACPI_FREE(out_obj);
+-
+- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+- lps0_dsm_func_mask);
++ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
++ return 0; //function evaluation failed
+
+ lps0_device_handle = adev->handle;
+
+@@ -384,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+- * EC GPE to be enabled while suspended for certain wakeup devices to
+- * work, so mark it as wakeup-capable.
++ * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
++ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
++ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
++ *
++ * Only enable on !AMD as enabling this universally causes problems for a number
++ * of AMD based systems.
+ */
+- acpi_ec_mark_gpe_for_wake();
++ if (!acpi_s2idle_vendor_amd())
++ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+ }
+@@ -406,11 +443,23 @@ int acpi_s2idle_prepare_late(void)
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+
+ return 0;
+@@ -421,11 +470,23 @@ void acpi_s2idle_restore_early(void)
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+ }
+
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index bdc1ba00aee9..f22f23933063 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
+
+ return ret;
+ }
++
++/*
++ * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
++ *
++ * This is "typically" accomplished using the `StorageD3Enable`
++ * property in the _DSD that is checked via the `acpi_storage_d3` function
++ * but this property was introduced after many of these systems launched
++ * and most OEM systems don't have it in their BIOS.
++ *
++ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
++ * a hardcoded allowlist for D3 support, which was used for these platforms.
++ *
++ * This allows quirking on Linux in a similar fashion.
++ */
++static const struct x86_cpu_id storage_d3_cpu_ids[] = {
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
++ {}
++};
++
++bool force_storage_d3(void)
++{
++ return x86_match_cpu(storage_d3_cpu_ids);
++}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index a29b170701fc..8fbc4c87a0d8 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2828,54 +2828,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ return 0;
+ }
+
+-#ifdef CONFIG_ACPI
+-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- struct acpi_device *adev;
+- struct pci_dev *root;
+- acpi_handle handle;
+- acpi_status status;
+- u8 val;
+-
+- /*
+- * Look for _DSD property specifying that the storage device on the port
+- * must use D3 to support deep platform power savings during
+- * suspend-to-idle.
+- */
+- root = pcie_find_root_port(dev);
+- if (!root)
+- return false;
+-
+- adev = ACPI_COMPANION(&root->dev);
+- if (!adev)
+- return false;
+-
+- /*
+- * The property is defined in the PXSX device for South complex ports
+- * and in the PEGP device for North complex ports.
+- */
+- status = acpi_get_handle(adev->handle, "PXSX", &handle);
+- if (ACPI_FAILURE(status)) {
+- status = acpi_get_handle(adev->handle, "PEGP", &handle);
+- if (ACPI_FAILURE(status))
+- return false;
+- }
+-
+- if (acpi_bus_get_device(handle, &adev))
+- return false;
+-
+- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+- &val))
+- return false;
+- return val == 1;
+-}
+-#else
+-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- return false;
+-}
+-#endif /* CONFIG_ACPI */
+-
+ static void nvme_async_probe(void *data, async_cookie_t cookie)
+ {
+ struct nvme_dev *dev = data;
+@@ -2925,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ quirks |= check_vendor_combination_bug(pdev);
+
+- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
++ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a bios work around to ask for D3 on
+ * platforms that support kernel managed suspend.
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index b9da58ee9b1e..d2f9a62e1166 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -46,34 +46,79 @@
+ #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+ #define AMD_PMC_RESULT_FAILED 0xFF
+
++/* FCH SSC Registers */
++#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
++#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
++#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
++#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
++#define FCH_SSC_MAPPING_SIZE 0x800
++#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
++#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
++
++/* SMU Message Definations */
++#define SMU_MSG_GETSMUVERSION 0x02
++#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
++#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
++#define SMU_MSG_LOG_START 0x06
++#define SMU_MSG_LOG_RESET 0x07
++#define SMU_MSG_LOG_DUMP_DATA 0x08
++#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+ /* List of supported CPU ids */
+ #define AMD_CPU_ID_RV 0x15D0
+ #define AMD_CPU_ID_RN 0x1630
+ #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
++#define AMD_CPU_ID_YC 0x14B5
+
+-#define AMD_SMU_FW_VERSION 0x0
+ #define PMC_MSG_DELAY_MIN_US 100
+ #define RESPONSE_REGISTER_LOOP_MAX 200
+
++#define SOC_SUBSYSTEM_IP_MAX 12
++#define DELAY_MIN_US 2000
++#define DELAY_MAX_US 3000
+ enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+ };
+
++struct amd_pmc_bit_map {
++ const char *name;
++ u32 bit_mask;
++};
++
++static const struct amd_pmc_bit_map soc15_ip_blk[] = {
++ {"DISPLAY", BIT(0)},
++ {"CPU", BIT(1)},
++ {"GFX", BIT(2)},
++ {"VDD", BIT(3)},
++ {"ACP", BIT(4)},
++ {"VCN", BIT(5)},
++ {"ISP", BIT(6)},
++ {"NBIO", BIT(7)},
++ {"DF", BIT(8)},
++ {"USB0", BIT(9)},
++ {"USB1", BIT(10)},
++ {"LAPIC", BIT(11)},
++ {}
++};
++
+ struct amd_pmc_dev {
+ void __iomem *regbase;
+- void __iomem *smu_base;
++ void __iomem *smu_virt_addr;
++ void __iomem *fch_virt_addr;
+ u32 base_addr;
+ u32 cpu_id;
++ u32 active_ips;
+ struct device *dev;
++ struct mutex lock; /* generic mutex lock */
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+ #endif /* CONFIG_DEBUG_FS */
+ };
+
+ static struct amd_pmc_dev pmc;
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
+
+ static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
+ {
+@@ -85,18 +130,76 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
+ iowrite32(val, dev->regbase + reg_offset);
+ }
+
++struct smu_metrics {
++ u32 table_version;
++ u32 hint_count;
++ u32 s0i3_cyclecount;
++ u32 timein_s0i2;
++ u64 timeentering_s0i3_lastcapture;
++ u64 timeentering_s0i3_totaltime;
++ u64 timeto_resume_to_os_lastcapture;
++ u64 timeto_resume_to_os_totaltime;
++ u64 timein_s0i3_lastcapture;
++ u64 timein_s0i3_totaltime;
++ u64 timein_swdrips_lastcapture;
++ u64 timein_swdrips_totaltime;
++ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
++ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
++} __packed;
++
+ #ifdef CONFIG_DEBUG_FS
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+ struct amd_pmc_dev *dev = s->private;
+- u32 value;
++ struct smu_metrics table;
++ int idx;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
++
++ seq_puts(s, "\n=== SMU Statistics ===\n");
++ seq_printf(s, "Table Version: %d\n", table.table_version);
++ seq_printf(s, "Hint Count: %d\n", table.hint_count);
++ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
++ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
++ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
++
++ seq_puts(s, "\n=== Active time (in us) ===\n");
++ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
++ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
++ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
++ table.timecondition_notmet_lastcapture[idx]);
++ }
+
+- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
+- seq_printf(s, "SMU FW Info: %x\n", value);
+ return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+
++static int s0ix_stats_show(struct seq_file *s, void *unused)
++{
++ struct amd_pmc_dev *dev = s->private;
++ u64 entry_time, exit_time, residency;
++
++ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
++ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
++
++ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
++ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
++
++ /* It's in 48MHz. We need to convert it */
++ residency = (exit_time - entry_time) / 48;
++
++ seq_puts(s, "=== S0ix statistics ===\n");
++ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
++ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
++ seq_printf(s, "Residency Time: %lld\n", residency);
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
++
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ {
+ debugfs_remove_recursive(dev->dbgfs_dir);
+@@ -107,6 +210,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
+ debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
+ &smu_fw_info_fops);
++ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
++ &s0ix_stats_fops);
+ }
+ #else
+ static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+@@ -118,6 +223,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ }
+ #endif /* CONFIG_DEBUG_FS */
+
++static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
++{
++ u32 phys_addr_low, phys_addr_hi;
++ u64 smu_phys_addr;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ /* Get Active devices list from SMU */
++ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
++
++ /* Get dram address */
++ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
++ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
++ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
++
++ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
++ if (!dev->smu_virt_addr)
++ return -ENOMEM;
++
++ /* Start the logging */
++ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ return 0;
++}
++
+ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ {
+ u32 value;
+@@ -132,15 +263,15 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
+ }
+
+-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
+ {
+ int rc;
+- u8 msg;
+ u32 val;
+
++ mutex_lock(&dev->lock);
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+- val, val > 0, PMC_MSG_DELAY_MIN_US,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+@@ -154,21 +285,74 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
+
+ /* Write message ID to message ID register */
+- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
+- return 0;
++
++ /* Wait until we get a valid response */
++ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
++ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
++ if (rc) {
++ dev_err(dev->dev, "SMU response timed out\n");
++ goto out_unlock;
++ }
++
++ switch (val) {
++ case AMD_PMC_RESULT_OK:
++ if (ret) {
++ /* PMFW may take longer time to return back the data */
++ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
++ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
++ }
++ break;
++ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
++ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
++ rc = -EBUSY;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_UNKNOWN:
++ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
++ rc = -EINVAL;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
++ case AMD_PMC_RESULT_FAILED:
++ default:
++ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
++ rc = -EIO;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&dev->lock);
++ amd_pmc_dump_registers(dev);
++ return rc;
++}
++
++static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
++{
++ switch (dev->cpu_id) {
++ case AMD_CPU_ID_PCO:
++ return MSG_OS_HINT_PCO;
++ case AMD_CPU_ID_RN:
++ case AMD_CPU_ID_YC:
++ return MSG_OS_HINT_RN;
++ }
++ return -EINVAL;
+ }
+
+ static int __maybe_unused amd_pmc_suspend(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 1);
++ /* Reset and Start SMU logging - to monitor the s0i3 stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "suspend failed\n");
+
+- amd_pmc_dump_registers(pdev);
+ return 0;
+ }
+
+@@ -176,12 +360,16 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 0);
++ /* Let SMU know that we are looking for stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "resume failed\n");
+
+- amd_pmc_dump_registers(pdev);
+ return 0;
+ }
+
+@@ -190,6 +378,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
+ };
+
+ static const struct pci_device_id pmc_pci_ids[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
+@@ -201,9 +390,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ {
+ struct amd_pmc_dev *dev = &pmc;
+ struct pci_dev *rdev;
+- u32 base_addr_lo;
+- u32 base_addr_hi;
+- u64 base_addr;
++ u32 base_addr_lo, base_addr_hi;
++ u64 base_addr, fch_phys_addr;
+ int err;
+ u32 val;
+
+@@ -248,16 +436,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
+- if (!dev->smu_base)
+- return -ENOMEM;
+-
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
+ AMD_PMC_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+- amd_pmc_dump_registers(dev);
++ mutex_init(&dev->lock);
++
++ /* Use FCH registers to get the S0ix stats */
++ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
++ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
++ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
++ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
++ if (!dev->fch_virt_addr)
++ return -ENOMEM;
++
++ /* Use SMU to get the s0i3 debug stats */
++ err = amd_pmc_setup_smu_logging(dev);
++ if (err)
++ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
+
+ platform_set_drvdata(pdev, dev);
+ amd_pmc_dbgfs_register(dev);
+@@ -269,11 +466,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
+ struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmc_dbgfs_unregister(dev);
++ mutex_destroy(&dev->lock);
+ return 0;
+ }
+
+ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
+ {"AMDI0005", 0},
++ {"AMDI0006", 0},
++ {"AMDI0007", 0},
+ {"AMD0004", 0},
+ { }
+ };
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index c60745f657e9..dd0dafd21e33 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
+ int acpi_subsys_runtime_suspend(struct device *dev);
+ int acpi_subsys_runtime_resume(struct device *dev);
+ int acpi_dev_pm_attach(struct device *dev, bool power_on);
++bool acpi_storage_d3(struct device *dev);
+ #else
+ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
+ static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
+@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ {
+ return 0;
+ }
++static inline bool acpi_storage_d3(struct device *dev)
++{
++ return false;
++}
+ #endif
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-0018-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch b/sys-kernel_arch-sources-g14_files-0036-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch
index a6797595b45a..a6797595b45a 100644
--- a/sys-kernel_arch-sources-g14_files-0018-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch
+++ b/sys-kernel_arch-sources-g14_files-0036-PCI-quirks-Quirk-PCI-d3hot-delay-for-AMD-xhci.patch
diff --git a/sys-kernel_arch-sources-g14_files-0037-ACPI-PM-Only-mark-EC-GPE-for-wakeup-on-Intel-systems.patch b/sys-kernel_arch-sources-g14_files-0037-ACPI-PM-Only-mark-EC-GPE-for-wakeup-on-Intel-systems.patch
new file mode 100644
index 000000000000..c4081389ae10
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0037-ACPI-PM-Only-mark-EC-GPE-for-wakeup-on-Intel-systems.patch
@@ -0,0 +1,171 @@
+From patchwork Wed Jun 30 19:46:06 2021
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Mario Limonciello <Mario.Limonciello@amd.com>
+X-Patchwork-Id: 12352783
+From: Mario Limonciello <mario.limonciello@amd.com>
+To: "Rafael J . Wysocki" <rjw@rjwysocki.net>,
+ Len Brown <lenb@kernel.org>, linux-acpi@vger.kernel.org
+Cc: Julian Sikorski <belegdol@gmail.com>,
+ Mario Limonciello <mario.limonciello@amd.com>,
+ Alex Deucher <alexander.deucher@amd.com>
+Subject: [PATCH] ACPI: PM: Only mark EC GPE for wakeup on Intel systems
+Date: Wed, 30 Jun 2021 14:46:06 -0500
+Message-Id: <20210630194606.530-1-mario.limonciello@amd.com>
+X-Mailer: git-send-email 2.25.1
+
+When using s2idle on a variety of AMD notebook systems, users are
+experiencing spurious events indicating that the EC or SMU is in the
+wrong state, leading to difficulty waking up or higher-than-expected
+power consumption.
+
+These events only occur when the EC GPE is inadvertently set as a wakeup
+source. Originally the EC GPE was only set as a wakeup source when using
+the intel-vbtn or intel-hid drivers in commit 10a08fd65ec1 ("ACPI: PM:
+Set up EC GPE for system wakeup from drivers that need it") but during
+testing a reporter discovered that this was not enough for their ASUS
+Zenbook UX430UNR/i7-8550U to wake up by lid event or keypress.
+Marking the EC GPE for wakeup universally resolved this for that
+reporter in commit b90ff3554aa3 ("ACPI: PM: s2idle: Always set up EC GPE
+for system wakeup").
+
+However, this behavior has led to a number of problems:
+
+* On both the Lenovo T14 and P14s the keyboard wakeup doesn't work, and
+sometimes the power button event doesn't work.
+* On the HP 635 G7 detaching or attaching AC during suspend will cause
+the system not to wake up.
+* On the Asus Vivobook detaching AC during suspend causes resume problems.
+* On the Lenovo 14ARE05 detaching AC during suspend causes resume problems.
+* On the HP ENVY x360 detaching AC during suspend causes resume problems.
+
+As there may be other Intel systems besides the ASUS Zenbook UX430UNR/i7-8550U
+that don't use intel-vbtn or intel-hid, avoid these problems by universally
+marking the EC GPE as a wakeup source only on non-AMD systems.
+
+Link: https://patchwork.kernel.org/project/linux-pm/cover/5997740.FPbUVk04hV@kreacher/#22825489
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1629
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/acpi/x86/s2idle.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 816bf2c34b7a..1c507804fb10 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -417,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+- * EC GPE to be enabled while suspended for certain wakeup devices to
+- * work, so mark it as wakeup-capable.
++ * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
++ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
++ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
++ *
++ * Only enable on !AMD as enabling this universally causes problems for a number
++ * of AMD based systems.
+ */
+- acpi_ec_mark_gpe_for_wake();
++ if (!acpi_s2idle_vendor_amd())
++ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+ }
diff --git a/sys-kernel_arch-sources-g14_files-0037-platform-x86-amd-pmc-Use-return-code-on-suspend.patch b/sys-kernel_arch-sources-g14_files-0037-platform-x86-amd-pmc-Use-return-code-on-suspend.patch
new file mode 100644
index 000000000000..b35a5323f78f
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0037-platform-x86-amd-pmc-Use-return-code-on-suspend.patch
@@ -0,0 +1,135 @@
+From patchwork Wed Jul 7 14:16:47 2021
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Mario Limonciello <Mario.Limonciello@amd.com>
+X-Patchwork-Id: 12362721
+From: Mario Limonciello <mario.limonciello@amd.com>
+To: Hans de Goede <hdegoede@redhat.com>,
+ Mark Gross <mgross@linux.intel.com>,
+ platform-driver-x86@vger.kernel.org (open list:X86 PLATFORM DRIVERS)
+Cc: Mario Limonciello <mario.limonciello@amd.com>,
+ Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Subject: [PATCH] platform/x86: amd-pmc: Use return code on suspend
+Date: Wed, 7 Jul 2021 09:16:47 -0500
+Message-Id: <20210707141647.8871-1-mario.limonciello@amd.com>
+X-Mailer: git-send-email 2.25.1
+X-ClientProxiedBy: SN4PR0501CA0042.namprd05.prod.outlook.com
+ (2603:10b6:803:41::19) To SA0PR12MB4510.namprd12.prod.outlook.com
+ (2603:10b6:806:94::8)
+MIME-Version: 1.0
+X-MS-Exchange-MessageSentRepresentingType: 1
+Received: from AUS-LX-MLIMONCI.amd.com (165.204.77.1) by
+ SN4PR0501CA0042.namprd05.prod.outlook.com (2603:10b6:803:41::19) with
+ Microsoft SMTP Server (version=TLS1_2,
+ cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.8 via Frontend
+ Transport; Wed, 7 Jul 2021 14:16:59 +0000
+X-MS-PublicTrafficType: Email
+X-MS-Office365-Filtering-Correlation-Id: a19b1585-ba5f-4bec-bce4-08d94151e29d
+X-MS-TrafficTypeDiagnostic: SN6PR12MB2767:
+X-MS-Exchange-Transport-Forked: True
+X-Microsoft-Antispam-PRVS:
+ <SN6PR12MB2767D71FF0391027D193AADCE21A9@SN6PR12MB2767.namprd12.prod.outlook.com>
+X-MS-Oob-TLC-OOBClassifiers: OLM:3044;
+X-MS-Exchange-SenderADCheck: 1
+X-Microsoft-Antispam: BCL:0;
+X-Microsoft-Antispam-Message-Info:
+ 6AhBzvacjkr9Cck62JDESL5RE3LYrtbTWbODIt7/jXGKkb+Wmr1c+sXbUB0R+RJnwf6gmW/qwBOpPRyU2r7PZ5bpd99awOkBLJpG99BL71brvzsKHQXi4Af+Ftom1dtpZItZtBK3XkhswauUupbSSM7JFtpL7hB6GzE5Nttb7Fjk5Tj+ixR5hKeZiitlXrW5gLZww8jCR6bPP+OUzHesAqBm9yjOqnxSBFIx1019kPJo3EAc4ilVX6kj/9b2ICU1AAtgI5PJ05xIURxODGWQuRXtwLKP/gfZKSrecCGULGZdksIyUJfjfBFLiqKUGH8lrzNHIwMwDptgK1vdhvZtahkeRXOKZi5PqohKdLi61fHOKDR2KKQQk1BYS+Zc43AJpuQSTg7n8BexHUkW4pkYuFlgOXwk5IZIwYYLp0OXyLR7pQ0BskHhZ2+r5vSG4VpYGVdl8AxXMfSmroAjQqSIICIrNB/IQ0VT9x1YylzX1y43o+G1SaYshv/d1hhos8evRnHxRwWnIzOqw074GnS6RETf9EguaM+7x2SXR/8ePRoOgj9tiXRk4WYSIYCXZCS72sHGHELYBIiSUFXCn2sC9weSVW5apespetuZzyW5kCtdF8ljiS9N1ycwejjZF+VSVKvQ87cl63k7w9WSLaAV+B67PeZ/85Y6jN2KO+Y/D1p9SfVius/QKiFX9JtsGl1VytgbXjVLIvaHeTZ64moeag==
+X-Forefront-Antispam-Report:
+ CIP:255.255.255.255;CTRY:;LANG:en;SCL:1;SRV:;IPV:NLI;SFV:NSPM;H:SA0PR12MB4510.namprd12.prod.outlook.com;PTR:;CAT:NONE;SFS:(4636009)(136003)(366004)(396003)(346002)(376002)(39860400002)(15650500001)(478600001)(8676002)(5660300002)(7696005)(44832011)(4744005)(36756003)(8936002)(956004)(66476007)(66946007)(110136005)(2906002)(54906003)(83380400001)(66556008)(316002)(38350700002)(1076003)(186003)(2616005)(86362001)(26005)(38100700002)(52116002)(4326008)(6666004)(6486002);DIR:OUT;SFP:1101;
+X-MS-Exchange-AntiSpam-MessageData-ChunkCount: 1
+X-MS-Exchange-AntiSpam-MessageData-0:
+ GRYYR8rjMIz7s6ETrmtKfGQRdFU5uBK2V3x+PvQn1DuKHLsuubpERiwqkqY5eq5hG3QPZoYGj5kni8d0vlsok1rw4ZdQ7hMKlZILbx2lwojEwzaYlKIq8R4etvy1N6S7r/4ejmV16+uezsegm0qQWbSg724GtrGXe2ZZInIiHSjT1+DCimO4eE1WbRoeR8jtsw7Q+9XZTo4FSjdTlFu7UWOwk4Nlvj8HUFtVkfJJNGQHLY+HlVEwchRSaIGShF8235o/Wwt1abwmabOEWhHIBKXJlVEFIXsJJBm5wQjaxQhCEExsHu57ttDKUs4lT7b5d7YZHDYHCCIW3i2V0vKtqHkysPbIKIzt7Tk/8r4UEsVx7iIpFl2oFh41JwhafzDiZhWT9Pzxp4GxfbCCIGl39uBoyz+j6uV+Vp+nEQqbpSWOVMNBk/sE0HNsx6kl3t+ojvSCb1qN4/TrQFVDT7c4HRDTymo81n7eKVwUjy+heQmrq9KKrDPwe7pFyJs7NrqQWuJYjGtfdsEHVGX8KmiSMcyUjchK+ECRrD5E0IPpv9csjj5My9PjKxBnXc7nUoQ2oB+gAkZCQTUJLGjPgRXUTpM9K88qF08RTP7t6hzoH2oOWtDkasYBEgK8Vh7OFrDwJo3zdop11IYWXuU1EhOTN4obY0hYOck0mdI46VTZeji13cX7X4LuvvYBRyx/XpmdhRcvS6niUjp+iaFyPQlj39isqz9JN8j1FfISy1y8v29/Dux46l879EC+OTMoBmb+GABwg1y/LzzaUcLoE3Hfy3fjtwoVyrQT4jahLesqfCD0SgNEX6lReHBdldxi2zqwUpqFTyQ4CRuDB3g60tljlmgbAGa38B8xo7uL9xiHzXks1792DRzZ7SC6kO/YQAuPw3idjws2HmXyQdqsLCdg1eiLwaCC+YhJccehsLfoWpAU1HcmohGB+EIExmcChun2OrmCzKNGk/VdXWcAn35F/fXUUMRovtFZ8D0r9Cp7nDCTUrhzqRW3cILRlVt0nwaIVYWmiS2DBGl2WYn6TT7S9fBIQll2ApKYs4xMw4QIJX/YS3YCUpebhqnmqVwsn3FNi+IPrMsV9Jm3Snr5UpTBixhLhudYP1rzb9DX/xSaxxskz1TC2VKXHqg7dkpnkDOfsSkZK6Caec0GTUAoBVoBKRAem/zwMYd0u22CfqpI7U5lnp8wgpkWAH63TH9Vxg1f3SLW9jKDrLRkLoF64MEJKRlhr7z03D5EA4Apl+Mrn1QWIM2ywAomaDfKICj8iG+gWngAsTk+YI0q0l3GsoH5OKFiqE3/GB+fu+mxMEcft39cb+Z1LhXZDH9/nPX/636R
+X-OriginatorOrg: amd.com
+X-MS-Exchange-CrossTenant-Network-Message-Id:
+ a19b1585-ba5f-4bec-bce4-08d94151e29d
+X-MS-Exchange-CrossTenant-AuthSource: SA0PR12MB4510.namprd12.prod.outlook.com
+X-MS-Exchange-CrossTenant-AuthAs: Internal
+X-MS-Exchange-CrossTenant-OriginalArrivalTime: 07 Jul 2021 14:17:00.0905
+ (UTC)
+X-MS-Exchange-CrossTenant-FromEntityHeader: Hosted
+X-MS-Exchange-CrossTenant-Id: 3dd8961f-e488-4e60-8e11-a82d994e183d
+X-MS-Exchange-CrossTenant-MailboxType: HOSTED
+X-MS-Exchange-CrossTenant-UserPrincipalName:
+ Ca0ndGjyAnWX69uDR8h2RlVWpnGzyjbhvqx7Gpv45yyGfXDhs9seNOPoHUS+QcO3WV0CMN7tZBe+3BooTd9Xvg==
+X-MS-Exchange-Transport-CrossTenantHeadersStamped: SN6PR12MB2767
+Precedence: bulk
+List-ID: <platform-driver-x86.vger.kernel.org>
+X-Mailing-List: platform-driver-x86@vger.kernel.org
+
+Right now the driver will still return success even if the OS_HINT
+command failed to send to the SMU. In the rare event of a failure,
+the suspend should really be aborted here so that relevant logs
+can be captured.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+---
+ drivers/platform/x86/amd-pmc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index d2f9a62e1166..680f94c7e075 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -353,7 +353,7 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
+ if (rc)
+ dev_err(pdev->dev, "suspend failed\n");
+
+- return 0;
++ return rc;
+ }
+
+ static int __maybe_unused amd_pmc_resume(struct device *dev)
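
Note: this one-line change matters because the PM core treats a non-zero return
from a suspend callback as a request to abort the whole transition and reports
which callback failed. A minimal sketch of that pattern (not taken from amd-pmc;
the error value is made up for illustration):

#include <linux/device.h>
#include <linux/pm.h>

/* Sketch: a suspend callback that propagates a firmware handshake error. */
static int __maybe_unused example_suspend(struct device *dev)
{
	int rc = -EIO;		/* pretend the OS_HINT-style handshake failed */

	if (rc)
		dev_err(dev, "suspend handshake failed: %d\n", rc);

	return rc;		/* non-zero makes the PM core abort the suspend */
}

static int __maybe_unused example_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
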
diff --git a/sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch b/sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch
new file mode 100644
index 000000000000..9ee246ce3398
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0039-asus-wmi-Add-panel-overdrive-functionality.patch
@@ -0,0 +1,166 @@
+From 4fb0353bff3b881de7709b114d4607a0988c3420 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sat, 17 Jul 2021 20:13:21 +1200
+Subject: [PATCH 101/103] asus-wmi: Add panel overdrive functionality
+
+Some ASUS ROG laptops have the ability to drive the display panel
+at a higher rate to eliminate or reduce ghosting.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 91 ++++++++++++++++++++++
+ include/linux/platform_data/x86/asus-wmi.h | 1 +
+ 2 files changed, 92 insertions(+)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index ebaeb7bb80f5..cd881443bc2f 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -216,6 +216,9 @@ struct asus_wmi {
+ // The RSOC controls the maximum charging percentage.
+ bool battery_rsoc_available;
+
++ bool panel_overdrive_available;
++ bool panel_overdrive;
++
+ struct hotplug_slot hotplug_slot;
+ struct mutex hotplug_lock;
+ struct mutex wmi_lock;
+@@ -1221,6 +1224,86 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
+ return result;
+ }
+
++/* Panel Overdrive ************************************************************/
++static int panel_od_check_present(struct asus_wmi *asus)
++{
++ u32 result;
++ int err;
++
++ asus->panel_overdrive_available = false;
++
++ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_PANEL_OD, &result);
++ if (err) {
++ if (err == -ENODEV)
++ return 0;
++ return err;
++ }
++
++ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
++ asus->panel_overdrive_available = true;
++ asus->panel_overdrive = result & ASUS_WMI_DSTS_STATUS_BIT;
++ }
++
++ return 0;
++}
++
++static int panel_od_write(struct asus_wmi *asus)
++{
++ int err;
++ u8 value;
++ u32 retval;
++
++ value = asus->panel_overdrive;
++
++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, value, &retval);
++
++ if (err) {
++ pr_warn("Failed to set panel overdrive: %d\n", err);
++ return err;
++ }
++
++ if (retval > 1 || retval < 0) {
++ pr_warn("Failed to set panel overdrive (retval): 0x%x\n", retval);
++ return -EIO;
++ }
++
++ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
++
++ return 0;
++}
++
++static ssize_t panel_od_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++ bool mode = asus->panel_overdrive;
++
++ return sysfs_emit(buf, "%d\n", mode);
++}
++
++static ssize_t panel_od_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result;
++ bool overdrive;
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ result = kstrtobool(buf, &overdrive);
++ if (result == -EINVAL)
++ return result;
++
++ asus->panel_overdrive = overdrive;
++ result = panel_od_write(asus);
++
++ if (result != 0)
++ return result;
++
++ return count;
++}
++
++static DEVICE_ATTR_RW(panel_od);
++
+ /* Quirks *********************************************************************/
+
+ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+@@ -2332,6 +2415,7 @@ static struct attribute *platform_attributes[] = {
+ &dev_attr_als_enable.attr,
+ &dev_attr_fan_boost_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
++ &dev_attr_panel_od.attr,
+ NULL
+ };
+
+@@ -2357,6 +2441,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+ ok = asus->throttle_thermal_policy_available;
++ else if (attr == &dev_attr_panel_od.attr)
++ ok = asus->panel_overdrive_available;
+
+ if (devid != -1)
+ ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
+@@ -2622,6 +2708,10 @@ static int asus_wmi_add(struct platform_device *pdev)
+ else
+ throttle_thermal_policy_set_default(asus);
+
++ err = panel_od_check_present(asus);
++ if (err)
++ goto fail_panel_od;
++
+ err = asus_wmi_sysfs_init(asus->platform_device);
+ if (err)
+ goto fail_sysfs;
+@@ -2709,6 +2799,7 @@ static int asus_wmi_add(struct platform_device *pdev)
+ fail_throttle_thermal_policy:
+ fail_fan_boost_mode:
+ fail_platform:
++fail_panel_od:
+ kfree(asus);
+ return err;
+ }
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 2f274cf52805..428aea701c7b 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -61,6 +61,7 @@
+ #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+
+ /* Misc */
++#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
+ #define ASUS_WMI_DEVID_CAMERA 0x00060013
+ #define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+
+--
+2.32.0
+
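
Note: once this patch is applied, the new attribute can be exercised from
userspace with a small sketch like the one below. The
/sys/devices/platform/asus-nb-wmi path is the usual location for asus-wmi
attributes on these laptops, but it is an assumption here, not something the
patch guarantees. Writes go through panel_od_store()/kstrtobool(), so the usual
boolean spellings ("0"/"1", "on"/"off", "y"/"n") are accepted; run as root.

#include <stdio.h>

#define PANEL_OD "/sys/devices/platform/asus-nb-wmi/panel_od"	/* assumed path */

int main(int argc, char **argv)
{
	FILE *f = fopen(PANEL_OD, "r+");
	char state[8];

	if (!f) {
		perror("fopen " PANEL_OD);
		return 1;
	}
	if (argc > 1)				/* e.g. ./panel_od 1 */
		fprintf(f, "%s\n", argv[1]);	/* "0" or "1" */
	else if (fgets(state, sizeof(state), f))
		printf("panel_od = %s", state);	/* current state */
	fclose(f);
	return 0;
}
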
diff --git a/sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch b/sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch
new file mode 100644
index 000000000000..415590fd6094
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0040-asus-wmi-Add-dgpu-disable-method.patch
@@ -0,0 +1,182 @@
+From de08016c5ef567c853dcf2ff8d9c9b352af253b6 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sat, 17 Jul 2021 20:13:22 +1200
+Subject: [PATCH 102/103] asus-wmi: Add dgpu disable method
+
+In Windows, the ASUS Armoury Crate program can enable or disable the
+dGPU via a WMI call. This functions much the same as various Linux
+methods in software where the dGPU is removed from the device tree.
+
+However, the WMI call saves the state of the dGPU (enabled or not) and
+this then changes the dGPU visibility in Linux with no way for
+Linux users to re-enable it. We expose the WMI method so users can
+see and change the dGPU ACPI state.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 99 ++++++++++++++++++++++
+ include/linux/platform_data/x86/asus-wmi.h | 3 +
+ 2 files changed, 102 insertions(+)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index cd881443bc2f..02762a60d27a 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -210,6 +210,9 @@ struct asus_wmi {
+ u8 fan_boost_mode_mask;
+ u8 fan_boost_mode;
+
++ bool dgpu_disable_available;
++ bool dgpu_disable;
++
+ bool throttle_thermal_policy_available;
+ u8 throttle_thermal_policy_mode;
+
+@@ -427,6 +430,94 @@ static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
+ }
+ }
+
++/* dGPU ********************************************************************/
++static int dgpu_disable_check_present(struct asus_wmi *asus)
++{
++ u32 result;
++ int err;
++
++ asus->dgpu_disable_available = false;
++
++ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_DGPU, &result);
++ if (err) {
++ if (err == -ENODEV)
++ return 0;
++ return err;
++ }
++
++ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
++ asus->dgpu_disable_available = true;
++ asus->dgpu_disable = result & ASUS_WMI_DSTS_STATUS_BIT;
++ }
++
++ return 0;
++}
++
++static int dgpu_disable_write(struct asus_wmi *asus)
++{
++ int err;
++ u8 value;
++ u32 retval;
++
++ value = asus->dgpu_disable;
++
++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, value, &retval);
++
++ if (err) {
++ pr_warn("Failed to set dgpu disable: %d\n", err);
++ return err;
++ }
++
++ if (retval > 1 || retval < 0) {
++ pr_warn("Failed to set dgpu disable (retval): 0x%x\n", retval);
++ return -EIO;
++ }
++
++ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
++
++ return 0;
++}
++
++static ssize_t dgpu_disable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++ bool mode = asus->dgpu_disable;
++
++ return sysfs_emit(buf, "%d\n", mode);
++}
++
++static ssize_t dgpu_disable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result;
++ bool disable;
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ result = kstrtobool(buf, &disable);
++ if (result == -EINVAL)
++ return result;
++
++ asus->dgpu_disable = disable;
++ /*
++ * The ACPI call used does not save the mode unless the call is run twice.
++ * Once to disable, then once to check status and save - this is two code
++ * paths in the method in the ACPI dumps.
++ */
++ result = dgpu_disable_write(asus);
++ if (result != 0)
++ return result;
++
++ result = dgpu_disable_write(asus);
++ if (result != 0)
++ return result;
++
++ return count;
++}
++
++static DEVICE_ATTR_RW(dgpu_disable);
++
+ /* Battery ********************************************************************/
+
+ /* The battery maximum charging percentage */
+@@ -2411,6 +2502,7 @@ static struct attribute *platform_attributes[] = {
+ &dev_attr_camera.attr,
+ &dev_attr_cardr.attr,
+ &dev_attr_touchpad.attr,
++ &dev_attr_dgpu_disable.attr,
+ &dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
+ &dev_attr_fan_boost_mode.attr,
+@@ -2437,6 +2529,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
++ else if (attr == &dev_attr_dgpu_disable.attr)
++ ok = asus->dgpu_disable_available;
+ else if (attr == &dev_attr_fan_boost_mode.attr)
+ ok = asus->fan_boost_mode_available;
+ else if (attr == &dev_attr_throttle_thermal_policy.attr)
+@@ -2698,6 +2792,10 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_platform;
+
++ err = dgpu_disable_check_present(asus);
++ if (err)
++ goto fail_dgpu_disable;
++
+ err = fan_boost_mode_check_present(asus);
+ if (err)
+ goto fail_fan_boost_mode;
+@@ -2798,6 +2896,7 @@ static int asus_wmi_add(struct platform_device *pdev)
+ fail_sysfs:
+ fail_throttle_thermal_policy:
+ fail_fan_boost_mode:
++fail_dgpu_disable:
+ fail_platform:
+ fail_panel_od:
+ kfree(asus);
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 428aea701c7b..a528f9d0e4b7 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -90,6 +90,9 @@
+ /* Keyboard dock */
+ #define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+
++/* dgpu on/off */
++#define ASUS_WMI_DEVID_DGPU 0x00090020
++
+ /* DSTS masks */
+ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
+ #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
+--
+2.32.0
+
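
Note: dgpu_disable, like panel_od above and egpu_enable in the next patch, is
wired up through DEVICE_ATTR_RW(). As a rough sketch of what that macro produces
(assuming the standard sysfs helpers), the declaration is equivalent to an
explicit struct device_attribute that ties the _show/_store callbacks to a 0644
sysfs file:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t dgpu_disable_show(struct device *dev,
				 struct device_attribute *attr, char *buf);
static ssize_t dgpu_disable_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);

/* Roughly what DEVICE_ATTR_RW(dgpu_disable) expands to: */
static struct device_attribute dev_attr_dgpu_disable =
	__ATTR(dgpu_disable, 0644, dgpu_disable_show, dgpu_disable_store);
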
diff --git a/sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch b/sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch
new file mode 100644
index 000000000000..13804605dc37
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0041-asus-wmi-Add-egpu-enable-method.patch
@@ -0,0 +1,169 @@
+From 49872973ce89a5778cd57094eb2eebba530b1244 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sat, 17 Jul 2021 20:13:23 +1200
+Subject: [PATCH 103/103] asus-wmi: Add egpu enable method
+
+The X13 Flow laptops can utilise an external GPU. This requires
+toggling an ACPI method which will first disable the internal
+dGPU, and then enable the eGPU.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 91 ++++++++++++++++++++++
+ include/linux/platform_data/x86/asus-wmi.h | 3 +
+ 2 files changed, 94 insertions(+)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 02762a60d27a..ee5d8656641e 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -210,6 +210,9 @@ struct asus_wmi {
+ u8 fan_boost_mode_mask;
+ u8 fan_boost_mode;
+
++ bool egpu_enable_available; // 0 = enable
++ bool egpu_enable;
++
+ bool dgpu_disable_available;
+ bool dgpu_disable;
+
+@@ -430,6 +433,86 @@ static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
+ }
+ }
+
++/* eGPU ********************************************************************/
++static int egpu_enable_check_present(struct asus_wmi *asus)
++{
++ u32 result;
++ int err;
++
++ asus->egpu_enable_available = false;
++
++ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_EGPU, &result);
++ if (err) {
++ if (err == -ENODEV)
++ return 0;
++ return err;
++ }
++
++ if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
++ asus->egpu_enable_available = true;
++ asus->egpu_enable = result & ASUS_WMI_DSTS_STATUS_BIT;
++ }
++
++ return 0;
++}
++
++static int egpu_enable_write(struct asus_wmi *asus)
++{
++ int err;
++ u8 value;
++ u32 retval;
++
++ value = asus->egpu_enable;
++
++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, value, &retval);
++
++ if (err) {
++ pr_warn("Failed to set egpu disable: %d\n", err);
++ return err;
++ }
++
++ if (retval > 1 || retval < 0) {
++ pr_warn("Failed to set egpu disable (retval): 0x%x\n", retval);
++ return -EIO;
++ }
++
++ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
++
++ return 0;
++}
++
++static ssize_t egpu_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++ bool mode = asus->egpu_enable;
++
++ return sysfs_emit(buf, "%d\n", mode);
++}
++
++static ssize_t egpu_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result;
++ bool disable;
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ result = kstrtobool(buf, &disable);
++ if (result == -EINVAL)
++ return result;
++
++ asus->egpu_enable = disable;
++
++ result = egpu_enable_write(asus);
++ if (result != 0)
++ return result;
++
++ return count;
++}
++
++static DEVICE_ATTR_RW(egpu_enable);
++
+ /* dGPU ********************************************************************/
+ static int dgpu_disable_check_present(struct asus_wmi *asus)
+ {
+@@ -2502,6 +2585,7 @@ static struct attribute *platform_attributes[] = {
+ &dev_attr_camera.attr,
+ &dev_attr_cardr.attr,
+ &dev_attr_touchpad.attr,
++ &dev_attr_egpu_enable.attr,
+ &dev_attr_dgpu_disable.attr,
+ &dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
+@@ -2529,6 +2613,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
++ else if (attr == &dev_attr_egpu_enable.attr)
++ ok = asus->egpu_enable_available;
+ else if (attr == &dev_attr_dgpu_disable.attr)
+ ok = asus->dgpu_disable_available;
+ else if (attr == &dev_attr_fan_boost_mode.attr)
+@@ -2792,6 +2878,10 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_platform;
+
++ err = egpu_enable_check_present(asus);
++ if (err)
++ goto fail_egpu_enable;
++
+ err = dgpu_disable_check_present(asus);
+ if (err)
+ goto fail_dgpu_disable;
+@@ -2896,6 +2986,7 @@ static int asus_wmi_add(struct platform_device *pdev)
+ fail_sysfs:
+ fail_throttle_thermal_policy:
+ fail_fan_boost_mode:
++fail_egpu_enable:
+ fail_dgpu_disable:
+ fail_platform:
+ fail_panel_od:
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index a528f9d0e4b7..17dc5cb6f3f2 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -90,6 +90,9 @@
+ /* Keyboard dock */
+ #define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+
++/* dgpu on/off */
++#define ASUS_WMI_DEVID_EGPU 0x00090019
++
+ /* dgpu on/off */
+ #define ASUS_WMI_DEVID_DGPU 0x00090020
+
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch b/sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch
new file mode 100644
index 000000000000..65bee17ead27
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0042-HID-asus-Remove-check-for-same-LED-brightness-on-set.patch
@@ -0,0 +1,30 @@
+From ed4bd51509d971f5cc0d1ddffa30acbe4c65969c Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sun, 6 Jun 2021 11:03:33 +1200
+Subject: [PATCH] HID: asus: Remove check for same LED brightness on set
+
+Remove the early return on LED brightness set. This is required
+because many ASUS ROG keyboards will default to max brightness on
+laptop resume if the LEDs were set to off before sleep.
+
+Signed-off-by: Luke D Jones <luke@ljones.dev>
+---
+ drivers/hid/hid-asus.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 60606c11bdaf..19da81d2a910 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -486,9 +486,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
+ {
+ struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
+ cdev);
+- if (led->brightness == brightness)
+- return;
+-
+ led->brightness = brightness;
+ schedule_work(&led->work);
+ }
+--
+2.31.1
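
Note: with the early return gone, userspace can always re-apply the level it
wants after resume, including "off". A hedged sketch; the "asus::kbd_backlight"
LED name is the common one for these keyboards but is an assumption here:

#include <stdio.h>

int main(void)
{
	/* Assumed LED name; check /sys/class/leds/ on the target machine. */
	const char *path = "/sys/class/leds/asus::kbd_backlight/brightness";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0\n");	/* 0 = off; the maximum is usually 3 */
	fclose(f);
	return 0;
}
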
diff --git a/sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch b/sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch
new file mode 100644
index 000000000000..3152a2006190
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-0043-ALSA-hda-realtek-Fix-speakers-not-working-on-Asus-Fl.patch
@@ -0,0 +1,25 @@
+From d3fcb93a34ad9065cd7d4adc2023ec6ead6eb1ee Mon Sep 17 00:00:00 2001
+From: Bobi Mihalca <bobby@mihalca.eu>
+Date: Tue, 27 Apr 2021 01:14:24 +0300
+Subject: [PATCH] ALSA: hda/realtek: Fix speakers not working on Asus Flow x13
+
+Signed-off-by: Bobi Mihalca <bobby@mihalca.eu>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cc13a68197f3..7a18b19c5062 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8336,6 +8336,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+--
+2.32.0
+
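
Note: SND_PCI_QUIRK() entries match on the HDA controller's PCI subsystem
vendor/device IDs; 0x1043 is ASUSTeK and 0x1662 the GV301QH (Flow X13) board.
A hedged userspace sketch to check whether the new entry would match on a given
machine; the card0 path is an assumption:

#include <stdio.h>

static unsigned int read_hex(const char *path)
{
	unsigned int val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%x", &val) != 1)	/* files contain e.g. "0x1043" */
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	unsigned int sv = read_hex("/sys/class/sound/card0/device/subsystem_vendor");
	unsigned int sd = read_hex("/sys/class/sound/card0/device/subsystem_device");

	printf("subsystem %04x:%04x%s\n", sv, sd,
	       (sv == 0x1043 && sd == 0x1662) ? " -> GV301QH quirk applies" : "");
	return 0;
}
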
diff --git a/sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch b/sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch
new file mode 100644
index 000000000000..65e85b859729
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8001-x86-amd_nb-Add-AMD-family-19h-model-50h-PCI-ids.patch
@@ -0,0 +1,59 @@
+From 2ade8fc65076095460e3ea1ca65a8f619d7d9a3a Mon Sep 17 00:00:00 2001
+From: David Bartley <andareed@gmail.com>
+Date: Thu, 20 May 2021 10:41:30 -0700
+Subject: x86/amd_nb: Add AMD family 19h model 50h PCI ids
+
+This is required to support Zen3 APUs in k10temp.
+
+Signed-off-by: David Bartley <andareed@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Wei Huang <wei.huang2@amd.com>
+Link: https://lkml.kernel.org/r/20210520174130.94954-1-andareed@gmail.com
+---
+ arch/x86/kernel/amd_nb.c | 3 +++
+ include/linux/pci_ids.h | 1 +
+ 2 files changed, 4 insertions(+)
+
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 09083094eb575..23dda362dc0f3 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -25,6 +25,7 @@
+ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
+ #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+ #define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
++#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
+
+ /* Protect the PCI config register pairs used for SMN and DF indirect access. */
+ static DEFINE_MUTEX(smn_mutex);
+@@ -57,6 +58,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ {}
+ };
+
+@@ -72,6 +74,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ {}
+ };
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 4c3fa5293d763..5356ccf1c275b 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -555,6 +555,7 @@
+ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
+ #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+ #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
++#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE 0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+--
+cgit 1.2.3-1.el7
+
diff --git a/sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch b/sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch
new file mode 100644
index 000000000000..9f117f8f4365
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8002-hwmon-k10temp-support-Zen3-APUs.patch
@@ -0,0 +1,37 @@
+From 02c6edd4b1a07f24b187a550d413a07260eb696d Mon Sep 17 00:00:00 2001
+From: David Bartley <andareed@gmail.com>
+Date: Sun, 16 May 2021 23:41:31 -0700
+Subject: hwmon: (k10temp) support Zen3 APUs
+
+Add support for Zen3 Ryzen APU.
+
+Signed-off-by: David Bartley <andareed@gmail.com>
+Link: https://lore.kernel.org/r/20210517064131.4369-1-andareed@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+---
+ drivers/hwmon/k10temp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 5ff3669c2b608..fe3d92152e359 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -450,6 +450,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ switch (boot_cpu_data.x86_model) {
+ case 0x0 ... 0x1: /* Zen3 SP3/TR */
+ case 0x21: /* Zen3 Ryzen Desktop */
++ case 0x50: /* Zen3 Ryzen APU */
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
+@@ -491,6 +492,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ {}
+ };
+--
+cgit 1.2.3-1.el7
+
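
Note: once k10temp binds to the new device ID, the APU temperature is exposed
through the standard hwmon sysfs interface. A hedged sketch; hwmon0 is assumed
and the right instance should really be found by matching its "name" file
against "k10temp":

#include <stdio.h>

int main(void)
{
	long millideg;
	/* Assumed instance; match each hwmon's "name" file against "k10temp". */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) == 1)
		printf("Tctl: %.1f degC\n", millideg / 1000.0);
	fclose(f);
	return 0;
}
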
diff --git a/sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch b/sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch
new file mode 100644
index 000000000000..a2c1f335cde0
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8011-Bluetooth-btusb-Add-support-for-Lite-On-Mediatek-Chi.patch
@@ -0,0 +1,74 @@
+From d2ee0adeb66389140bc7c84ba8ebc109420b74f3 Mon Sep 17 00:00:00 2001
+From: "mark-yw.chen" <mark-yw.chen@mediatek.com>
+Date: Mon, 12 Apr 2021 23:06:27 +0800
+Subject: [PATCH 8011/8014] Bluetooth: btusb: Add support for Lite-On Mediatek
+ Chip
+
+Add support for Lite-On Mediatek Chip (MT7921)
+Lite On VID = 04CA.
+
+* /sys/kernel/debug/usb/devices
+T: Bus=01 Lev=03 Prnt=04 Port=01 Cnt=02 Dev#= 8 Spd=480 MxCh= 0
+D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=04ca ProdID=3802 Rev= 1.00
+S: Manufacturer=MediaTek Inc.
+S: Product=Wireless_Device
+S: SerialNumber=000000000
+C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA
+A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=125us
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none)
+E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us
+E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us
+I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none)
+E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us
+E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us
+
+Signed-off-by: mark-yw.chen <mark-yw.chen@mediatek.com>
+---
+ drivers/bluetooth/btusb.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 99fd88f7653d..cb18d63a948d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -408,6 +408,11 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Additional MediaTek MT7615E Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
+
++ /* Additional MediaTek MT7921 Bluetooth devices */
++ { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++
+ /* Additional Realtek 8723AE Bluetooth devices */
+ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch b/sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch
new file mode 100644
index 000000000000..5e540d38cf47
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8012-mt76-mt7921-continue-to-probe-driver-when-fw-already.patch
@@ -0,0 +1,41 @@
+From 8a236b7058af9c559a5c4b8443054a6d5908afaf Mon Sep 17 00:00:00 2001
+From: Aaron Ma <aaron.ma@canonical.com>
+Date: Thu, 8 Jul 2021 21:17:10 +0800
+Subject: [PATCH 8012/8014] mt76: mt7921: continue to probe driver when fw
+ already downloaded
+
+When the system is rebooted without a power cycle, the firmware is already
+downloaded, and returning -EIO breaks the driver probe with the error:
+mt7921e: probe of 0000:03:00.0 failed with error -5
+
+Skip firmware download and continue to probe.
+
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 67dc4b4cc094..f7459ad2a073 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -910,7 +910,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ dev_dbg(dev->mt76.dev, "Firmware is already download\n");
+- return -EIO;
++ goto fw_loaded;
+ }
+
+ ret = mt7921_load_patch(dev);
+@@ -928,6 +928,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
+ return -EIO;
+ }
+
++fw_loaded:
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+ #ifdef CONFIG_PM
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch b/sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch
new file mode 100644
index 000000000000..e3c25ccae6f9
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8013-mt76-mt7921-Fix-out-of-order-process-by-invalid-even.patch
@@ -0,0 +1,41 @@
+From 8c7b55f401ae8cf172fd9766178ef20cda322e42 Mon Sep 17 00:00:00 2001
+From: Deren Wu <deren.wu@mediatek.com>
+Date: Wed, 14 Jul 2021 23:50:52 +0800
+Subject: [PATCH 8013/8014] mt76: mt7921: Fix out of order process by invalid
+ event pkt
+
+The acceptable event report should include the original CMD-ID. Otherwise,
+drop the unexpected result from the fw.
+
+Fixes: 1c099ab44727c ("mt76: mt7921: add MCU support")
+Signed-off-by: Jimmy Hu <Jimmy.Hu@mediatek.com>
+Signed-off-by: Deren Wu <deren.wu@mediatek.com>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index f7459ad2a073..bc8e3327a49f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -157,6 +157,7 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
+ {
+ struct mt7921_mcu_rxd *rxd;
++ int mcu_cmd = cmd & MCU_CMD_MASK;
+ int ret = 0;
+
+ if (!skb) {
+@@ -194,6 +195,9 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ skb_pull(skb, sizeof(*rxd));
+ event = (struct mt7921_mcu_uni_event *)skb->data;
+ ret = le32_to_cpu(event->status);
++ /* skip invalid event */
++ if (mcu_cmd != event->cid)
++ ret = -EAGAIN;
+ break;
+ }
+ case MCU_CMD_REG_READ: {
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch b/sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch
new file mode 100644
index 000000000000..d11143822c5f
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-8014-mt76-mt7921-Add-mt7922-support.patch
@@ -0,0 +1,221 @@
+From ad4ffe33fd9d517ef27e213c6bd9d11675658fe2 Mon Sep 17 00:00:00 2001
+From: Deren Wu <deren.wu@mediatek.com>
+Date: Fri, 16 Jul 2021 01:34:19 +0800
+Subject: [PATCH 8014/8014] mt76: mt7921: Add mt7922 support
+
+Add the new chip mt7922 to the mt7921 module, with the following items:
+1. new chip ID / fw bin name
+2. is_mt7922()
+ check chip type for different fw files
+3. mt7921_get_data_mode()
+ check security type of fw (backward compatible)
+
+Co-developed-by: Jimmy Hu <Jimmy.Hu@mediatek.com>
+Signed-off-by: Jimmy Hu <Jimmy.Hu@mediatek.com>
+Signed-off-by: Deren Wu <deren.wu@mediatek.com>
+---
+ .../net/wireless/mediatek/mt76/mt76_connac.h | 7 +-
+ .../wireless/mediatek/mt76/mt7921/eeprom.c | 1 +
+ .../net/wireless/mediatek/mt76/mt7921/mcu.c | 71 +++++++++++++++++--
+ .../wireless/mediatek/mt76/mt7921/mt7921.h | 3 +
+ .../net/wireless/mediatek/mt76/mt7921/pci.c | 3 +
+ 5 files changed, 80 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+index 6c889b90fd12..f6e7671fc3be 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+@@ -82,9 +82,14 @@ struct mt76_connac_coredump {
+
+ extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
+
++static inline bool is_mt7922(struct mt76_dev *dev)
++{
++ return mt76_chip(dev) == 0x7922;
++}
++
+ static inline bool is_mt7921(struct mt76_dev *dev)
+ {
+- return mt76_chip(dev) == 0x7961;
++ return mt76_chip(dev) == 0x7961 || is_mt7922(dev);
+ }
+
+ static inline bool is_mt7663(struct mt76_dev *dev)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
+index 691d14a1a7bf..4d0a4aeac6bf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
+@@ -36,6 +36,7 @@ static int mt7921_check_eeprom(struct mt7921_dev *dev)
+ val = get_unaligned_le16(eeprom);
+
+ switch (val) {
++ case 0x7922:
+ case 0x7961:
+ return 0;
+ default:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index bc8e3327a49f..798ddfde205a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -82,9 +82,17 @@ struct mt7921_fw_region {
+ #define FW_START_OVERRIDE BIT(0)
+ #define FW_START_WORKING_PDA_CR4 BIT(2)
+
++#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
+ #define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+ #define PATCH_SEC_TYPE_INFO 0x2
+
++#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
++#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
++#define PATCH_SEC_ENC_TYPE_AES 0x01
++#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
++#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
++#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
++
+ #define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
+ #define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+
+@@ -722,6 +730,46 @@ static int mt7921_driver_own(struct mt7921_dev *dev)
+ return 0;
+ }
+
++static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
++{
++ u32 mode = DL_MODE_NEED_RSP;
++
++ if (info == PATCH_SEC_NOT_SUPPORT)
++ return mode;
++
++ switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
++ case PATCH_SEC_ENC_TYPE_PLAIN:
++ break;
++ case PATCH_SEC_ENC_TYPE_AES:
++ mode |= DL_MODE_ENCRYPT;
++ mode |= FIELD_PREP(DL_MODE_KEY_IDX,
++ (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
++ mode |= DL_MODE_RESET_SEC_IV;
++ break;
++ case PATCH_SEC_ENC_TYPE_SCRAMBLE:
++ mode |= DL_MODE_ENCRYPT;
++ mode |= DL_CONFIG_ENCRY_MODE_SEL;
++ mode |= DL_MODE_RESET_SEC_IV;
++ break;
++ default:
++ dev_err(dev->mt76.dev, "Encryption type not support!\n");
++ }
++
++ return mode;
++}
++
++static char *mt7921_patch_name(struct mt7921_dev *dev)
++{
++ char *ret;
++
++ if (is_mt7922(&dev->mt76))
++ ret = MT7922_ROM_PATCH;
++ else
++ ret = MT7921_ROM_PATCH;
++
++ return ret;
++}
++
+ static int mt7921_load_patch(struct mt7921_dev *dev)
+ {
+ const struct mt7921_patch_hdr *hdr;
+@@ -739,7 +787,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
+ return -EAGAIN;
+ }
+
+- ret = request_firmware(&fw, MT7921_ROM_PATCH, dev->mt76.dev);
++ ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev);
+ if (ret)
+ goto out;
+
+@@ -757,7 +805,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
+ for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
+ struct mt7921_patch_sec *sec;
+ const u8 *dl;
+- u32 len, addr;
++ u32 len, addr, mode;
++ u32 sec_info = 0;
+
+ sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
+ i * sizeof(*sec));
+@@ -770,9 +819,11 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
+ addr = be32_to_cpu(sec->info.addr);
+ len = be32_to_cpu(sec->info.len);
+ dl = fw->data + be32_to_cpu(sec->offs);
++ sec_info = be32_to_cpu(sec->info.sec_key_idx);
++ mode = mt7921_get_data_mode(dev, sec_info);
+
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+- DL_MODE_NEED_RSP);
++ mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+@@ -869,13 +920,25 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
+ }
+
++static char *mt7921_ram_name(struct mt7921_dev *dev)
++{
++ char *ret;
++
++ if (is_mt7922(&dev->mt76))
++ ret = MT7922_FIRMWARE_WM;
++ else
++ ret = MT7921_FIRMWARE_WM;
++
++ return ret;
++}
++
+ static int mt7921_load_ram(struct mt7921_dev *dev)
+ {
+ const struct mt7921_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+- ret = request_firmware(&fw, MT7921_FIRMWARE_WM, dev->mt76.dev);
++ ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index 59862ea4951c..d8e616229e9f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -33,6 +33,9 @@
+ #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
+ #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
+
++#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin"
++#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin"
++
+ #define MT7921_EEPROM_SIZE 3584
+ #define MT7921_TOKEN_SIZE 8192
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index fa02d934f0bf..684d0568a92a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -14,6 +14,7 @@
+
+ static const struct pci_device_id mt7921_pci_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
++ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
+ { },
+ };
+
+@@ -317,6 +318,8 @@ module_pci_driver(mt7921_pci_driver);
+ MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
+ MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+ MODULE_FIRMWARE(MT7921_ROM_PATCH);
++MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
++MODULE_FIRMWARE(MT7922_ROM_PATCH);
+ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+ MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+ MODULE_LICENSE("Dual BSD/GPL");
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch b/sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch
new file mode 100644
index 000000000000..e00412c4e68b
--- /dev/null
+++ b/sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch
@@ -0,0 +1,1164 @@
+From c5c4ea6d73b96660a0c7a4acfa20260a009eaded Mon Sep 17 00:00:00 2001
+From: Scott B <28817345+foundObjects@users.noreply.github.com>
+Date: Wed, 14 Jul 2021 21:52:37 -0700
+Subject: [PATCH] v5.13.2-s0ix patch 2021-07-14
+
+Squashed commit of the following:
+
+commit dfd19418b30dd4b44909fc2c4b6a9b06c2554d9b
+Author: Marcin Bachry <hegel666@gmail.com>
+Date: Tue Mar 16 15:28:51 2021 -0400
+
+ PCI: quirks: Quirk PCI d3hot delay for AMD xhci
+
+ Renoir needs a similar delay.
+
+ Signed-off-by: Marcin Bachry <hegel666@gmail.com>
+ Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+
+commit 1d4a9adf12b2f2e175f937cd8b056b7e382bbc2d
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jul 7 09:16:47 2021 -0500
+
+ platform/x86: amd-pmc: Use return code on suspend
+
+ Right now the driver will still return success even if the OS_HINT
+ command failed to send to the SMU. In the rare event of a failure,
+ the suspend should really be aborted here so that relevant logs
+    can be captured.
+
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 5d419d5edb6c52b50a6883496e545dea2108f7ab
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 30 14:46:06 2021 -0500
+
+ ACPI: PM: Only mark EC GPE for wakeup on Intel systems
+
+ When using s2idle on a variety of AMD notebook systems, they are
+    When using s2idle on a variety of AMD notebook systems, they are
+    experiencing spurious events indicating that the EC or SMU is in the
+    wrong state, leading to a hard time waking up or higher than expected
+    power consumption.
+ These events only occur when the EC GPE is inadvertently set as a wakeup
+ source. Originally the EC GPE was only set as a wakeup source when using
+ the intel-vbtn or intel-hid drivers in commit 10a08fd65ec1 ("ACPI: PM:
+ Set up EC GPE for system wakeup from drivers that need it") but during
+ testing a reporter discovered that this was not enough for their ASUS
+    Zenbook UX430UNR/i7-8550U to wake up by lid event or keypress.
+ Marking the EC GPE for wakeup universally resolved this for that
+ reporter in commit b90ff3554aa3 ("ACPI: PM: s2idle: Always set up EC GPE
+ for system wakeup").
+
+ However this behavior has lead to a number of problems:
+
+ * On both Lenovo T14 and P14s the keyboard wakeup doesn't work, and
+ sometimes the power button event doesn't work.
+    * On HP 635 G7 detaching or attaching AC during suspend will cause
+    the system not to wake up
+    * On the Asus Vivobook, detaching AC during suspend causes resume problems
+    * On the Lenovo 14ARE05, detaching AC during suspend causes resume problems
+    * On the HP ENVY x360, detaching AC during suspend causes resume problems
+
+ As there may be other Intel systems besides ASUS Zenbook UX430UNR/i7-8550U
+    that don't use intel-vbtn or intel-hid, avoid these problems by only
+    marking the EC GPE as a wakeup source universally on non-AMD systems.
+
+ Link: https://patchwork.kernel.org/project/linux-pm/cover/5997740.FPbUVk04hV@kreacher/#22825489
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1629
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Alex Deucher <alexander.deucher@amd.com>
+
+commit 027e28cf9b134b6c9996ba586a5e501953db6c75
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:03 2021 +0530
+
+    The upcoming PMC controller will have a newer acpi id, add that to
+    the supported acpi device list.
+ The upcoming PMC controller would have a newer acpi id, add that to
+ the supported acpid device list.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit d659780411974015aa21e8740cdf90a6c1821cbb
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:02 2021 +0530
+
+ platform/x86: amd-pmc: Add support for ACPI ID AMDI0006
+
+ Some newer BIOSes have added another ACPI ID for the uPEP device.
+ SMU statistics behave identically on this device.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 92df7f17d7461ae779957ebbe64797caaab680ff
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:01 2021 +0530
+
+ amd-pmc: Add support for logging s0ix counters
+
+    Even the FCH SSC registers provide a certain level of information
+    about the s0ix entry and exit times, which comes in handy when the SMU
+    fails to report the statistics via the mailbox communication.
+
+ This information is captured via a new debugfs file "s0ix_stats".
+ A non-zero entry in this counters would mean that the system entered
+    A non-zero entry in these counters would mean that the system entered
+
+ If s0ix entry time and exit time don't change during suspend to idle,
+ the silicon has not entered the deepest state.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit ce4b6d7cca5300babedff9030592628ae24e5795
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:18:00 2021 +0530
+
+ platform/x86: amd-pmc: Add support for logging SMU metrics
+
+ SMU provides a way to dump the s0ix debug statistics in the form of a
+    metrics table via a set of special mailbox commands.
+
+ Add support to the driver which can send these commands to SMU and expose
+ the information received via debugfs. The information contains the s0ix
+ entry/exit, active time of each IP block etc.
+
+    As a side note, SMU subsystem logging is not supported on Picasso-based
+    SoCs.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+
+commit 8236650e7af934a9912f5810c95506e638ede06c
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:59 2021 +0530
+
+ platform/x86: amd-pmc: call dump registers only once
+
+    Currently the amd_pmc_dump_registers() routine is being called in
+    multiple places. The best place to call it is after command submission
+    to the SMU.
+
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit 46400a2beb5c6c819db50eaeae0a7640c1273a70
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:58 2021 +0530
+
+ platform/x86: amd-pmc: Fix SMU firmware reporting mechanism
+
+    It was recently understood that the current mechanism available in the
+    driver to get SMU firmware info works only on internal SMU builds and
+    there is a separate way to get all the SMU logging counters (addressed
+    in the next patch). Hence remove all the SMU info shown via debugfs as it
+    is no longer useful.
+
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+
+commit bd06b697a1e6a136b2e305634e9f312762059909
+Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Date: Tue Jun 29 14:17:57 2021 +0530
+
+ platform/x86: amd-pmc: Fix command completion code
+
+ The protocol to submit a job request to SMU is to wait for
+    AMD_PMC_REGISTER_RESPONSE to return 1, meaning SMU is ready to take
+ requests. PMC driver has to make sure that the response code is always
+ AMD_PMC_RESULT_OK before making any command submissions.
+
+ When we submit a message to SMU, we have to wait until it processes
+ the request. Adding a read_poll_timeout() check as this was missing in
+ the existing code.
+
+ Also, add a mutex to protect amd_pmc_send_cmd() calls to SMU.
+
+ Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
+ Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Acked-by: Raul E Rangel <rrangel@chromium.org>
+
+commit e8ef1eac2ff5426b9b35f49a587bc3a7d9397659
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Thu Jun 17 11:42:12 2021 -0500
+
+ ACPI: PM: Adjust behavior for field problems on AMD systems
+
+ Some AMD Systems with uPEP _HID AMD004/AMDI005 have an off by one bug
+ in their function mask return. This means that they will call entrance
+ but not exit for matching functions.
+
+ Other AMD systems with this HID should use the Microsoft generic UUID.
+
+ AMD systems with uPEP HID AMDI006 should be using the Microsoft method.
+
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 864a6b01761b2000ec7bc9fff388c61efceaf353
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:11 2021 -0500
+
+ ACPI: PM: s2idle: Add support for new Microsoft UUID
+
+    This adds support for _DSM notifications to the Microsoft UUID
+ described by Microsoft documentation for s2idle.
+
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/modern-standby-firmware-notifications
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit 8b4804c4344b418f9d626814e920ccf95e364024
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:10 2021 -0500
+
+ ACPI: PM: s2idle: Add support for multiple func mask
+
+ Required for follow-up patch adding new UUID
+ needing new function mask.
+
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit 3715cebc4d1c47cf135944773fa9ff284542910e
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:09 2021 -0500
+
+ ACPI: PM: s2idle: Refactor common code
+
+ Refactor common code to prepare for upcoming changes.
+ * Remove unused struct.
+ * Print error before returning.
+ * Frees ACPI obj if _DSM type is not as expected.
+    * Free the ACPI obj if the _DSM type is not as expected.
+    * Treat lps0_dsm_func_mask as an integer rather than a character.
+    * Remove the extra out_obj.
+    * Move rev_id.
+ Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+
+commit 67e6dbc2f704df510523cd62d0b70a33096d565f
+Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
+Date: Thu Jun 17 11:42:08 2021 -0500
+
+ ACPI: PM: s2idle: Use correct revision id
+
+ AMD spec mentions only revision 0. With this change,
+ device constraint list is populated properly.
+
+    the device constraint list is populated properly.
+
+commit f207e3ee5d44caa2626e08f3d331dc3d994d9ff1
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:18 2021 -0500
+
+ ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3 hint
+
+ AMD systems from Renoir and Lucienne require that the NVME controller
+ is put into D3 over a Modern Standby / suspend-to-idle
+ cycle. This is "typically" accomplished using the `StorageD3Enable`
+ property in the _DSD, but this property was introduced after many
+ of these systems launched and most OEM systems don't have it in
+ their BIOS.
+
+ On AMD Renoir without these drives going into D3 over suspend-to-idle
+ the resume will fail with the NVME controller being reset and a trace
+ like this in the kernel logs:
+ ```
+ [ 83.556118] nvme nvme0: I/O 161 QID 2 timeout, aborting
+ [ 83.556178] nvme nvme0: I/O 162 QID 2 timeout, aborting
+ [ 83.556187] nvme nvme0: I/O 163 QID 2 timeout, aborting
+ [ 83.556196] nvme nvme0: I/O 164 QID 2 timeout, aborting
+ [ 95.332114] nvme nvme0: I/O 25 QID 0 timeout, reset controller
+ [ 95.332843] nvme nvme0: Abort status: 0x371
+ [ 95.332852] nvme nvme0: Abort status: 0x371
+ [ 95.332856] nvme nvme0: Abort status: 0x371
+ [ 95.332859] nvme nvme0: Abort status: 0x371
+ [ 95.332909] PM: dpm_run_callback(): pci_pm_resume+0x0/0xe0 returns -16
+ [ 95.332936] nvme 0000:03:00.0: PM: failed to resume async: error -16
+ ```
+
+ The Microsoft documentation for StorageD3Enable mentions that Windows has
+ a hardcoded allowlist for D3 support, which was used for these platforms.
+ Introduce quirks to hardcode them for Linux as well.
+
+ As this property is now "standardized", OEM systems using AMD Cezanne and
+ newer APUs have adopted it, so quirks like this should not be
+ necessary for them.
+
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Prike Liang <prike.liang@amd.com>
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Tested-by: Julian Sikorski <belegdol@gmail.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
+
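+ Condensed sketch of the decision that acpi_storage_d3() ends up making in
+ the diff below (illustration only; adev, val and use_d3 are placeholders):
+ ```
+ /* Force D3 on the allowlisted CPUs, otherwise honour StorageD3Enable. */
+ if (force_storage_d3())
+ 	use_d3 = true;
+ else if (adev && !fwnode_property_read_u8(acpi_fwnode_handle(adev),
+ 					   "StorageD3Enable", &val))
+ 	use_d3 = (val == 1);
+ else
+ 	use_d3 = false;
+ ```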
+commit 4707d17911041378604fa53b28e09b5aab2d2a5c
+Author: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed Jun 9 13:40:17 2021 -0500
+
+ ACPI: Check StorageD3Enable _DSD property in ACPI code
+
+ Although first implemented for NVMe, this check may be usable by
+ other drivers as well. Microsoft's specification explicitly mentions
+ that it may be usable by SATA and AHCI devices. Google also indicates
+ that they have used this in a downstream kernel tree with SDHCI slots
+ that a user can plug a storage device into.
+
+ Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
+ Suggested-by: Keith Busch <kbusch@kernel.org>
+ CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
+ CC: Alexander Deucher <Alexander.Deucher@amd.com>
+ CC: Rafael J. Wysocki <rjw@rjwysocki.net>
+ CC: Prike Liang <prike.liang@amd.com>
+ Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+ Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ Signed-off-by: Christoph Hellwig <hch@lst.de>
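+
+ With the check moved into ACPI code and exported, a non-NVMe storage
+ driver can consult the same hint; a hypothetical consumer might look like
+ this (the example_* names are illustrative, only acpi_storage_d3() comes
+ from this series):
+ ```
+ #include <linux/acpi.h>
+ #include <linux/pci.h>
+
+ static int example_probe(struct pci_dev *pdev)
+ {
+ 	/* Honour the platform's D3-on-suspend request, however it was set. */
+ 	if (acpi_storage_d3(&pdev->dev))
+ 		example_enable_deep_suspend(pdev);	/* hypothetical helper */
+ 	return 0;
+ }
+ ```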
+---
+ drivers/acpi/device_pm.c | 32 +++++
+ drivers/acpi/internal.h | 9 ++
+ drivers/acpi/x86/s2idle.c | 157 ++++++++++++++-------
+ drivers/acpi/x86/utils.c | 25 ++++
+ drivers/nvme/host/pci.c | 28 +---
+ drivers/pci/quirks.c | 3 +
+ drivers/platform/x86/amd-pmc.c | 246 ++++++++++++++++++++++++++++++---
+ include/linux/acpi.h | 5 +
+ 8 files changed, 405 insertions(+), 100 deletions(-)
+
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 9d2d3b9bb8b5..0cfdef2fc3ad 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1338,4 +1338,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ return 1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
++
++/**
++ * acpi_storage_d3 - Check if D3 should be used in the suspend path
++ * @dev: Device to check
++ *
++ * Return %true if the platform firmware wants @dev to be programmed
++ * into D3hot or D3cold (if supported) in the suspend path, or %false
++ * when there is no specific preference. On some platforms, if this
++ * hint is ignored, @dev may remain unresponsive after suspending the
++ * platform as a whole.
++ *
++ * Although the property has "storage" in its name, it is actually
++ * applied to the PCIe slot, so when a non-storage device is plugged
++ * in, the same platform restrictions will likely apply.
++ */
++bool acpi_storage_d3(struct device *dev)
++{
++ struct acpi_device *adev = ACPI_COMPANION(dev);
++ u8 val;
++
++ if (force_storage_d3())
++ return true;
++
++ if (!adev)
++ return false;
++ if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
++ &val))
++ return false;
++ return val == 1;
++}
++EXPORT_SYMBOL_GPL(acpi_storage_d3);
++
+ #endif /* CONFIG_PM */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index e21611c9a170..7ac01b03ba67 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
+ static inline void suspend_nvs_restore(void) {}
+ #endif
+
++#ifdef CONFIG_X86
++bool force_storage_d3(void);
++#else
++static inline bool force_storage_d3(void)
++{
++ return false;
++}
++#endif
++
+ /*--------------------------------------------------------------------------
+ Device properties
+ -------------------------------------------------------------------------- */
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 2d7ddb8a8cb6..1c507804fb10 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ {"", },
+ };
+
++/* Microsoft platform agnostic UUID */
++#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
++
+ #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
+ #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
+@@ -39,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
+ #define ACPI_LPS0_SCREEN_ON 4
+ #define ACPI_LPS0_ENTRY 5
+ #define ACPI_LPS0_EXIT 6
++#define ACPI_LPS0_MS_ENTRY 7
++#define ACPI_LPS0_MS_EXIT 8
+
+ /* AMD */
+ #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
+@@ -49,7 +54,10 @@ static const struct acpi_device_id lps0_device_ids[] = {
+
+ static acpi_handle lps0_device_handle;
+ static guid_t lps0_dsm_guid;
+-static char lps0_dsm_func_mask;
++static int lps0_dsm_func_mask;
++
++static guid_t lps0_dsm_guid_microsoft;
++static int lps0_dsm_func_mask_microsoft;
+
+ /* Device constraint entry structure */
+ struct lpi_device_info {
+@@ -70,15 +78,7 @@ struct lpi_constraints {
+ int min_dstate;
+ };
+
+-/* AMD */
+-/* Device constraint entry structure */
+-struct lpi_device_info_amd {
+- int revision;
+- int count;
+- union acpi_object *package;
+-};
+-
+-/* Constraint package structure */
++/* AMD Constraint package structure */
+ struct lpi_device_constraint_amd {
+ char *name;
+ int enabled;
+@@ -96,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
+ int i, j, k;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
++ rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+- if (!out_obj)
+- return;
+-
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
++ if (!out_obj)
++ return;
++
+ for (i = 0; i < out_obj->package.count; i++) {
+ union acpi_object *package = &out_obj->package.elements[i];
+
+@@ -317,14 +317,15 @@ static void lpi_check_constraints(void)
+ }
+ }
+
+-static void acpi_sleep_run_lps0_dsm(unsigned int func)
++static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
+ {
+ union acpi_object *out_obj;
+
+- if (!(lps0_dsm_func_mask & (1 << func)))
++ if (!(func_mask & (1 << func)))
+ return;
+
+- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
++ out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
++ rev_id, func, NULL);
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+@@ -336,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+ }
+
++static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
++{
++ union acpi_object *obj;
++ int ret = -EINVAL;
++
++ guid_parse(uuid, dsm_guid);
++ obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
++
++ /* Check if the _DSM is present and as expected. */
++ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
++ obj->buffer.length > sizeof(u32)) {
++ acpi_handle_debug(handle,
++ "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
++ goto out;
++ }
++
++ ret = *(int *)obj->buffer.pointer;
++ acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
++
++out:
++ ACPI_FREE(obj);
++ return ret;
++}
++
+ static int lps0_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+ {
+- union acpi_object *out_obj;
+-
+ if (lps0_device_handle)
+ return 0;
+
+@@ -348,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
+ return 0;
+
+ if (acpi_s2idle_vendor_amd()) {
+- guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
++ /* AMD0004, AMDI0005:
++ * - Should use rev_id 0x0
++ * - function mask > 0x3: Should use AMD method, but has off by one bug
++ * - function mask = 0x3: Should use Microsoft method
++ * AMDI0006:
++ * - should use rev_id 0x0
++ * - function mask = 0x3: Should use Microsoft method
++ */
++ const char *hid = acpi_device_hid(adev);
+ rev_id = 0;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
++ &lps0_dsm_guid_microsoft);
++ if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
++ !strcmp(hid, "AMDI0005"))) {
++ lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
++ acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
++ ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
++ }
+ } else {
+- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+ rev_id = 1;
++ lps0_dsm_func_mask = validate_dsm(adev->handle,
++ ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
++ lps0_dsm_func_mask_microsoft = -EINVAL;
+ }
+
+- /* Check if the _DSM is present and as expected. */
+- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
+- acpi_handle_debug(adev->handle,
+- "_DSM function 0 evaluation failed\n");
+- return 0;
+- }
+-
+- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+-
+- ACPI_FREE(out_obj);
+-
+- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+- lps0_dsm_func_mask);
++ if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
++ return 0; //function evaluation failed
+
+ lps0_device_handle = adev->handle;
+
+@@ -386,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+- * EC GPE to be enabled while suspended for certain wakeup devices to
+- * work, so mark it as wakeup-capable.
++ * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
++ * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
++ * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
++ *
++ * Only enable on !AMD as enabling this universally causes problems for a number
++ * of AMD based systems.
+ */
+- acpi_ec_mark_gpe_for_wake();
++ if (!acpi_s2idle_vendor_amd())
++ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+ }
+@@ -408,12 +443,23 @@ int acpi_s2idle_prepare_late(void)
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+
+ return 0;
+@@ -424,12 +470,23 @@ void acpi_s2idle_restore_early(void)
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+- if (acpi_s2idle_vendor_amd()) {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
++ if (lps0_dsm_func_mask_microsoft > 0) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
++ } else if (acpi_s2idle_vendor_amd()) {
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ } else {
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
++ lps0_dsm_func_mask, lps0_dsm_guid);
++ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
++ lps0_dsm_func_mask, lps0_dsm_guid);
+ }
+ }
+
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index bdc1ba00aee9..f22f23933063 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
+
+ return ret;
+ }
++
++/*
++ * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
++ *
++ * This is "typically" accomplished using the `StorageD3Enable`
++ * property in the _DSD that is checked via the `acpi_storage_d3` function
++ * but this property was introduced after many of these systems launched
++ * and most OEM systems don't have it in their BIOS.
++ *
++ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
++ * a hardcoded allowlist for D3 support, which was used for these platforms.
++ *
++ * This allows quirking on Linux in a similar fashion.
++ */
++static const struct x86_cpu_id storage_d3_cpu_ids[] = {
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
++ X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
++ {}
++};
++
++bool force_storage_d3(void)
++{
++ return x86_match_cpu(storage_d3_cpu_ids);
++}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 42ad75ff1348..5a72bdf5ad03 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2828,32 +2828,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ return 0;
+ }
+
+-#ifdef CONFIG_ACPI
+-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+- u8 val;
+-
+- /*
+- * Look for _DSD property specifying that the storage device on the port
+- * must use D3 to support deep platform power savings during
+- * suspend-to-idle.
+- */
+-
+- if (!adev)
+- return false;
+- if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+- &val))
+- return false;
+- return val == 1;
+-}
+-#else
+-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
+-{
+- return false;
+-}
+-#endif /* CONFIG_ACPI */
+-
+ static void nvme_async_probe(void *data, async_cookie_t cookie)
+ {
+ struct nvme_dev *dev = data;
+@@ -2903,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ quirks |= check_vendor_combination_bug(pdev);
+
+- if (!noacpi && nvme_acpi_storage_d3(pdev)) {
++ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a bios work around to ask for D3 on
+ * platforms that support kernel managed suspend.
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 22b2bb1109c9..c4f5e2f093a3 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1899,6 +1899,9 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
++/* Renoir XHCI requires longer delay when transitioning from D0 to
++ * D3hot */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
+
+ #ifdef CONFIG_X86_IO_APIC
+ static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index b9da58ee9b1e..680f94c7e075 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -46,34 +46,79 @@
+ #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+ #define AMD_PMC_RESULT_FAILED 0xFF
+
++/* FCH SSC Registers */
++#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
++#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
++#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
++#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
++#define FCH_SSC_MAPPING_SIZE 0x800
++#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
++#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
++
++/* SMU Message Definitions */
++#define SMU_MSG_GETSMUVERSION 0x02
++#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
++#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
++#define SMU_MSG_LOG_START 0x06
++#define SMU_MSG_LOG_RESET 0x07
++#define SMU_MSG_LOG_DUMP_DATA 0x08
++#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+ /* List of supported CPU ids */
+ #define AMD_CPU_ID_RV 0x15D0
+ #define AMD_CPU_ID_RN 0x1630
+ #define AMD_CPU_ID_PCO AMD_CPU_ID_RV
+ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN
++#define AMD_CPU_ID_YC 0x14B5
+
+-#define AMD_SMU_FW_VERSION 0x0
+ #define PMC_MSG_DELAY_MIN_US 100
+ #define RESPONSE_REGISTER_LOOP_MAX 200
+
++#define SOC_SUBSYSTEM_IP_MAX 12
++#define DELAY_MIN_US 2000
++#define DELAY_MAX_US 3000
+ enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+ };
+
++struct amd_pmc_bit_map {
++ const char *name;
++ u32 bit_mask;
++};
++
++static const struct amd_pmc_bit_map soc15_ip_blk[] = {
++ {"DISPLAY", BIT(0)},
++ {"CPU", BIT(1)},
++ {"GFX", BIT(2)},
++ {"VDD", BIT(3)},
++ {"ACP", BIT(4)},
++ {"VCN", BIT(5)},
++ {"ISP", BIT(6)},
++ {"NBIO", BIT(7)},
++ {"DF", BIT(8)},
++ {"USB0", BIT(9)},
++ {"USB1", BIT(10)},
++ {"LAPIC", BIT(11)},
++ {}
++};
++
+ struct amd_pmc_dev {
+ void __iomem *regbase;
+- void __iomem *smu_base;
++ void __iomem *smu_virt_addr;
++ void __iomem *fch_virt_addr;
+ u32 base_addr;
+ u32 cpu_id;
++ u32 active_ips;
+ struct device *dev;
++ struct mutex lock; /* generic mutex lock */
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+ #endif /* CONFIG_DEBUG_FS */
+ };
+
+ static struct amd_pmc_dev pmc;
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
+
+ static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
+ {
+@@ -85,18 +130,76 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
+ iowrite32(val, dev->regbase + reg_offset);
+ }
+
++struct smu_metrics {
++ u32 table_version;
++ u32 hint_count;
++ u32 s0i3_cyclecount;
++ u32 timein_s0i2;
++ u64 timeentering_s0i3_lastcapture;
++ u64 timeentering_s0i3_totaltime;
++ u64 timeto_resume_to_os_lastcapture;
++ u64 timeto_resume_to_os_totaltime;
++ u64 timein_s0i3_lastcapture;
++ u64 timein_s0i3_totaltime;
++ u64 timein_swdrips_lastcapture;
++ u64 timein_swdrips_totaltime;
++ u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
++ u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
++} __packed;
++
+ #ifdef CONFIG_DEBUG_FS
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+ struct amd_pmc_dev *dev = s->private;
+- u32 value;
++ struct smu_metrics table;
++ int idx;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
++
++ seq_puts(s, "\n=== SMU Statistics ===\n");
++ seq_printf(s, "Table Version: %d\n", table.table_version);
++ seq_printf(s, "Hint Count: %d\n", table.hint_count);
++ seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
++ seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
++ seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
++
++ seq_puts(s, "\n=== Active time (in us) ===\n");
++ for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
++ if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
++ seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
++ table.timecondition_notmet_lastcapture[idx]);
++ }
+
+- value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
+- seq_printf(s, "SMU FW Info: %x\n", value);
+ return 0;
+ }
+ DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
+
++static int s0ix_stats_show(struct seq_file *s, void *unused)
++{
++ struct amd_pmc_dev *dev = s->private;
++ u64 entry_time, exit_time, residency;
++
++ entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
++ entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
++
++ exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
++ exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
++
++	/* The timestamps are in 48 MHz clock ticks; convert to microseconds */
++ residency = (exit_time - entry_time) / 48;
++
++ seq_puts(s, "=== S0ix statistics ===\n");
++ seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
++ seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
++ seq_printf(s, "Residency Time: %lld\n", residency);
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
++
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ {
+ debugfs_remove_recursive(dev->dbgfs_dir);
+@@ -107,6 +210,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
+ debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
+ &smu_fw_info_fops);
++ debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
++ &s0ix_stats_fops);
+ }
+ #else
+ static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+@@ -118,6 +223,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ }
+ #endif /* CONFIG_DEBUG_FS */
+
++static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
++{
++ u32 phys_addr_low, phys_addr_hi;
++ u64 smu_phys_addr;
++
++ if (dev->cpu_id == AMD_CPU_ID_PCO)
++ return -EINVAL;
++
++ /* Get Active devices list from SMU */
++ amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
++
++ /* Get dram address */
++ amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
++ amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
++ smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
++
++ dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
++ if (!dev->smu_virt_addr)
++ return -ENOMEM;
++
++ /* Start the logging */
++ amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ return 0;
++}
++
+ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ {
+ u32 value;
+@@ -132,15 +263,15 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
+ dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
+ }
+
+-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
++static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
+ {
+ int rc;
+- u8 msg;
+ u32 val;
+
++ mutex_lock(&dev->lock);
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+- val, val > 0, PMC_MSG_DELAY_MIN_US,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
+ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+@@ -154,34 +285,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
+
+ /* Write message ID to message ID register */
+- msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
+- return 0;
++
++ /* Wait until we get a valid response */
++ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
++ val, val != 0, PMC_MSG_DELAY_MIN_US,
++ PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
++ if (rc) {
++ dev_err(dev->dev, "SMU response timed out\n");
++ goto out_unlock;
++ }
++
++ switch (val) {
++ case AMD_PMC_RESULT_OK:
++ if (ret) {
++ /* PMFW may take longer time to return back the data */
++ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
++ *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
++ }
++ break;
++ case AMD_PMC_RESULT_CMD_REJECT_BUSY:
++ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
++ rc = -EBUSY;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_UNKNOWN:
++ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
++ rc = -EINVAL;
++ goto out_unlock;
++ case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
++ case AMD_PMC_RESULT_FAILED:
++ default:
++ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
++ rc = -EIO;
++ goto out_unlock;
++ }
++
++out_unlock:
++ mutex_unlock(&dev->lock);
++ amd_pmc_dump_registers(dev);
++ return rc;
++}
++
++static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
++{
++ switch (dev->cpu_id) {
++ case AMD_CPU_ID_PCO:
++ return MSG_OS_HINT_PCO;
++ case AMD_CPU_ID_RN:
++ case AMD_CPU_ID_YC:
++ return MSG_OS_HINT_RN;
++ }
++ return -EINVAL;
+ }
+
+ static int __maybe_unused amd_pmc_suspend(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 1);
++ /* Reset and Start SMU logging - to monitor the s0i3 stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "suspend failed\n");
+
+- amd_pmc_dump_registers(pdev);
+- return 0;
++ return rc;
+ }
+
+ static int __maybe_unused amd_pmc_resume(struct device *dev)
+ {
+ struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
++ u8 msg;
+
+- rc = amd_pmc_send_cmd(pdev, 0);
++ /* Let SMU know that we are looking for stats */
++ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++
++ msg = amd_pmc_get_os_hint(pdev);
++ rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
+ if (rc)
+ dev_err(pdev->dev, "resume failed\n");
+
+- amd_pmc_dump_registers(pdev);
+ return 0;
+ }
+
+@@ -190,6 +378,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
+ };
+
+ static const struct pci_device_id pmc_pci_ids[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
+@@ -201,9 +390,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ {
+ struct amd_pmc_dev *dev = &pmc;
+ struct pci_dev *rdev;
+- u32 base_addr_lo;
+- u32 base_addr_hi;
+- u64 base_addr;
++ u32 base_addr_lo, base_addr_hi;
++ u64 base_addr, fch_phys_addr;
+ int err;
+ u32 val;
+
+@@ -248,16 +436,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+- dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
+- if (!dev->smu_base)
+- return -ENOMEM;
+-
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
+ AMD_PMC_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+- amd_pmc_dump_registers(dev);
++ mutex_init(&dev->lock);
++
++ /* Use FCH registers to get the S0ix stats */
++ base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
++ base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
++ fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
++ dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
++ if (!dev->fch_virt_addr)
++ return -ENOMEM;
++
++ /* Use SMU to get the s0i3 debug stats */
++ err = amd_pmc_setup_smu_logging(dev);
++ if (err)
++ dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
+
+ platform_set_drvdata(pdev, dev);
+ amd_pmc_dbgfs_register(dev);
+@@ -269,11 +466,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
+ struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmc_dbgfs_unregister(dev);
++ mutex_destroy(&dev->lock);
+ return 0;
+ }
+
+ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
+ {"AMDI0005", 0},
++ {"AMDI0006", 0},
++ {"AMDI0007", 0},
+ {"AMD0004", 0},
+ { }
+ };
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index c60745f657e9..dd0dafd21e33 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
+ int acpi_subsys_runtime_suspend(struct device *dev);
+ int acpi_subsys_runtime_resume(struct device *dev);
+ int acpi_dev_pm_attach(struct device *dev, bool power_on);
++bool acpi_storage_d3(struct device *dev);
+ #else
+ static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
+ static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
+@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ {
+ return 0;
+ }
++static inline bool acpi_storage_d3(struct device *dev)
++{
++ return false;
++}
+ #endif
+
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
+--
+2.32.0
+
diff --git a/sys-kernel_arch-sources-g14_files_0001-HID-asus-Filter-keyboard-EC-for-old-ROG-keyboard.patch b/sys-kernel_arch-sources-g14_files_0001-HID-asus-Filter-keyboard-EC-for-old-ROG-keyboard.patch
deleted file mode 100644
index 3beb53d9ea4f..000000000000
--- a/sys-kernel_arch-sources-g14_files_0001-HID-asus-Filter-keyboard-EC-for-old-ROG-keyboard.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 20c82cdae05f6ffe8405cc32cfa94551f596b05c Mon Sep 17 00:00:00 2001
-From: Luke D Jones <luke@ljones.dev>
-Date: Fri, 19 Feb 2021 10:33:03 +1300
-Subject: [PATCH] HID: asus: Filter keyboard EC for old ROG keyboard
-
-Older ROG keyboards emit a similar stream of bytes to the new
-N-Key keyboards and require filtering to prevent a lot of
-unmapped key warnings showing. As all the ROG keyboards use
-QUIRK_USE_KBD_BACKLIGHT this is now used to branch to filtering
-in asus_raw_event.
-
-Signed-off-by: Luke D Jones <luke@ljones.dev>
----
- drivers/hid/hid-asus.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
-index 2ab22b925941..1ed1c05c3d54 100644
---- a/drivers/hid/hid-asus.c
-+++ b/drivers/hid/hid-asus.c
-@@ -335,7 +335,7 @@ static int asus_raw_event(struct hid_device *hdev,
- if (drvdata->quirks & QUIRK_MEDION_E1239T)
- return asus_e1239t_event(drvdata, data, size);
-
-- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
-+ if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
- /*
- * Skip these report ID, the device emits a continuous stream associated
- * with the AURA mode it is in which looks like an 'echo'.
---
-2.30.1
-