path: root/sys-kernel_arch-sources-g14_files-9001-v5.13.2-s0ix-patch-2021-07-14.patch
From c5c4ea6d73b96660a0c7a4acfa20260a009eaded Mon Sep 17 00:00:00 2001
From: Scott B <28817345+foundObjects@users.noreply.github.com>
Date: Wed, 14 Jul 2021 21:52:37 -0700
Subject: [PATCH] v5.13.2-s0ix patch 2021-07-14

Squashed commit of the following:

commit dfd19418b30dd4b44909fc2c4b6a9b06c2554d9b
Author: Marcin Bachry <hegel666@gmail.com>
Date:   Tue Mar 16 15:28:51 2021 -0400

    PCI: quirks: Quirk PCI d3hot delay for AMD xhci

    Renoir needs a similar delay.

    Signed-off-by: Marcin Bachry <hegel666@gmail.com>
    Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

commit 1d4a9adf12b2f2e175f937cd8b056b7e382bbc2d
Author: Mario Limonciello <mario.limonciello@amd.com>
Date:   Wed Jul 7 09:16:47 2021 -0500

    platform/x86: amd-pmc: Use return code on suspend

    Right now the driver will still return success even if the OS_HINT
    command fails to send to the SMU. In the rare event of a failure,
    the suspend should really be aborted here so that the relevant logs
    can be captured.

    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Acked-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>

commit 5d419d5edb6c52b50a6883496e545dea2108f7ab
Author: Mario Limonciello <mario.limonciello@amd.com>
Date:   Wed Jun 30 14:46:06 2021 -0500

    ACPI: PM: Only mark EC GPE for wakeup on Intel systems

    When using s2idle, a variety of AMD notebook systems experience
    spurious events indicating that the EC or SMU is in the wrong state,
    leading to difficulty waking up or higher than expected power
    consumption.

    These events only occur when the EC GPE is inadvertently set as a wakeup
    source. Originally the EC GPE was only set as a wakeup source when using
    the intel-vbtn or intel-hid drivers in commit 10a08fd65ec1 ("ACPI: PM:
    Set up EC GPE for system wakeup from drivers that need it") but during
    testing a reporter discovered that this was not enough for their ASUS
    Zenbook UX430UNR/i7-8550U to wake up by lid event or keypress.
    Marking the EC GPE for wakeup universally resolved this for that
    reporter in commit b90ff3554aa3 ("ACPI: PM: s2idle: Always set up EC GPE
    for system wakeup").

    However, this behavior has led to a number of problems:

    * On both the Lenovo T14 and P14s the keyboard wakeup doesn't work, and
    sometimes the power button event doesn't work.
    * On the HP 635 G7, detaching or attaching AC during suspend causes
    the system not to wake up.
    * On the ASUS VivoBook, detaching AC during suspend causes resume problems.
    * On the Lenovo 14ARE05, detaching AC during suspend causes resume problems.
    * On the HP ENVY x360, detaching AC during suspend causes resume problems.

    As there may be other Intel systems besides the ASUS Zenbook UX430UNR/i7-8550U
    that don't use intel-vbtn or intel-hid, avoid these problems by universally
    marking the EC GPE as a wakeup source only on non-AMD systems.

    Link: https://patchwork.kernel.org/project/linux-pm/cover/5997740.FPbUVk04hV@kreacher/#22825489
    Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1230
    Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1629
    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Acked-by: Alex Deucher <alexander.deucher@amd.com>

commit 027e28cf9b134b6c9996ba586a5e501953db6c75
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:18:03 2021 +0530

    platform/x86: amd-pmc: Add new acpi id for future PMC controllers

    The upcoming PMC controller will have a newer ACPI ID; add it to
    the list of supported ACPI devices.

    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
    Reviewed-by: Hans de Goede <hdegoede@redhat.com>

commit d659780411974015aa21e8740cdf90a6c1821cbb
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:18:02 2021 +0530

    platform/x86: amd-pmc: Add support for ACPI ID AMDI0006

    Some newer BIOSes have added another ACPI ID for the uPEP device.
    SMU statistics behave identically on this device.

    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
    Reviewed-by: Hans de Goede <hdegoede@redhat.com>

commit 92df7f17d7461ae779957ebbe64797caaab680ff
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:18:01 2021 +0530

    amd-pmc: Add support for logging s0ix counters

    The FCH SSC registers provide a certain level of information about
    the s0ix entry and exit times, which comes in handy when the SMU
    fails to report the statistics via the mailbox communication.

    This information is exposed via a new debugfs file, "s0ix_stats".
    A non-zero entry in these counters means that the system entered
    the s0ix state.

    If s0ix entry time and exit time don't change during suspend to idle,
    the silicon has not entered the deepest state.
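
    As a worked example of the arithmetic (a minimal userspace-style sketch
    with made-up register readings, not part of the patch), this is how the
    split FCH timestamps and the 48 MHz tick rate relate to the residency
    value that s0ix_stats reports:

    ```
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical 32-bit register halves read at resume time */
            uint32_t entry_lo = 0x001a2b3c, entry_hi = 0x00000001;
            uint32_t exit_lo  = 0x223a4b5c, exit_hi  = 0x00000001;

            uint64_t entry_time = ((uint64_t)entry_hi << 32) | entry_lo;
            uint64_t exit_time  = ((uint64_t)exit_hi  << 32) | exit_lo;

            if (exit_time == entry_time) {
                    /* counters unchanged: the silicon never reached s0ix */
                    printf("no s0ix residency\n");
                    return 0;
            }

            /* the counters tick at 48 MHz, so 48 ticks == 1 microsecond */
            printf("residency: %llu us\n",
                   (unsigned long long)((exit_time - entry_time) / 48));
            return 0;
    }
    ```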

    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
    Reviewed-by: Hans de Goede <hdegoede@redhat.com>

commit ce4b6d7cca5300babedff9030592628ae24e5795
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:18:00 2021 +0530

    platform/x86: amd-pmc: Add support for logging SMU metrics

    The SMU provides a way to dump the s0ix debug statistics in the form of a
    metrics table via a set of special mailbox commands.

    Add support to the driver to send these commands to the SMU and expose
    the information received via debugfs. The information contains the s0ix
    entry/exit times, the active time of each IP block, etc.

    As a side note, SMU subsystem logging is not supported on Picasso-based
    SoCs.

    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
    Reviewed-by: Hans de Goede <hdegoede@redhat.com>

commit 8236650e7af934a9912f5810c95506e638ede06c
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:17:59 2021 +0530

    platform/x86: amd-pmc: call dump registers only once

    Currently the amd_pmc_dump_registers() routine is called in
    multiple places. The best place to call it is after command
    submission to the SMU.

    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>

commit 46400a2beb5c6c819db50eaeae0a7640c1273a70
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:17:58 2021 +0530

    platform/x86: amd-pmc: Fix SMU firmware reporting mechanism

    It was recently understood that the current mechanism available in the
    driver to get SMU firmware info works only on internal SMU builds, and
    there is a separate way to get all the SMU logging counters (addressed
    in the next patch). Hence remove the SMU info shown via debugfs, as it
    is no longer useful.

    Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>

commit bd06b697a1e6a136b2e305634e9f312762059909
Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Date:   Tue Jun 29 14:17:57 2021 +0530

    platform/x86: amd-pmc: Fix command completion code

    The protocol to submit a job request to the SMU is to wait for
    AMD_PMC_REGISTER_RESPONSE to return 1, meaning the SMU is ready to take
    requests. The PMC driver has to make sure that the response code is always
    AMD_PMC_RESULT_OK before making any command submission.

    When we submit a message to the SMU, we have to wait until it processes
    the request. Add a read_poll_timeout() check, as this was missing from
    the existing code.

    Also, add a mutex to protect amd_pmc_send_cmd() calls to SMU.

    Fixes: 156ec4731cb2 ("platform/x86: amd-pmc: Add AMD platform support for S2Idle")
    Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
    Acked-by: Raul E Rangel <rrangel@chromium.org>

commit e8ef1eac2ff5426b9b35f49a587bc3a7d9397659
Author: Mario Limonciello <mario.limonciello@amd.com>
Date:   Thu Jun 17 11:42:12 2021 -0500

    ACPI: PM: Adjust behavior for field problems on AMD systems

    Some AMD systems with uPEP _HID AMD0004/AMDI0005 have an off-by-one bug
    in their function mask return.  This means that they will call the entry
    function but not the exit function for matching functions.

    Other AMD systems with these HIDs should use the Microsoft generic UUID.

    AMD systems with uPEP HID AMDI0006 should use the Microsoft method.
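
    For reference, the fix-up applied later in this series in
    drivers/acpi/x86/s2idle.c compensates for the off-by-one by shifting the
    reported mask up by one and re-adding function 0. A minimal sketch with
    a made-up reported mask:

    ```
    #include <stdio.h>

    int main(void)
    {
            int reported = 0x3f;                  /* made-up value returned by _DSM function 0 */
            int adjusted = (reported << 1) | 0x1; /* same adjustment the patch applies */

            printf("reported 0x%x -> adjusted 0x%x\n", reported, adjusted);
            return 0;
    }
    ```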

    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>

commit 864a6b01761b2000ec7bc9fff388c61efceaf353
Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
Date:   Thu Jun 17 11:42:11 2021 -0500

    ACPI: PM: s2idle: Add support for new Microsoft UUID

    This adds support for _DSM notifications to the Microsoft UUID
    described by the Microsoft documentation for s2idle.

    Link: https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/modern-standby-firmware-notifications
    Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>

commit 8b4804c4344b418f9d626814e920ccf95e364024
Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
Date:   Thu Jun 17 11:42:10 2021 -0500

    ACPI: PM: s2idle: Add support for multiple func mask

    Required for a follow-up patch adding a new UUID
    that needs a new function mask.

    Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>

commit 3715cebc4d1c47cf135944773fa9ff284542910e
Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
Date:   Thu Jun 17 11:42:09 2021 -0500

    ACPI: PM: s2idle: Refactor common code

    Refactor common code to prepare for upcoming changes.
    * Remove an unused struct.
    * Print the error before returning.
    * Free the ACPI object if the _DSM type is not as expected.
    * Treat lps0_dsm_func_mask as an integer rather than a character.
    * Remove the extra out_obj.
    * Move rev_id.

    Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>

commit 67e6dbc2f704df510523cd62d0b70a33096d565f
Author: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>
Date:   Thu Jun 17 11:42:08 2021 -0500

    ACPI: PM: s2idle: Use correct revision id

    The AMD spec mentions only revision 0. With this change,
    the device constraint list is populated properly.

    Signed-off-by: Pratik Vishwakarma <Pratik.Vishwakarma@amd.com>

commit f207e3ee5d44caa2626e08f3d331dc3d994d9ff1
Author: Mario Limonciello <mario.limonciello@amd.com>
Date:   Wed Jun 9 13:40:18 2021 -0500

    ACPI: Add quirks for AMD Renoir/Lucienne CPUs to force the D3 hint

    AMD systems from Renoir and Lucienne require that the NVME controller
    is put into D3 over a Modern Standby / suspend-to-idle
    cycle.  This is "typically" accomplished using the `StorageD3Enable`
    property in the _DSD, but this property was introduced after many
    of these systems launched and most OEM systems don't have it in
    their BIOS.

    On AMD Renoir, if these drives do not go into D3 over suspend-to-idle,
    the resume will fail with the NVME controller being reset and a trace
    like this in the kernel logs:
    ```
    [   83.556118] nvme nvme0: I/O 161 QID 2 timeout, aborting
    [   83.556178] nvme nvme0: I/O 162 QID 2 timeout, aborting
    [   83.556187] nvme nvme0: I/O 163 QID 2 timeout, aborting
    [   83.556196] nvme nvme0: I/O 164 QID 2 timeout, aborting
    [   95.332114] nvme nvme0: I/O 25 QID 0 timeout, reset controller
    [   95.332843] nvme nvme0: Abort status: 0x371
    [   95.332852] nvme nvme0: Abort status: 0x371
    [   95.332856] nvme nvme0: Abort status: 0x371
    [   95.332859] nvme nvme0: Abort status: 0x371
    [   95.332909] PM: dpm_run_callback(): pci_pm_resume+0x0/0xe0 returns -16
    [   95.332936] nvme 0000:03:00.0: PM: failed to resume async: error -16
    ```

    The Microsoft documentation for StorageD3Enable mentions that Windows has
    a hardcoded allowlist for D3 support, which was used for these platforms.
    Introduce quirks to hardcode them for Linux as well.

    As this property is now "standardized", OEM systems using AMD Cezanne and
    newer APUs have adopted this property, and quirks like this should not be
    necessary.

    CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
    CC: Alexander Deucher <Alexander.Deucher@amd.com>
    CC: Prike Liang <prike.liang@amd.com>
    Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Tested-by: Julian Sikorski <belegdol@gmail.com>
    Signed-off-by: Christoph Hellwig <hch@lst.de>

commit 4707d17911041378604fa53b28e09b5aab2d2a5c
Author: Mario Limonciello <mario.limonciello@amd.com>
Date:   Wed Jun 9 13:40:17 2021 -0500

    ACPI: Check StorageD3Enable _DSD property in ACPI code

    Although first implemented for NVME, this check may be usable by
    other drivers as well. Microsoft's specification explicitly mentions
    that it may be usable by SATA and AHCI devices.  Google also indicates
    that they have used this with SDHCI in a downstream kernel tree that
    a user can plug a storage device into.
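
    A minimal sketch of how another storage driver might consume the new
    helper (hypothetical driver code; only acpi_storage_d3() itself comes
    from this series):

    ```
    #include <linux/acpi.h>
    #include <linux/device.h>

    /* hypothetical suspend-path decision in a non-NVME storage host driver */
    static bool example_wants_full_d3(struct device *dev)
    {
            /*
             * acpi_storage_d3() returns true when the StorageD3Enable _DSD
             * property (or the Renoir/Lucienne CPU allowlist added by this
             * series) asks for the device to be put into D3 over s2idle.
             */
            return acpi_storage_d3(dev);
    }
    ```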

    Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
    Suggested-by: Keith Busch <kbusch@kernel.org>
    CC: Shyam-sundar S-k <Shyam-sundar.S-k@amd.com>
    CC: Alexander Deucher <Alexander.Deucher@amd.com>
    CC: Rafael J. Wysocki <rjw@rjwysocki.net>
    CC: Prike Liang <prike.liang@amd.com>
    Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
    Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/acpi/device_pm.c       |  32 +++++
 drivers/acpi/internal.h        |   9 ++
 drivers/acpi/x86/s2idle.c      | 157 ++++++++++++++-------
 drivers/acpi/x86/utils.c       |  25 ++++
 drivers/nvme/host/pci.c        |  28 +---
 drivers/pci/quirks.c           |   3 +
 drivers/platform/x86/amd-pmc.c | 246 ++++++++++++++++++++++++++++++---
 include/linux/acpi.h           |   5 +
 8 files changed, 405 insertions(+), 100 deletions(-)

diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 9d2d3b9bb8b5..0cfdef2fc3ad 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1338,4 +1338,36 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	return 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
+ * @dev: Device to check
+ *
+ * Return %true if the platform firmware wants @dev to be programmed
+ * into D3hot or D3cold (if supported) in the suspend path, or %false
+ * when there is no specific preference. On some platforms, if this
+ * hint is ignored, @dev may remain unresponsive after suspending the
+ * platform as a whole.
+ *
+ * Although the property has storage in the name it actually is
+ * applied to the PCIe slot and plugging in a non-storage device the
+ * same platform restrictions will likely apply.
+ */
+bool acpi_storage_d3(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	u8 val;
+
+	if (force_storage_d3())
+		return true;
+
+	if (!adev)
+		return false;
+	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+			&val))
+		return false;
+	return val == 1;
+}
+EXPORT_SYMBOL_GPL(acpi_storage_d3);
+
 #endif /* CONFIG_PM */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e21611c9a170..7ac01b03ba67 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -236,6 +236,15 @@ static inline int suspend_nvs_save(void) { return 0; }
 static inline void suspend_nvs_restore(void) {}
 #endif
 
+#ifdef CONFIG_X86
+bool force_storage_d3(void);
+#else
+static inline bool force_storage_d3(void)
+{
+	return false;
+}
+#endif
+
 /*--------------------------------------------------------------------------
 				Device properties
   -------------------------------------------------------------------------- */
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 2d7ddb8a8cb6..1c507804fb10 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
 	{"", },
 };
 
+/* Microsoft platform agnostic UUID */
+#define ACPI_LPS0_DSM_UUID_MICROSOFT      "11e00d56-ce64-47ce-837b-1f898f9aa461"
+
 #define ACPI_LPS0_DSM_UUID	"c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
 
 #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS	1
@@ -39,6 +42,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
 #define ACPI_LPS0_SCREEN_ON	4
 #define ACPI_LPS0_ENTRY		5
 #define ACPI_LPS0_EXIT		6
+#define ACPI_LPS0_MS_ENTRY      7
+#define ACPI_LPS0_MS_EXIT       8
 
 /* AMD */
 #define ACPI_LPS0_DSM_UUID_AMD      "e3f32452-febc-43ce-9039-932122d37721"
@@ -49,7 +54,10 @@ static const struct acpi_device_id lps0_device_ids[] = {
 
 static acpi_handle lps0_device_handle;
 static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
+static int lps0_dsm_func_mask;
+
+static guid_t lps0_dsm_guid_microsoft;
+static int lps0_dsm_func_mask_microsoft;
 
 /* Device constraint entry structure */
 struct lpi_device_info {
@@ -70,15 +78,7 @@ struct lpi_constraints {
 	int min_dstate;
 };
 
-/* AMD */
-/* Device constraint entry structure */
-struct lpi_device_info_amd {
-	int revision;
-	int count;
-	union acpi_object *package;
-};
-
-/* Constraint package structure */
+/* AMD Constraint package structure */
 struct lpi_device_constraint_amd {
 	char *name;
 	int enabled;
@@ -96,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
 	int i, j, k;
 
 	out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
-					  1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+					  rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
 					  NULL, ACPI_TYPE_PACKAGE);
 
-	if (!out_obj)
-		return;
-
 	acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
 			  out_obj ? "successful" : "failed");
 
+	if (!out_obj)
+		return;
+
 	for (i = 0; i < out_obj->package.count; i++) {
 		union acpi_object *package = &out_obj->package.elements[i];
 
@@ -317,14 +317,15 @@ static void lpi_check_constraints(void)
 	}
 }
 
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
+static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
 {
 	union acpi_object *out_obj;
 
-	if (!(lps0_dsm_func_mask & (1 << func)))
+	if (!(func_mask & (1 << func)))
 		return;
 
-	out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+	out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
+					rev_id, func, NULL);
 	ACPI_FREE(out_obj);
 
 	acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
@@ -336,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
 	return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
 }
 
+static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
+{
+	union acpi_object *obj;
+	int ret = -EINVAL;
+
+	guid_parse(uuid, dsm_guid);
+	obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
+
+	/* Check if the _DSM is present and as expected. */
+	if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
+	    obj->buffer.length > sizeof(u32)) {
+		acpi_handle_debug(handle,
+				"_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
+		goto out;
+	}
+
+	ret = *(int *)obj->buffer.pointer;
+	acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
+
+out:
+	ACPI_FREE(obj);
+	return ret;
+}
+
 static int lps0_device_attach(struct acpi_device *adev,
 			      const struct acpi_device_id *not_used)
 {
-	union acpi_object *out_obj;
-
 	if (lps0_device_handle)
 		return 0;
 
@@ -348,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
 		return 0;
 
 	if (acpi_s2idle_vendor_amd()) {
-		guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
-		out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+		/* AMD0004, AMDI0005:
+		 * - Should use rev_id 0x0
+		 * - function mask > 0x3: Should use AMD method, but has off by one bug
+		 * - function mask = 0x3: Should use Microsoft method
+		 * AMDI0006:
+		 * - should use rev_id 0x0
+		 * - function mask = 0x3: Should use Microsoft method
+		 */
+		const char *hid = acpi_device_hid(adev);
 		rev_id = 0;
+		lps0_dsm_func_mask = validate_dsm(adev->handle,
+					ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+		lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+					ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+					&lps0_dsm_guid_microsoft);
+		if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+						 !strcmp(hid, "AMDI0005"))) {
+			lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+			acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+					  ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+		}
 	} else {
-		guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
-		out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
 		rev_id = 1;
+		lps0_dsm_func_mask = validate_dsm(adev->handle,
+					ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+		lps0_dsm_func_mask_microsoft = -EINVAL;
 	}
 
-	/* Check if the _DSM is present and as expected. */
-	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
-		acpi_handle_debug(adev->handle,
-				  "_DSM function 0 evaluation failed\n");
-		return 0;
-	}
-
-	lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
-	ACPI_FREE(out_obj);
-
-	acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
-			  lps0_dsm_func_mask);
+	if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+		return 0; //function evaluation failed
 
 	lps0_device_handle = adev->handle;
 
@@ -386,11 +417,15 @@ static int lps0_device_attach(struct acpi_device *adev,
 		mem_sleep_current = PM_SUSPEND_TO_IDLE;
 
 	/*
-	 * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
-	 * EC GPE to be enabled while suspended for certain wakeup devices to
-	 * work, so mark it as wakeup-capable.
+	 * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
+	 * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
+	 * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
+	 *
+	 * Only enable on !AMD as enabling this universally causes problems for a number
+	 * of AMD based systems.
 	 */
-	acpi_ec_mark_gpe_for_wake();
+	if (!acpi_s2idle_vendor_amd())
+		acpi_ec_mark_gpe_for_wake();
 
 	return 0;
 }
@@ -408,12 +443,23 @@ int acpi_s2idle_prepare_late(void)
 	if (pm_debug_messages_on)
 		lpi_check_constraints();
 
-	if (acpi_s2idle_vendor_amd()) {
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD);
+	if (lps0_dsm_func_mask_microsoft > 0) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+	} else if (acpi_s2idle_vendor_amd()) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
+				lps0_dsm_func_mask, lps0_dsm_guid);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
+				lps0_dsm_func_mask, lps0_dsm_guid);
 	} else {
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+				lps0_dsm_func_mask, lps0_dsm_guid);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+				lps0_dsm_func_mask, lps0_dsm_guid);
 	}
 
 	return 0;
@@ -424,12 +470,23 @@ void acpi_s2idle_restore_early(void)
 	if (!lps0_device_handle || sleep_no_lps0)
 		return;
 
-	if (acpi_s2idle_vendor_amd()) {
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+	if (lps0_dsm_func_mask_microsoft > 0) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+	} else if (acpi_s2idle_vendor_amd()) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
+				lps0_dsm_func_mask, lps0_dsm_guid);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
+				lps0_dsm_func_mask, lps0_dsm_guid);
 	} else {
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
-		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+				lps0_dsm_func_mask, lps0_dsm_guid);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+				lps0_dsm_func_mask, lps0_dsm_guid);
 	}
 }
 
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bdc1ba00aee9..f22f23933063 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -135,3 +135,28 @@ bool acpi_device_always_present(struct acpi_device *adev)
 
 	return ret;
 }
+
+/*
+ * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+ *
+ * This is "typically" accomplished using the `StorageD3Enable`
+ * property in the _DSD that is checked via the `acpi_storage_d3` function
+ * but this property was introduced after many of these systems launched
+ * and most OEM systems don't have it in their BIOS.
+ *
+ * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+ * a hardcoded allowlist for D3 support, which was used for these platforms.
+ *
+ * This allows quirking on Linux in a similar fashion.
+ */
+static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+	{}
+};
+
+bool force_storage_d3(void)
+{
+	return x86_match_cpu(storage_d3_cpu_ids);
+}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 42ad75ff1348..5a72bdf5ad03 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2828,32 +2828,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
-	u8 val;
-
-	/*
-	 * Look for _DSD property specifying that the storage device on the port
-	 * must use D3 to support deep platform power savings during
-	 * suspend-to-idle.
-	 */
-
-	if (!adev)
-		return false;
-	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-			&val))
-		return false;
-	return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-	return false;
-}
-#endif /* CONFIG_ACPI */
-
 static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
 	struct nvme_dev *dev = data;
@@ -2903,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	quirks |= check_vendor_combination_bug(pdev);
 
-	if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
 		/*
 		 * Some systems use a bios work around to ask for D3 on
 		 * platforms that support kernel managed suspend.
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 22b2bb1109c9..c4f5e2f093a3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1899,6 +1899,9 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+/* Renoir XHCI requires longer delay when transitioning from D0 to
+ * D3hot */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
 
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b9da58ee9b1e..680f94c7e075 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -46,34 +46,79 @@
 #define AMD_PMC_RESULT_CMD_UNKNOWN           0xFE
 #define AMD_PMC_RESULT_FAILED                0xFF
 
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET	0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET	0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET	0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET	0x3C
+#define FCH_SSC_MAPPING_SIZE		0x800
+#define FCH_BASE_PHY_ADDR_LOW		0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH		0x00000000
+
+/* SMU Message Definations */
+#define SMU_MSG_GETSMUVERSION		0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI	0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO	0x05
+#define SMU_MSG_LOG_START		0x06
+#define SMU_MSG_LOG_RESET		0x07
+#define SMU_MSG_LOG_DUMP_DATA		0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS	0x09
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RV			0x15D0
 #define AMD_CPU_ID_RN			0x1630
 #define AMD_CPU_ID_PCO			AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN			AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC			0x14B5
 
-#define AMD_SMU_FW_VERSION		0x0
 #define PMC_MSG_DELAY_MIN_US		100
 #define RESPONSE_REGISTER_LOOP_MAX	200
 
+#define SOC_SUBSYSTEM_IP_MAX	12
+#define DELAY_MIN_US		2000
+#define DELAY_MAX_US		3000
 enum amd_pmc_def {
 	MSG_TEST = 0x01,
 	MSG_OS_HINT_PCO,
 	MSG_OS_HINT_RN,
 };
 
+struct amd_pmc_bit_map {
+	const char *name;
+	u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+	{"DISPLAY",	BIT(0)},
+	{"CPU",		BIT(1)},
+	{"GFX",		BIT(2)},
+	{"VDD",		BIT(3)},
+	{"ACP",		BIT(4)},
+	{"VCN",		BIT(5)},
+	{"ISP",		BIT(6)},
+	{"NBIO",	BIT(7)},
+	{"DF",		BIT(8)},
+	{"USB0",	BIT(9)},
+	{"USB1",	BIT(10)},
+	{"LAPIC",	BIT(11)},
+	{}
+};
+
 struct amd_pmc_dev {
 	void __iomem *regbase;
-	void __iomem *smu_base;
+	void __iomem *smu_virt_addr;
+	void __iomem *fch_virt_addr;
 	u32 base_addr;
 	u32 cpu_id;
+	u32 active_ips;
 	struct device *dev;
+	struct mutex lock; /* generic mutex lock */
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 };
 
 static struct amd_pmc_dev pmc;
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -85,18 +130,76 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
 	iowrite32(val, dev->regbase + reg_offset);
 }
 
+struct smu_metrics {
+	u32 table_version;
+	u32 hint_count;
+	u32 s0i3_cyclecount;
+	u32 timein_s0i2;
+	u64 timeentering_s0i3_lastcapture;
+	u64 timeentering_s0i3_totaltime;
+	u64 timeto_resume_to_os_lastcapture;
+	u64 timeto_resume_to_os_totaltime;
+	u64 timein_s0i3_lastcapture;
+	u64 timein_s0i3_totaltime;
+	u64 timein_swdrips_lastcapture;
+	u64 timein_swdrips_totaltime;
+	u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+	u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
 	struct amd_pmc_dev *dev = s->private;
-	u32 value;
+	struct smu_metrics table;
+	int idx;
+
+	if (dev->cpu_id == AMD_CPU_ID_PCO)
+		return -EINVAL;
+
+	memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+	seq_puts(s, "\n=== SMU Statistics ===\n");
+	seq_printf(s, "Table Version: %d\n", table.table_version);
+	seq_printf(s, "Hint Count: %d\n", table.hint_count);
+	seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+	seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+	seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+	seq_puts(s, "\n=== Active time (in us) ===\n");
+	for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+		if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+			seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+				   table.timecondition_notmet_lastcapture[idx]);
+	}
 
-	value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-	seq_printf(s, "SMU FW Info: %x\n", value);
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
 
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+	struct amd_pmc_dev *dev = s->private;
+	u64 entry_time, exit_time, residency;
+
+	entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+	entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+	exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+	exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+	/* It's in 48MHz. We need to convert it */
+	residency = (exit_time - entry_time) / 48;
+
+	seq_puts(s, "=== S0ix statistics ===\n");
+	seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+	seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+	seq_printf(s, "Residency Time: %lld\n", residency);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 {
 	debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +210,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
 	dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
 	debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
 			    &smu_fw_info_fops);
+	debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+			    &s0ix_stats_fops);
 }
 #else
 static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +223,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+	u32 phys_addr_low, phys_addr_hi;
+	u64 smu_phys_addr;
+
+	if (dev->cpu_id == AMD_CPU_ID_PCO)
+		return -EINVAL;
+
+	/* Get Active devices list from SMU */
+	amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+	/* Get dram address */
+	amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+	amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+	smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+	dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+	if (!dev->smu_virt_addr)
+		return -ENOMEM;
+
+	/* Start the logging */
+	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+	return 0;
+}
+
 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 {
 	u32 value;
@@ -132,15 +263,15 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 	dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
 }
 
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
 {
 	int rc;
-	u8 msg;
 	u32 val;
 
+	mutex_lock(&dev->lock);
 	/* Wait until we get a valid response */
 	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-				val, val > 0, PMC_MSG_DELAY_MIN_US,
+				val, val != 0, PMC_MSG_DELAY_MIN_US,
 				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
 	if (rc) {
 		dev_err(dev->dev, "failed to talk to SMU\n");
@@ -154,34 +285,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
 
 	/* Write message ID to message ID register */
-	msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
 	amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-	return 0;
+
+	/* Wait until we get a valid response */
+	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+				val, val != 0, PMC_MSG_DELAY_MIN_US,
+				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+	if (rc) {
+		dev_err(dev->dev, "SMU response timed out\n");
+		goto out_unlock;
+	}
+
+	switch (val) {
+	case AMD_PMC_RESULT_OK:
+		if (ret) {
+			/* PMFW may take longer time to return back the data */
+			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+			*data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+		}
+		break;
+	case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+		rc = -EBUSY;
+		goto out_unlock;
+	case AMD_PMC_RESULT_CMD_UNKNOWN:
+		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+		rc = -EINVAL;
+		goto out_unlock;
+	case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+	case AMD_PMC_RESULT_FAILED:
+	default:
+		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+		rc = -EIO;
+		goto out_unlock;
+	}
+
+out_unlock:
+	mutex_unlock(&dev->lock);
+	amd_pmc_dump_registers(dev);
+	return rc;
+}
+
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+	switch (dev->cpu_id) {
+	case AMD_CPU_ID_PCO:
+		return MSG_OS_HINT_PCO;
+	case AMD_CPU_ID_RN:
+	case AMD_CPU_ID_YC:
+		return MSG_OS_HINT_RN;
+	}
+	return -EINVAL;
 }
 
 static int __maybe_unused amd_pmc_suspend(struct device *dev)
 {
 	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
 	int rc;
+	u8 msg;
 
-	rc = amd_pmc_send_cmd(pdev, 1);
+	/* Reset and Start SMU logging - to monitor the s0i3 stats */
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+	msg = amd_pmc_get_os_hint(pdev);
+	rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
 	if (rc)
 		dev_err(pdev->dev, "suspend failed\n");
 
-	amd_pmc_dump_registers(pdev);
-	return 0;
+	return rc;
 }
 
 static int __maybe_unused amd_pmc_resume(struct device *dev)
 {
 	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
 	int rc;
+	u8 msg;
 
-	rc = amd_pmc_send_cmd(pdev, 0);
+	/* Let SMU know that we are looking for stats */
+	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
+
+	msg = amd_pmc_get_os_hint(pdev);
+	rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
 	if (rc)
 		dev_err(pdev->dev, "resume failed\n");
 
-	amd_pmc_dump_registers(pdev);
 	return 0;
 }
 
@@ -190,6 +378,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +390,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
 {
 	struct amd_pmc_dev *dev = &pmc;
 	struct pci_dev *rdev;
-	u32 base_addr_lo;
-	u32 base_addr_hi;
-	u64 base_addr;
+	u32 base_addr_lo, base_addr_hi;
+	u64 base_addr, fch_phys_addr;
 	int err;
 	u32 val;
 
@@ -248,16 +436,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
 	pci_dev_put(rdev);
 	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
 
-	dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-	if (!dev->smu_base)
-		return -ENOMEM;
-
 	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
 				    AMD_PMC_MAPPING_SIZE);
 	if (!dev->regbase)
 		return -ENOMEM;
 
-	amd_pmc_dump_registers(dev);
+	mutex_init(&dev->lock);
+
+	/* Use FCH registers to get the S0ix stats */
+	base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+	base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+	fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+	dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+	if (!dev->fch_virt_addr)
+		return -ENOMEM;
+
+	/* Use SMU to get the s0i3 debug stats */
+	err = amd_pmc_setup_smu_logging(dev);
+	if (err)
+		dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
 
 	platform_set_drvdata(pdev, dev);
 	amd_pmc_dbgfs_register(dev);
@@ -269,11 +466,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
 	struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
 	amd_pmc_dbgfs_unregister(dev);
+	mutex_destroy(&dev->lock);
 	return 0;
 }
 
 static const struct acpi_device_id amd_pmc_acpi_ids[] = {
 	{"AMDI0005", 0},
+	{"AMDI0006", 0},
+	{"AMDI0007", 0},
 	{"AMD0004", 0},
 	{ }
 };
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c60745f657e9..dd0dafd21e33 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
 int acpi_subsys_runtime_suspend(struct device *dev);
 int acpi_subsys_runtime_resume(struct device *dev);
 int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
 #else
 static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
 static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
 {
 	return 0;
 }
+static inline bool acpi_storage_d3(struct device *dev)
+{
+	return false;
+}
 #endif
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
-- 
2.32.0