author     Taijian  2024-03-28 00:30:39 +0100
committer  Taijian  2024-03-28 00:30:39 +0100
commit     fe2ced41debb6c571d6542f71ff7bb222144c15f (patch)
tree       887a20df3b7a6fd4c712786542478ba4281d2e60
parent     005a5394c20fa0495489d479c7c3a3a36172add5 (diff)
download   aur-fe2ced41debb6c571d6542f71ff7bb222144c15f.tar.gz
update to 6.8.2
-rw-r--r--  .SRCINFO                                                        |  30
-rw-r--r--  0001-linux6.7.y-bore4.0.0.patch                                 | 668
-rw-r--r--  0001-linux6.8.y-bore4.5.0.patch                                 | 904
-rw-r--r--  0001-platform-x86-asus-wmi-Add-safety-checks-to-dgpu-egpu.patch |  87
-rw-r--r--  0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch | 150
-rw-r--r--  0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch | 100
-rw-r--r--  0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch |  74
-rw-r--r--  0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch    | 139
-rw-r--r--  0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-if-.patch |  37
-rw-r--r--  0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch | 342
-rw-r--r--  0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-switc.patch | 104
-rw-r--r--  PKGBUILD                                                        |  28
-rw-r--r--  fix_amd_eDP_HDR_flickering.patch                                |  14
-rw-r--r--  v2-0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-sw.patch | 104
14 files changed, 1762 insertions(+), 1019 deletions(-)
diff --git a/.SRCINFO b/.SRCINFO
index fb574e9aef44..0812a552c493 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-g14
pkgdesc = Linux-g14
- pkgver = 6.8.1.arch1
+ pkgver = 6.8.2.arch1
pkgrel = 1
url = https://gitlab.com/dragonn/linux-g14.git
arch = x86_64
@@ -17,16 +17,16 @@ pkgbase = linux-g14
makedepends = xz
options = !strip
options = !debug
- source = https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.1.tar.xz
- source = https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.1.tar.sign
- source = https://github.com/archlinux/linux/releases/download/v6.8.1-arch1/linux-v6.8.1-arch1.patch.zst
- source = https://github.com/archlinux/linux/releases/download/v6.8.1-arch1/linux-v6.8.1-arch1.patch.zst.sig
+ source = https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.2.tar.xz
+ source = https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.2.tar.sign
+ source = https://github.com/archlinux/linux/releases/download/v6.8.2-arch1/linux-v6.8.2-arch1.patch.zst
+ source = https://github.com/archlinux/linux/releases/download/v6.8.2-arch1/linux-v6.8.2-arch1.patch.zst.sig
source = config
source = choose-gcc-optimization.sh
source = sys-kernel_arch-sources-g14-6.8+--more-uarches-for-kernel.patch::https://raw.githubusercontent.com/graysky2/kernel_compiler_patch/master/more-uarches-for-kernel-6.8-rc4%2B.patch
source = 0001-acpi-proc-idle-skip-dummy-wait.patch
source = 0027-mt76_-mt7921_-Disable-powersave-features-by-default.patch
- source = 0001-linux6.7.y-bore4.0.0.patch
+ source = 0001-linux6.8.y-bore4.5.0.patch
source = 0032-Bluetooth-btusb-Add-a-new-PID-VID-0489-e0f6-for-MT7922.patch
source = 0035-Add_quirk_for_polling_the_KBD_port.patch
source = 0001-ACPI-resource-Skip-IRQ-override-on-ASUS-TUF-Gaming-A.patch
@@ -36,21 +36,27 @@ pkgbase = linux-g14
source = 0040-workaround_hardware_decoding_amdgpu.patch
source = 0001-platform-x86-asus-wmi-Support-2023-ROG-X16-tablet-mo.patch
source = amd-tablet-sfh.patch
+ source = fix_amd_eDP_HDR_flickering.patch
+ source = 0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
+ source = 0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
+ source = 0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
+ source = 0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
+ source = 0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch
source = sys-kernel_arch-sources-g14_files-0047-asus-nb-wmi-Add-tablet_mode_sw-lid-flip.patch
source = sys-kernel_arch-sources-g14_files-0048-asus-nb-wmi-fix-tablet_mode_sw_int.patch
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
validpgpkeys = 83BC8889351B5DEBBB68416EB8AC08600F108CDF
- sha256sums = 8d0c8936e3140a0fbdf511ad7a9f21121598f3656743898f47bb9052d37cff68
+ sha256sums = 9ac322d85bcf98a04667d929f5c2666b15bd58c6c2d68dd512c72acbced07d04
sha256sums = SKIP
- sha256sums = 376db82b4613c3942932fde99d54c3dea1e4b29ab23d8b86daa6414327e6244d
+ sha256sums = 9ccb26c046bacf04777617e96cad5b33d3d048b30bb4840a3b5ac2cdf40a3aba
sha256sums = SKIP
sha256sums = c2b00c84c4b543db431e06604d939a62f93107d18369f4d9860dc8062b01ab45
sha256sums = 278118011d7a2eeca9971ac97b31bf0c55ab55e99c662ab9ae4717b55819c9a2
sha256sums = d69232afd0dd6982ae941cf2d1f577f4be2011e3bb847d1db37952acf416b5d3
sha256sums = 0a7ea482fe20c403788d290826cec42fe395e5a6eab07b88845f8b9a9829998d
sha256sums = ed242f4be3f8eaade2a1d42157c5c6c86281917a08ae43221b088fafdc775ee7
- sha256sums = 6e0b648637a0925df4bb43f5eb5144838415e02c43ff8fddbf82f6813b0f132c
+ sha256sums = 09883311108d461da63a04012d7a2b7f6a4165ee0c4e9cb7a5dc3f9ade326fc7
sha256sums = a8e1e11a4ab1995cc4975c9b134a43ddfe7054ef0c965e52a7d8f9223e15c3e0
sha256sums = 315d1839630b37894a626bbc2aea012618b2e1ccb6f9d8aa27c0a3ce5e90e99c
sha256sums = a00b952d53df9d3617d93e8fba4146a4d6169ebe79f029b3a55cca68f738d8ea
@@ -60,6 +66,12 @@ pkgbase = linux-g14
sha256sums = e41198b29cee4de7a5132d8df606f48c2d0f9c9076fe4230b00a33c7e0b22c71
sha256sums = 1edb362a762c8858374027e30ff58ae0014e117fdc05cc7db6da50f80e7aab87
sha256sums = 508f90cbe81a9a145cc540703470f1e6b5d21c7a7b9166d2ce6e56b401262b04
+ sha256sums = 9b94f02b87c28a7403478ce9f57461c3b2219b7279a928e814cafd78ee767366
+ sha256sums = 1bc69aaec2089599c1154d7ee5709f5a6140434ef6edf81702b0ea7042a44967
+ sha256sums = aa171a103d4133db4cfe153e48e71b58a85d69ed9fe144100dcc792055d79495
+ sha256sums = f0e5b7653c91e025c5c2010e2447c98eaad699106b34ff140da106e628ea5c17
+ sha256sums = 4b1e78681848c34175251e9dcbee02f6b2bb67a65aae6ea8bfb5e1322dc51f7a
+ sha256sums = 4ee418b9d9905a89e58cc860fb93cb226f45ac2d00d767fc3c4dccb297c731ee
sha256sums = 15e912a66e4bbce1cf0450f1dc6610653df29df8dd6d5426f9c1b039490436c8
sha256sums = 444f2d86de8c2177655b01596f939f99c2e7abfa8efad8a509e0a334f42dfa85
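The refreshed source and sha256sums entries above are the mechanical half of the version bump: for an AUR package these fields are conventionally regenerated from the updated PKGBUILD with updpkgsums and makepkg --printsrcinfo > .SRCINFO rather than edited by hand.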
diff --git a/0001-linux6.7.y-bore4.0.0.patch b/0001-linux6.7.y-bore4.0.0.patch
deleted file mode 100644
index 6d471fa9a783..000000000000
--- a/0001-linux6.7.y-bore4.0.0.patch
+++ /dev/null
@@ -1,668 +0,0 @@
-From fddf9e0e897d4b1a79755a042c5f3c66b2cdb949 Mon Sep 17 00:00:00 2001
-From: Masahito S <firelzrd@gmail.com>
-Date: Sat, 6 Jan 2024 20:57:20 +0900
-Subject: [PATCH] linux6.7.y-bore4.0.0
-
----
- include/linux/sched.h | 11 ++
- init/Kconfig | 19 +++
- kernel/sched/core.c | 132 +++++++++++++++++++++
- kernel/sched/debug.c | 3 +
- kernel/sched/fair.c | 253 +++++++++++++++++++++++++++++++++++++---
- kernel/sched/features.h | 4 +
- 6 files changed, 404 insertions(+), 18 deletions(-)
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 292c316972..8a9e843ec5 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -562,6 +562,17 @@ struct sched_entity {
- u64 sum_exec_runtime;
- u64 prev_sum_exec_runtime;
- u64 vruntime;
-+#ifdef CONFIG_SCHED_BORE
-+ u64 burst_time;
-+ u8 prev_burst_penalty;
-+ u8 curr_burst_penalty;
-+ u8 burst_penalty;
-+ u8 slice_score;
-+ u8 child_burst;
-+ u16 child_burst_cnt;
-+ u64 child_burst_last_cached;
-+ u32 slice_load;
-+#endif // CONFIG_SCHED_BORE
- s64 vlag;
- u64 slice;
-
-diff --git a/init/Kconfig b/init/Kconfig
-index 9ffb103fc9..4492c5de88 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -1258,6 +1258,25 @@ config CHECKPOINT_RESTORE
-
- If unsure, say N here.
-
-+config SCHED_BORE
-+ bool "Burst-Oriented Response Enhancer"
-+ default y
-+ help
-+ In Desktop and Mobile computing, one might prefer interactive
-+ tasks to keep responsive no matter what they run in the background.
-+
-+ Enabling this kernel feature modifies the scheduler to discriminate
-+ tasks by their burst time (runtime since it last went sleeping or
-+ yielding state) and prioritize those that run less bursty.
-+ Such tasks usually include window compositor, widgets backend,
-+ terminal emulator, video playback, games and so on.
-+ With a little impact to scheduling fairness, it may improve
-+ responsiveness especially under heavy background workload.
-+
-+ You can turn it off by setting the sysctl kernel.sched_bore = 0.
-+
-+ If unsure, say Y here.
-+
- config SCHED_AUTOGROUP
- bool "Automatic process group scheduling"
- select CGROUPS
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index a708d225c2..7f2e796b77 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -4480,6 +4480,127 @@ int wake_up_state(struct task_struct *p, unsigned int state)
- return try_to_wake_up(p, state, 0);
- }
-
-+#ifdef CONFIG_SCHED_BORE
-+extern bool sched_bore;
-+extern u8 sched_burst_fork_atavistic;
-+extern uint sched_burst_cache_lifetime;
-+
-+void __init sched_init_bore(void) {
-+ init_task.se.burst_time = 0;
-+ init_task.se.prev_burst_penalty = 0;
-+ init_task.se.curr_burst_penalty = 0;
-+ init_task.se.burst_penalty = 0;
-+ init_task.se.slice_score = 0;
-+ init_task.se.child_burst_last_cached = 0;
-+ init_task.se.slice_load = 0;
-+}
-+
-+void inline sched_fork_bore(struct task_struct *p) {
-+ p->se.burst_time = 0;
-+ p->se.curr_burst_penalty = 0;
-+ p->se.slice_score = 0;
-+ p->se.child_burst_last_cached = 0;
-+ p->se.slice_load = 0;
-+}
-+
-+static u32 count_child_tasks(struct task_struct *p) {
-+ struct task_struct *child;
-+ u32 cnt = 0;
-+ list_for_each_entry(child, &p->children, sibling) {cnt++;}
-+ return cnt;
-+}
-+
-+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
-+ return (p->se.child_burst_last_cached + sched_burst_cache_lifetime < now);
-+}
-+
-+static void __update_child_burst_cache(
-+ struct task_struct *p, u32 cnt, u32 sum, u64 now) {
-+ u8 avg = 0;
-+ if (cnt) avg = sum / cnt;
-+ p->se.child_burst = max(avg, p->se.burst_penalty);
-+ p->se.child_burst_cnt = cnt;
-+ p->se.child_burst_last_cached = now;
-+}
-+
-+static void update_child_burst_cache(struct task_struct *p, u64 now) {
-+ struct task_struct *child;
-+ u32 cnt = 0;
-+ u32 sum = 0;
-+
-+ list_for_each_entry(child, &p->children, sibling) {
-+ if (child->sched_class != &fair_sched_class) continue;
-+ cnt++;
-+ sum += child->se.burst_penalty;
-+ }
-+
-+ __update_child_burst_cache(p, cnt, sum, now);
-+}
-+
-+static void update_child_burst_cache_atavistic(
-+ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
-+ struct task_struct *child, *dec;
-+ u32 cnt = 0, dcnt = 0;
-+ u32 sum = 0;
-+
-+ list_for_each_entry(child, &p->children, sibling) {
-+ dec = child;
-+ while ((dcnt = count_child_tasks(dec)) == 1)
-+ dec = list_first_entry(&dec->children, struct task_struct, sibling);
-+
-+ if (!dcnt || !depth) {
-+ if (dec->sched_class != &fair_sched_class) continue;
-+ cnt++;
-+ sum += dec->se.burst_penalty;
-+ continue;
-+ }
-+ if (!child_burst_cache_expired(dec, now)) {
-+ cnt += dec->se.child_burst_cnt;
-+ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
-+ continue;
-+ }
-+ update_child_burst_cache_atavistic(dec, now, depth - 1, &cnt, &sum);
-+ }
-+
-+ __update_child_burst_cache(p, cnt, sum, now);
-+ *acnt += cnt;
-+ *asum += sum;
-+}
-+
-+static void sched_post_fork_bore(struct task_struct *p) {
-+ struct sched_entity *se = &p->se;
-+ struct task_struct *anc;
-+ u64 now;
-+ u32 cnt = 0, sum = 0, depth;
-+ u8 burst_cache;
-+
-+ if (p->sched_class != &fair_sched_class) return;
-+
-+ if (likely(sched_bore)) {
-+ now = ktime_get_ns();
-+ read_lock(&tasklist_lock);
-+
-+ anc = p->real_parent;
-+ depth = sched_burst_fork_atavistic;
-+ if (likely(depth)) {
-+ while ((anc->real_parent != anc) && (count_child_tasks(anc) == 1))
-+ anc = anc->real_parent;
-+ if (child_burst_cache_expired(anc, now))
-+ update_child_burst_cache_atavistic(
-+ anc, now, depth - 1, &cnt, &sum);
-+ } else
-+ if (child_burst_cache_expired(anc, now))
-+ update_child_burst_cache(anc, now);
-+
-+ burst_cache = anc->se.child_burst;
-+
-+ read_unlock(&tasklist_lock);
-+ se->prev_burst_penalty = max(se->prev_burst_penalty, burst_cache);
-+ }
-+ se->burst_penalty = se->prev_burst_penalty;
-+}
-+#endif // CONFIG_SCHED_BORE
-+
- /*
- * Perform scheduler related setup for a newly forked process p.
- * p is forked by current.
-@@ -4496,6 +4617,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->se.prev_sum_exec_runtime = 0;
- p->se.nr_migrations = 0;
- p->se.vruntime = 0;
-+#ifdef CONFIG_SCHED_BORE
-+ sched_fork_bore(p);
-+#endif // CONFIG_SCHED_BORE
- p->se.vlag = 0;
- p->se.slice = sysctl_sched_base_slice;
- INIT_LIST_HEAD(&p->se.group_node);
-@@ -4815,6 +4939,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-
- void sched_post_fork(struct task_struct *p)
- {
-+#ifdef CONFIG_SCHED_BORE
-+ sched_post_fork_bore(p);
-+#endif // CONFIG_SCHED_BORE
- uclamp_post_fork(p);
- }
-
-@@ -9885,6 +10012,11 @@ void __init sched_init(void)
- BUG_ON(&dl_sched_class != &stop_sched_class + 1);
- #endif
-
-+#ifdef CONFIG_SCHED_BORE
-+ sched_init_bore();
-+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.0.0 by Masahito Suzuki");
-+#endif // CONFIG_SCHED_BORE
-+
- wait_bit_init();
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 4580a45070..cf2c694c94 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -595,6 +595,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
- SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
- SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
-
-+#ifdef CONFIG_SCHED_BORE
-+ SEQ_printf(m, " %2d", p->se.slice_score);
-+#endif
- #ifdef CONFIG_NUMA_BALANCING
- SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
- #endif
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index d7a3c63a21..bfc1332995 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -19,6 +19,9 @@
- *
- * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
-+ *
-+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
-+ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
- */
- #include <linux/energy_model.h>
- #include <linux/mmap_lock.h>
-@@ -64,20 +67,122 @@
- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
- * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
- *
-- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
-+ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
-+ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
- */
-+#ifdef CONFIG_SCHED_BORE
-+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
-+#else // CONFIG_SCHED_BORE
- unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
-+#endif // CONFIG_SCHED_BORE
-
- /*
- * Minimal preemption granularity for CPU-bound tasks:
- *
-- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
-+ * (BORE default: 3 msec constant, units: nanoseconds)
-+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
- */
-+#ifdef CONFIG_SCHED_BORE
-+unsigned int sysctl_sched_base_slice = 3000000ULL;
-+static unsigned int normalized_sysctl_sched_base_slice = 3000000ULL;
-+#else // CONFIG_SCHED_BORE
- unsigned int sysctl_sched_base_slice = 750000ULL;
- static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
-+#endif // CONFIG_SCHED_BORE
-
- const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-
-+#ifdef CONFIG_SCHED_BORE
-+bool __read_mostly sched_bore = 1;
-+bool __read_mostly sched_burst_score_rounding = 0;
-+bool __read_mostly sched_burst_smoothness_long = 1;
-+bool __read_mostly sched_burst_smoothness_short = 0;
-+u8 __read_mostly sched_burst_fork_atavistic = 2;
-+u8 __read_mostly sched_burst_penalty_offset = 22;
-+uint __read_mostly sched_burst_penalty_scale = 1280;
-+uint __read_mostly sched_burst_cache_lifetime = 60000000;
-+static u8 sixty_four = 64;
-+static uint maxval_12_bits = 4095;
-+
-+#define MAX_BURST_PENALTY (39U <<2)
-+
-+static inline u32 log2plus1_u64_u32f8(u64 v) {
-+ u32 msb = fls64(v);
-+ s32 excess_bits = msb - 9;
-+ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
-+ return msb << 8 | fractional;
-+}
-+
-+static inline u32 calc_burst_penalty(u64 burst_time) {
-+ u32 greed, tolerance, penalty, scaled_penalty;
-+
-+ greed = log2plus1_u64_u32f8(burst_time);
-+ tolerance = sched_burst_penalty_offset << 8;
-+ penalty = max(0, (s32)greed - (s32)tolerance);
-+ scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
-+
-+ return min(MAX_BURST_PENALTY, scaled_penalty);
-+}
-+
-+static inline void update_burst_penalty(struct sched_entity *se) {
-+ se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
-+ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
-+}
-+
-+static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
-+ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->slice_score], 22);
-+}
-+
-+static inline u64 __unscale_slice(u64 delta, u8 score) {
-+ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
-+}
-+
-+static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
-+ return __unscale_slice(delta, se->slice_score);
-+}
-+
-+static void avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se);
-+static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se);
-+
-+static void update_slice_score(struct sched_entity *se) {
-+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
-+ u8 prev_score = se->slice_score;
-+ u32 penalty = se->burst_penalty;
-+ if (sched_burst_score_rounding) penalty += 0x2U;
-+ se->slice_score = penalty >> 2;
-+
-+ if (se->slice_score != prev_score && se->slice_load) {
-+ avg_vruntime_sub(cfs_rq, se);
-+ avg_vruntime_add(cfs_rq, se);
-+ }
-+}
-+
-+static inline u32 binary_smooth(u32 new, u32 old) {
-+ int increment = new - old;
-+ return (0 <= increment)?
-+ old + ( increment >> (int)sched_burst_smoothness_long):
-+ old - (-increment >> (int)sched_burst_smoothness_short);
-+}
-+
-+static void restart_burst(struct sched_entity *se) {
-+ se->burst_penalty = se->prev_burst_penalty =
-+ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
-+ se->curr_burst_penalty = 0;
-+ se->burst_time = 0;
-+ update_slice_score(se);
-+}
-+
-+static inline void restart_burst_rescale_deadline(struct sched_entity *se) {
-+ u64 wremain, vremain = se->deadline - se->vruntime;
-+ u8 prev_score = se->slice_score;
-+ restart_burst(se);
-+ if (prev_score > se->slice_score) {
-+ wremain = __unscale_slice(vremain, prev_score);
-+ se->deadline = se->vruntime + scale_slice(wremain, se);
-+ }
-+}
-+#endif // CONFIG_SCHED_BORE
-+
- int sched_thermal_decay_shift;
- static int __init setup_sched_thermal_decay_shift(char *str)
- {
-@@ -137,6 +242,70 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
-
- #ifdef CONFIG_SYSCTL
- static struct ctl_table sched_fair_sysctls[] = {
-+#ifdef CONFIG_SCHED_BORE
-+ {
-+ .procname = "sched_bore",
-+ .data = &sched_bore,
-+ .maxlen = sizeof(bool),
-+ .mode = 0644,
-+ .proc_handler = &proc_dobool,
-+ },
-+ {
-+ .procname = "sched_burst_cache_lifetime",
-+ .data = &sched_burst_cache_lifetime,
-+ .maxlen = sizeof(uint),
-+ .mode = 0644,
-+ .proc_handler = proc_douintvec,
-+ },
-+ {
-+ .procname = "sched_burst_fork_atavistic",
-+ .data = &sched_burst_fork_atavistic,
-+ .maxlen = sizeof(u8),
-+ .mode = 0644,
-+ .proc_handler = &proc_dou8vec_minmax,
-+ .extra1 = SYSCTL_ZERO,
-+ .extra2 = SYSCTL_THREE,
-+ },
-+ {
-+ .procname = "sched_burst_penalty_offset",
-+ .data = &sched_burst_penalty_offset,
-+ .maxlen = sizeof(u8),
-+ .mode = 0644,
-+ .proc_handler = &proc_dou8vec_minmax,
-+ .extra1 = SYSCTL_ZERO,
-+ .extra2 = &sixty_four,
-+ },
-+ {
-+ .procname = "sched_burst_penalty_scale",
-+ .data = &sched_burst_penalty_scale,
-+ .maxlen = sizeof(uint),
-+ .mode = 0644,
-+ .proc_handler = &proc_douintvec_minmax,
-+ .extra1 = SYSCTL_ZERO,
-+ .extra2 = &maxval_12_bits,
-+ },
-+ {
-+ .procname = "sched_burst_score_rounding",
-+ .data = &sched_burst_score_rounding,
-+ .maxlen = sizeof(bool),
-+ .mode = 0644,
-+ .proc_handler = &proc_dobool,
-+ },
-+ {
-+ .procname = "sched_burst_smoothness_long",
-+ .data = &sched_burst_smoothness_long,
-+ .maxlen = sizeof(bool),
-+ .mode = 0644,
-+ .proc_handler = &proc_dobool,
-+ },
-+ {
-+ .procname = "sched_burst_smoothness_short",
-+ .data = &sched_burst_smoothness_short,
-+ .maxlen = sizeof(bool),
-+ .mode = 0644,
-+ .proc_handler = &proc_dobool,
-+ },
-+#endif // CONFIG_SCHED_BORE
- #ifdef CONFIG_CFS_BANDWIDTH
- {
- .procname = "sched_cfs_bandwidth_slice_us",
-@@ -298,6 +467,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
-
-+#ifdef CONFIG_SCHED_BORE
-+ if (likely(sched_bore)) delta = scale_slice(delta, se);
-+#endif // CONFIG_SCHED_BORE
- return delta;
- }
-
-@@ -620,10 +792,22 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
- *
- * As measured, the max (key * weight) value was ~44 bits for a kernel build.
- */
-+static unsigned long calc_avg_load_weight(struct sched_entity *se) {
-+ unsigned long weight = scale_load_down(se->load.weight);
-+#ifdef CONFIG_SCHED_BORE
-+ weight <<= 5;
-+ if (likely(sched_bore)) weight = unscale_slice(weight, se);
-+#endif // CONFIG_SCHED_BORE
-+ return weight;
-+}
-+
- static void
- avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-- unsigned long weight = scale_load_down(se->load.weight);
-+ unsigned long weight = calc_avg_load_weight(se);
-+#ifdef CONFIG_SCHED_BORE
-+ se->slice_load = weight;
-+#endif // CONFIG_SCHED_BORE
- s64 key = entity_key(cfs_rq, se);
-
- cfs_rq->avg_vruntime += key * weight;
-@@ -633,7 +817,13 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
- static void
- avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-- unsigned long weight = scale_load_down(se->load.weight);
-+ unsigned long weight;
-+#if !defined(CONFIG_SCHED_BORE)
-+ weight = scale_load_down(se->load.weight);
-+#else // CONFIG_SCHED_BORE
-+ weight = se->slice_load;
-+ se->slice_load = 0;
-+#endif // CONFIG_SCHED_BORE
- s64 key = entity_key(cfs_rq, se);
-
- cfs_rq->avg_vruntime -= key * weight;
-@@ -653,14 +843,14 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
- * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
- * For this to be so, the result of this function must have a left bias.
- */
--u64 avg_vruntime(struct cfs_rq *cfs_rq)
-+static u64 avg_key(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
- s64 avg = cfs_rq->avg_vruntime;
- long load = cfs_rq->avg_load;
-
- if (curr && curr->on_rq) {
-- unsigned long weight = scale_load_down(curr->load.weight);
-+ unsigned long weight = calc_avg_load_weight(curr);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
-@@ -673,7 +863,11 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
- avg = div_s64(avg, load);
- }
-
-- return cfs_rq->min_vruntime + avg;
-+ return avg;
-+}
-+
-+inline u64 avg_vruntime(struct cfs_rq *cfs_rq) {
-+ return cfs_rq->min_vruntime + avg_key(cfs_rq);
- }
-
- /*
-@@ -694,13 +888,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
- */
- static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-- s64 lag, limit;
--
- SCHED_WARN_ON(!se->on_rq);
-- lag = avg_vruntime(cfs_rq) - se->vruntime;
--
-- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
-- se->vlag = clamp(lag, -limit, limit);
-+ se->vlag = avg_vruntime(cfs_rq) - se->vruntime;
- }
-
- /*
-@@ -727,7 +916,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
- long load = cfs_rq->avg_load;
-
- if (curr && curr->on_rq) {
-- unsigned long weight = scale_load_down(curr->load.weight);
-+ unsigned long weight = calc_avg_load_weight(curr);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
-@@ -1016,6 +1205,9 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
- /*
- * EEVDF: vd_i = ve_i + r_i / w_i
- */
-+#ifdef CONFIG_SCHED_BORE
-+ update_slice_score(se);
-+#endif // CONFIG_SCHED_BORE
- se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
-
- /*
-@@ -1158,7 +1350,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq->exec_clock, delta_exec);
-
-- curr->vruntime += calc_delta_fair(delta_exec, curr);
-+#ifdef CONFIG_SCHED_BORE
-+ curr->burst_time += delta_exec;
-+ update_burst_penalty(curr);
-+#endif // CONFIG_SCHED_BORE
-+ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
- update_deadline(cfs_rq, curr);
- update_min_vruntime(cfs_rq);
-
-@@ -5131,7 +5327,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- struct sched_entity *curr = cfs_rq->curr;
- unsigned long load;
-
-- lag = se->vlag;
-+ u64 limit = calc_delta_fair(max_t(u64, se->slice*2, TICK_NSEC), se);
-+ s64 overmet = limit, undermet = limit;
-+#ifdef CONFIG_SCHED_BORE
-+ if (likely(sched_bore)) overmet /= 2;
-+#endif // CONFIG_SCHED_BORE
-+ lag = clamp(se->vlag, -overmet, undermet);
-
- /*
- * If we want to place a task and preserve lag, we have to
-@@ -5187,9 +5388,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- */
- load = cfs_rq->avg_load;
- if (curr && curr->on_rq)
-- load += scale_load_down(curr->load.weight);
-+ load += calc_avg_load_weight(curr);
-
-- lag *= load + scale_load_down(se->load.weight);
-+ lag *= load + calc_avg_load_weight(se);
- if (WARN_ON_ONCE(!load))
- load = 1;
- lag = div_s64(lag, load);
-@@ -6759,6 +6960,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- bool was_sched_idle = sched_idle_rq(rq);
-
- util_est_dequeue(&rq->cfs, p);
-+#ifdef CONFIG_SCHED_BORE
-+ if (task_sleep) {
-+ update_curr(cfs_rq_of(se));
-+ restart_burst(se);
-+ }
-+#endif // CONFIG_SCHED_BORE
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
-@@ -8494,6 +8701,9 @@ static void yield_task_fair(struct rq *rq)
- /*
- * Are we the only task in the tree?
- */
-+#ifdef CONFIG_SCHED_BORE
-+ if (unlikely(!sched_bore))
-+#endif // CONFIG_SCHED_BORE
- if (unlikely(rq->nr_running == 1))
- return;
-
-@@ -8510,6 +8720,10 @@ static void yield_task_fair(struct rq *rq)
- * and double the fastpath cost.
- */
- rq_clock_skip_update(rq);
-+#ifdef CONFIG_SCHED_BORE
-+ restart_burst_rescale_deadline(se);
-+ if (unlikely(rq->nr_running == 1)) return;
-+#endif // CONFIG_SCHED_BORE
-
- se->deadline += calc_delta_fair(se->slice, se);
- }
-@@ -12590,6 +12804,9 @@ static void task_fork_fair(struct task_struct *p)
- curr = cfs_rq->curr;
- if (curr)
- update_curr(cfs_rq);
-+#ifdef CONFIG_SCHED_BORE
-+ update_slice_score(se);
-+#endif // CONFIG_SCHED_BORE
- place_entity(cfs_rq, se, ENQUEUE_INITIAL);
- rq_unlock(rq, &rf);
- }
-diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index a3ddf84de4..841a428579 100644
---- a/kernel/sched/features.h
-+++ b/kernel/sched/features.h
-@@ -6,7 +6,11 @@
- */
- SCHED_FEAT(PLACE_LAG, true)
- SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
-+#ifdef CONFIG_SCHED_BORE
-+SCHED_FEAT(RUN_TO_PARITY, false)
-+#else // CONFIG_SCHED_BORE
- SCHED_FEAT(RUN_TO_PARITY, true)
-+#endif // CONFIG_SCHED_BORE
-
- /*
- * Prefer to schedule the task we woke last (assuming it failed
---
-2.25.1
diff --git a/0001-linux6.8.y-bore4.5.0.patch b/0001-linux6.8.y-bore4.5.0.patch
new file mode 100644
index 000000000000..61db732d1be9
--- /dev/null
+++ b/0001-linux6.8.y-bore4.5.0.patch
@@ -0,0 +1,904 @@
+From d8d9f2ff3cbf43b41269da34772b59bc9a11c5c2 Mon Sep 17 00:00:00 2001
+From: Masahito S <firelzrd@gmail.com>
+Date: Sun, 3 Mar 2024 05:14:31 +0900
+Subject: [PATCH] linux6.8.y-bore4.5.0
+
+---
+ include/linux/sched.h | 12 ++
+ init/Kconfig | 19 +++
+ kernel/sched/core.c | 148 ++++++++++++++++++++
+ kernel/sched/debug.c | 61 +++++++-
+ kernel/sched/fair.c | 302 ++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/features.h | 4 +
+ kernel/sched/sched.h | 7 +
+ 7 files changed, 538 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ffe8f618ab..7ac6163f90 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -547,6 +547,18 @@ struct sched_entity {
+ u64 sum_exec_runtime;
+ u64 prev_sum_exec_runtime;
+ u64 vruntime;
++#ifdef CONFIG_SCHED_BORE
++ u64 burst_time;
++ u8 prev_burst_penalty;
++ u8 curr_burst_penalty;
++ u8 burst_penalty;
++ u8 burst_score;
++ u32 burst_load;
++ bool on_cfs_rq;
++ u8 child_burst;
++ u32 child_burst_cnt;
++ u64 child_burst_last_cached;
++#endif // CONFIG_SCHED_BORE
+ s64 vlag;
+ u64 slice;
+
+diff --git a/init/Kconfig b/init/Kconfig
+index deda3d1413..9fa6b45c03 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1270,6 +1270,25 @@ config CHECKPOINT_RESTORE
+
+ If unsure, say N here.
+
++config SCHED_BORE
++ bool "Burst-Oriented Response Enhancer"
++ default y
++ help
++ In Desktop and Mobile computing, one might prefer interactive
++ tasks to keep responsive no matter what they run in the background.
++
++ Enabling this kernel feature modifies the scheduler to discriminate
++ tasks by their burst time (runtime since it last went sleeping or
++ yielding state) and prioritize those that run less bursty.
++ Such tasks usually include window compositor, widgets backend,
++ terminal emulator, video playback, games and so on.
++ With a little impact to scheduling fairness, it may improve
++ responsiveness especially under heavy background workload.
++
++ You can turn it off by setting the sysctl kernel.sched_bore = 0.
++
++ If unsure, say Y here.
++
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select CGROUPS
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9116bcc903..b4bd85b3bf 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4507,6 +4507,143 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+ return try_to_wake_up(p, state, 0);
+ }
+
++#ifdef CONFIG_SCHED_BORE
++extern bool sched_bore;
++extern u8 sched_burst_fork_atavistic;
++extern uint sched_burst_cache_lifetime;
++
++static void __init sched_init_bore(void) {
++ init_task.se.burst_time = 0;
++ init_task.se.prev_burst_penalty = 0;
++ init_task.se.curr_burst_penalty = 0;
++ init_task.se.burst_penalty = 0;
++ init_task.se.burst_score = 0;
++ init_task.se.on_cfs_rq = false;
++ init_task.se.child_burst_last_cached = 0;
++ init_task.se.burst_load = 0;
++}
++
++void inline sched_fork_bore(struct task_struct *p) {
++ p->se.burst_time = 0;
++ p->se.curr_burst_penalty = 0;
++ p->se.burst_score = 0;
++ p->se.on_cfs_rq = false;
++ p->se.child_burst_last_cached = 0;
++ p->se.burst_load = 0;
++}
++
++static u32 count_child_tasks(struct task_struct *p) {
++ struct task_struct *child;
++ u32 cnt = 0;
++ list_for_each_entry(child, &p->children, sibling) {cnt++;}
++ return cnt;
++}
++
++static inline bool task_is_inheritable(struct task_struct *p) {
++ return (p->sched_class == &fair_sched_class);
++}
++
++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
++ u64 expiration_time =
++ p->se.child_burst_last_cached + sched_burst_cache_lifetime;
++ return ((s64)(expiration_time - now) < 0);
++}
++
++static void __update_child_burst_cache(
++ struct task_struct *p, u32 cnt, u32 sum, u64 now) {
++ u8 avg = 0;
++ if (cnt) avg = sum / cnt;
++ p->se.child_burst = max(avg, p->se.burst_penalty);
++ p->se.child_burst_cnt = cnt;
++ p->se.child_burst_last_cached = now;
++}
++
++static inline void update_child_burst_direct(struct task_struct *p, u64 now) {
++ struct task_struct *child;
++ u32 cnt = 0;
++ u32 sum = 0;
++
++ list_for_each_entry(child, &p->children, sibling) {
++ if (!task_is_inheritable(child)) continue;
++ cnt++;
++ sum += child->se.burst_penalty;
++ }
++
++ __update_child_burst_cache(p, cnt, sum, now);
++}
++
++static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) {
++ struct task_struct *parent = p->real_parent;
++ if (child_burst_cache_expired(parent, now))
++ update_child_burst_direct(parent, now);
++
++ return parent->se.child_burst;
++}
++
++static void update_child_burst_topological(
++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
++ struct task_struct *child, *dec;
++ u32 cnt = 0, dcnt = 0;
++ u32 sum = 0;
++
++ list_for_each_entry(child, &p->children, sibling) {
++ dec = child;
++ while ((dcnt = count_child_tasks(dec)) == 1)
++ dec = list_first_entry(&dec->children, struct task_struct, sibling);
++
++ if (!dcnt || !depth) {
++ if (!task_is_inheritable(dec)) continue;
++ cnt++;
++ sum += dec->se.burst_penalty;
++ continue;
++ }
++ if (!child_burst_cache_expired(dec, now)) {
++ cnt += dec->se.child_burst_cnt;
++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
++ continue;
++ }
++ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum);
++ }
++
++ __update_child_burst_cache(p, cnt, sum, now);
++ *acnt += cnt;
++ *asum += sum;
++}
++
++static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) {
++ struct task_struct *anc = p->real_parent;
++ u32 cnt = 0, sum = 0;
++
++ while (anc->real_parent != anc && count_child_tasks(anc) == 1)
++ anc = anc->real_parent;
++
++ if (child_burst_cache_expired(anc, now))
++ update_child_burst_topological(
++ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
++
++ return anc->se.child_burst;
++}
++
++static inline void inherit_burst(struct task_struct *p) {
++ u8 burst_cache;
++ u64 now = ktime_get_ns();
++
++ read_lock(&tasklist_lock);
++ burst_cache = likely(sched_burst_fork_atavistic)?
++ __inherit_burst_topological(p, now):
++ __inherit_burst_direct(p, now);
++ read_unlock(&tasklist_lock);
++
++ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache);
++}
++
++static void sched_post_fork_bore(struct task_struct *p) {
++ if (p->sched_class == &fair_sched_class && likely(sched_bore))
++ inherit_burst(p);
++ p->se.burst_penalty = p->se.prev_burst_penalty;
++}
++#endif // CONFIG_SCHED_BORE
++
+ /*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+@@ -4523,6 +4660,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
++#ifdef CONFIG_SCHED_BORE
++ sched_fork_bore(p);
++#endif // CONFIG_SCHED_BORE
+ p->se.vlag = 0;
+ p->se.slice = sysctl_sched_base_slice;
+ INIT_LIST_HEAD(&p->se.group_node);
+@@ -4839,6 +4979,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+
+ void sched_post_fork(struct task_struct *p)
+ {
++#ifdef CONFIG_SCHED_BORE
++ sched_post_fork_bore(p);
++#endif // CONFIG_SCHED_BORE
+ uclamp_post_fork(p);
+ }
+
+@@ -9910,6 +10053,11 @@ void __init sched_init(void)
+ BUG_ON(&dl_sched_class != &stop_sched_class + 1);
+ #endif
+
++#ifdef CONFIG_SCHED_BORE
++ sched_init_bore();
++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.0 by Masahito Suzuki");
++#endif // CONFIG_SCHED_BORE
++
+ wait_bit_init();
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 8d5d98a583..a565363fdd 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
+ };
+
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_SCHED_BORE
++static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[16];
++ unsigned int value;
++
++ if (cnt > 15)
++ cnt = 15;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++ buf[cnt] = '\0';
++
++ if (kstrtouint(buf, 10, &value))
++ return -EINVAL;
+
++ if (!value)
++ return -EINVAL;
++
++ sysctl_sched_min_base_slice = value;
++ sched_update_min_base_slice();
++
++ *ppos += cnt;
++ return cnt;
++}
++
++static int sched_min_base_slice_show(struct seq_file *m, void *v)
++{
++ seq_printf(m, "%d\n", sysctl_sched_min_base_slice);
++ return 0;
++}
++
++static int sched_min_base_slice_open(struct inode *inode, struct file *filp)
++{
++ return single_open(filp, sched_min_base_slice_show, NULL);
++}
++
++static const struct file_operations sched_min_base_slice_fops = {
++ .open = sched_min_base_slice_open,
++ .write = sched_min_base_slice_write,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++#else // !CONFIG_SCHED_BORE
+ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+ {
+@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = {
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+-
++#endif // CONFIG_SCHED_BORE
+ #endif /* SMP */
+
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+@@ -347,13 +392,20 @@ static __init int sched_init_debug(void)
+ debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+
++#ifdef CONFIG_SCHED_BORE
++ debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops);
++ debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice);
++#else // !CONFIG_SCHED_BORE
+ debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
++#endif // CONFIG_SCHED_BORE
+
+ debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
+ debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
+
+ #ifdef CONFIG_SMP
++#if !defined(CONFIG_SCHED_BORE)
+ debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
++#endif // CONFIG_SCHED_BORE
+ debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
+ debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
+
+@@ -595,6 +647,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
+ SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+
++#ifdef CONFIG_SCHED_BORE
++ SEQ_printf(m, " %2d", p->se.burst_score);
++#endif // CONFIG_SCHED_BORE
+ #ifdef CONFIG_NUMA_BALANCING
+ SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ #endif
+@@ -1068,6 +1123,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+
+ P(se.load.weight);
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_SCHED_BORE
++ P(se.burst_load);
++ P(se.burst_score);
++#endif // CONFIG_SCHED_BORE
+ P(se.avg.load_sum);
+ P(se.avg.runnable_sum);
+ P(se.avg.util_sum);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 533547e3c9..429753c008 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -19,6 +19,9 @@
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
++ *
++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
+ */
+ #include <linux/energy_model.h>
+ #include <linux/mmap_lock.h>
+@@ -64,20 +67,129 @@
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
+ *
+- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
++ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ */
++#ifdef CONFIG_SCHED_BORE
++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
++#else // !CONFIG_SCHED_BORE
+ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
++#endif // CONFIG_SCHED_BORE
+
+ /*
+ * Minimal preemption granularity for CPU-bound tasks:
+ *
+- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ * (BORE default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds)
++ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_SCHED_BORE
++unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
++static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
++unsigned int sysctl_sched_min_base_slice = 2000000ULL;
++#else // !CONFIG_SCHED_BORE
+ unsigned int sysctl_sched_base_slice = 750000ULL;
+ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
++#endif // CONFIG_SCHED_BORE
+
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
++#ifdef CONFIG_SCHED_BORE
++u8 __read_mostly sched_bore = 1;
++u8 __read_mostly sched_burst_score_rounding = 0;
++u8 __read_mostly sched_burst_smoothness_long = 1;
++u8 __read_mostly sched_burst_smoothness_short = 0;
++u8 __read_mostly sched_burst_fork_atavistic = 2;
++u8 __read_mostly sched_burst_penalty_offset = 22;
++uint __read_mostly sched_burst_penalty_scale = 1280;
++uint __read_mostly sched_burst_cache_lifetime = 60000000;
++u8 __read_mostly sched_vlag_deviation_limit = 8;
++static int __maybe_unused thirty_two = 32;
++static int __maybe_unused sixty_four = 64;
++static int __maybe_unused maxval_12_bits = 4095;
++
++#define MAX_BURST_PENALTY (39U <<2)
++
++static inline u32 log2plus1_u64_u32f8(u64 v) {
++ u32 msb = fls64(v);
++ s32 excess_bits = msb - 9;
++ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
++ return msb << 8 | fractional;
++}
++
++static inline u32 calc_burst_penalty(u64 burst_time) {
++ u32 greed, tolerance, penalty, scaled_penalty;
++
++ greed = log2plus1_u64_u32f8(burst_time);
++ tolerance = sched_burst_penalty_offset << 8;
++ penalty = max(0, (s32)greed - (s32)tolerance);
++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
++
++ return min(MAX_BURST_PENALTY, scaled_penalty);
++}
++
++static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22);
++}
++
++static inline u64 __unscale_slice(u64 delta, u8 score) {
++ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
++}
++
++static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
++ return __unscale_slice(delta, se->burst_score);
++}
++
++static void avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se);
++static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se);
++
++static void update_burst_score(struct sched_entity *se) {
++ struct cfs_rq *cfs_rq = cfs_rq_of(se);
++ u8 prev_score = se->burst_score;
++ u32 penalty = se->burst_penalty;
++ if (sched_burst_score_rounding) penalty += 0x2U;
++ se->burst_score = penalty >> 2;
++
++ if ((se->burst_score != prev_score) && se->on_cfs_rq) {
++ avg_vruntime_sub(cfs_rq, se);
++ avg_vruntime_add(cfs_rq, se);
++ }
++}
++
++static void update_burst_penalty(struct sched_entity *se) {
++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
++ update_burst_score(se);
++}
++
++static inline u32 binary_smooth(u32 new, u32 old) {
++ int increment = new - old;
++ return (0 <= increment)?
++ old + ( increment >> (int)sched_burst_smoothness_long):
++ old - (-increment >> (int)sched_burst_smoothness_short);
++}
++
++static void restart_burst(struct sched_entity *se) {
++ se->burst_penalty = se->prev_burst_penalty =
++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
++ se->curr_burst_penalty = 0;
++ se->burst_time = 0;
++ update_burst_score(se);
++}
++
++static void restart_burst_rescale_deadline(struct sched_entity *se) {
++ s64 vscaled, wremain, vremain = se->deadline - se->vruntime;
++ u8 prev_score = se->burst_score;
++ restart_burst(se);
++ if (prev_score > se->burst_score) {
++ wremain = __unscale_slice(abs(vremain), prev_score);
++ vscaled = scale_slice(wremain, se);
++ if (unlikely(vremain < 0))
++ vscaled = -vscaled;
++ se->deadline = se->vruntime + vscaled;
++ }
++}
++#endif // CONFIG_SCHED_BORE
++
+ int sched_thermal_decay_shift;
+ static int __init setup_sched_thermal_decay_shift(char *str)
+ {
+@@ -137,6 +249,87 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
+
+ #ifdef CONFIG_SYSCTL
+ static struct ctl_table sched_fair_sysctls[] = {
++#ifdef CONFIG_SCHED_BORE
++ {
++ .procname = "sched_bore",
++ .data = &sched_bore,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_ONE,
++ },
++ {
++ .procname = "sched_burst_score_rounding",
++ .data = &sched_burst_score_rounding,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_ONE,
++ },
++ {
++ .procname = "sched_burst_smoothness_long",
++ .data = &sched_burst_smoothness_long,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_ONE,
++ },
++ {
++ .procname = "sched_burst_smoothness_short",
++ .data = &sched_burst_smoothness_short,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_ONE,
++ },
++ {
++ .procname = "sched_burst_fork_atavistic",
++ .data = &sched_burst_fork_atavistic,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_THREE,
++ },
++ {
++ .procname = "sched_burst_penalty_offset",
++ .data = &sched_burst_penalty_offset,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &sixty_four,
++ },
++ {
++ .procname = "sched_burst_penalty_scale",
++ .data = &sched_burst_penalty_scale,
++ .maxlen = sizeof(uint),
++ .mode = 0644,
++ .proc_handler = proc_douintvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &maxval_12_bits,
++ },
++ {
++ .procname = "sched_burst_cache_lifetime",
++ .data = &sched_burst_cache_lifetime,
++ .maxlen = sizeof(uint),
++ .mode = 0644,
++ .proc_handler = proc_douintvec,
++ },
++ {
++ .procname = "sched_vlag_deviation_limit",
++ .data = &sched_vlag_deviation_limit,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &thirty_two,
++ },
++#endif // CONFIG_SCHED_BORE
+ #ifdef CONFIG_CFS_BANDWIDTH
+ {
+ .procname = "sched_cfs_bandwidth_slice_us",
+@@ -195,6 +388,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
++#ifdef CONFIG_SCHED_BORE
++static void update_sysctl(void) {
++ sysctl_sched_base_slice =
++ max(sysctl_sched_min_base_slice, configured_sched_base_slice);
++}
++void sched_update_min_base_slice(void) { update_sysctl(); }
++#else // !CONFIG_SCHED_BORE
+ static unsigned int get_update_sysctl_factor(void)
+ {
+ unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
+@@ -225,6 +425,7 @@ static void update_sysctl(void)
+ SET_SYSCTL(sched_base_slice);
+ #undef SET_SYSCTL
+ }
++#endif // CONFIG_SCHED_BORE
+
+ void __init sched_init_granularity(void)
+ {
+@@ -298,6 +499,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+
++#ifdef CONFIG_SCHED_BORE
++ if (likely(sched_bore)) delta = scale_slice(delta, se);
++#endif // CONFIG_SCHED_BORE
+ return delta;
+ }
+
+@@ -624,10 +828,26 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ *
+ * As measured, the max (key * weight) value was ~44 bits for a kernel build.
+ */
++#if !defined(CONFIG_SCHED_BORE)
++#define entity_weight(se) scale_load_down(se->load.weight)
++#else // CONFIG_SCHED_BORE
++static unsigned long entity_weight(struct sched_entity *se) {
++ unsigned long weight = se->load.weight;
++ if (likely(sched_bore)) weight = unscale_slice(weight, se);
++#ifdef CONFIG_64BIT
++ weight >>= SCHED_FIXEDPOINT_SHIFT - 3;
++#endif // CONFIG_64BIT
++ return weight;
++}
++#endif // CONFIG_SCHED_BORE
++
+ static void
+ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long weight = scale_load_down(se->load.weight);
++ unsigned long weight = entity_weight(se);
++#ifdef CONFIG_SCHED_BORE
++ se->burst_load = weight;
++#endif // CONFIG_SCHED_BORE
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime += key * weight;
+@@ -637,7 +857,12 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static void
+ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long weight = scale_load_down(se->load.weight);
++#if !defined(CONFIG_SCHED_BORE)
++ unsigned long weight = entity_weight(se);
++#else // CONFIG_SCHED_BORE
++ unsigned long weight = se->burst_load;
++ se->burst_load = 0;
++#endif // CONFIG_SCHED_BORE
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime -= key * weight;
+@@ -657,14 +882,14 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
+ */
+-u64 avg_vruntime(struct cfs_rq *cfs_rq)
++static u64 avg_key(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ unsigned long weight = entity_weight(curr);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+@@ -674,12 +899,15 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ /* sign flips effective floor / ceil */
+ if (avg < 0)
+ avg -= (load - 1);
+- avg = div_s64(avg, load);
++ avg = div64_s64(avg, load);
+ }
+
+- return cfs_rq->min_vruntime + avg;
++ return avg;
+ }
+
++u64 avg_vruntime(struct cfs_rq *cfs_rq) {
++ return cfs_rq->min_vruntime + avg_key(cfs_rq);
++}
+ /*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+@@ -704,6 +932,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ lag = avg_vruntime(cfs_rq) - se->vruntime;
+
+ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
++#ifdef CONFIG_SCHED_BORE
++ if (likely(sched_bore)) limit >>= 1;
++#endif // CONFIG_SCHED_BORE
+ se->vlag = clamp(lag, -limit, limit);
+ }
+
+@@ -731,7 +962,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ unsigned long weight = entity_weight(curr);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+@@ -827,10 +1058,16 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ se->min_vruntime = se->vruntime;
+ rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ __entity_less, &min_vruntime_cb);
++#ifdef CONFIG_SCHED_BORE
++ se->on_cfs_rq = true;
++#endif // CONFIG_SCHED_BORE
+ }
+
+ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++#ifdef CONFIG_SCHED_BORE
++ se->on_cfs_rq = false;
++#endif // CONFIG_SCHED_BORE
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_vruntime_cb);
+ avg_vruntime_sub(cfs_rq, se);
+@@ -955,6 +1192,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+ * Scheduling class statistics methods:
+ */
+ #ifdef CONFIG_SMP
++#if !defined(CONFIG_SCHED_BORE)
+ int sched_update_scaling(void)
+ {
+ unsigned int factor = get_update_sysctl_factor();
+@@ -966,6 +1204,7 @@ int sched_update_scaling(void)
+
+ return 0;
+ }
++#endif // CONFIG_SCHED_BORE
+ #endif
+ #endif
+
+@@ -1165,7 +1404,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ if (unlikely(delta_exec <= 0))
+ return;
+
++#ifdef CONFIG_SCHED_BORE
++ curr->burst_time += delta_exec;
++ update_burst_penalty(curr);
++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
++#else // !CONFIG_SCHED_BORE
+ curr->vruntime += calc_delta_fair(delta_exec, curr);
++#endif // CONFIG_SCHED_BORE
+ update_deadline(cfs_rq, curr);
+ update_min_vruntime(cfs_rq);
+
+@@ -5157,8 +5402,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ {
+- u64 vslice, vruntime = avg_vruntime(cfs_rq);
+- s64 lag = 0;
++ s64 lag = 0, key = avg_key(cfs_rq);
++ u64 vslice, vruntime = cfs_rq->min_vruntime + key;
+
+ se->slice = sysctl_sched_base_slice;
+ vslice = calc_delta_fair(se->slice, se);
+@@ -5171,6 +5416,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ *
+ * EEVDF: placement strategy #1 / #2
+ */
++#ifdef CONFIG_SCHED_BORE
++ if (unlikely(!sched_bore) || se->vlag)
++#endif // CONFIG_SCHED_BORE
+ if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
+ struct sched_entity *curr = cfs_rq->curr;
+ unsigned long load;
+@@ -5231,12 +5479,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ */
+ load = cfs_rq->avg_load;
+ if (curr && curr->on_rq)
+- load += scale_load_down(curr->load.weight);
++ load += entity_weight(curr);
+
+- lag *= load + scale_load_down(se->load.weight);
++ lag *= load + entity_weight(se);
+ if (WARN_ON_ONCE(!load))
+ load = 1;
+- lag = div_s64(lag, load);
++ lag = div64_s64(lag, load);
++#ifdef CONFIG_SCHED_BORE
++ if (likely(sched_bore)) {
++ s64 limit = vslice << sched_vlag_deviation_limit;
++ lag = clamp(lag, -limit, limit);
++ }
++#endif // CONFIG_SCHED_BORE
+ }
+
+ se->vruntime = vruntime - lag;
+@@ -6803,6 +7057,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ bool was_sched_idle = sched_idle_rq(rq);
+
+ util_est_dequeue(&rq->cfs, p);
++#ifdef CONFIG_SCHED_BORE
++ if (task_sleep) {
++ cfs_rq = cfs_rq_of(se);
++ if (cfs_rq->curr == se)
++ update_curr(cfs_rq);
++ restart_burst(se);
++ }
++#endif // CONFIG_SCHED_BORE
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+@@ -8552,16 +8814,25 @@ static void yield_task_fair(struct rq *rq)
+ /*
+ * Are we the only task in the tree?
+ */
++#if !defined(CONFIG_SCHED_BORE)
+ if (unlikely(rq->nr_running == 1))
+ return;
+
+ clear_buddies(cfs_rq, se);
++#endif // CONFIG_SCHED_BORE
+
+ update_rq_clock(rq);
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
++#ifdef CONFIG_SCHED_BORE
++ restart_burst_rescale_deadline(se);
++ if (unlikely(rq->nr_running == 1))
++ return;
++
++ clear_buddies(cfs_rq, se);
++#endif // CONFIG_SCHED_BORE
+ /*
+ * Tell update_rq_clock() that we've just updated,
+ * so we don't do microscopic update in schedule()
+@@ -12651,6 +12922,9 @@ static void task_fork_fair(struct task_struct *p)
+ curr = cfs_rq->curr;
+ if (curr)
+ update_curr(cfs_rq);
++#ifdef CONFIG_SCHED_BORE
++ update_burst_score(se);
++#endif // CONFIG_SCHED_BORE
+ place_entity(cfs_rq, se, ENQUEUE_INITIAL);
+ rq_unlock(rq, &rf);
+ }
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 143f55df89..3f0fe409f5 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -6,7 +6,11 @@
+ */
+ SCHED_FEAT(PLACE_LAG, true)
+ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
++#ifdef CONFIG_SCHED_BORE
++SCHED_FEAT(RUN_TO_PARITY, false)
++#else // !CONFIG_SCHED_BORE
+ SCHED_FEAT(RUN_TO_PARITY, true)
++#endif // CONFIG_SCHED_BORE
+
+ /*
+ * Prefer to schedule the task we woke last (assuming it failed
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 001fe047bd..da3ad1d4e1 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1965,7 +1965,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
+ }
+ #endif
+
++#ifdef CONFIG_SCHED_BORE
++extern void sched_update_min_base_slice(void);
++#else // !CONFIG_SCHED_BORE
+ extern int sched_update_scaling(void);
++#endif // CONFIG_SCHED_BORE
+
+ static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+ {
+@@ -2552,6 +2556,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
+ extern const_debug unsigned int sysctl_sched_migration_cost;
+
+ extern unsigned int sysctl_sched_base_slice;
++#ifdef CONFIG_SCHED_BORE
++extern unsigned int sysctl_sched_min_base_slice;
++#endif // CONFIG_SCHED_BORE
+
+ #ifdef CONFIG_SCHED_DEBUG
+ extern int sysctl_resched_latency_warn_ms;
+--
+2.34.1
+
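The arithmetic at the heart of the hunks above is compact enough to check by hand: the burst penalty is a saturating, scaled fixed-point logarithm of a task's uninterrupted runtime, and binary_smooth() decays it asymmetrically across sleep/wake cycles. The sketch below is illustrative only and not part of the commit; it reproduces calc_burst_penalty() and binary_smooth() as a standalone C program, substituting stdint types for the kernel's u8/u32/u64 and __builtin_clzll() for fls64(), with the default offset of 22 and scale of 1280 taken from the patch.

    /*
     * Standalone userspace sketch (not part of the patch) of the BORE
     * burst-penalty math. Kernel u8/u32/u64 become stdint equivalents,
     * fls64() is emulated with __builtin_clzll().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BURST_PENALTY (39U << 2)

    static const uint8_t  penalty_offset = 22;   /* sched_burst_penalty_offset */
    static const uint32_t penalty_scale  = 1280; /* sched_burst_penalty_scale  */

    /* fls64(): 1-based index of the most significant set bit, 0 for v == 0 */
    static uint32_t fls64_emul(uint64_t v) {
        return v ? 64 - __builtin_clzll(v) : 0;
    }

    /* log2(v) + 1 in 24.8 fixed point, as in the patch */
    static uint32_t log2plus1_u64_u32f8(uint64_t v) {
        uint32_t msb = fls64_emul(v);
        int32_t excess_bits = msb - 9;
        uint8_t fractional = (0 <= excess_bits) ? v >> excess_bits
                                                : v << -excess_bits;
        return msb << 8 | fractional;
    }

    static uint32_t calc_burst_penalty(uint64_t burst_time_ns) {
        uint32_t greed     = log2plus1_u64_u32f8(burst_time_ns);
        uint32_t tolerance = (uint32_t)penalty_offset << 8;
        int32_t  penalty   = (int32_t)greed - (int32_t)tolerance;
        if (penalty < 0)
            penalty = 0;
        uint32_t scaled = (uint32_t)penalty * penalty_scale >> 16;
        return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
    }

    /* Asymmetric smoothing: rises with shift smoothness_long,
     * falls with shift smoothness_short (defaults 1 and 0). */
    static uint32_t binary_smooth(uint32_t new, uint32_t old) {
        int increment = (int)new - (int)old;
        return (0 <= increment) ? old + (increment >> 1)
                                : old - (-increment >> 0);
    }

    int main(void) {
        /* Penalty is 0 below ~2^21 ns (~2.1 ms) of uninterrupted runtime,
         * then grows logarithmically, saturating at 156 (39 << 2). */
        uint64_t ns[] = { 1000000ULL, 4194304ULL, 100000000ULL, 1000000000ULL };
        for (int i = 0; i < 4; i++)
            printf("burst %10llu ns -> penalty %3u\n",
                   (unsigned long long)ns[i], calc_burst_penalty(ns[i]));
        printf("smooth(40, 100) = %u\n", binary_smooth(40, 100)); /* -> 40 */
        printf("smooth(100, 40) = %u\n", binary_smooth(100, 40)); /* -> 70 */
        return 0;
    }

Built with any C99 compiler (e.g. gcc sketch.c), it prints a zero penalty for the 1 ms burst and rising penalties beyond it, showing the roughly 2 ms tolerance window that the default offset of 22 encodes; the asymmetric shifts in binary_smooth() let the penalty drop quickly after a long burst while rising only gradually.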
diff --git a/0001-platform-x86-asus-wmi-Add-safety-checks-to-dgpu-egpu.patch b/0001-platform-x86-asus-wmi-Add-safety-checks-to-dgpu-egpu.patch
deleted file mode 100644
index cef15e44df06..000000000000
--- a/0001-platform-x86-asus-wmi-Add-safety-checks-to-dgpu-egpu.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 9eccb147466f00a13c593ac078d8639e1eafe3a2 Mon Sep 17 00:00:00 2001
-From: "Luke D. Jones" <luke@ljones.dev>
-Date: Fri, 14 Oct 2022 10:43:09 +1300
-Subject: [PATCH] platform/x86: asus-wmi: Add safety checks to dgpu/egpu/mux
- methods
-
-The WMI methods for dgpu_disable, egpu_enable, and gpu_mux_mode have
-no internal safety checks. This means it is possible for a user to
-set the gpu mux to discreet mode and then disable the dgpu, resulting
-in the user having no screen and very little chance of recovery.
-
-This commit adds checks to dgpu_disable and egpu_enable to ensure that
-the dgpu can not be disabled if the MUX is in discreet mode, and a
-check to gpu_mux_mode to prevent switching to discreet mode if
-dgpu_disable is set.
-
-Signed-off-by: Luke D. Jones <luke@ljones.dev>
----
- drivers/platform/x86/asus-wmi.c | 38 +++++++++++++++++++++++++++++++++
- 1 file changed, 38 insertions(+)
-
-diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
-index 6e8e093f96b3..1afc4d40fa1a 100644
---- a/drivers/platform/x86/asus-wmi.c
-+++ b/drivers/platform/x86/asus-wmi.c
-@@ -615,6 +615,18 @@ static ssize_t dgpu_disable_store(struct device *dev,
- if (disable > 1)
- return -EINVAL;
-
-+ /*
-+ * The GPU MUX must be checked first, if it is in discreet mode the
-+ * dgpu_disable cannot be set to on or users can end up with no screen.
-+ */
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ return result;
-+ if (!result && disable) {
-+ pr_warn("ASUS MUX is in discreet mode, can not set dgpu_disable on\n");
-+ return -EINVAL;
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
- if (err) {
- pr_warn("Failed to set dgpu disable: %d\n", err);
-@@ -663,6 +675,19 @@ static ssize_t egpu_enable_store(struct device *dev,
- if (enable > 1)
- return -EINVAL;
-
-+ /*
-+ * The GPU MUX must be checked first, if it is in discreet mode the
-+ * egpu_enable cannot be set to on or users can end up with no screen.
-+ * Note: egpu_enable method in WMI also sets dgpu_disable to on.
-+ */
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ return result;
-+ if (!result && enable) {
-+ pr_warn("ASUS MUX is in discreet mode, can not set egpu_enable on\n");
-+ return -EINVAL;
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
-@@ -709,6 +734,19 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
- if (optimus > 1)
- return -EINVAL;
-
-+ /*
-+ * The dgpu_disable must be checked first, if it is enabled the
-+ * gpu MUX can not be set to 0 or users can end up with no screen.
-+ * Note: egpu_enable also switches dgpu_disable to 1 if enabled.
-+ */
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
-+ if (result < 0)
-+ return result;
-+ if (result && !optimus) {
-+ pr_warn("ASUS dgpu_disable is set, can not switch MUX to discreet mode\n");
-+ return -EINVAL;
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
- if (err) {
- dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
---
-2.37.3
-
diff --git a/0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch b/0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
new file mode 100644
index 000000000000..9590e07d4a38
--- /dev/null
+++ b/0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
@@ -0,0 +1,150 @@
+From 55426abb60d99efed912d8309498c0c365e8dcec Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sun, 10 Mar 2024 15:14:37 +1300
+Subject: [PATCH 1/5] platform/x86: asus-wmi: add support for 2024 ROG Mini-LED
+
+Support the 2024 mini-led backlight and adjust the related functions
+to select the relevant dev-id. Also add `available_mini_led_mode` to the
+platform sysfs since the available mini-led levels can be different.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ .../ABI/testing/sysfs-platform-asus-wmi | 8 ++++
+ drivers/platform/x86/asus-wmi.c | 48 ++++++++++++++++---
+ include/linux/platform_data/x86/asus-wmi.h | 1 +
+ 3 files changed, 51 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
+index 8a7e25bde085..e32b4f0ae15f 100644
+--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
++++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
+@@ -126,6 +126,14 @@ Description:
+ Change the mini-LED mode:
+ * 0 - Single-zone,
+ * 1 - Multi-zone
++ * 2 - Multi-zone strong (available on newer generation mini-led)
++
++What:		/sys/devices/platform/<platform>/available_mini_led_mode
++Date: Jun 2023
++KernelVersion: 6.9
++Contact: "Luke Jones" <luke@ljones.dev>
++Description:
++ List the available mini-led modes.
+
+ What: /sys/devices/platform/<platform>/ppt_pl1_spl
+ Date: Jun 2023
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 18be35fdb381..a56152ccfbe7 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -297,6 +297,7 @@ struct asus_wmi {
+
+ bool panel_overdrive_available;
+ bool mini_led_mode_available;
++ u32 mini_led_dev_id;
+
+ struct hotplug_slot hotplug_slot;
+ struct mutex hotplug_lock;
+@@ -2109,10 +2110,17 @@ static ssize_t mini_led_mode_show(struct device *dev,
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+- if (result < 0)
+- return result;
++ result = asus_wmi_get_devstate_simple(asus, asus->mini_led_dev_id);
+
++	// Remap the mode values to match the previous mini-LED generation. A
++	// -19 (-ENODEV) result is also remapped, since some of these BIOSes
++	// return a bad result if the mode was set to "2" (mini-LED off).
++ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) {
++ if (result >= 0 || result == -19)
++ result = result == 1 ? 2 : result == 0 ? 1 : 0;
++ } else if (result < 0) {
++ return result;
++ }
+ return sysfs_emit(buf, "%d\n", result);
+ }
+
+@@ -2129,10 +2137,15 @@ static ssize_t mini_led_mode_store(struct device *dev,
+ if (result)
+ return result;
+
+- if (mode > 1)
++ if (mode > 1 && asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE)
+ return -EINVAL;
++ if (mode > 2 && asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2)
++ return -EINVAL;
++	// Remap the mode values to match the previous mini-LED generation
++ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2)
++ mode = mode == 2 ? 1 : mode == 0 ? 2 : 0;
+
+- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MINI_LED_MODE, mode, &result);
++ err = asus_wmi_set_devstate(asus->mini_led_dev_id, mode, &result);
+
+ if (err) {
+ pr_warn("Failed to set mini-LED: %d\n", err);
+@@ -2150,6 +2163,21 @@ static ssize_t mini_led_mode_store(struct device *dev,
+ }
+ static DEVICE_ATTR_RW(mini_led_mode);
+
++static ssize_t available_mini_led_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE)
++ return sysfs_emit(buf, "0 1\n");
++ if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2)
++ return sysfs_emit(buf, "0 1 2\n");
++
++ return sysfs_emit(buf, "0\n");
++}
++
++static DEVICE_ATTR_RO(available_mini_led_mode);
++
+ /* Quirks *********************************************************************/
+
+ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+@@ -4174,6 +4202,7 @@ static struct attribute *platform_attributes[] = {
+ &dev_attr_nv_temp_target.attr,
+ &dev_attr_panel_od.attr,
+ &dev_attr_mini_led_mode.attr,
++ &dev_attr_available_mini_led_mode.attr,
+ NULL
+ };
+
+@@ -4496,10 +4525,17 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
+ asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
+- asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+ asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+ && dmi_match(DMI_BOARD_NAME, "RC71L");
+
++ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE)) {
++ asus->mini_led_mode_available = true;
++ asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
++ } else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE2)) {
++ asus->mini_led_mode_available = true;
++ asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
++ }
++
+ err = fan_boost_mode_check_present(asus);
+ if (err)
+ goto fail_fan_boost_mode;
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index ab1c7deff118..9cadce10ad9a 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -71,6 +71,7 @@
+ #define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+ #define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+ #define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
++#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E
+
+ /* Storage */
+ #define ASUS_WMI_DEVID_CARDREADER 0x00080013
+--
+2.44.0
+
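For reference, the attributes this patch adds surface under the asus-wmi platform device. A usage sketch, with the caveat that the platform directory name varies by model (asus-nb-wmi below is an assumption):

    # Query the supported mini-LED levels, then select one.
    cat /sys/devices/platform/asus-nb-wmi/available_mini_led_mode   # e.g. "0 1 2"
    echo 2 | sudo tee /sys/devices/platform/asus-nb-wmi/mini_led_mode   # multi-zone strong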
diff --git a/0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch b/0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
new file mode 100644
index 000000000000..dbd8ee997626
--- /dev/null
+++ b/0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
@@ -0,0 +1,100 @@
+From 06d5a9b83548d99b70764166d723489cc8336b1d Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sun, 10 Mar 2024 17:10:05 +1300
+Subject: [PATCH 2/5] platform/x86: asus-wmi: add support for Vivobook GPU MUX
+
+Adjust existing MUX support to select whichever MUX support is available
+so that ASUS Vivobook MUX can also be used if detected.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 18 +++++++++++++-----
+ include/linux/platform_data/x86/asus-wmi.h | 1 +
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index a56152ccfbe7..b9a2fb8007c0 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -268,6 +268,7 @@ struct asus_wmi {
+ bool egpu_connect_available;
+ bool dgpu_disable_available;
+ bool gpu_mux_mode_available;
++ u32 gpu_mux_dev;
+
+ /* Tunables provided by ASUS for gaming laptops */
+ bool ppt_pl2_sppt_available;
+@@ -682,7 +683,7 @@ static ssize_t dgpu_disable_store(struct device *dev,
+ return -EINVAL;
+
+ if (asus->gpu_mux_mode_available) {
+- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
++ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
+ if (result < 0)
+ /* An error here may signal greater failure of GPU handling */
+ return result;
+@@ -748,7 +749,7 @@ static ssize_t egpu_enable_store(struct device *dev,
+ }
+
+ if (asus->gpu_mux_mode_available) {
+- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
++ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
+ if (result < 0) {
+ /* An error here may signal greater failure of GPU handling */
+ pr_warn("Failed to get gpu mux status: %d\n", result);
+@@ -801,7 +802,7 @@ static ssize_t gpu_mux_mode_show(struct device *dev,
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
++ result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev);
+ if (result < 0)
+ return result;
+
+@@ -847,7 +848,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
+ }
+ }
+
+- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
++ err = asus_wmi_set_devstate(asus->gpu_mux_dev, optimus, &result);
+ if (err) {
+ dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
+ return err;
+@@ -4514,7 +4515,6 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
+ asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
+- asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
+ asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
+ asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT);
+@@ -4536,6 +4536,14 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
+ }
+
++ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX)) {
++ asus->gpu_mux_mode_available = true;
++ asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX;
++ } else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) {
++ asus->gpu_mux_mode_available = true;
++ asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO;
++ }
++
+ err = fan_boost_mode_check_present(asus);
+ if (err)
+ goto fail_fan_boost_mode;
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 9cadce10ad9a..b48b024dd844 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -128,6 +128,7 @@
+
+ /* gpu mux switch, 0 = dGPU, 1 = Optimus */
+ #define ASUS_WMI_DEVID_GPU_MUX 0x00090016
++#define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026
+
+ /* TUF laptop RGB modes/colours */
+ #define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+--
+2.44.0
+
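With this change gpu_mux_mode transparently drives whichever MUX device ID was detected, and the sysfs contract stays as documented in the header (0 = dGPU, 1 = Optimus). A hedged sketch, again assuming the asus-nb-wmi platform path:

    cat /sys/devices/platform/asus-nb-wmi/gpu_mux_mode   # 0 = dGPU, 1 = Optimus
    # A MUX switch typically only takes effect after a reboot.
    echo 1 | sudo tee /sys/devices/platform/asus-nb-wmi/gpu_mux_mode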
diff --git a/0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch b/0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
new file mode 100644
index 000000000000..1fd2ce721522
--- /dev/null
+++ b/0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
@@ -0,0 +1,74 @@
+From 9b038d6db81b457738cf65e43f401ccb8bf505e6 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sun, 10 Mar 2024 17:20:02 +1300
+Subject: [PATCH 3/5] platform/x86: asus-wmi: add support variant of TUF RGB
+
+Adds support for a second TUF RGB WMI call that some TUF laptop variants
+ship with. Also adjusts the existing support to select whichever call is
+available.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 12 +++++++++++-
+ include/linux/platform_data/x86/asus-wmi.h | 1 +
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index b9a2fb8007c0..e1100726de53 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -280,6 +280,7 @@ struct asus_wmi {
+ bool nv_temp_tgt_available;
+
+ bool kbd_rgb_mode_available;
++ u32 kbd_rgb_dev;
+ bool kbd_rgb_state_available;
+
+ bool throttle_thermal_policy_available;
+@@ -870,6 +871,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 cmd, mode, r, g, b, speed;
+ int err;
+
+@@ -906,7 +908,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev,
+ speed = 0xeb;
+ }
+
+- err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
++ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, asus->kbd_rgb_dev,
+ cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
+ if (err)
+ return err;
+@@ -4544,6 +4546,14 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO;
+ }
+
++ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) {
++ asus->kbd_rgb_mode_available = true;
++ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
++ } else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) {
++ asus->kbd_rgb_mode_available = true;
++ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
++ }
++
+ err = fan_boost_mode_check_present(asus);
+ if (err)
+ goto fail_fan_boost_mode;
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index b48b024dd844..3e9a01467c67 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -132,6 +132,7 @@
+
+ /* TUF laptop RGB modes/colours */
+ #define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
++#define ASUS_WMI_DEVID_TUF_RGB_MODE2 0x0010005A
+
+ /* TUF laptop RGB power/state */
+ #define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+--
+2.44.0
+
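Note that only the device ID passed to ASUS_WMI_METHODID_DEVS changes here; the bit packing of the two u32 arguments is the same for both RGB variants. Restated as shell arithmetic purely to illustrate the layout (sample values are arbitrary):

    cmd=1 mode=0 r=255 g=0 b=0 speed=0xeb
    arg0=$(( cmd | (mode << 8) | (r << 16) | (g << 24) ))   # cmd/mode/red/green
    arg1=$(( b | (speed << 8) ))                            # blue/speed
    printf 'arg0=0x%08x arg1=0x%08x\n' "$arg0" "$arg1"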
diff --git a/0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch b/0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
new file mode 100644
index 000000000000..2b0f7cf7d06e
--- /dev/null
+++ b/0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
@@ -0,0 +1,139 @@
+From 1c0f375634b3ddbcf479c4ddb81639e397795802 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Sun, 10 Mar 2024 19:03:11 +1300
+Subject: [PATCH 4/5] platform/x86: asus-wmi: support toggling POST sound
+
+Add support for toggling the BIOS POST sound on some ASUS laptops.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ .../ABI/testing/sysfs-platform-asus-wmi | 7 +++
+ drivers/platform/x86/asus-wmi.c | 54 +++++++++++++++++++
+ include/linux/platform_data/x86/asus-wmi.h | 3 ++
+ 3 files changed, 64 insertions(+)
+
+diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
+index e32b4f0ae15f..f3c53b7453f0 100644
+--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
++++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
+@@ -194,3 +194,10 @@ Contact: "Luke Jones" <luke@ljones.dev>
+ Description:
+ Set the target temperature limit of the Nvidia dGPU:
+ * min=75, max=87
++
++What: /sys/devices/platform/<platform>/boot_sound
++Date: Jun 2023
++KernelVersion: 6.9
++Contact: "Luke Jones" <luke@ljones.dev>
++Description:
++	Set whether the BIOS POST sound is played on boot.
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index e1100726de53..e4341abb71e0 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -297,6 +297,7 @@ struct asus_wmi {
+ // The RSOC controls the maximum charging percentage.
+ bool battery_rsoc_available;
+
++ bool boot_sound_available;
+ bool panel_overdrive_available;
+ bool mini_led_mode_available;
+ u32 mini_led_dev_id;
+@@ -2106,6 +2107,55 @@ static ssize_t panel_od_store(struct device *dev,
+ }
+ static DEVICE_ATTR_RW(panel_od);
+
++/* Bootup sound ***************************************************************/
++
++static ssize_t boot_sound_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++ int result;
++
++ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BOOT_SOUND);
++ if (result < 0)
++ return result;
++
++ return sysfs_emit(buf, "%d\n", result);
++}
++
++static ssize_t boot_sound_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result, err;
++ u32 snd;
++
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ result = kstrtou32(buf, 10, &snd);
++ if (result)
++ return result;
++
++ if (snd > 1)
++ return -EINVAL;
++
++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BOOT_SOUND, snd, &result);
++
++ if (err) {
++ pr_warn("Failed to set boot sound: %d\n", err);
++ return err;
++ }
++
++ if (result > 1) {
++ pr_warn("Failed to set panel boot sound (result): 0x%x\n", result);
++ return -EIO;
++ }
++
++ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "boot_sound");
++
++ return count;
++}
++static DEVICE_ATTR_RW(boot_sound);
++
+ /* Mini-LED mode **************************************************************/
+ static ssize_t mini_led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+@@ -4203,6 +4253,7 @@ static struct attribute *platform_attributes[] = {
+ &dev_attr_ppt_platform_sppt.attr,
+ &dev_attr_nv_dynamic_boost.attr,
+ &dev_attr_nv_temp_target.attr,
++ &dev_attr_boot_sound.attr,
+ &dev_attr_panel_od.attr,
+ &dev_attr_mini_led_mode.attr,
+ &dev_attr_available_mini_led_mode.attr,
+@@ -4255,6 +4306,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ ok = asus->nv_dyn_boost_available;
+ else if (attr == &dev_attr_nv_temp_target.attr)
+ ok = asus->nv_temp_tgt_available;
++ else if (attr == &dev_attr_boot_sound.attr)
++ ok = asus->boot_sound_available;
+ else if (attr == &dev_attr_panel_od.attr)
+ ok = asus->panel_overdrive_available;
+ else if (attr == &dev_attr_mini_led_mode.attr)
+@@ -4526,6 +4579,7 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->ppt_plat_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PLAT_SPPT);
+ asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
+ asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
++ asus->boot_sound_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_BOOT_SOUND);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
+ asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+ && dmi_match(DMI_BOARD_NAME, "RC71L");
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 3e9a01467c67..3eb5cd6773ad 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -137,6 +137,9 @@
+ /* TUF laptop RGB power/state */
+ #define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
++/* Bootup sound control */
++#define ASUS_WMI_DEVID_BOOT_SOUND 0x00130022
++
+ /* DSTS masks */
+ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
+ #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
+--
+2.44.0
+
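Once merged, toggling the POST chime is a one-liner; a sketch under the same asus-nb-wmi platform-path assumption as above:

    cat /sys/devices/platform/asus-nb-wmi/boot_sound   # 1 = chime enabled
    echo 0 | sudo tee /sys/devices/platform/asus-nb-wmi/boot_sound   # silence POST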
diff --git a/0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-if-.patch b/0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-if-.patch
deleted file mode 100644
index e4fbb7f01ab6..000000000000
--- a/0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-if-.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 1bddf53ccac067e043857d28c1598401cd9db7f4 Mon Sep 17 00:00:00 2001
-From: "Luke D. Jones" <luke@ljones.dev>
-Date: Tue, 20 Jun 2023 12:26:51 +1200
-Subject: [PATCH 05/13] platform/x86: asus-wmi: don't allow eGPU switching if
- eGPU not connected
-
-Check the ASUS_WMI_DEVID_EGPU_CONNECTED method for eGPU connection
-before allowing the ASUS_WMI_DEVID_EGPU method to run.
-
-Signed-off-by: Luke D. Jones <luke@ljones.dev>
----
- drivers/platform/x86/asus-wmi.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
-diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
-index a65cf8599124..3cb7cee110e2 100644
---- a/drivers/platform/x86/asus-wmi.c
-+++ b/drivers/platform/x86/asus-wmi.c
-@@ -693,6 +693,15 @@ static ssize_t egpu_enable_store(struct device *dev,
- if (enable > 1)
- return -EINVAL;
-
-+ err = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
-+ if (err < 0)
-+ return err;
-+ if (err < 1) {
-+ err = -ENODEV;
-+ pr_warn("Failed to set egpu disable: %d\n", err);
-+ return err;
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
---
-2.41.0
-
diff --git a/0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch b/0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch
new file mode 100644
index 000000000000..54402f004083
--- /dev/null
+++ b/0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch
@@ -0,0 +1,342 @@
+From 6045f385154a2c0a4aaa692d13bb0fa14bbe1d12 Mon Sep 17 00:00:00 2001
+From: "Luke D. Jones" <luke@ljones.dev>
+Date: Mon, 11 Mar 2024 12:15:46 +1300
+Subject: [PATCH 5/5] platform/x86: asus-wmi: store a min default for ppt
+ options
+
+Laptops with any of the ppt or nv tunables default to the minimum setting
+on boot, so we can safely assume a stored value is correct.
+
+This patch adds storing of those values in the local struct, and enables
+reading of those values back.
+
+Secondarily, it renames some internal variables to be more consistent
+(so that grepping the code surfaces all related parts).
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+---
+ drivers/platform/x86/asus-wmi.c | 141 +++++++++++++++++++++++++-------
+ 1 file changed, 111 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index e4341abb71e0..482e23b55e1e 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -272,12 +272,19 @@ struct asus_wmi {
+
+ /* Tunables provided by ASUS for gaming laptops */
+ bool ppt_pl2_sppt_available;
++ u32 ppt_pl2_sppt;
+ bool ppt_pl1_spl_available;
++ u32 ppt_pl1_spl;
+ bool ppt_apu_sppt_available;
+- bool ppt_plat_sppt_available;
++ u32 ppt_apu_sppt;
++ bool ppt_platform_sppt_available;
++ u32 ppt_platform_sppt;
+ bool ppt_fppt_available;
+- bool nv_dyn_boost_available;
+- bool nv_temp_tgt_available;
++ u32 ppt_fppt;
++ bool nv_dynamic_boost_available;
++ u32 nv_dynamic_boost;
++ bool nv_temp_target_available;
++ u32 nv_temp_target;
+
+ bool kbd_rgb_mode_available;
+ u32 kbd_rgb_dev;
+@@ -999,11 +1006,10 @@ static ssize_t ppt_pl2_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1022,22 +1028,31 @@ static ssize_t ppt_pl2_sppt_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->ppt_pl2_sppt = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl2_sppt");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(ppt_pl2_sppt);
++
++static ssize_t ppt_pl2_sppt_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->ppt_pl2_sppt);
++}
++static DEVICE_ATTR_RW(ppt_pl2_sppt);
+
+ /* Tunable: PPT, Intel=PL1, AMD=SPL ******************************************/
+ static ssize_t ppt_pl1_spl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1056,22 +1071,30 @@ static ssize_t ppt_pl1_spl_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->ppt_pl1_spl = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl1_spl");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(ppt_pl1_spl);
++static ssize_t ppt_pl1_spl_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->ppt_pl1_spl);
++}
++static DEVICE_ATTR_RW(ppt_pl1_spl);
+
+ /* Tunable: PPT APU FPPT ******************************************************/
+ static ssize_t ppt_fppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1090,22 +1113,31 @@ static ssize_t ppt_fppt_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->ppt_fppt = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_fpu_sppt");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(ppt_fppt);
++
++static ssize_t ppt_fppt_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->ppt_fppt);
++}
++static DEVICE_ATTR_RW(ppt_fppt);
+
+ /* Tunable: PPT APU SPPT *****************************************************/
+ static ssize_t ppt_apu_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1124,22 +1156,31 @@ static ssize_t ppt_apu_sppt_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->ppt_apu_sppt = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_apu_sppt");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(ppt_apu_sppt);
++
++static ssize_t ppt_apu_sppt_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->ppt_apu_sppt);
++}
++static DEVICE_ATTR_RW(ppt_apu_sppt);
+
+ /* Tunable: PPT platform SPPT ************************************************/
+ static ssize_t ppt_platform_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1158,22 +1199,31 @@ static ssize_t ppt_platform_sppt_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->ppt_platform_sppt = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_platform_sppt");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(ppt_platform_sppt);
++
++static ssize_t ppt_platform_sppt_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->ppt_platform_sppt);
++}
++static DEVICE_ATTR_RW(ppt_platform_sppt);
+
+ /* Tunable: NVIDIA dynamic boost *********************************************/
+ static ssize_t nv_dynamic_boost_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1192,22 +1242,31 @@ static ssize_t nv_dynamic_boost_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->nv_dynamic_boost = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_dynamic_boost");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(nv_dynamic_boost);
++
++static ssize_t nv_dynamic_boost_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->nv_dynamic_boost);
++}
++static DEVICE_ATTR_RW(nv_dynamic_boost);
+
+ /* Tunable: NVIDIA temperature target ****************************************/
+ static ssize_t nv_temp_target_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 value;
+
+- struct asus_wmi *asus = dev_get_drvdata(dev);
+-
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+@@ -1226,11 +1285,21 @@ static ssize_t nv_temp_target_store(struct device *dev,
+ return -EIO;
+ }
+
++ asus->nv_temp_target = value;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_temp_target");
+
+ return count;
+ }
+-static DEVICE_ATTR_WO(nv_temp_target);
++
++static ssize_t nv_temp_target_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct asus_wmi *asus = dev_get_drvdata(dev);
++
++ return sysfs_emit(buf, "%d\n", asus->nv_temp_target);
++}
++static DEVICE_ATTR_RW(nv_temp_target);
+
+ /* Battery ********************************************************************/
+
+@@ -4301,11 +4370,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ else if (attr == &dev_attr_ppt_apu_sppt.attr)
+ ok = asus->ppt_apu_sppt_available;
+ else if (attr == &dev_attr_ppt_platform_sppt.attr)
+- ok = asus->ppt_plat_sppt_available;
++ ok = asus->ppt_platform_sppt_available;
+ else if (attr == &dev_attr_nv_dynamic_boost.attr)
+- ok = asus->nv_dyn_boost_available;
++ ok = asus->nv_dynamic_boost_available;
+ else if (attr == &dev_attr_nv_temp_target.attr)
+- ok = asus->nv_temp_tgt_available;
++ ok = asus->nv_temp_target_available;
+ else if (attr == &dev_attr_boot_sound.attr)
+ ok = asus->boot_sound_available;
+ else if (attr == &dev_attr_panel_od.attr)
+@@ -4566,6 +4635,15 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_platform;
+
++ /* ensure defaults for tunables */
++ asus->ppt_pl2_sppt = 5;
++ asus->ppt_pl1_spl = 5;
++ asus->ppt_apu_sppt = 5;
++ asus->ppt_platform_sppt = 5;
++ asus->ppt_fppt = 5;
++ asus->nv_dynamic_boost = 5;
++ asus->nv_temp_target = 75;
++
+ asus->charge_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CHARGE_MODE);
+ asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
+@@ -4576,9 +4654,12 @@ static int asus_wmi_add(struct platform_device *pdev)
+ asus->ppt_pl1_spl_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL1_SPL);
+ asus->ppt_fppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_FPPT);
+ asus->ppt_apu_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_APU_SPPT);
+- asus->ppt_plat_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PLAT_SPPT);
+- asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
+- asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
++ asus->ppt_platform_sppt_available = asus_wmi_dev_is_present(asus,
++ ASUS_WMI_DEVID_PPT_PLAT_SPPT);
++ asus->nv_dynamic_boost_available = asus_wmi_dev_is_present(asus,
++ ASUS_WMI_DEVID_NV_DYN_BOOST);
++ asus->nv_temp_target_available = asus_wmi_dev_is_present(asus,
++ ASUS_WMI_DEVID_NV_THERM_TARGET);
+ asus->boot_sound_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_BOOT_SOUND);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
+ asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+--
+2.44.0
+
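Because the ppt/nv attributes become DEVICE_ATTR_RW backed by the cached struct fields, a written value can now be read back without another WMI round-trip. A sketch under the same platform-path assumption:

    echo 45 | sudo tee /sys/devices/platform/asus-nb-wmi/ppt_pl1_spl
    cat /sys/devices/platform/asus-nb-wmi/ppt_pl1_spl   # returns the cached 45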
diff --git a/0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-switc.patch b/0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-switc.patch
deleted file mode 100644
index c7563a02ca84..000000000000
--- a/0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-switc.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 64b96869a3ed4b7c9e41c1a3e8410c3ec2582ca9 Mon Sep 17 00:00:00 2001
-From: "Luke D. Jones" <luke@ljones.dev>
-Date: Tue, 20 Jun 2023 12:48:31 +1200
-Subject: [PATCH 06/13] platform/x86: asus-wmi: add safety checks to gpu
- switching
-
-Add safety checking to dgpu_disable, egpu_enable, gpu_mux_mode.
-
-These checks prevent users from doing such things as:
-- disabling the dGPU while is muxed to drive the internal screen
-- enabling the eGPU which also disables the dGPU, while muxed to
- the internal screen
-- switching the MUX to dGPU while the dGPU is disabled
-
-Signed-off-by: Luke D. Jones <luke@ljones.dev>
----
- drivers/platform/x86/asus-wmi.c | 50 ++++++++++++++++++++++++++++++++-
- 1 file changed, 49 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
-index 3cb7cee110e2..7e80ea2a802a 100644
---- a/drivers/platform/x86/asus-wmi.c
-+++ b/drivers/platform/x86/asus-wmi.c
-@@ -645,6 +645,18 @@ static ssize_t dgpu_disable_store(struct device *dev,
- if (disable > 1)
- return -EINVAL;
-
-+ if (asus->gpu_mux_mode_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (!result && disable) {
-+ err = -ENODEV;
-+ pr_warn("Can not disable dGPU when the MUX is in dGPU mode: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
- if (err) {
- pr_warn("Failed to set dgpu disable: %d\n", err);
-@@ -693,7 +705,7 @@ static ssize_t egpu_enable_store(struct device *dev,
- if (enable > 1)
- return -EINVAL;
-
-- err = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
- if (err < 0)
- return err;
- if (err < 1) {
-@@ -702,6 +714,18 @@ static ssize_t egpu_enable_store(struct device *dev,
- return err;
- }
-
-+ if (asus->gpu_mux_mode_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (!result && enable) {
-+ err = -ENODEV;
-+ pr_warn("Can not enable eGPU when the MUX is in dGPU mode: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
-@@ -764,6 +788,30 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
- if (optimus > 1)
- return -EINVAL;
-
-+ if (asus->dgpu_disable_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (result && !optimus) {
-+ err = -ENODEV;
-+ pr_warn("Can not switch MUX to dGPU mode when dGPU is disabled: %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ if (asus->egpu_enable_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (result && !optimus) {
-+ err = -ENODEV;
-+ pr_warn("Can not switch MUX to dGPU mode when eGPU is enabled: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
- if (err) {
- dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
---
-2.41.0
-
diff --git a/PKGBUILD b/PKGBUILD
index 2546e070a590..b8dec971b3db 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -3,7 +3,7 @@
# Contributor: Jan Alexander Steffens (heftig) <jan.steffens@gmail.com>
pkgbase=linux-g14
-pkgver=6.8.1.arch1
+pkgver=6.8.2.arch1
pkgrel=1
pkgdesc='Linux-g14'
url="https://gitlab.com/dragonn/linux-g14.git"
@@ -37,10 +37,9 @@ source=(
0001-acpi-proc-idle-skip-dummy-wait.patch
-# 0001-platform-x86-asus-wmi-Add-safety-checks-to-dgpu-egpu.patch
0027-mt76_-mt7921_-Disable-powersave-features-by-default.patch
- 0001-linux6.7.y-bore4.0.0.patch
+ 0001-linux6.8.y-bore4.5.0.patch
0032-Bluetooth-btusb-Add-a-new-PID-VID-0489-e0f6-for-MT7922.patch
0035-Add_quirk_for_polling_the_KBD_port.patch
@@ -49,16 +48,19 @@ source=(
0002-ACPI-resource-Skip-IRQ-override-on-ASUS-TUF-Gaming-A.patch
v2-0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-.patch
-# v2-0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-sw.patch
0038-mediatek-pci-reset.patch
0040-workaround_hardware_decoding_amdgpu.patch
-# 0005-platform-x86-asus-wmi-don-t-allow-eGPU-switching-if-.patch
-# 0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-switc.patch
-
0001-platform-x86-asus-wmi-Support-2023-ROG-X16-tablet-mo.patch
amd-tablet-sfh.patch
+ fix_amd_eDP_HDR_flickering.patch
+
+ 0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
+ 0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
+ 0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
+ 0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
+ 0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch
"sys-kernel_arch-sources-g14_files-0047-asus-nb-wmi-Add-tablet_mode_sw-lid-flip.patch"
"sys-kernel_arch-sources-g14_files-0048-asus-nb-wmi-fix-tablet_mode_sw_int.patch"
@@ -69,16 +71,16 @@ validpgpkeys=(
83BC8889351B5DEBBB68416EB8AC08600F108CDF # Jan Alexander Steffens (heftig)
)
-sha256sums=('8d0c8936e3140a0fbdf511ad7a9f21121598f3656743898f47bb9052d37cff68'
+sha256sums=('9ac322d85bcf98a04667d929f5c2666b15bd58c6c2d68dd512c72acbced07d04'
'SKIP'
- '376db82b4613c3942932fde99d54c3dea1e4b29ab23d8b86daa6414327e6244d'
+ '9ccb26c046bacf04777617e96cad5b33d3d048b30bb4840a3b5ac2cdf40a3aba'
'SKIP'
'c2b00c84c4b543db431e06604d939a62f93107d18369f4d9860dc8062b01ab45'
'278118011d7a2eeca9971ac97b31bf0c55ab55e99c662ab9ae4717b55819c9a2'
'd69232afd0dd6982ae941cf2d1f577f4be2011e3bb847d1db37952acf416b5d3'
'0a7ea482fe20c403788d290826cec42fe395e5a6eab07b88845f8b9a9829998d'
'ed242f4be3f8eaade2a1d42157c5c6c86281917a08ae43221b088fafdc775ee7'
- '6e0b648637a0925df4bb43f5eb5144838415e02c43ff8fddbf82f6813b0f132c'
+ '09883311108d461da63a04012d7a2b7f6a4165ee0c4e9cb7a5dc3f9ade326fc7'
'a8e1e11a4ab1995cc4975c9b134a43ddfe7054ef0c965e52a7d8f9223e15c3e0'
'315d1839630b37894a626bbc2aea012618b2e1ccb6f9d8aa27c0a3ce5e90e99c'
'a00b952d53df9d3617d93e8fba4146a4d6169ebe79f029b3a55cca68f738d8ea'
@@ -88,6 +90,12 @@ sha256sums=('8d0c8936e3140a0fbdf511ad7a9f21121598f3656743898f47bb9052d37cff68'
'e41198b29cee4de7a5132d8df606f48c2d0f9c9076fe4230b00a33c7e0b22c71'
'1edb362a762c8858374027e30ff58ae0014e117fdc05cc7db6da50f80e7aab87'
'508f90cbe81a9a145cc540703470f1e6b5d21c7a7b9166d2ce6e56b401262b04'
+ '9b94f02b87c28a7403478ce9f57461c3b2219b7279a928e814cafd78ee767366'
+ '1bc69aaec2089599c1154d7ee5709f5a6140434ef6edf81702b0ea7042a44967'
+ 'aa171a103d4133db4cfe153e48e71b58a85d69ed9fe144100dcc792055d79495'
+ 'f0e5b7653c91e025c5c2010e2447c98eaad699106b34ff140da106e628ea5c17'
+ '4b1e78681848c34175251e9dcbee02f6b2bb67a65aae6ea8bfb5e1322dc51f7a'
+ '4ee418b9d9905a89e58cc860fb93cb226f45ac2d00d767fc3c4dccb297c731ee'
'15e912a66e4bbce1cf0450f1dc6610653df29df8dd6d5426f9c1b039490436c8'
'444f2d86de8c2177655b01596f939f99c2e7abfa8efad8a509e0a334f42dfa85')
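When bumping sources like this, the sha256sums array does not have to be edited by hand; a sketch using standard pacman-contrib tooling, run from the PKGBUILD directory:

    updpkgsums               # regenerates the sha256sums=() array in place
    makepkg --verifysource   # re-downloads and verifies every source entry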
diff --git a/fix_amd_eDP_HDR_flickering.patch b/fix_amd_eDP_HDR_flickering.patch
new file mode 100644
index 000000000000..cbeeb2cdb2a0
--- /dev/null
+++ b/fix_amd_eDP_HDR_flickering.patch
@@ -0,0 +1,14 @@
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 4e82ee4d74ac..9ac2c66a96a8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6172,7 +6172,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+ else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+- stream->signal == SIGNAL_TYPE_EDP) {
++ stream->link->psr_settings.psr_feature_enabled ||
++ stream->link->replay_settings.replay_feature_enabled) {
+ //
+ // should decide stream support vsc sdp colorimetry capability
+ // before building vsc info packet
diff --git a/v2-0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-sw.patch b/v2-0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-sw.patch
deleted file mode 100644
index 124cfa02e5cd..000000000000
--- a/v2-0006-platform-x86-asus-wmi-add-safety-checks-to-gpu-sw.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 391b0757f19890d67ec0ade558a255421588047e Mon Sep 17 00:00:00 2001
-From: "Luke D. Jones" <luke@ljones.dev>
-Date: Tue, 20 Jun 2023 12:48:31 +1200
-Subject: [PATCH v2 6/8] platform/x86: asus-wmi: add safety checks to gpu
- switching
-
-Add safety checking to dgpu_disable, egpu_enable, gpu_mux_mode.
-
-These checks prevent users from doing such things as:
-- disabling the dGPU while is muxed to drive the internal screen
-- enabling the eGPU which also disables the dGPU, while muxed to
- the internal screen
-- switching the MUX to dGPU while the dGPU is disabled
-
-Signed-off-by: Luke D. Jones <luke@ljones.dev>
----
- drivers/platform/x86/asus-wmi.c | 50 ++++++++++++++++++++++++++++++++-
- 1 file changed, 49 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
-index 821addb284d7..602426a7fb41 100644
---- a/drivers/platform/x86/asus-wmi.c
-+++ b/drivers/platform/x86/asus-wmi.c
-@@ -645,6 +645,18 @@ static ssize_t dgpu_disable_store(struct device *dev,
- if (disable > 1)
- return -EINVAL;
-
-+ if (asus->gpu_mux_mode_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (!result && disable) {
-+ err = -ENODEV;
-+ pr_warn("Can not disable dGPU when the MUX is in dGPU mode: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
- if (err) {
- pr_warn("Failed to set dgpu disable: %d\n", err);
-@@ -693,7 +705,7 @@ static ssize_t egpu_enable_store(struct device *dev,
- if (enable > 1)
- return -EINVAL;
-
-- err = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
- if (err < 0)
- return err;
- if (err < 1) {
-@@ -702,6 +714,18 @@ static ssize_t egpu_enable_store(struct device *dev,
- return err;
- }
-
-+ if (asus->gpu_mux_mode_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (!result && enable) {
-+ err = -ENODEV;
-+ pr_warn("Can not enable eGPU when the MUX is in dGPU mode: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
-@@ -764,6 +788,30 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
- if (optimus > 1)
- return -EINVAL;
-
-+ if (asus->dgpu_disable_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (result && !optimus) {
-+ err = -ENODEV;
-+ pr_warn("Can not switch MUX to dGPU mode when dGPU is disabled: %d\n", err);
-+ return err;
-+ }
-+ }
-+
-+ if (asus->egpu_enable_available) {
-+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
-+ if (result < 0)
-+ /* An error here may signal greater failure of GPU handling */
-+ return result;
-+ if (result && !optimus) {
-+ err = -ENODEV;
-+ pr_warn("Can not switch MUX to dGPU mode when eGPU is enabled: %d\n", err);
-+ return err;
-+ }
-+ }
-+
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
- if (err) {
- dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
---
-2.41.0
-