author     Taijian  2024-01-22 17:37:08 +0100
committer  Taijian  2024-01-22 17:37:08 +0100
commit     f5542f9dbe7d5d079a0d83ef7710f3a2312e5da1 (patch)
tree       75ea286f6a4b82bb698faa63b317d24bdadc9c58 /0001-linux6.7.y-bore4.0.0.patch
parent     e7b57aaec8e71716397d8baffe4350c29f1608e2 (diff)
download   aur-f5542f9dbe7d5d079a0d83ef7710f3a2312e5da1.tar.gz
update to 6.7.1
Diffstat (limited to '0001-linux6.7.y-bore4.0.0.patch')
-rw-r--r--  0001-linux6.7.y-bore4.0.0.patch  668
1 file changed, 668 insertions, 0 deletions
diff --git a/0001-linux6.7.y-bore4.0.0.patch b/0001-linux6.7.y-bore4.0.0.patch
new file mode 100644
index 000000000000..6d471fa9a783
--- /dev/null
+++ b/0001-linux6.7.y-bore4.0.0.patch
@@ -0,0 +1,668 @@
+From fddf9e0e897d4b1a79755a042c5f3c66b2cdb949 Mon Sep 17 00:00:00 2001
+From: Masahito S <firelzrd@gmail.com>
+Date: Sat, 6 Jan 2024 20:57:20 +0900
+Subject: [PATCH] linux6.7.y-bore4.0.0
+
+---
+ include/linux/sched.h | 11 ++
+ init/Kconfig | 19 +++
+ kernel/sched/core.c | 132 +++++++++++++++++++++
+ kernel/sched/debug.c | 3 +
+ kernel/sched/fair.c | 253 +++++++++++++++++++++++++++++++++++++---
+ kernel/sched/features.h | 4 +
+ 6 files changed, 404 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 292c316972..8a9e843ec5 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -562,6 +562,17 @@ struct sched_entity {
+ u64 sum_exec_runtime;
+ u64 prev_sum_exec_runtime;
+ u64 vruntime;
++#ifdef CONFIG_SCHED_BORE
++ u64 burst_time;
++ u8 prev_burst_penalty;
++ u8 curr_burst_penalty;
++ u8 burst_penalty;
++ u8 slice_score;
++ u8 child_burst;
++ u16 child_burst_cnt;
++ u64 child_burst_last_cached;
++ u32 slice_load;
++#endif // CONFIG_SCHED_BORE
+ s64 vlag;
+ u64 slice;
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 9ffb103fc9..4492c5de88 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1258,6 +1258,25 @@ config CHECKPOINT_RESTORE
+
+ If unsure, say N here.
+
++config SCHED_BORE
++ bool "Burst-Oriented Response Enhancer"
++ default y
++ help
++ In desktop and mobile computing, one might prefer interactive
++ tasks to stay responsive no matter what runs in the background.
++
++ Enabling this kernel feature modifies the scheduler to discriminate
++ tasks by their burst time (runtime since the task last slept or
++ yielded) and to prioritize those that run in shorter bursts.
++ Such tasks usually include the window compositor, widget backends,
++ terminal emulators, video playback, games, and so on.
++ At a small cost to scheduling fairness, this may improve
++ responsiveness, especially under heavy background workloads.
++
++ You can turn it off by setting the sysctl kernel.sched_bore = 0.
++
++ If unsure, say Y here.
++
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select CGROUPS
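The kernel.sched_bore sysctl mentioned in the help text above is registered later in this patch, in kernel/sched/fair.c. As a minimal user-space sketch of the runtime toggle (the file name toggle_bore.c is illustrative; it assumes the usual procfs mapping of sysctls under /proc/sys/kernel/):

/* toggle_bore.c - illustrative only; equivalent to `sysctl -w kernel.sched_bore=0` */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_bore", "w");
	if (!f) {
		/* likely a kernel built without CONFIG_SCHED_BORE */
		perror("sched_bore");
		return 1;
	}
	fputs("0\n", f);	/* writing "1" re-enables BORE */
	return fclose(f) ? 1 : 0;
}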
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a708d225c2..7f2e796b77 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4480,6 +4480,127 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+ return try_to_wake_up(p, state, 0);
+ }
+
++#ifdef CONFIG_SCHED_BORE
++extern bool sched_bore;
++extern u8 sched_burst_fork_atavistic;
++extern uint sched_burst_cache_lifetime;
++
++void __init sched_init_bore(void) {
++ init_task.se.burst_time = 0;
++ init_task.se.prev_burst_penalty = 0;
++ init_task.se.curr_burst_penalty = 0;
++ init_task.se.burst_penalty = 0;
++ init_task.se.slice_score = 0;
++ init_task.se.child_burst_last_cached = 0;
++ init_task.se.slice_load = 0;
++}
++
++inline void sched_fork_bore(struct task_struct *p) {
++ p->se.burst_time = 0;
++ p->se.curr_burst_penalty = 0;
++ p->se.slice_score = 0;
++ p->se.child_burst_last_cached = 0;
++ p->se.slice_load = 0;
++}
++
++static u32 count_child_tasks(struct task_struct *p) {
++ struct task_struct *child;
++ u32 cnt = 0;
++ list_for_each_entry(child, &p->children, sibling) {cnt++;}
++ return cnt;
++}
++
++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
++ return (p->se.child_burst_last_cached + sched_burst_cache_lifetime < now);
++}
++
++static void __update_child_burst_cache(
++ struct task_struct *p, u32 cnt, u32 sum, u64 now) {
++ u8 avg = 0;
++ if (cnt) avg = sum / cnt;
++ p->se.child_burst = max(avg, p->se.burst_penalty);
++ p->se.child_burst_cnt = cnt;
++ p->se.child_burst_last_cached = now;
++}
++
++static void update_child_burst_cache(struct task_struct *p, u64 now) {
++ struct task_struct *child;
++ u32 cnt = 0;
++ u32 sum = 0;
++
++ list_for_each_entry(child, &p->children, sibling) {
++ if (child->sched_class != &fair_sched_class) continue;
++ cnt++;
++ sum += child->se.burst_penalty;
++ }
++
++ __update_child_burst_cache(p, cnt, sum, now);
++}
++
++static void update_child_burst_cache_atavistic(
++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
++ struct task_struct *child, *dec;
++ u32 cnt = 0, dcnt = 0;
++ u32 sum = 0;
++
++ list_for_each_entry(child, &p->children, sibling) {
++ dec = child;
++ while ((dcnt = count_child_tasks(dec)) == 1)
++ dec = list_first_entry(&dec->children, struct task_struct, sibling);
++
++ if (!dcnt || !depth) {
++ if (dec->sched_class != &fair_sched_class) continue;
++ cnt++;
++ sum += dec->se.burst_penalty;
++ continue;
++ }
++ if (!child_burst_cache_expired(dec, now)) {
++ cnt += dec->se.child_burst_cnt;
++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
++ continue;
++ }
++ update_child_burst_cache_atavistic(dec, now, depth - 1, &cnt, &sum);
++ }
++
++ __update_child_burst_cache(p, cnt, sum, now);
++ *acnt += cnt;
++ *asum += sum;
++}
++
++static void sched_post_fork_bore(struct task_struct *p) {
++ struct sched_entity *se = &p->se;
++ struct task_struct *anc;
++ u64 now;
++ u32 cnt = 0, sum = 0, depth;
++ u8 burst_cache;
++
++ if (p->sched_class != &fair_sched_class) return;
++
++ if (likely(sched_bore)) {
++ now = ktime_get_ns();
++ read_lock(&tasklist_lock);
++
++ anc = p->real_parent;
++ depth = sched_burst_fork_atavistic;
++ if (likely(depth)) {
++ while ((anc->real_parent != anc) && (count_child_tasks(anc) == 1))
++ anc = anc->real_parent;
++ if (child_burst_cache_expired(anc, now))
++ update_child_burst_cache_atavistic(
++ anc, now, depth - 1, &cnt, &sum);
++ } else
++ if (child_burst_cache_expired(anc, now))
++ update_child_burst_cache(anc, now);
++
++ burst_cache = anc->se.child_burst;
++
++ read_unlock(&tasklist_lock);
++ se->prev_burst_penalty = max(se->prev_burst_penalty, burst_cache);
++ }
++ se->burst_penalty = se->prev_burst_penalty;
++}
++#endif // CONFIG_SCHED_BORE
++
+ /*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
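The block above seeds a forked task's penalty from its ancestry: the parent (or, in "atavistic" mode, a non-trivial ancestor) caches the average burst penalty of its children, floored by its own penalty, and the new task starts from the larger of that cache and its inherited value. A minimal user-space sketch of the core rule, with a hypothetical struct entity standing in for the kernel's sched_entity:

/* inherit_burst.c - illustrative sketch, not kernel code */
#include <stdint.h>

struct entity { uint8_t burst_penalty, prev_burst_penalty, child_burst; };

static uint8_t max_u8(uint8_t a, uint8_t b) { return a > b ? a : b; }

/* Mirrors __update_child_burst_cache(): average the children's penalties,
 * but never cache less than the parent's own penalty. */
static void cache_child_burst(struct entity *parent,
			      const struct entity *children, uint32_t cnt)
{
	uint32_t sum = 0;
	for (uint32_t i = 0; i < cnt; i++)
		sum += children[i].burst_penalty;
	uint8_t avg = cnt ? sum / cnt : 0;
	parent->child_burst = max_u8(avg, parent->burst_penalty);
}

/* Mirrors the tail of sched_post_fork_bore(). */
static void post_fork(struct entity *child, const struct entity *parent)
{
	child->prev_burst_penalty =
		max_u8(child->prev_burst_penalty, parent->child_burst);
	child->burst_penalty = child->prev_burst_penalty;
}

int main(void)
{
	struct entity parent = { .burst_penalty = 10 };
	struct entity kids[2] = { { .burst_penalty = 30 },
				  { .burst_penalty = 50 } };
	struct entity fresh = { 0 };

	cache_child_burst(&parent, kids, 2);	/* child_burst = max(40, 10) = 40 */
	post_fork(&fresh, &parent);		/* fresh starts at penalty 40 */
	return 0;
}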
+@@ -4496,6 +4617,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
++#ifdef CONFIG_SCHED_BORE
++ sched_fork_bore(p);
++#endif // CONFIG_SCHED_BORE
+ p->se.vlag = 0;
+ p->se.slice = sysctl_sched_base_slice;
+ INIT_LIST_HEAD(&p->se.group_node);
+@@ -4815,6 +4939,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+
+ void sched_post_fork(struct task_struct *p)
+ {
++#ifdef CONFIG_SCHED_BORE
++ sched_post_fork_bore(p);
++#endif // CONFIG_SCHED_BORE
+ uclamp_post_fork(p);
+ }
+
+@@ -9885,6 +10012,11 @@ void __init sched_init(void)
+ BUG_ON(&dl_sched_class != &stop_sched_class + 1);
+ #endif
+
++#ifdef CONFIG_SCHED_BORE
++ sched_init_bore();
++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.0.0 by Masahito Suzuki\n");
++#endif // CONFIG_SCHED_BORE
++
+ wait_bit_init();
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 4580a45070..cf2c694c94 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -595,6 +595,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
+ SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+
++#ifdef CONFIG_SCHED_BORE
++ SEQ_printf(m, " %2d", p->se.slice_score);
++#endif
+ #ifdef CONFIG_NUMA_BALANCING
+ SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ #endif
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index d7a3c63a21..bfc1332995 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -19,6 +19,9 @@
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
++ *
++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
+ */
+ #include <linux/energy_model.h>
+ #include <linux/mmap_lock.h>
+@@ -64,20 +67,122 @@
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
+ *
+- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
++ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ */
++#ifdef CONFIG_SCHED_BORE
++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
++#else // CONFIG_SCHED_BORE
+ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
++#endif // CONFIG_SCHED_BORE
+
+ /*
+ * Minimal preemption granularity for CPU-bound tasks:
+ *
+- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ * (BORE default: 3 msec constant, units: nanoseconds)
++ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_SCHED_BORE
++unsigned int sysctl_sched_base_slice = 3000000ULL;
++static unsigned int normalized_sysctl_sched_base_slice = 3000000ULL;
++#else // CONFIG_SCHED_BORE
+ unsigned int sysctl_sched_base_slice = 750000ULL;
+ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
++#endif // CONFIG_SCHED_BORE
+
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
++#ifdef CONFIG_SCHED_BORE
++bool __read_mostly sched_bore = 1;
++bool __read_mostly sched_burst_score_rounding = 0;
++bool __read_mostly sched_burst_smoothness_long = 1;
++bool __read_mostly sched_burst_smoothness_short = 0;
++u8 __read_mostly sched_burst_fork_atavistic = 2;
++u8 __read_mostly sched_burst_penalty_offset = 22;
++uint __read_mostly sched_burst_penalty_scale = 1280;
++uint __read_mostly sched_burst_cache_lifetime = 60000000;
++static u8 sixty_four = 64;
++static uint maxval_12_bits = 4095;
++
++#define MAX_BURST_PENALTY (39U << 2)
++
++static inline u32 log2plus1_u64_u32f8(u64 v) {
++ u32 msb = fls64(v);
++ s32 excess_bits = msb - 9;
++ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
++ return msb << 8 | fractional;
++}
++
++static inline u32 calc_burst_penalty(u64 burst_time) {
++ u32 greed, tolerance, penalty, scaled_penalty;
++
++ greed = log2plus1_u64_u32f8(burst_time);
++ tolerance = sched_burst_penalty_offset << 8;
++ penalty = max(0, (s32)greed - (s32)tolerance);
++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
++
++ return min(MAX_BURST_PENALTY, scaled_penalty);
++}
++
++static inline void update_burst_penalty(struct sched_entity *se) {
++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
++}
++
++static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->slice_score], 22);
++}
++
++static inline u64 __unscale_slice(u64 delta, u8 score) {
++ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
++}
++
++static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
++ return __unscale_slice(delta, se->slice_score);
++}
++
++static void avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se);
++static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se);
++
++static void update_slice_score(struct sched_entity *se) {
++ struct cfs_rq *cfs_rq = cfs_rq_of(se);
++ u8 prev_score = se->slice_score;
++ u32 penalty = se->burst_penalty;
++ if (sched_burst_score_rounding) penalty += 0x2U;
++ se->slice_score = penalty >> 2;
++
++ if (se->slice_score != prev_score && se->slice_load) {
++ avg_vruntime_sub(cfs_rq, se);
++ avg_vruntime_add(cfs_rq, se);
++ }
++}
++
++static inline u32 binary_smooth(u32 new, u32 old) {
++ int increment = new - old;
++ return (0 <= increment)?
++ old + ( increment >> (int)sched_burst_smoothness_long):
++ old - (-increment >> (int)sched_burst_smoothness_short);
++}
++
++static void restart_burst(struct sched_entity *se) {
++ se->burst_penalty = se->prev_burst_penalty =
++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
++ se->curr_burst_penalty = 0;
++ se->burst_time = 0;
++ update_slice_score(se);
++}
++
++static inline void restart_burst_rescale_deadline(struct sched_entity *se) {
++ u64 wremain, vremain = se->deadline - se->vruntime;
++ u8 prev_score = se->slice_score;
++ restart_burst(se);
++ if (prev_score > se->slice_score) {
++ wremain = __unscale_slice(vremain, prev_score);
++ se->deadline = se->vruntime + scale_slice(wremain, se);
++ }
++}
++#endif // CONFIG_SCHED_BORE
++
+ int sched_thermal_decay_shift;
+ static int __init setup_sched_thermal_decay_shift(char *str)
+ {
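To make the fixed-point arithmetic above concrete, here is a stand-alone user-space re-implementation of the penalty path: a sketch, assuming GCC/Clang's __builtin_clzll() in place of the kernel's fls64(), with constants hard-coded to the patch defaults (offset 22, scale 1280). With these defaults, bursts up to 2^21 ns (about 2.1 ms) accrue no penalty at all:

/* burst_penalty.c - illustrative re-implementation, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define MAX_BURST_PENALTY (39U << 2)

/* log2 of v in 24.8 fixed point, as in the patch; the 8-bit fraction
 * keeps the bits just below the leading one. */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
	uint32_t msb = v ? 64 - (uint32_t)__builtin_clzll(v) : 0; /* fls64() */
	int32_t excess_bits = (int32_t)msb - 9;
	uint8_t fractional = (excess_bits >= 0) ? v >> excess_bits
						: v << -excess_bits;
	return msb << 8 | fractional;
}

static uint32_t calc_burst_penalty(uint64_t burst_time_ns)
{
	uint32_t greed = log2plus1_u64_u32f8(burst_time_ns);
	uint32_t tolerance = 22u << 8;		/* sched_burst_penalty_offset */
	int32_t penalty = (int32_t)(greed - tolerance);
	if (penalty < 0)
		penalty = 0;
	uint32_t scaled = (uint32_t)penalty * 1280 >> 16; /* _penalty_scale */
	return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
	static const uint64_t ns[] =
		{ 1000000, 10000000, 100000000, 1000000000 };
	for (unsigned i = 0; i < sizeof(ns) / sizeof(ns[0]); i++) {
		uint32_t p = calc_burst_penalty(ns[i]);
		/* slice_score is penalty >> 2 with rounding off (the default) */
		printf("%10llu ns -> penalty %3u, slice_score %2u\n",
		       (unsigned long long)ns[i], p, p >> 2);
	}
	return 0;
}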
+@@ -137,6 +242,70 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
+
+ #ifdef CONFIG_SYSCTL
+ static struct ctl_table sched_fair_sysctls[] = {
++#ifdef CONFIG_SCHED_BORE
++ {
++ .procname = "sched_bore",
++ .data = &sched_bore,
++ .maxlen = sizeof(bool),
++ .mode = 0644,
++ .proc_handler = &proc_dobool,
++ },
++ {
++ .procname = "sched_burst_cache_lifetime",
++ .data = &sched_burst_cache_lifetime,
++ .maxlen = sizeof(uint),
++ .mode = 0644,
++ .proc_handler = proc_douintvec,
++ },
++ {
++ .procname = "sched_burst_fork_atavistic",
++ .data = &sched_burst_fork_atavistic,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = &proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = SYSCTL_THREE,
++ },
++ {
++ .procname = "sched_burst_penalty_offset",
++ .data = &sched_burst_penalty_offset,
++ .maxlen = sizeof(u8),
++ .mode = 0644,
++ .proc_handler = &proc_dou8vec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &sixty_four,
++ },
++ {
++ .procname = "sched_burst_penalty_scale",
++ .data = &sched_burst_penalty_scale,
++ .maxlen = sizeof(uint),
++ .mode = 0644,
++ .proc_handler = &proc_douintvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &maxval_12_bits,
++ },
++ {
++ .procname = "sched_burst_score_rounding",
++ .data = &sched_burst_score_rounding,
++ .maxlen = sizeof(bool),
++ .mode = 0644,
++ .proc_handler = &proc_dobool,
++ },
++ {
++ .procname = "sched_burst_smoothness_long",
++ .data = &sched_burst_smoothness_long,
++ .maxlen = sizeof(bool),
++ .mode = 0644,
++ .proc_handler = &proc_dobool,
++ },
++ {
++ .procname = "sched_burst_smoothness_short",
++ .data = &sched_burst_smoothness_short,
++ .maxlen = sizeof(bool),
++ .mode = 0644,
++ .proc_handler = &proc_dobool,
++ },
++#endif // CONFIG_SCHED_BORE
+ #ifdef CONFIG_CFS_BANDWIDTH
+ {
+ .procname = "sched_cfs_bandwidth_slice_us",
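For reference, the knobs this table exposes and their compiled-in defaults (taken from the declarations near the top of this file):

  sched_bore                  = 1          (master switch)
  sched_burst_cache_lifetime  = 60000000   (child-burst cache validity; compared against ktime_get_ns(), so 60 ms)
  sched_burst_fork_atavistic  = 2          (0-3; tree depth walked when seeding a fork, 0 = direct children only)
  sched_burst_penalty_offset  = 22         (0-64; log2 of the burst length below which no penalty accrues)
  sched_burst_penalty_scale   = 1280       (0-4095)
  sched_burst_score_rounding  = 0
  sched_burst_smoothness_long = 1, _short = 0  (binary_smooth() shift counts: penalty rises are halved, falls apply in full)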
+@@ -298,6 +467,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+
++#ifdef CONFIG_SCHED_BORE
++ if (likely(sched_bore)) delta = scale_slice(delta, se);
++#endif // CONFIG_SCHED_BORE
+ return delta;
+ }
+
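Note how calc_delta_fair() now applies scale_slice() on top of the ordinary nice-level weighting: slice_score indexes the existing sched_prio_to_wmult[] table, so each point of score makes vruntime advance roughly 25% faster, just like one nice level. Relative to a score of 20 (where wmult = 2^22 leaves delta unchanged), a score of 0 advances vruntime at about 1/87 of that rate, and a fully bursty score of 39 about 68 times faster.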
+@@ -620,10 +792,22 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ *
+ * As measured, the max (key * weight) value was ~44 bits for a kernel build.
+ */
++static unsigned long calc_avg_load_weight(struct sched_entity *se) {
++ unsigned long weight = scale_load_down(se->load.weight);
++#ifdef CONFIG_SCHED_BORE
++ weight <<= 5;
++ if (likely(sched_bore)) weight = unscale_slice(weight, se);
++#endif // CONFIG_SCHED_BORE
++ return weight;
++}
++
+ static void
+ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long weight = scale_load_down(se->load.weight);
++ unsigned long weight = calc_avg_load_weight(se);
++#ifdef CONFIG_SCHED_BORE
++ se->slice_load = weight;
++#endif // CONFIG_SCHED_BORE
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime += key * weight;
+@@ -633,7 +817,13 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ static void
+ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- unsigned long weight = scale_load_down(se->load.weight);
++ unsigned long weight;
++#if !defined(CONFIG_SCHED_BORE)
++ weight = scale_load_down(se->load.weight);
++#else // CONFIG_SCHED_BORE
++ weight = se->slice_load;
++ se->slice_load = 0;
++#endif // CONFIG_SCHED_BORE
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime -= key * weight;
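The new slice_load field caches the exact weight that avg_vruntime_add() contributed, so avg_vruntime_sub() can remove precisely that amount even if slice_score has changed in the meantime; update_slice_score() relies on this when it re-subtracts and re-adds an entity whose score moved while enqueued.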
+@@ -653,14 +843,14 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+ * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
+ */
+-u64 avg_vruntime(struct cfs_rq *cfs_rq)
++static u64 avg_key(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ unsigned long weight = calc_avg_load_weight(curr);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+@@ -673,7 +863,11 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ avg = div_s64(avg, load);
+ }
+
+- return cfs_rq->min_vruntime + avg;
++ return avg;
++}
++
++inline u64 avg_vruntime(struct cfs_rq *cfs_rq) {
++ return cfs_rq->min_vruntime + avg_key(cfs_rq);
+ }
+
+ /*
+@@ -694,13 +888,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+ */
+ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- s64 lag, limit;
+-
+ SCHED_WARN_ON(!se->on_rq);
+- lag = avg_vruntime(cfs_rq) - se->vruntime;
+-
+- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+- se->vlag = clamp(lag, -limit, limit);
++ se->vlag = avg_vruntime(cfs_rq) - se->vruntime;
+ }
+
+ /*
+@@ -727,7 +916,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+- unsigned long weight = scale_load_down(curr->load.weight);
++ unsigned long weight = calc_avg_load_weight(curr);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+@@ -1016,6 +1205,9 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ /*
+ * EEVDF: vd_i = ve_i + r_i / w_i
+ */
++#ifdef CONFIG_SCHED_BORE
++ update_slice_score(se);
++#endif // CONFIG_SCHED_BORE
+ se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
+
+ /*
+@@ -1158,7 +1350,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ curr->sum_exec_runtime += delta_exec;
+ schedstat_add(cfs_rq->exec_clock, delta_exec);
+
+- curr->vruntime += calc_delta_fair(delta_exec, curr);
++#ifdef CONFIG_SCHED_BORE
++ curr->burst_time += delta_exec;
++ update_burst_penalty(curr);
++#endif // CONFIG_SCHED_BORE
++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
+ update_deadline(cfs_rq, curr);
+ update_min_vruntime(cfs_rq);
+
+@@ -5131,7 +5327,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ struct sched_entity *curr = cfs_rq->curr;
+ unsigned long load;
+
+- lag = se->vlag;
++ u64 limit = calc_delta_fair(max_t(u64, se->slice*2, TICK_NSEC), se);
++ s64 overmet = limit, undermet = limit;
++#ifdef CONFIG_SCHED_BORE
++ if (likely(sched_bore)) overmet /= 2;
++#endif // CONFIG_SCHED_BORE
++ lag = clamp(se->vlag, -overmet, undermet);
+
+ /*
+ * If we want to place a task and preserve lag, we have to
+@@ -5187,9 +5388,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ */
+ load = cfs_rq->avg_load;
+ if (curr && curr->on_rq)
+- load += scale_load_down(curr->load.weight);
++ load += calc_avg_load_weight(curr);
+
+- lag *= load + scale_load_down(se->load.weight);
++ lag *= load + calc_avg_load_weight(se);
+ if (WARN_ON_ONCE(!load))
+ load = 1;
+ lag = div_s64(lag, load);
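Two related lag changes meet here: update_entity_lag() no longer clamps vlag (see the earlier hunk), and place_entity() clamps it instead, asymmetrically when BORE is active. A task that has overrun its fair share (negative vlag) carries at most half the usual limit back into placement, while a task owed service keeps the full limit.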
+@@ -6759,6 +6960,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ bool was_sched_idle = sched_idle_rq(rq);
+
+ util_est_dequeue(&rq->cfs, p);
++#ifdef CONFIG_SCHED_BORE
++ if (task_sleep) {
++ update_curr(cfs_rq_of(se));
++ restart_burst(se);
++ }
++#endif // CONFIG_SCHED_BORE
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+@@ -8494,6 +8701,9 @@ static void yield_task_fair(struct rq *rq)
+ /*
+ * Are we the only task in the tree?
+ */
++#ifdef CONFIG_SCHED_BORE
++ if (unlikely(!sched_bore))
++#endif // CONFIG_SCHED_BORE
+ if (unlikely(rq->nr_running == 1))
+ return;
+
+@@ -8510,6 +8720,10 @@ static void yield_task_fair(struct rq *rq)
+ * and double the fastpath cost.
+ */
+ rq_clock_skip_update(rq);
++#ifdef CONFIG_SCHED_BORE
++ restart_burst_rescale_deadline(se);
++ if (unlikely(rq->nr_running == 1)) return;
++#endif // CONFIG_SCHED_BORE
+
+ se->deadline += calc_delta_fair(se->slice, se);
+ }
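On yield, BORE bypasses the single-task early return above so that restart_burst_rescale_deadline() always runs: the current burst is folded into prev_burst_penalty, and if the slice_score improved, the remaining vruntime-scaled deadline is unscaled with the old score and rescaled with the new one, so the improvement takes effect immediately rather than at the next slice.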
+@@ -12590,6 +12804,9 @@ static void task_fork_fair(struct task_struct *p)
+ curr = cfs_rq->curr;
+ if (curr)
+ update_curr(cfs_rq);
++#ifdef CONFIG_SCHED_BORE
++ update_slice_score(se);
++#endif // CONFIG_SCHED_BORE
+ place_entity(cfs_rq, se, ENQUEUE_INITIAL);
+ rq_unlock(rq, &rf);
+ }
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index a3ddf84de4..841a428579 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -6,7 +6,11 @@
+ */
+ SCHED_FEAT(PLACE_LAG, true)
+ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
++#ifdef CONFIG_SCHED_BORE
++SCHED_FEAT(RUN_TO_PARITY, false)
++#else // CONFIG_SCHED_BORE
+ SCHED_FEAT(RUN_TO_PARITY, true)
++#endif // CONFIG_SCHED_BORE
+
+ /*
+ * Prefer to schedule the task we woke last (assuming it failed
+--
+2.25.1