author    P. Jung 2021-08-12 12:32:21 +0000
committer P. Jung 2021-08-12 12:32:21 +0000
commit    6f516bf26bd17563f9d49652d0daefbc18be6952 (patch)
tree      b1263be3d2209e84f6111b435e66bb1236f18c47
parent    c497fbbfddc1b66943dcbe1f70ca44d2a9e5c62e (diff)
download  aur-6f516bf26bd17563f9d49652d0daefbc18be6952.tar.gz
5.13.10
-rw-r--r--  .SRCINFO             16
-rw-r--r--  PKGBUILD             12
-rw-r--r--  cacule-5.10.patch  2214
-rw-r--r--  cacule-5.12.patch  1338
-rw-r--r--  config              408
5 files changed, 2360 insertions, 1628 deletions
diff --git a/.SRCINFO b/.SRCINFO
index 342cd15d96aa..36aeb23020ed 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
- pkgver = 5.12.19.hardened1
+ pkgver = 5.10.57.hardened1
pkgrel = 1
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
@@ -20,16 +20,16 @@ pkgbase = linux-hardened-cacule
makedepends = graphviz
makedepends = imagemagick
options = !strip
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.12.19.tar.xz
- source = https://github.com/anthraxx/linux-hardened/releases/download/5.12.19-hardened1/linux-hardened-5.12.19-hardened1.patch
- source = cacule-5.12.patch
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.10.57.tar.xz
+ source = https://github.com/anthraxx/linux-hardened/releases/download/5.10.57-hardened1/linux-hardened-5.10.57-hardened1.patch
+ source = cacule-5.10.patch
source = cpu-patches.patch
source = config
- sha256sums = e9381cd3525a02f5b895f74147e2440be443ecd45484c6c64075046bc6f94c73
- sha256sums = ac19f4aedf7309b0c94cf1562dcd92def86be778e95e566632349af6844823db
- sha256sums = 912786eae40b7993ca04ef3eb86e6f03c95d60749819cb2c75260b63c978989c
+ sha256sums = 00bbaeaac17f82d9a6d93cbc42cafd39d3b2fa3a6087333503d2344fa5e3142d
+ sha256sums = 30c6fa1c9a9962ce546e2c45a10893612b4760de76c60731e1059308e6391b7f
+ sha256sums = 82662e54c8a660775284a73b0fed0f849903770c0a7c8b18c317d28b00a16a55
sha256sums = 4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a
- sha256sums = 06690db6be4142c855b292ca68e63fb16b0e2edeb2e8071e2ef16bb7f69f7612
+ sha256sums = 64dfb0380157875f075cbfba406880089fb9a0fac24d6f9b9b53d3e7121eddb5
pkgname = linux-hardened-cacule
pkgdesc = The Security-Hardened Linux with the cacule scheduler kernel and modules
diff --git a/PKGBUILD b/PKGBUILD
index 3e5d8ecf9692..e85f555fc059 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -6,7 +6,7 @@
pkgbase=linux-hardened-cacule
-pkgver=5.12.19.hardened1
+pkgver=5.10.57.hardened1
pkgrel=1
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
@@ -22,15 +22,15 @@ _srctag=${pkgver%.*}-${pkgver##*.}
source=(
https://www.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/${_srcname}.tar.xz
https://github.com/anthraxx/linux-hardened/releases/download/${_srctag}/linux-hardened-${_srctag}.patch
- cacule-5.12.patch
+ cacule-5.10.patch
cpu-patches.patch
config # the main kernel config file
)
-sha256sums=('e9381cd3525a02f5b895f74147e2440be443ecd45484c6c64075046bc6f94c73'
- 'ac19f4aedf7309b0c94cf1562dcd92def86be778e95e566632349af6844823db'
- '912786eae40b7993ca04ef3eb86e6f03c95d60749819cb2c75260b63c978989c'
+sha256sums=('00bbaeaac17f82d9a6d93cbc42cafd39d3b2fa3a6087333503d2344fa5e3142d'
+ '30c6fa1c9a9962ce546e2c45a10893612b4760de76c60731e1059308e6391b7f'
+ '82662e54c8a660775284a73b0fed0f849903770c0a7c8b18c317d28b00a16a55'
'4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a'
- '06690db6be4142c855b292ca68e63fb16b0e2edeb2e8071e2ef16bb7f69f7612')
+ '64dfb0380157875f075cbfba406880089fb9a0fac24d6f9b9b53d3e7121eddb5')
export KBUILD_BUILD_HOST=archlinux
export KBUILD_BUILD_USER=$pkgbase
diff --git a/cacule-5.10.patch b/cacule-5.10.patch
new file mode 100644
index 000000000000..dca091638e40
--- /dev/null
+++ b/cacule-5.10.patch
@@ -0,0 +1,2214 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index d4b32cc32bb7..2788c5bbd870 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
+ requirements for EAS but you do not want to use it, change
+ this value to 0.
+
++sched_interactivity_factor (CacULE scheduler only)
++==================================================
++Sets the value *m* for interactivity score calculations. See
++Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
+
+ sched_schedstats
+ ================
+diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
+new file mode 100644
+index 000000000000..82b0847c468a
+--- /dev/null
++++ b/Documentation/scheduler/sched-CacULE.rst
+@@ -0,0 +1,76 @@
++======================================
++The CacULE Scheduler by Hamad Al Marri.
++======================================
++
++1. Overview
++=============
++
++The CacULE CPU scheduler is based on an interactivity score mechanism.
++The interactivity score is inspired by the ULE scheduler (FreeBSD
++scheduler).
++
++1.1 About CacULE Scheduler
++--------------------------
++
++ - Each CPU has its own runqueue.
++
++ - The NORMAL runqueue is a linked list of sched_entities (instead of an RB-tree).
++
++ - RT and other runqueues are just the same as the CFS's.
++
++ - A waking task preempts the currently running task if it is more
++   interactive, i.e. if it has a lower interactivity score.
++
++
++1.2. Complexity
++----------------
++
++The complexity of enqueueing and dequeueing a task is O(1).
++
++The complexity of picking the next task is O(n), where n is the number of
++tasks in a runqueue (each CPU has its own runqueue).
++
++Note: O(n) sounds scary, but on a typical 4-CPU machine running desktop or
++mobile workloads, the number of runnable tasks rarely exceeds 10 at
++pick-next time - idle tasks are excluded, since they are dequeued when
++sleeping and enqueued again when they wake up.
++
++
++2. The CacULE Interactivity Score
++=======================================================
++
++The interactivity score is inspired by the ULE scheduler (FreeBSD scheduler).
++For more information see: https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
++CacULE does not replace CFS with ULE; it only swaps CFS's pick-next-task
++mechanism for ULE's interactivity score mechanism when choosing the next task.
++
++
++2.3 sched_interactivity_factor
++==============================
++Sets the value *m* for interactivity score calculations. See Figure 1 in
++https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
++The default value in CacULE is 10, which means that the Maximum Interactive
++Score is 20 (since m = Maximum Interactive Score / 2).
++You can tune sched_interactivity_factor with the sysctl command:
++
++ sysctl kernel.sched_interactivity_factor=50
++
++This command changes the sched_interactivity_factor from 10 to 50.
++
++
++3. Scheduling policies
++=======================
++
++CacULE, same as CFS, implements three scheduling policies:
++
++ - SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
++ policy that is used for regular tasks.
++
++ - SCHED_BATCH: Does not preempt nearly as often as regular tasks
++ would, thereby allowing tasks to run longer and make better use of
++ caches but at the cost of interactivity. This is well suited for
++ batch jobs.
++
++ - SCHED_IDLE: This is even weaker than nice 19, but it is not a true
++   idle-timer scheduler, in order to avoid getting into priority-inversion
++   problems which would deadlock the machine.
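
As a rough user-space sketch of the score described in the .rst above (mirroring calc_interactivity() in the fair.c hunk further down in this patch; the harness and names are illustrative, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the CacULE interactivity score: m is the
     * sched_interactivity_factor; the lower the score, the more
     * interactive the task (maximum score = 2m). */
    static unsigned int interactivity_score(uint64_t life_ns,
                                            uint64_t vruntime_ns, uint64_t m)
    {
        uint64_t vr = vruntime_ns | 1;      /* keep divisors non-zero */
        uint64_t sleep = 1;

        if (life_ns > vr)                   /* lifetime not spent running */
            sleep = (life_ns - vr) | 1;

        if (sleep >= vr)                    /* mostly sleeping: score 0..m */
            return m / (sleep / vr);
        return 2 * m - (m / (vr / sleep));  /* mostly running: score m..2m */
    }

    int main(void)
    {
        /* two tasks alive 100 ms: one ran 10 ms, one ran 90 ms */
        printf("sleeper: %u\n",
               interactivity_score(100000000ULL, 10000000ULL, 10));
        printf("hog:     %u\n",
               interactivity_score(100000000ULL, 90000000ULL, 10));
        return 0;
    }

With the default factor of 10 this prints 1 for the sleeper and 19 for the hog, matching the documentation's claim that the maximum score is 20.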
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 2660ee4b08ad..b54f0660cc86 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -456,10 +456,23 @@ struct sched_statistics {
+ #endif
+ };
+
++#ifdef CONFIG_CACULE_SCHED
++struct cacule_node {
++ struct cacule_node* next;
++ struct cacule_node* prev;
++ u64 cacule_start_time;
++ u64 last_run;
++ u64 vruntime;
++};
++#endif
++
+ struct sched_entity {
+ /* For load-balancing: */
+ struct load_weight load;
+ struct rb_node run_node;
++#ifdef CONFIG_CACULE_SCHED
++ struct cacule_node cacule_node;
++#endif
+ struct list_head group_node;
+ unsigned int on_rq;
+
+diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
+index 3c31ba88aca5..e79ca8c67a70 100644
+--- a/include/linux/sched/sysctl.h
++++ b/include/linux/sched/sysctl.h
+@@ -31,6 +31,16 @@ extern unsigned int sysctl_sched_min_granularity;
+ extern unsigned int sysctl_sched_wakeup_granularity;
+ extern unsigned int sysctl_sched_child_runs_first;
+
++#ifdef CONFIG_CACULE_SCHED
++extern unsigned int interactivity_factor;
++extern unsigned int cacule_max_lifetime;
++extern unsigned int cache_factor;
++extern unsigned int cache_divisor;
++extern unsigned int starve_factor;
++extern unsigned int starve_divisor;
++extern int cacule_yield;
++#endif
++
+ enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
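
For reference, the fair.c and core.c hunks further down in this patch give these externs their defaults: interactivity_factor = 32768, cacule_max_lifetime = 22000 (ms), cache_factor = 13107 with cache_divisor = 1000000 (1 ms), starve_factor = 19660 with starve_divisor = 3000000 (3 ms), and cacule_yield = 1.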
+diff --git a/init/Kconfig b/init/Kconfig
+index fc4c9f416fad..e93632e5b7fc 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -825,6 +825,51 @@ config UCLAMP_BUCKETS_COUNT
+
+ endmenu
+
++config CACULE_SCHED
++ bool "CacULE CPU scheduler"
++ default y
++ help
++ The CacULE CPU scheduler is based on an interactivity score mechanism.
++ The interactivity score is inspired by the ULE scheduler (FreeBSD
++ scheduler).
++
++ If unsure, say Y here.
++
++config CACULE_RDB
++ bool "RDB (Response Driven Balancer)"
++ default y
++ depends on CACULE_SCHED
++ help
++ This is an experimental load balancer for CacULE: a lightweight
++ replacement for the CFS load balancer. It migrates tasks based on
++ their interactivity scores.
++
++ If unsure, say Y here.
++
++config RDB_INTERVAL
++ int "RDB load balancer interval"
++ default 19
++ depends on CACULE_RDB
++ help
++ This interval controls how often the RDB load balancer may run.
++ trigger_load_balance runs on every tick. At high HZ values, per-tick
++ load balancing could be overwhelming, and RDB load balancing takes rq
++ locks, which can reduce performance. The balance interval avoids
++ running the load balancer on every tick: for example, RDB_INTERVAL=3
++ runs load balancing at most every 3ms. The right RDB_INTERVAL depends
++ on HZ. If you want the load balancer to run every 2ms at HZ=500, set
++ RDB_INTERVAL=0, since at 500HZ the tick period is already 2ms
++ (1000ms / 500HZ = 2ms). At 1000HZ, however, to keep the load balancer
++ from running every 1ms you could set RDB_INTERVAL=4 so that it runs
++ every 4ms instead. Lower RDB_INTERVAL values (or 0 to disable the
++ interval) ensure tasks are balanced as soon as possible, at the cost
++ of locking/blocking time. Higher RDB_INTERVAL values relax the
++ balancing locks, at the cost of an imbalanced workload for that period
++ (e.g. with RDB_INTERVAL=100 there is no balancing for 100ms, except
++ for newidle_balance, which is not affected by RDB_INTERVAL).
++
++ If in doubt, use the default value.
++
+ #
+ # For architectures that want to enable the support for NUMA-affine scheduler
+ # balancing logic:
+@@ -1208,6 +1253,7 @@ config SCHED_AUTOGROUP
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
++ default y
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
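
How the RDB_INTERVAL option above is consumed only becomes visible at the bottom of this patch, in trigger_load_balance(); condensed to a sketch (a simplified mirror of that hunk, not additional patch code):

    /* Simplified mirror of the trigger_load_balance() hunk at the end of
     * this patch: CONFIG_RDB_INTERVAL turns per-tick balancing into
     * at-most-every-N-ms balancing. */
    void trigger_load_balance(struct rq *rq)
    {
        if (time_before(jiffies, rq->next_balance))
            return;                      /* interval has not elapsed yet */

        if (rq->idle_balance)
            idle_try_pull_any(&rq->cfs); /* idle CPU: pull work from anywhere */
        else {
            active_balance(rq);          /* busy CPU: push/pull by IS score */
            /* re-arm: no balancing for the next RDB_INTERVAL milliseconds */
            rq->next_balance = jiffies + msecs_to_jiffies(CONFIG_RDB_INTERVAL);
        }
    }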
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 38ef6d06888e..865f8dbddca8 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -46,6 +46,9 @@ choice
+ 1000 Hz is the preferred choice for desktop systems and other
+ systems requiring fast interactive responses to events.
+
++ config HZ_2000
++ bool "2000 HZ"
++
+ endchoice
+
+ config HZ
+@@ -54,6 +57,7 @@ config HZ
+ default 250 if HZ_250
+ default 300 if HZ_300
+ default 1000 if HZ_1000
++ default 2000 if HZ_2000
+
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
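
A quick sanity check on what each HZ choice above means for the tick period (plain arithmetic, compilable stand-alone):

    #include <stdio.h>

    int main(void)
    {
        /* tick period in microseconds = 1000000 / HZ */
        int hz[] = { 100, 250, 300, 1000, 2000 };
        for (int i = 0; i < 5; i++)
            printf("HZ=%-4d -> tick every %4d us\n", hz[i], 1000000 / hz[i]);
        return 0;   /* the new HZ=2000 choice gives a 500 us tick */
    }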
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 679562d2f55d..b3c4594eb320 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -72,6 +72,10 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
+ */
+ unsigned int sysctl_sched_rt_period = 1000000;
+
++#ifdef CONFIG_CACULE_SCHED
++int __read_mostly cacule_yield = 1;
++#endif
++
+ __read_mostly int scheduler_running;
+
+ /*
+@@ -3068,6 +3072,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
++
++#ifdef CONFIG_CACULE_SCHED
++ p->se.cacule_node.vruntime = 0;
++#endif
++
+ INIT_LIST_HEAD(&p->se.group_node);
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+@@ -3352,6 +3361,10 @@ void wake_up_new_task(struct task_struct *p)
+ update_rq_clock(rq);
+ post_init_entity_util_avg(p);
+
++#ifdef CONFIG_CACULE_SCHED
++ p->se.cacule_node.cacule_start_time = sched_clock();
++#endif
++
+ activate_task(rq, p, ENQUEUE_NOCLOCK);
+ trace_sched_wakeup_new(p);
+ check_preempt_curr(rq, p, WF_FORK);
+@@ -4053,7 +4066,9 @@ static void sched_tick_remote(struct work_struct *work)
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *curr;
+ struct rq_flags rf;
++#if !defined(CONFIG_CACULE_SCHED)
+ u64 delta;
++#endif
+ int os;
+
+ /*
+@@ -4073,6 +4088,7 @@ static void sched_tick_remote(struct work_struct *work)
+
+ update_rq_clock(rq);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ if (!is_idle_task(curr)) {
+ /*
+ * Make sure the next tick runs within a reasonable
+@@ -4081,6 +4097,8 @@ static void sched_tick_remote(struct work_struct *work)
+ delta = rq_clock_task(rq) - curr->se.exec_start;
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ }
++#endif
++
+ curr->sched_class->task_tick(rq, curr, 0);
+
+ calc_load_nohz_remote(rq);
+@@ -6092,6 +6110,13 @@ static void do_sched_yield(void)
+ struct rq_flags rf;
+ struct rq *rq;
+
++#ifdef CONFIG_CACULE_SCHED
++ struct task_struct *curr = current;
++ struct cacule_node *cn = &curr->se.cacule_node;
++
++ if (cacule_yield)
++ cn->vruntime |= YIELD_MARK;
++#endif
+ rq = this_rq_lock_irq(&rf);
+
+ schedstat_inc(rq->yld_count);
+@@ -7066,6 +7091,14 @@ void __init sched_init(void)
+ BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
+ #endif
+
++#ifdef CONFIG_CACULE_SCHED
++#ifdef CONFIG_CACULE_RDB
++ printk(KERN_INFO "CacULE CPU scheduler (RDB) v5.10-r3 by Hamad Al Marri.");
++#else
++ printk(KERN_INFO "CacULE CPU scheduler v5.10-r3 by Hamad Al Marri.");
++#endif
++#endif
++
+ wait_bit_init();
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
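
The YIELD_MARK / YIELD_UNMARK macros used by do_sched_yield() above (and cleared again in pick_next_task_fair() and normalize_lifetime() further down) are defined outside this excerpt; the top-bit constants in this stand-alone sketch are an assumption consistent with the |= / &= usage:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed definitions (not shown in this excerpt): mark = MSB set.
     * ORing the mark into vruntime makes the yielding task look like the
     * least interactive task, so pick-next passes it over; ANDing with
     * the unmark value restores the real vruntime. */
    #define YIELD_MARK   0x8000000000000000ULL
    #define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL

    int main(void)
    {
        uint64_t vruntime = 123456789ULL;

        vruntime |= YIELD_MARK;     /* sched_yield(): demote the task   */
        printf("marked:   %llu\n", (unsigned long long)vruntime);

        vruntime &= YIELD_UNMARK;   /* next pick: restore real vruntime */
        printf("restored: %llu\n", (unsigned long long)vruntime);
        return 0;
    }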
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 70a578272436..506c0512610c 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+
+ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ {
+- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
+- spread, rq0_min_vruntime, spread0;
++ s64 MIN_vruntime = -1, max_vruntime = -1,
++#if !defined(CONFIG_CACULE_SCHED)
++ min_vruntime, rq0_min_vruntime, spread0,
++#endif
++ spread;
+ struct rq *rq = cpu_rq(cpu);
+ struct sched_entity *last;
+ unsigned long flags;
+@@ -576,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ last = __pick_last_entity(cfs_rq);
+ if (last)
+ max_vruntime = last->vruntime;
++#if !defined(CONFIG_CACULE_SCHED)
+ min_vruntime = cfs_rq->min_vruntime;
+ rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
++#endif
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
+ SPLIT_NS(MIN_vruntime));
++#if !defined(CONFIG_CACULE_SCHED)
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
+ SPLIT_NS(min_vruntime));
++#endif
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
+ SPLIT_NS(max_vruntime));
+ spread = max_vruntime - MIN_vruntime;
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
+ SPLIT_NS(spread));
++#if !defined(CONFIG_CACULE_SCHED)
+ spread0 = min_vruntime - rq0_min_vruntime;
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
+ SPLIT_NS(spread0));
++#endif
+ SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
+ cfs_rq->nr_spread_over);
+ SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 262b02d75007..cf3ae2a8568b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -19,9 +19,24 @@
+ *
+ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
++ *
++ * CacULE enhancements: CPU-cache-aware scheduling based on
++ * the Interactivity Score.
++ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
+ */
+ #include "sched.h"
+
++#ifdef CONFIG_CACULE_SCHED
++unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
++unsigned int __read_mostly interactivity_factor = 32768;
++
++unsigned int __read_mostly cache_factor = 13107;
++unsigned int __read_mostly cache_divisor = 1000000; // 1ms
++
++unsigned int __read_mostly starve_factor = 19660;
++unsigned int __read_mostly starve_divisor = 3000000; // 3ms
++#endif
++
+ /*
+ * Targeted preemption latency for CPU-bound tasks:
+ *
+@@ -82,7 +97,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
+ unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+
++#ifdef CONFIG_CACULE_SCHED
++const_debug unsigned int sysctl_sched_migration_cost = 200000UL;
++#else
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
++#endif
+
+ int sched_thermal_decay_shift;
+ static int __init setup_sched_thermal_decay_shift(char *str)
+@@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
+ const struct sched_class fair_sched_class;
+
++
++#ifdef CONFIG_CACULE_SCHED
++static inline struct sched_entity *se_of(struct cacule_node *cn)
++{
++ return container_of(cn, struct sched_entity, cacule_node);
++}
++#endif
++
+ /**************************************************************
+ * CFS operations on generic schedulable entities:
+ */
+@@ -512,7 +539,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+ /**************************************************************
+ * Scheduling class tree data structure manipulation methods:
+ */
+-
++#if !defined(CONFIG_CACULE_SCHED)
+ static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+ {
+ s64 delta = (s64)(vruntime - max_vruntime);
+@@ -568,7 +595,223 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+ #endif
+ }
++#endif /* CONFIG_CACULE_SCHED */
++
++#ifdef CONFIG_CACULE_SCHED
++static unsigned int
++calc_interactivity(u64 now, struct cacule_node *se)
++{
++ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
++ unsigned int score_se;
++
++ /*
++ * in case of vruntime==0, logical OR with 1 would
++ * make sure that the least sig. bit is 1
++ */
++ l_se = now - se->cacule_start_time;
++ vr_se = se->vruntime | 1;
++ u64_factor_m = interactivity_factor;
++ _2m = u64_factor_m << 1;
++
++ /* safety check */
++ if (likely(l_se > vr_se))
++ sleep_se = (l_se - vr_se) | 1;
++
++ if (sleep_se >= vr_se)
++ score_se = u64_factor_m / (sleep_se / vr_se);
++ else
++ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
++
++ return score_se;
++}
++
++static unsigned int
++calc_cache_score(u64 now, struct cacule_node *cn)
++{
++ struct sched_entity *se = se_of(cn);
++ struct cfs_rq *cfs_rq = cfs_rq_of(se);
++ u64 c_div = cache_divisor;
++ u64 cache_period = 1ULL;
++ u64 u64_factor_m = cache_factor;
++ u64 _2m = u64_factor_m << 1;
++ unsigned int score;
++
++ if (!cache_factor)
++ return 0;
++
++ if (se == cfs_rq->curr)
++ return 0;
++
++ cache_period = (now - se->exec_start) | 1;
++
++ if (c_div >= cache_period)
++ score = u64_factor_m / (c_div / cache_period);
++ else
++ score = _2m - (u64_factor_m / (cache_period / c_div));
++
++ return score;
++}
++
++static unsigned int
++calc_starve_score(u64 now, struct cacule_node *cn)
++{
++ struct sched_entity *se = se_of(cn);
++ struct cfs_rq *cfs_rq = cfs_rq_of(se);
++ u64 s_div = starve_divisor;
++ u64 starving = 1ULL;
++ u64 u64_factor_m = starve_factor;
++ u64 _2m = u64_factor_m << 1;
++ unsigned int score;
++
++ if (!starve_factor)
++ return 0;
++
++ if (se == cfs_rq->curr)
++ return _2m;
++
++ starving = (now - cn->last_run) | 1;
++
++ if (s_div >= starving)
++ score = _2m - (u64_factor_m / (s_div / starving));
++ else
++ score = u64_factor_m / (starving / s_div);
++
++ return score;
++}
++
++static inline int cn_has_idle_policy(struct cacule_node *cn)
++{
++ struct sched_entity *se = se_of(cn);
++
++ if (!entity_is_task(se))
++ return false;
++
++ return task_has_idle_policy(task_of(se));
++}
+
++/*
++ * Does se have a lower interactivity score (i.e. is it more interactive)
++ * than curr? If yes, return 1; otherwise, return -1.
++ * se goes before curr if se has the lower interactivity score:
++ * the lower the score, the more interactive.
++ */
++static inline int
++entity_before(u64 now, struct cacule_node *curr, struct cacule_node *se)
++{
++ unsigned int score_curr, score_se;
++ int diff;
++ int is_curr_idle = cn_has_idle_policy(curr);
++ int is_se_idle = cn_has_idle_policy(se);
++
++ /* if curr is normal but se is idle class, then no */
++ if (!is_curr_idle && is_se_idle)
++ return -1;
++
++ /* if curr is idle class and se is normal, then yes */
++ if (is_curr_idle && !is_se_idle)
++ return 1;
++
++ score_curr = calc_interactivity(now, curr);
++ score_curr += calc_cache_score(now, curr);
++ score_curr += calc_starve_score(now, curr);
++
++ score_se = calc_interactivity(now, se);
++ score_se += calc_cache_score(now, se);
++ score_se += calc_starve_score(now, se);
++
++ diff = score_se - score_curr;
++
++ if (diff < 0)
++ return 1;
++
++ return -1;
++}
++
++#ifdef CONFIG_CACULE_RDB
++static void update_IS(struct rq *rq)
++{
++ struct list_head *tasks = &rq->cfs_tasks;
++ struct task_struct *p, *to_migrate = NULL;
++ unsigned int max_IS = ~0, temp_IS;
++
++ list_for_each_entry(p, tasks, se.group_node) {
++ if (task_running(rq, p))
++ continue;
++
++ temp_IS = calc_interactivity(sched_clock(), &p->se.cacule_node);
++ if (temp_IS < max_IS) {
++ to_migrate = p;
++ max_IS = temp_IS;
++ }
++ }
++
++ if (to_migrate) {
++ WRITE_ONCE(rq->max_IS_score, max_IS);
++ WRITE_ONCE(rq->to_migrate_task, to_migrate);
++ } else if (rq->max_IS_score != ~0) {
++ WRITE_ONCE(rq->max_IS_score, ~0);
++ WRITE_ONCE(rq->to_migrate_task, NULL);
++ }
++}
++#endif
++
++/*
++ * Enqueue an entity
++ */
++static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
++{
++ struct cacule_node *se = &(_se->cacule_node);
++
++ se->next = NULL;
++ se->prev = NULL;
++
++ if (cfs_rq->head) {
++ // insert se at head
++ se->next = cfs_rq->head;
++ cfs_rq->head->prev = se;
++
++ // lastly reset the head
++ cfs_rq->head = se;
++ } else {
++ // if empty rq
++ cfs_rq->head = se;
++ cfs_rq->tail = se;
++ }
++}
++
++static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
++{
++ struct cacule_node *se = &(_se->cacule_node);
++
++ // if only one se in rq
++ if (cfs_rq->head == cfs_rq->tail) {
++ cfs_rq->head = NULL;
++ cfs_rq->tail = NULL;
++ } else if (se == cfs_rq->head) {
++ // if it is the head
++ cfs_rq->head = cfs_rq->head->next;
++ cfs_rq->head->prev = NULL;
++ } else if (se == cfs_rq->tail) {
++ // if it is the tail
++ cfs_rq->tail = cfs_rq->tail->prev;
++ cfs_rq->tail->next = NULL;
++ } else {
++ // if in the middle
++ struct cacule_node *prev = se->prev;
++ struct cacule_node *next = se->next;
++
++ prev->next = next;
++
++ if (next)
++ next->prev = prev;
++ }
++}
++
++struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
++{
++ return se_of(cfs_rq->head);
++}
++#else
+ /*
+ * Enqueue an entity into the rb-tree:
+ */
+@@ -626,16 +869,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
+ return rb_entry(next, struct sched_entity, run_node);
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ #ifdef CONFIG_SCHED_DEBUG
+ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+ {
++#ifdef CONFIG_CACULE_SCHED
++ if (!cfs_rq->tail)
++ return NULL;
++
++ return se_of(cfs_rq->tail);
++#else
+ struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
+
+ if (!last)
+ return NULL;
+
+ return rb_entry(last, struct sched_entity, run_node);
++#endif /* CONFIG_CACULE_SCHED */
+ }
+
+ /**************************************************************
+@@ -730,6 +981,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ return slice;
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * We calculate the vruntime slice of a to-be-inserted task.
+ *
+@@ -739,6 +991,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ return calc_delta_fair(sched_slice(cfs_rq, se), se);
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ #include "pelt.h"
+ #ifdef CONFIG_SMP
+@@ -846,14 +1099,55 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+ }
+ #endif /* CONFIG_SMP */
+
++#ifdef CONFIG_CACULE_SCHED
++static void normalize_lifetime(u64 now, struct sched_entity *se)
++{
++ struct cacule_node *cn = &se->cacule_node;
++ u64 max_life_ns, life_time, old_hrrn_x;
++ s64 diff;
++
++ /*
++ * left shift 20 bits is approximately = * 1000000
++ * we don't need the precision of life time
++ * Ex. for 30s, with left shift (20bits) == 31.457s
++ */
++ max_life_ns = ((u64) cacule_max_lifetime) << 20;
++ life_time = now - cn->cacule_start_time;
++ diff = life_time - max_life_ns;
++
++ if (diff > 0) {
++ // unmark YIELD. No need to check or remark since
++ // this normalize action doesn't happen very often
++ cn->vruntime &= YIELD_UNMARK;
++
++ // multiply life_time by 1024 for more precision
++ old_hrrn_x = (life_time << 7) / ((cn->vruntime >> 3) | 1);
++
++ // reset life to half max_life (i.e ~15s)
++ cn->cacule_start_time = now - (max_life_ns >> 1);
++
++ // avoid division by zero
++ if (old_hrrn_x == 0) old_hrrn_x = 1;
++
++ // reset vruntime based on old hrrn ratio
++ cn->vruntime = (max_life_ns << 9) / old_hrrn_x;
++ }
++}
++#endif /* CONFIG_CACULE_SCHED */
++
+ /*
+ * Update the current task's runtime statistics.
+ */
+ static void update_curr(struct cfs_rq *cfs_rq)
+ {
+ struct sched_entity *curr = cfs_rq->curr;
++#ifdef CONFIG_CACULE_SCHED
++ u64 now = sched_clock();
++ u64 delta_exec, delta_fair;
++#else
+ u64 now = rq_clock_task(rq_of(cfs_rq));
+ u64 delta_exec;
++#endif
+
+ if (unlikely(!curr))
+ return;
+@@ -870,8 +1164,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ curr->sum_exec_runtime += delta_exec;
+ schedstat_add(cfs_rq->exec_clock, delta_exec);
+
++#ifdef CONFIG_CACULE_SCHED
++ curr->cacule_node.last_run = now;
++ delta_fair = calc_delta_fair(delta_exec, curr);
++ curr->vruntime += delta_fair;
++ curr->cacule_node.vruntime += delta_fair;
++ normalize_lifetime(now, curr);
++#else
+ curr->vruntime += calc_delta_fair(delta_exec, curr);
+ update_min_vruntime(cfs_rq);
++#endif
+
+ if (entity_is_task(curr)) {
+ struct task_struct *curtask = task_of(curr);
+@@ -1030,7 +1332,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ static inline void
+ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ {
+-
+ if (!schedstat_enabled())
+ return;
+
+@@ -1062,7 +1363,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ /*
+ * We are starting a new run period:
+ */
++#ifdef CONFIG_CACULE_SCHED
++ se->exec_start = sched_clock();
++ se->cacule_node.last_run = sched_clock();
++#else
+ se->exec_start = rq_clock_task(rq_of(cfs_rq));
++#endif
+ }
+
+ /**************************************************
+@@ -4129,7 +4435,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
+ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
+ s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+ if (d < 0)
+@@ -4140,6 +4446,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ #endif
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ {
+@@ -4171,6 +4478,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ /* ensure we never gain time by being placed backwards. */
+ se->vruntime = max_vruntime(se->vruntime, vruntime);
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
+@@ -4229,18 +4537,23 @@ static inline bool cfs_bandwidth_used(void);
+ static void
+ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ {
++#if !defined(CONFIG_CACULE_SCHED)
+ bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
++#endif
+ bool curr = cfs_rq->curr == se;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * If we're the current task, we must renormalise before calling
+ * update_curr().
+ */
+ if (renorm && curr)
+ se->vruntime += cfs_rq->min_vruntime;
++#endif
+
+ update_curr(cfs_rq);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Otherwise, renormalise after, such that we're placed at the current
+ * moment in time, instead of some random moment in the past. Being
+@@ -4249,6 +4562,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ */
+ if (renorm && !curr)
+ se->vruntime += cfs_rq->min_vruntime;
++#endif
+
+ /*
+ * When enqueuing a sched_entity, we must:
+@@ -4263,8 +4577,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ update_cfs_group(se);
+ account_entity_enqueue(cfs_rq, se);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ if (flags & ENQUEUE_WAKEUP)
+ place_entity(cfs_rq, se, 0);
++#endif
+
+ check_schedstat_required();
+ update_stats_enqueue(cfs_rq, se, flags);
+@@ -4285,6 +4601,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ check_enqueue_throttle(cfs_rq);
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ static void __clear_buddies_last(struct sched_entity *se)
+ {
+ for_each_sched_entity(se) {
+@@ -4329,6 +4646,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ if (cfs_rq->skip == se)
+ __clear_buddies_skip(se);
+ }
++#endif /* !CONFIG_CACULE_SCHED */
+
+ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
+@@ -4353,13 +4671,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
+ update_stats_dequeue(cfs_rq, se, flags);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ clear_buddies(cfs_rq, se);
++#endif
+
+ if (se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, se);
+ se->on_rq = 0;
+ account_entity_dequeue(cfs_rq, se);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Normalize after update_curr(); which will also have moved
+ * min_vruntime if @se is the one holding it back. But before doing
+@@ -4368,12 +4689,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ */
+ if (!(flags & DEQUEUE_SLEEP))
+ se->vruntime -= cfs_rq->min_vruntime;
++#endif
+
+ /* return excess runtime on last dequeue */
+ return_cfs_rq_runtime(cfs_rq);
+
+ update_cfs_group(se);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Now advance min_vruntime if @se was the entity holding it back,
+ * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
+@@ -4382,8 +4705,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ */
+ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
+ update_min_vruntime(cfs_rq);
++#endif
+ }
+
++#ifdef CONFIG_CACULE_SCHED
++static struct sched_entity *
++pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
++
++/*
++ * Preempt the current task with a newly woken task if needed:
++ */
++static void
++check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
++{
++ if (pick_next_entity(cfs_rq, curr) != curr)
++ resched_curr(rq_of(cfs_rq));
++}
++#else
+ /*
+ * Preempt the current task with a newly woken task if needed:
+ */
+@@ -4423,6 +4761,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ if (delta > ideal_runtime)
+ resched_curr(rq_of(cfs_rq));
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ static void
+ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4457,6 +4796,31 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ se->prev_sum_exec_runtime = se->sum_exec_runtime;
+ }
+
++#ifdef CONFIG_CACULE_SCHED
++static struct sched_entity *
++pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
++{
++ struct cacule_node *se = cfs_rq->head;
++ struct cacule_node *next;
++ u64 now = sched_clock();
++
++ if (!se)
++ return curr;
++
++ next = se->next;
++ while (next) {
++ if (entity_before(now, se, next) == 1)
++ se = next;
++
++ next = next->next;
++ }
++
++ if (curr && entity_before(now, se, &curr->cacule_node) == 1)
++ return curr;
++
++ return se_of(se);
++}
++#else
+ static int
+ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
+@@ -4517,6 +4881,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
+ return se;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
+@@ -5608,9 +5973,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ assert_list_leaf_cfs_rq(rq);
+
+ hrtick_update(rq);
++
++#ifdef CONFIG_CACULE_RDB
++ update_IS(rq);
++#endif
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ static void set_next_buddy(struct sched_entity *se);
++#endif
+
+ /*
+ * The dequeue_task method is called before nr_running is
+@@ -5642,12 +6013,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ if (cfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+ se = parent_entity(se);
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Bias pick_next to pick a task from this cfs_rq, as
+ * p is sleeping when it is within its sched_slice.
+ */
+ if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+ set_next_buddy(se);
++#endif
+ break;
+ }
+ flags |= DEQUEUE_SLEEP;
+@@ -5679,6 +6052,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ dequeue_throttle:
+ util_est_update(&rq->cfs, p, task_sleep);
+ hrtick_update(rq);
++
++#ifdef CONFIG_CACULE_RDB
++ update_IS(rq);
++#endif
+ }
+
+ #ifdef CONFIG_SMP
+@@ -5763,6 +6140,7 @@ static unsigned long capacity_of(int cpu)
+ return cpu_rq(cpu)->cpu_capacity;
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ static void record_wakee(struct task_struct *p)
+ {
+ /*
+@@ -5809,6 +6187,7 @@ static int wake_wide(struct task_struct *p)
+ return 0;
+ return 1;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ /*
+ * The purpose of wake_affine() is to quickly determine on which CPU we can run
+@@ -6485,6 +6864,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+ return min_t(unsigned long, util, capacity_orig_of(cpu));
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
+ * to @dst_cpu.
+@@ -6718,6 +7098,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
+ return -1;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ /*
+ * select_task_rq_fair: Select target runqueue for the waking task in domains
+@@ -6740,6 +7121,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+ int want_affine = 0;
+ int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ if (sd_flag & SD_BALANCE_WAKE) {
+ record_wakee(p);
+
+@@ -6752,6 +7134,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+
+ want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ rcu_read_lock();
+ for_each_domain(cpu, tmp) {
+@@ -6799,6 +7182,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+ */
+ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+ {
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * As blocked tasks retain absolute vruntime the migration needs to
+ * deal with this by subtracting the old and adding the new
+@@ -6824,6 +7208,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
+ se->vruntime -= min_vruntime;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ if (p->on_rq == TASK_ON_RQ_MIGRATING) {
+ /*
+@@ -6869,6 +7254,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ }
+ #endif /* CONFIG_SMP */
+
++#if !defined(CONFIG_CACULE_SCHED)
+ static unsigned long wakeup_gran(struct sched_entity *se)
+ {
+ unsigned long gran = sysctl_sched_wakeup_granularity;
+@@ -6947,6 +7333,7 @@ static void set_skip_buddy(struct sched_entity *se)
+ for_each_sched_entity(se)
+ cfs_rq_of(se)->skip = se;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ /*
+ * Preempt the current task with a newly woken task if needed:
+@@ -6955,9 +7342,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ {
+ struct task_struct *curr = rq->curr;
+ struct sched_entity *se = &curr->se, *pse = &p->se;
++
++#if !defined(CONFIG_CACULE_SCHED)
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ int scale = cfs_rq->nr_running >= sched_nr_latency;
+ int next_buddy_marked = 0;
++#endif /* CONFIG_CACULE_SCHED */
+
+ if (unlikely(se == pse))
+ return;
+@@ -6971,10 +7361,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
+ return;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
+ set_next_buddy(pse);
+ next_buddy_marked = 1;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ /*
+ * We can come here with TIF_NEED_RESCHED already set from new task
+@@ -7004,6 +7396,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ find_matching_se(&se, &pse);
+ update_curr(cfs_rq_of(se));
+ BUG_ON(!pse);
++
++#ifdef CONFIG_CACULE_SCHED
++ if (entity_before(sched_clock(), &se->cacule_node, &pse->cacule_node) == 1)
++ goto preempt;
++#else
+ if (wakeup_preempt_entity(se, pse) == 1) {
+ /*
+ * Bias pick_next to pick the sched entity that is
+@@ -7013,11 +7410,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ set_next_buddy(pse);
+ goto preempt;
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ return;
+
+ preempt:
+ resched_curr(rq);
++
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+@@ -7032,6 +7432,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
+ if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+ set_last_buddy(se);
++#endif /* CONFIG_CACULE_SCHED */
+ }
+
+ struct task_struct *
+@@ -7093,6 +7494,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ cfs_rq = group_cfs_rq(se);
+ } while (cfs_rq);
+
++ /*
++ * Here we picked a sched_entity starting from
++ * the same group as curr, but the task could
++ * be a child of the selected sched_entity.
++ */
+ p = task_of(se);
+
+ /*
+@@ -7103,6 +7509,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ if (prev != p) {
+ struct sched_entity *pse = &prev->se;
+
++ /* while se and pse are not in the same group */
+ while (!(cfs_rq = is_same_group(se, pse))) {
+ int se_depth = se->depth;
+ int pse_depth = pse->depth;
+@@ -7117,6 +7524,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ }
+ }
+
++ /* Here we reached the point where both
++ * sched_entities are in the same group.
++ */
+ put_prev_entity(cfs_rq, pse);
+ set_next_entity(cfs_rq, se);
+ }
+@@ -7127,6 +7537,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ if (prev)
+ put_prev_task(rq, prev);
+
++ /* Going down the hierarchy */
+ do {
+ se = pick_next_entity(cfs_rq, NULL);
+ set_next_entity(cfs_rq, se);
+@@ -7136,6 +7547,14 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+ p = task_of(se);
+
+ done: __maybe_unused;
++#ifdef CONFIG_CACULE_SCHED
++ if (prev)
++ prev->se.cacule_node.vruntime &= YIELD_UNMARK;
++
++#ifdef CONFIG_CACULE_RDB
++ update_IS(rq);
++#endif
++#endif
+ #ifdef CONFIG_SMP
+ /*
+ * Move the next running task to the front of
+@@ -7153,6 +7572,11 @@ done: __maybe_unused;
+ return p;
+
+ idle:
++#ifdef CONFIG_CACULE_RDB
++ WRITE_ONCE(rq->max_IS_score, ~0);
++ WRITE_ONCE(rq->to_migrate_task, NULL);
++#endif
++
+ if (!rf)
+ return NULL;
+
+@@ -7206,7 +7630,10 @@ static void yield_task_fair(struct rq *rq)
+ {
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
++
++#if !defined(CONFIG_CACULE_SCHED)
+ struct sched_entity *se = &curr->se;
++#endif
+
+ /*
+ * Are we the only task in the tree?
+@@ -7214,7 +7641,9 @@ static void yield_task_fair(struct rq *rq)
+ if (unlikely(rq->nr_running == 1))
+ return;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ clear_buddies(cfs_rq, se);
++#endif
+
+ if (curr->policy != SCHED_BATCH) {
+ update_rq_clock(rq);
+@@ -7230,7 +7659,9 @@ static void yield_task_fair(struct rq *rq)
+ rq_clock_skip_update(rq);
+ }
+
++#if !defined(CONFIG_CACULE_SCHED)
+ set_skip_buddy(se);
++#endif
+ }
+
+ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7241,8 +7672,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+ if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
+ return false;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /* Tell the scheduler that we'd really like pse to run next. */
+ set_next_buddy(se);
++#endif
+
+ yield_task_fair(rq);
+
+@@ -7451,6 +7884,7 @@ struct lb_env {
+ struct list_head tasks;
+ };
+
++#if !defined(CONFIG_CACULE_RDB)
+ /*
+ * Is this task likely cache-hot:
+ */
+@@ -7470,6 +7904,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+ if (env->sd->flags & SD_SHARE_CPUCAPACITY)
+ return 0;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /*
+ * Buddy candidates are cache hot:
+ */
+@@ -7477,6 +7912,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+ (&p->se == cfs_rq_of(&p->se)->next ||
+ &p->se == cfs_rq_of(&p->se)->last))
+ return 1;
++#endif
+
+ if (sysctl_sched_migration_cost == -1)
+ return 1;
+@@ -7854,6 +8290,7 @@ static void attach_tasks(struct lb_env *env)
+
+ rq_unlock(env->dst_rq, &rf);
+ }
++#endif
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
+@@ -7899,6 +8336,7 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
+ static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
+ #endif
+
++#if !defined(CONFIG_CACULE_RDB)
+ static bool __update_blocked_others(struct rq *rq, bool *done)
+ {
+ const struct sched_class *curr_class;
+@@ -7924,6 +8362,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
+
+ return decayed;
+ }
++#endif
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+
+@@ -7944,6 +8383,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+ return true;
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ static bool __update_blocked_fair(struct rq *rq, bool *done)
+ {
+ struct cfs_rq *cfs_rq, *pos;
+@@ -7983,6 +8423,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
+
+ return decayed;
+ }
++#endif
+
+ /*
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
+@@ -8049,6 +8490,7 @@ static unsigned long task_h_load(struct task_struct *p)
+ }
+ #endif
+
++#if !defined(CONFIG_CACULE_RDB)
+ static void update_blocked_averages(int cpu)
+ {
+ bool decayed = false, done = true;
+@@ -8066,6 +8508,7 @@ static void update_blocked_averages(int cpu)
+ cpufreq_update_util(rq, 0);
+ rq_unlock_irqrestore(rq, &rf);
+ }
++#endif
+
+ /********** Helpers for find_busiest_group ************************/
+
+@@ -8400,7 +8843,9 @@ static bool update_nohz_stats(struct rq *rq, bool force)
+ if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
+ return true;
+
++#if !defined(CONFIG_CACULE_RDB)
+ update_blocked_averages(cpu);
++#endif
+
+ return rq->has_blocked_load;
+ #else
+@@ -9211,6 +9656,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ * different in groups.
+ */
+
++#if !defined(CONFIG_CACULE_RDB)
+ /**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance.
+@@ -9476,6 +9922,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+
+ return busiest;
+ }
++#endif
+
+ /*
+ * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
+@@ -9495,6 +9942,7 @@ asym_active_balance(struct lb_env *env)
+ sched_asym_prefer(env->dst_cpu, env->src_cpu);
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ static inline bool
+ voluntary_active_balance(struct lb_env *env)
+ {
+@@ -9843,6 +10291,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ out:
+ return ld_moved;
+ }
++#endif
+
+ static inline unsigned long
+ get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
+@@ -9881,6 +10330,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
+ *next_balance = next;
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ /*
+ * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
+ * running tasks off the busiest CPU onto idle CPUs. It requires at
+@@ -9972,6 +10422,7 @@ static int active_load_balance_cpu_stop(void *data)
+ }
+
+ static DEFINE_SPINLOCK(balancing);
++#endif
+
+ /*
+ * Scale the max load_balance interval with the number of CPUs in the system.
+@@ -9982,6 +10433,7 @@ void update_max_interval(void)
+ max_load_balance_interval = HZ*num_online_cpus()/10;
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ /*
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+@@ -10087,6 +10539,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+ #endif
+ }
+ }
++#endif
+
+ static inline int on_null_domain(struct rq *rq)
+ {
+@@ -10116,6 +10569,7 @@ static inline int find_new_ilb(void)
+ return nr_cpu_ids;
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ /*
+ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
+ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
+@@ -10266,6 +10720,7 @@ static void nohz_balancer_kick(struct rq *rq)
+ if (flags)
+ kick_ilb(flags);
+ }
++#endif /* CONFIG_CACULE_RDB */
+
+ static void set_cpu_sd_state_busy(int cpu)
+ {
+@@ -10373,6 +10828,7 @@ void nohz_balance_enter_idle(int cpu)
+ WRITE_ONCE(nohz.has_blocked, 1);
+ }
+
++#if !defined(CONFIG_CACULE_RDB)
+ /*
+ * Internal function that runs load balance for all idle cpus. The load balance
+ * can be a simple update of blocked load or a complete load balance with
+@@ -10442,6 +10898,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+
+ if (flags & NOHZ_BALANCE_KICK)
+ rebalance_domains(rq, CPU_IDLE);
++
+ }
+
+ if (time_after(next_balance, rq->next_balance)) {
+@@ -10458,6 +10915,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ if (likely(update_next_balance))
+ nohz.next_balance = next_balance;
+
++#if !defined(CONFIG_CACULE_RDB)
+ /* Newly idle CPU doesn't need an update */
+ if (idle != CPU_NEWLY_IDLE) {
+ update_blocked_averages(this_cpu);
+@@ -10466,6 +10924,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+
+ if (flags & NOHZ_BALANCE_KICK)
+ rebalance_domains(this_rq, CPU_IDLE);
++#endif
+
+ WRITE_ONCE(nohz.next_blocked,
+ now + msecs_to_jiffies(LOAD_AVG_PERIOD));
+@@ -10513,9 +10972,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
+ if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
+ return;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /* Will wake up very soon. No time for doing anything else*/
+ if (this_rq->avg_idle < sysctl_sched_migration_cost)
+ return;
++#endif
+
+ /* Don't need to update blocked load of idle CPUs*/
+ if (!READ_ONCE(nohz.has_blocked) ||
+@@ -10533,8 +10994,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
+ kick_ilb(NOHZ_STATS_KICK);
+ raw_spin_lock(&this_rq->lock);
+ }
++#endif
+
+ #else /* !CONFIG_NO_HZ_COMMON */
++#if !defined(CONFIG_CACULE_RDB)
+ static inline void nohz_balancer_kick(struct rq *rq) { }
+
+ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+@@ -10543,8 +11006,134 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
+ }
+
+ static inline void nohz_newidle_balance(struct rq *this_rq) { }
++#endif
++
+ #endif /* CONFIG_NO_HZ_COMMON */
+
++#ifdef CONFIG_CACULE_RDB
++static int
++can_migrate_task(struct task_struct *p, int dst_cpu, struct rq *src_rq)
++{
++ if (task_running(src_rq, p))
++ return 0;
++
++ /* Disregard pcpu kthreads; they are where they need to be. */
++ if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++ return 0;
++
++ if (!cpumask_test_cpu(dst_cpu, p->cpus_ptr))
++ return 0;
++
++ if (p->se.exec_start == 0)
++ return 0;
++
++ return 1;
++}
++
++static void push_to_unlock(struct rq *this_rq,
++ struct rq *dst_rq,
++ struct task_struct *p,
++ int dst_cpu)
++{
++ struct rq_flags rf;
++
++ // detach task
++ deactivate_task(this_rq, p, DEQUEUE_NOCLOCK);
++ set_task_cpu(p, dst_cpu);
++
++ // unlock this rq
++ raw_spin_unlock(&this_rq->lock);
++
++ /* push to */
++ rq_lock_irqsave(dst_rq, &rf);
++ update_rq_clock(dst_rq);
++
++ activate_task(dst_rq, p, ENQUEUE_NOCLOCK);
++ check_preempt_curr(dst_rq, p, 0);
++
++ // unlock src rq
++ rq_unlock(dst_rq, &rf);
++ local_irq_restore(rf.flags);
++}
++
++static void pull_from_unlock(struct rq *this_rq,
++ struct rq *src_rq,
++ struct rq_flags *rf,
++ struct task_struct *p,
++ int dst_cpu)
++{
++ // detach task
++ deactivate_task(src_rq, p, DEQUEUE_NOCLOCK);
++ set_task_cpu(p, dst_cpu);
++
++ // unlock src rq
++ rq_unlock(src_rq, rf);
++ local_irq_restore(rf->flags);
++
++ // lock this rq
++ raw_spin_lock(&this_rq->lock);
++ update_rq_clock(this_rq);
++
++ activate_task(this_rq, p, ENQUEUE_NOCLOCK);
++ check_preempt_curr(this_rq, p, 0);
++
++ // unlock this rq
++ raw_spin_unlock(&this_rq->lock);
++}
++
++static inline struct rq *
++find_max_IS_rq(struct rq *this_rq, int dst_cpu)
++{
++ struct rq *tmp_rq, *max_rq = NULL;
++ int cpu;
++ unsigned int max_IS = this_rq->max_IS_score;
++ unsigned int local_IS;
++
++ // find the rq holding the best (lowest) IS score
++ for_each_online_cpu(cpu) {
++ if (cpu == dst_cpu)
++ continue;
++
++ tmp_rq = cpu_rq(cpu);
++
++ if (tmp_rq->nr_running < 2 || !(READ_ONCE(tmp_rq->to_migrate_task)))
++ continue;
++
++ local_IS = READ_ONCE(tmp_rq->max_IS_score);
++
++ if (local_IS < max_IS) {
++ max_IS = local_IS;
++ max_rq = tmp_rq;
++ }
++ }
++
++ return max_rq;
++}
++
++static int try_pull_from(struct rq *src_rq, struct rq *this_rq)
++{
++ struct rq_flags rf;
++ int dst_cpu = cpu_of(this_rq);
++ struct task_struct *p;
++
++ rq_lock_irqsave(src_rq, &rf);
++ update_rq_clock(src_rq);
++
++ if (src_rq->to_migrate_task && src_rq->nr_running > 1) {
++ p = src_rq->to_migrate_task;
++
++ if (can_migrate_task(p, dst_cpu, src_rq)) {
++ pull_from_unlock(this_rq, src_rq, &rf, p, dst_cpu);
++ return 1;
++ }
++ }
++
++ rq_unlock(src_rq, &rf);
++ local_irq_restore(rf.flags);
++
++ return 0;
++}
++
+ /*
+ * idle_balance is called by schedule() if this_cpu is about to become
+ * idle. Attempts to pull tasks from other CPUs.
+@@ -10555,6 +11144,109 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
+ * > 0 - success, new (fair) tasks present
+ */
+ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
++{
++ int this_cpu = this_rq->cpu;
++ struct task_struct *p = NULL;
++ struct rq *src_rq;
++ int src_cpu;
++ struct rq_flags src_rf;
++ int pulled_task = 0;
++ int cores_round = 1;
++
++ update_misfit_status(NULL, this_rq);
++ /*
++ * We must set idle_stamp _before_ calling idle_balance(), such that we
++ * measure the duration of idle_balance() as idle time.
++ */
++ this_rq->idle_stamp = rq_clock(this_rq);
++
++ /*
++ * Do not pull tasks towards !active CPUs...
++ */
++ if (!cpu_active(this_cpu))
++ return 0;
++
++ /*
++ * This is OK, because current is on_cpu, which avoids it being picked
++ * for load-balance and preemption/IRQs are still disabled avoiding
++ * further scheduler activity on it and we're being very careful to
++ * re-start the picking loop.
++ */
++ rq_unpin_lock(this_rq, rf);
++ raw_spin_unlock(&this_rq->lock);
++
++again:
++ for_each_online_cpu(src_cpu) {
++
++ if (src_cpu == this_cpu)
++ continue;
++
++ if (cores_round && !cpus_share_cache(src_cpu, this_cpu))
++ continue;
++
++ src_rq = cpu_rq(src_cpu);
++
++ if (src_rq->nr_running < 2
++ || !(READ_ONCE(src_rq->to_migrate_task)))
++ continue;
++
++ rq_lock_irqsave(src_rq, &src_rf);
++ update_rq_clock(src_rq);
++
++ if (src_rq->nr_running < 2 || !(src_rq->to_migrate_task))
++ goto next;
++
++ p = src_rq->to_migrate_task;
++
++ if (can_migrate_task(p, this_cpu, src_rq)) {
++ pull_from_unlock(this_rq, src_rq, &src_rf, p, this_cpu);
++
++ pulled_task = 1;
++ goto out;
++ }
++
++next:
++ rq_unlock(src_rq, &src_rf);
++ local_irq_restore(src_rf.flags);
++
++ /*
++ * Stop searching for tasks to pull if there are
++ * now runnable tasks on this rq.
++ */
++ if (pulled_task || this_rq->nr_running > 0)
++ goto out;
++ }
++
++ if (cores_round) {
++ // now search for all cpus
++ cores_round = 0;
++ goto again;
++ }
++
++out:
++ raw_spin_lock(&this_rq->lock);
++
++ /*
++ * While browsing the domains, we released the rq lock, a task could
++ * have been enqueued in the meantime. Since we're not going idle,
++ * pretend we pulled a task.
++ */
++ if (this_rq->cfs.h_nr_running && !pulled_task)
++ pulled_task = 1;
++
++ /* Is there a task of a high priority class? */
++ if (this_rq->nr_running != this_rq->cfs.h_nr_running)
++ pulled_task = -1;
++
++ if (pulled_task)
++ this_rq->idle_stamp = 0;
++
++ rq_repin_lock(this_rq, rf);
++
++ return pulled_task;
++}
++#else
++static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+ {
+ unsigned long next_balance = jiffies + HZ;
+ int this_cpu = this_rq->cpu;
+@@ -10583,7 +11275,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+ */
+ rq_unpin_lock(this_rq, rf);
+
+- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
++ if (
++#if !defined(CONFIG_CACULE_SCHED)
++ this_rq->avg_idle < sysctl_sched_migration_cost ||
++#endif
+ !READ_ONCE(this_rq->rd->overload)) {
+
+ rcu_read_lock();
+@@ -10705,6 +11400,217 @@ void trigger_load_balance(struct rq *rq)
+
+ nohz_balancer_kick(rq);
+ }
++#endif
++
++#ifdef CONFIG_CACULE_RDB
++static int
++idle_try_pull_any(struct cfs_rq *cfs_rq)
++{
++ struct task_struct *p = NULL;
++ struct rq *this_rq = rq_of(cfs_rq), *src_rq;
++ int dst_cpu = cpu_of(this_rq);
++ int src_cpu;
++ struct rq_flags rf;
++ int pulled = 0;
++ int cores_round = 1;
++
++again:
++ for_each_online_cpu(src_cpu) {
++
++ if (src_cpu == dst_cpu)
++ continue;
++
++ if (cores_round && !cpus_share_cache(src_cpu, dst_cpu))
++ continue;
++
++ src_rq = cpu_rq(src_cpu);
++
++ if (src_rq->nr_running < 2
++ || !(READ_ONCE(src_rq->to_migrate_task)))
++ continue;
++
++ rq_lock_irqsave(src_rq, &rf);
++ update_rq_clock(src_rq);
++
++ if (src_rq->nr_running < 2 || !(src_rq->to_migrate_task))
++ goto next;
++
++ p = src_rq->to_migrate_task;
++
++ if (can_migrate_task(p, dst_cpu, src_rq)) {
++ pull_from_unlock(this_rq, src_rq, &rf, p, dst_cpu);
++ pulled = 1;
++ goto out;
++ }
++
++next:
++ rq_unlock(src_rq, &rf);
++ local_irq_restore(rf.flags);
++ }
++
++ if (cores_round) {
++ // now search for all cpus
++ cores_round = 0;
++ goto again;
++ }
++
++out:
++ return pulled;
++}
++
++
++static int
++try_pull_higher_IS(struct rq *this_rq)
++{
++ struct rq *max_rq;
++ int dst_cpu = cpu_of(this_rq);
++
++ max_rq = find_max_IS_rq(this_rq, dst_cpu);
++
++ if (!max_rq)
++ return 0;
++
++ if (try_pull_from(max_rq, this_rq))
++ return 1;
++
++ return 0;
++}
++
++static void try_push_any(struct rq *this_rq)
++{
++ struct task_struct *p = NULL;
++ struct rq *dst_rq;
++ int dst_cpu;
++ int src_cpu = cpu_of(this_rq);
++ int cores_round = 1;
++
++again:
++ for_each_online_cpu(dst_cpu) {
++
++ if (dst_cpu == src_cpu)
++ continue;
++
++ if (cores_round && !cpus_share_cache(src_cpu, dst_cpu))
++ continue;
++
++ dst_rq = cpu_rq(dst_cpu);
++
++ if (dst_rq->nr_running >= this_rq->nr_running - 1)
++ continue;
++
++ // lock this rq
++ raw_spin_lock(&this_rq->lock);
++ update_rq_clock(this_rq);
++
++ if (!this_rq->to_migrate_task) {
++ // unlock this rq
++ raw_spin_unlock(&this_rq->lock);
++ return;
++ }
++
++ p = this_rq->to_migrate_task;
++
++ if (can_migrate_task(p, dst_cpu, this_rq)) {
++ push_to_unlock(this_rq, dst_rq, p, dst_cpu);
++ return;
++ }
++
++ // unlock this rq
++ raw_spin_unlock(&this_rq->lock);
++ }
++
++ if (cores_round) {
++ // second round: search all CPUs
++ cores_round = 0;
++ goto again;
++ }
++}
++
++static void try_pull_any(struct rq *this_rq)
++{
++ struct task_struct *p = NULL;
++ struct rq *src_rq;
++ int dst_cpu = cpu_of(this_rq);
++ int src_cpu;
++ struct rq_flags src_rf;
++ int cores_round = 1;
++ unsigned int this_max_IS = this_rq->max_IS_score;
++
++again:
++ for_each_online_cpu(src_cpu) {
++
++ if (src_cpu == dst_cpu)
++ continue;
++
++ if (cores_round && !cpus_share_cache(src_cpu, dst_cpu))
++ continue;
++
++ src_rq = cpu_rq(src_cpu);
++
++ p = READ_ONCE(src_rq->to_migrate_task);
++ if (src_rq->nr_running < 2 || !p
++ || READ_ONCE(src_rq->max_IS_score) >= this_max_IS)
++ continue;
++
++ rq_lock_irqsave(src_rq, &src_rf);
++ update_rq_clock(src_rq);
++
++ if (src_rq->nr_running < 2 || !(src_rq->to_migrate_task)
++ || src_rq->max_IS_score >= this_max_IS)
++ goto next;
++
++ p = src_rq->to_migrate_task;
++
++ if (can_migrate_task(p, dst_cpu, src_rq)) {
++ pull_from_unlock(this_rq, src_rq, &src_rf, p, dst_cpu);
++ return;
++ }
++
++next:
++ rq_unlock(src_rq, &src_rf);
++ local_irq_restore(src_rf.flags);
++ }
++
++ if (cores_round) {
++ // second round: search all CPUs
++ cores_round = 0;
++ goto again;
++ }
++}
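
The pull and push loops above all share a two-round shape: round one only
considers CPUs that share a cache with this CPU (cpus_share_cache()), and
only if that finds nothing does round two widen to every online CPU.
Scanning cache-local CPUs first keeps a migrated task's working set warm
whenever possible. A minimal user-space sketch of the pattern, where
NR_CPUS, shares_cache() and is_candidate() are invented stand-ins for the
real topology and migration checks:

/*
 * Two-round search: prefer cache-local CPUs, then fall back to all.
 * Everything here is an assumption for the sketch, not a kernel API.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

static bool shares_cache(int a, int b) { return a / 4 == b / 4; } /* assumed: two 4-core complexes */
static bool is_candidate(int cpu)      { return cpu == 6; }       /* assumed: CPU 6 has pullable work */

static int two_round_search(int this_cpu)
{
	int cores_round = 1, cpu;
again:
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == this_cpu)
			continue;
		if (cores_round && !shares_cache(cpu, this_cpu))
			continue;               /* round one: stay cache-local */
		if (is_candidate(cpu))
			return cpu;
	}
	if (cores_round) {
		cores_round = 0;                /* round two: search all CPUs */
		goto again;
	}
	return -1;
}

int main(void)
{
	printf("picked CPU %d\n", two_round_search(0)); /* prints 6 via the fallback round */
	return 0;
}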
++
++static inline void
++active_balance(struct rq *rq)
++{
++ if (rq->nr_running < 2)
++ try_pull_higher_IS(rq);
++ else {
++ try_push_any(rq);
++ try_pull_any(rq);
++ }
++}
++
++void trigger_load_balance(struct rq *rq)
++{
++ unsigned long interval;
++
++#ifdef CONFIG_RDB_INTERVAL
++ if (time_before(jiffies, rq->next_balance))
++ return;
++#endif
++
++ if (rq->idle_balance)
++ idle_try_pull_any(&rq->cfs);
++ else {
++ active_balance(rq);
++
++#ifdef CONFIG_RDB_INTERVAL
++ /* scale ms to jiffies */
++ interval = msecs_to_jiffies(CONFIG_RDB_INTERVAL);
++ rq->next_balance = jiffies + interval;
++#endif
++ }
++}
++#endif
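
When CONFIG_RDB_INTERVAL is set, trigger_load_balance() above rate-limits
itself with a jiffies stamp: it returns early until rq->next_balance
expires, then rearms the stamp after balancing. A user-space sketch of the
throttle, assuming HZ=300 as in this config and an invented 19 ms interval:

/*
 * Jiffies-based throttle. jiffies is advanced by hand here, and this
 * msecs_to_jiffies() only approximates the kernel helper (round up).
 */
#include <stdio.h>

#define HZ              300
#define RDB_INTERVAL_MS 19   /* assumed example value for CONFIG_RDB_INTERVAL */

static unsigned long jiffies;
static unsigned long next_balance;

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;
}

static void maybe_balance(void)
{
	if (jiffies < next_balance)     /* time_before(jiffies, rq->next_balance) */
		return;                 /* throttled */

	printf("tick %lu: balance\n", jiffies);
	next_balance = jiffies + msecs_to_jiffies(RDB_INTERVAL_MS);
}

int main(void)
{
	for (jiffies = 0; jiffies < 20; jiffies++)
		maybe_balance();        /* balances at ticks 0, 6, 12, 18 */
	return 0;
}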
+
+ static void rq_online_fair(struct rq *rq)
+ {
+@@ -10741,6 +11647,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ entity_tick(cfs_rq, se, queued);
+ }
+
++#ifdef CONFIG_CACULE_RDB
++ update_IS(rq);
++#endif
++
+ if (static_branch_unlikely(&sched_numa_balancing))
+ task_tick_numa(rq, curr);
+
+@@ -10748,11 +11658,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ update_overutilized_status(task_rq(curr));
+ }
+
++#ifdef CONFIG_CACULE_SCHED
+ /*
+ * called on fork with the child task as argument from the parent's context
+ * - child not yet on the tasklist
+ * - preemption disabled
+ */
++static void task_fork_fair(struct task_struct *p)
++{
++ struct cfs_rq *cfs_rq;
++ struct sched_entity *curr;
++ struct rq *rq = this_rq();
++ struct rq_flags rf;
++
++ rq_lock(rq, &rf);
++ update_rq_clock(rq);
++
++ cfs_rq = task_cfs_rq(current);
++ curr = cfs_rq->curr;
++ if (curr)
++ update_curr(cfs_rq);
++
++ rq_unlock(rq, &rf);
++}
++#else
+ static void task_fork_fair(struct task_struct *p)
+ {
+ struct cfs_rq *cfs_rq;
+@@ -10783,6 +11712,7 @@ static void task_fork_fair(struct task_struct *p)
+ se->vruntime -= cfs_rq->min_vruntime;
+ rq_unlock(rq, &rf);
+ }
++#endif /* CONFIG_CACULE_SCHED */
+
+ /*
+ * Priority of the task has changed. Check to see if we preempt
+@@ -10901,6 +11831,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+ static void detach_task_cfs_rq(struct task_struct *p)
+ {
+ struct sched_entity *se = &p->se;
++
++#if !defined(CONFIG_CACULE_SCHED)
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ if (!vruntime_normalized(p)) {
+@@ -10911,6 +11843,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+ place_entity(cfs_rq, se, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+ }
++#endif
+
+ detach_entity_cfs_rq(se);
+ }
+@@ -10918,12 +11851,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+ static void attach_task_cfs_rq(struct task_struct *p)
+ {
+ struct sched_entity *se = &p->se;
++
++#if !defined(CONFIG_CACULE_SCHED)
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
++#endif
+
+ attach_entity_cfs_rq(se);
+
++#if !defined(CONFIG_CACULE_SCHED)
+ if (!vruntime_normalized(p))
+ se->vruntime += cfs_rq->min_vruntime;
++#endif
+ }
+
+ static void switched_from_fair(struct rq *rq, struct task_struct *p)
+@@ -10979,13 +11917,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+ void init_cfs_rq(struct cfs_rq *cfs_rq)
+ {
+ cfs_rq->tasks_timeline = RB_ROOT_CACHED;
++
++#if !defined(CONFIG_CACULE_SCHED)
+ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+ #ifndef CONFIG_64BIT
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+ #endif
++#endif /* CONFIG_CACULE_SCHED */
++
+ #ifdef CONFIG_SMP
+ raw_spin_lock_init(&cfs_rq->removed.lock);
+ #endif
++
++#ifdef CONFIG_CACULE_SCHED
++ cfs_rq->head = NULL;
++ cfs_rq->tail = NULL;
++#endif
+ }
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+@@ -11310,7 +12257,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
+ __init void init_sched_fair_class(void)
+ {
+ #ifdef CONFIG_SMP
++#if !defined(CONFIG_CACULE_RDB)
+ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
++#endif
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ nohz.next_balance = jiffies;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 39112ac7ab34..5881814c7e1c 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -158,6 +158,11 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
+ */
+ #define RUNTIME_INF ((u64)~0ULL)
+
++#ifdef CONFIG_CACULE_SCHED
++#define YIELD_MARK 0x8000000000000000ULL
++#define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL
++#endif
++
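
YIELD_MARK sets, and YIELD_UNMARK clears, the top bit of a task's 64-bit
vruntime. Inflating vruntime this way drives the interactivity score toward
its maximum, so a task that yields sorts behind everything else until the
mark is removed; clearing the bit restores the original value exactly, since
a real vruntime never has the top bit set. A tiny standalone sketch (the
sample vruntime is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define YIELD_MARK   0x8000000000000000ULL
#define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL

int main(void)
{
	uint64_t vruntime = 123456789ULL;

	vruntime |= YIELD_MARK;         /* mark: task now scores least interactive */
	printf("marked:   %#llx\n", (unsigned long long)vruntime);

	vruntime &= YIELD_UNMARK;       /* unmark: original value restored */
	printf("restored: %llu\n", (unsigned long long)vruntime);
	return 0;
}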
+ static inline int idle_policy(int policy)
+ {
+ return policy == SCHED_IDLE;
+@@ -524,10 +529,13 @@ struct cfs_rq {
+ unsigned int idle_h_nr_running; /* SCHED_IDLE */
+
+ u64 exec_clock;
++
++#if !defined(CONFIG_CACULE_SCHED)
+ u64 min_vruntime;
+ #ifndef CONFIG_64BIT
+ u64 min_vruntime_copy;
+ #endif
++#endif /* CONFIG_CACULE_SCHED */
+
+ struct rb_root_cached tasks_timeline;
+
+@@ -536,9 +544,14 @@ struct cfs_rq {
+ * It is set to NULL otherwise (i.e when none are currently running).
+ */
+ struct sched_entity *curr;
++#ifdef CONFIG_CACULE_SCHED
++ struct cacule_node *head;
++ struct cacule_node *tail;
++#else
+ struct sched_entity *next;
+ struct sched_entity *last;
+ struct sched_entity *skip;
++#endif // CONFIG_CACULE_SCHED
+
+ #ifdef CONFIG_SCHED_DEBUG
+ unsigned int nr_spread_over;
+@@ -933,6 +946,11 @@ struct rq {
+ struct rt_rq rt;
+ struct dl_rq dl;
+
++#ifdef CONFIG_CACULE_RDB
++ unsigned int max_IS_score;
++ struct task_struct *to_migrate_task;
++#endif
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ /* list of leaf cfs_rq on this CPU: */
+ struct list_head leaf_cfs_rq_list;
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index b9306d2bb426..20f07aa87b8e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1659,6 +1659,59 @@ static struct ctl_table kern_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++#ifdef CONFIG_CACULE_SCHED
++ {
++ .procname = "sched_interactivity_factor",
++ .data = &interactivity_factor,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_max_lifetime_ms",
++ .data = &cacule_max_lifetime,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_cache_factor",
++ .data = &cache_factor,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_cache_divisor",
++ .data = &cache_divisor,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_starve_factor",
++ .data = &starve_factor,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_starve_divisor",
++ .data = &starve_divisor,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_cacule_yield",
++ .data = &cacule_yield,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &one_ul,
++ },
++#endif
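
Each entry above maps one CacULE tunable to a file under /proc/sys/kernel/,
so sysctl kernel.sched_interactivity_factor=50 and writing the file directly
are equivalent. A sketch of the latter (the value 50 is arbitrary and root
is required):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_interactivity_factor", "w");

	if (!f) {
		perror("fopen");        /* needs root and a CacULE kernel */
		return 1;
	}
	fprintf(f, "%d\n", 50);
	fclose(f);
	return 0;
}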
+ #ifdef CONFIG_SCHED_DEBUG
+ {
+ .procname = "sched_min_granularity_ns",
+
diff --git a/cacule-5.12.patch b/cacule-5.12.patch
deleted file mode 100644
index a74c39f5a621..000000000000
--- a/cacule-5.12.patch
+++ /dev/null
@@ -1,1338 +0,0 @@
-diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 1d56a6b73a4e..4d55ff02310c 100644
---- a/Documentation/admin-guide/sysctl/kernel.rst
-+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
- requirements for EAS but you do not want to use it, change
- this value to 0.
-
-+sched_interactivity_factor (CacULE scheduler only)
-+==================================================
-+Sets the value *m* for interactivity score calculations. See
-+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
- sched_schedstats
- ================
-diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
-new file mode 100644
-index 000000000000..82b0847c468a
---- /dev/null
-+++ b/Documentation/scheduler/sched-CacULE.rst
-@@ -0,0 +1,76 @@
-+======================================
-+The CacULE Scheduler by Hamad Al Marri.
-+======================================
-+
-+1. Overview
-+=============
-+
-+The CacULE CPU scheduler is based on the interactivity score mechanism.
-+The interactivity score is inspired by the ULE scheduler (FreeBSD
-+scheduler).
-+
-+1.1 About CacULE Scheduler
-+--------------------------
-+
-+ - Each CPU has its own runqueue.
-+
-+ - The NORMAL runqueue is a linked list of sched_entities (instead of an RB-tree).
-+
-+ - RT and other runqueues are just the same as the CFS's.
-+
-+ - A woken task preempts the currently running task if it has a better
-+ (more interactive) interactivity score.
-+
-+
-+1.2. Complexity
-+----------------
-+
-+The complexity of enqueuing and dequeuing a task is O(1).
-+
-+The complexity of picking the next task is O(n), where n is the number of
-+tasks in the runqueue (each CPU has its own runqueue).
-+
-+Note: O(n) sounds scary, but on a typical 4-CPU machine used for desktop or
-+mobile workloads, the number of runnable tasks rarely exceeds 10 at
-+pick-next time - idle (sleeping) tasks are excluded, since they are dequeued
-+when sleeping and enqueued again when they wake up.
-+
-+
-+2. The CacULE Interactivity Score
-+=======================================================
-+
-+The interactivity score is inspired by the ULE scheduler (FreeBSD scheduler).
-+For more information see: https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-+CacULE does not replace CFS with ULE; it only changes CFS's pick-next-task
-+mechanism to ULE's interactivity score mechanism.
-+
-+
-+2.3 sched_interactivity_factor
-+==============================
-+Sets the value *m* for interactivity score calculations. See Figure 1 in
-+https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-+The default value in CacULE is 10, which means that the Maximum Interactive
-+Score is 20 (since m = Maximum Interactive Score / 2).
-+You can tune sched_interactivity_factor with the sysctl command:
-+
-+ sysctl kernel.sched_interactivity_factor=50
-+
-+This command changes the sched_interactivity_factor from 10 to 50.
-+
-+
-+3. Scheduling policies
-+=======================
-+
-+CacULE, same as CFS, implements three scheduling policies (a selection
-+sketch follows this list):
-+
-+ - SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
-+ policy that is used for regular tasks.
-+
-+ - SCHED_BATCH: Does not preempt nearly as often as regular tasks
-+ would, thereby allowing tasks to run longer and make better use of
-+ caches but at the cost of interactivity. This is well suited for
-+ batch jobs.
-+
-+ - SCHED_IDLE: This is even weaker than nice 19, but it is not a true
-+ idle timer scheduler, in order to avoid getting into priority
-+ inversion problems which would deadlock the machine.
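
A task opts into one of these policies through the standard
sched_setscheduler() interface. A minimal sketch that moves the calling
process to SCHED_BATCH (static priority must be 0 for the non-realtime
policies; error handling is reduced to perror):

#define _GNU_SOURCE     /* exposes SCHED_BATCH in <sched.h> */
#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_BATCH, &param) == -1) { /* 0 = this process */
		perror("sched_setscheduler");
		return 1;
	}
	return 0;
}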
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index edc01bcefbfd..9e16c9dd3d78 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -458,10 +458,22 @@ struct sched_statistics {
- #endif
- };
-
-+#ifdef CONFIG_CACULE_SCHED
-+struct cacule_node {
-+ struct cacule_node* next;
-+ struct cacule_node* prev;
-+ u64 cacule_start_time;
-+ u64 vruntime;
-+};
-+#endif
-+
- struct sched_entity {
- /* For load-balancing: */
- struct load_weight load;
- struct rb_node run_node;
-+#ifdef CONFIG_CACULE_SCHED
-+ struct cacule_node cacule_node;
-+#endif
- struct list_head group_node;
- unsigned int on_rq;
-
-diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..4cf162341ab8 100644
---- a/include/linux/sched/sysctl.h
-+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
- extern unsigned int sysctl_sched_wakeup_granularity;
- extern unsigned int sysctl_sched_child_runs_first;
-
-+#ifdef CONFIG_CACULE_SCHED
-+extern unsigned int interactivity_factor;
-+extern unsigned int interactivity_threshold;
-+extern unsigned int cacule_max_lifetime;
-+#endif
-+
- enum sched_tunable_scaling {
- SCHED_TUNABLESCALING_NONE,
- SCHED_TUNABLESCALING_LOG,
-diff --git a/init/Kconfig b/init/Kconfig
-index 5f5c776ef192..92330b5d8897 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -830,6 +830,17 @@ config UCLAMP_BUCKETS_COUNT
-
- endmenu
-
-+config CACULE_SCHED
-+ bool "CacULE CPU scheduler"
-+ default y
-+ help
-+ The CacULE CPU scheduler is based on the interactivity score mechanism.
-+ The interactivity score is inspired by the ULE scheduler (FreeBSD
-+ scheduler).
-+
-+ If unsure, say Y here.
-+
-+
- #
- # For architectures that want to enable the support for NUMA-affine scheduler
- # balancing logic:
-@@ -1213,6 +1224,7 @@ config SCHED_AUTOGROUP
- select CGROUPS
- select CGROUP_SCHED
- select FAIR_GROUP_SCHED
-+ default y
- help
- This option optimizes the scheduler for common desktop workloads by
- automatically creating and populating task groups. This separation
-diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
-index 38ef6d06888e..865f8dbddca8 100644
---- a/kernel/Kconfig.hz
-+++ b/kernel/Kconfig.hz
-@@ -46,6 +46,9 @@ choice
- 1000 Hz is the preferred choice for desktop systems and other
- systems requiring fast interactive responses to events.
-
-+ config HZ_2000
-+ bool "2000 HZ"
-+
- endchoice
-
- config HZ
-@@ -54,6 +57,7 @@ config HZ
- default 250 if HZ_250
- default 300 if HZ_300
- default 1000 if HZ_1000
-+ default 2000 if HZ_2000
-
- config SCHED_HRTICK
- def_bool HIGH_RES_TIMERS
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 814200541f8f..353f88cd05ca 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3555,6 +3555,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->se.prev_sum_exec_runtime = 0;
- p->se.nr_migrations = 0;
- p->se.vruntime = 0;
-+
-+#ifdef CONFIG_CACULE_SCHED
-+ p->se.cacule_node.vruntime = 0;
-+#endif
-+
- INIT_LIST_HEAD(&p->se.group_node);
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3840,6 +3845,10 @@ void wake_up_new_task(struct task_struct *p)
- update_rq_clock(rq);
- post_init_entity_util_avg(p);
-
-+#ifdef CONFIG_CACULE_SCHED
-+ p->se.cacule_node.cacule_start_time = sched_clock();
-+#endif
-+
- activate_task(rq, p, ENQUEUE_NOCLOCK);
- trace_sched_wakeup_new(p);
- check_preempt_curr(rq, p, WF_FORK);
-@@ -8094,6 +8103,10 @@ void __init sched_init(void)
- BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
- #endif
-
-+#ifdef CONFIG_CACULE_SCHED
-+ printk(KERN_INFO "CacULE CPU scheduler v5.12-r2 by Hamad Al Marri.\n");
-+#endif
-+
- wait_bit_init();
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 9e0a915e6eb8..77ac9cd82113 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
- void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
- {
-- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
-- spread, rq0_min_vruntime, spread0;
-+ s64 MIN_vruntime = -1, max_vruntime = -1,
-+#if !defined(CONFIG_CACULE_SCHED)
-+ min_vruntime, rq0_min_vruntime, spread0,
-+#endif
-+ spread;
- struct rq *rq = cpu_rq(cpu);
- struct sched_entity *last;
- unsigned long flags;
-@@ -576,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
- last = __pick_last_entity(cfs_rq);
- if (last)
- max_vruntime = last->vruntime;
-+#if !defined(CONFIG_CACULE_SCHED)
- min_vruntime = cfs_rq->min_vruntime;
- rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
-+#endif
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
- SPLIT_NS(MIN_vruntime));
-+#if !defined(CONFIG_CACULE_SCHED)
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
- SPLIT_NS(min_vruntime));
-+#endif
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
- SPLIT_NS(max_vruntime));
- spread = max_vruntime - MIN_vruntime;
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
- SPLIT_NS(spread));
-+#if !defined(CONFIG_CACULE_SCHED)
- spread0 = min_vruntime - rq0_min_vruntime;
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
- SPLIT_NS(spread0));
-+#endif
- SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
- cfs_rq->nr_spread_over);
- SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 47fcc3fe9dc5..c0a60cc8d9ce 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -19,6 +19,10 @@
- *
- * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
-+ *
-+ * CacULE enhancements CPU cache and scheduler based on
-+ * Interactivity Score.
-+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
- */
- #include "sched.h"
-
-@@ -82,7 +86,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
- unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
- static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
-+#ifdef CONFIG_CACULE_SCHED
-+const_debug unsigned int sysctl_sched_migration_cost = 200000UL;
-+#else
- const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-+#endif
-
- int sched_thermal_decay_shift;
- static int __init setup_sched_thermal_decay_shift(char *str)
-@@ -113,6 +121,17 @@ int __weak arch_asym_cpu_priority(int cpu)
- */
- #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
-+#endif
-+#ifdef CONFIG_CACULE_SCHED
-+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
-+unsigned int __read_mostly interactivity_factor = 32768;
-+
-+#ifdef CONFIG_FAIR_GROUP_SCHED
-+unsigned int __read_mostly interactivity_threshold = 0;
-+#else
-+unsigned int __read_mostly interactivity_threshold = 1000;
-+#endif
-+
- #endif
-
- #ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
- const struct sched_class fair_sched_class;
-
-+
-+#ifdef CONFIG_CACULE_SCHED
-+static inline struct sched_entity *se_of(struct cacule_node *cn)
-+{
-+ return container_of(cn, struct sched_entity, cacule_node);
-+}
-+#endif
-+
- /**************************************************************
- * CFS operations on generic schedulable entities:
- */
-@@ -512,7 +539,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
- /**************************************************************
- * Scheduling class tree data structure manipulation methods:
- */
--
-+#if !defined(CONFIG_CACULE_SCHED)
- static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
- {
- s64 delta = (s64)(vruntime - max_vruntime);
-@@ -575,7 +602,204 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
- {
- return entity_before(__node_2_se(a), __node_2_se(b));
- }
-+#endif /* CONFIG_CACULE_SCHED */
-+
-+#ifdef CONFIG_CACULE_SCHED
-+static unsigned int
-+calc_interactivity(u64 now, struct cacule_node *se)
-+{
-+ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
-+ unsigned int score_se;
-+
-+ /*
-+ * in case of vruntime == 0, a logical OR with 1 makes
-+ * sure that the least significant bit is 1
-+ */
-+ l_se = now - se->cacule_start_time;
-+ vr_se = se->vruntime | 1;
-+ u64_factor_m = interactivity_factor;
-+ _2m = u64_factor_m << 1;
-+
-+ /* safety check */
-+ if (likely(l_se > vr_se))
-+ sleep_se = (l_se - vr_se) | 1;
-+
-+ if (sleep_se >= vr_se)
-+ score_se = u64_factor_m / (sleep_se / vr_se);
-+ else
-+ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
-+
-+ return score_se;
-+}
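
In plain terms, the score compares sleep time against run time (vruntime)
over the task's lifetime: with the default factor m = 32768, a task that
mostly sleeps scores in 0..m (more interactive) and a task that mostly runs
scores in m..2m (less interactive). A standalone sketch of the same math
with invented numbers:

#include <stdio.h>
#include <stdint.h>

static unsigned int calc_interactivity(uint64_t life, uint64_t vruntime,
				       uint64_t m)
{
	uint64_t vr = vruntime | 1, sleep = 1, _2m = m << 1;

	if (life > vr)
		sleep = (life - vr) | 1;

	if (sleep >= vr)
		return m / (sleep / vr);        /* mostly sleeping: 0..m  */
	return _2m - (m / (vr / sleep));        /* mostly running:  m..2m */
}

int main(void)
{
	/* 10 s of life, ~1 s of it running: score = m/8 = 4096 */
	printf("sleeper: %u\n",
	       calc_interactivity(10000000000ULL, 1000000000ULL, 32768));
	/* 10 s of life, ~9 s of it running: score = 2m - m/9 = 61896 */
	printf("hog:     %u\n",
	       calc_interactivity(10000000000ULL, 9000000000ULL, 32768));
	return 0;
}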
-+
-+static inline int is_interactive(struct cacule_node *cn)
-+{
-+ if (!interactivity_threshold || se_of(cn)->vruntime == 0)
-+ return 0;
-+
-+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
-+}
-+
-+static inline int cn_has_idle_policy(struct cacule_node *se)
-+{
-+ return task_has_idle_policy(task_of(se_of(se)));
-+}
-+
-+static inline int
-+entity_before_cached(u64 now, unsigned int score_curr, struct cacule_node *se)
-+{
-+ unsigned int score_se;
-+ int diff;
-+
-+ /*
-+ * if se has the idle class, there is no need to
-+ * calculate, since we know that score_curr
-+ * is the score of a non-idle-class task
-+ */
-+ if (cn_has_idle_policy(se))
-+ return -1;
-+
-+ score_se = calc_interactivity(now, se);
-+ diff = score_se - score_curr;
-+
-+ if (diff <= 0)
-+ return 1;
-+
-+ return -1;
-+}
-+
-+/*
-+ * Does se have a lower interactivity score (i.e. is it more interactive)
-+ * than curr? If yes, return 1, otherwise return -1.
-+ * se is before curr if se has the lower interactivity score:
-+ * the lower the score, the more interactive the task.
-+ */
-+static inline int
-+entity_before(u64 now, struct cacule_node *curr, struct cacule_node *se)
-+{
-+ unsigned int score_curr, score_se;
-+ int diff;
-+ int is_curr_idle = cn_has_idle_policy(curr);
-+ int is_se_idle = cn_has_idle_policy(se);
-+
-+ /* if curr is normal but se is idle class, then no */
-+ if (!is_curr_idle && is_se_idle)
-+ return -1;
-+
-+ /* if curr is idle class and se is normal, then yes */
-+ if (is_curr_idle && !is_se_idle)
-+ return 1;
-+
-+ score_curr = calc_interactivity(now, curr);
-+ score_se = calc_interactivity(now, se);
-+
-+ diff = score_se - score_curr;
-+
-+ if (diff < 0)
-+ return 1;
-+
-+ return -1;
-+}
-
-+/*
-+ * Enqueue an entity
-+ */
-+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
-+{
-+ struct cacule_node *se = &(_se->cacule_node);
-+ struct cacule_node *iter, *next = NULL;
-+ u64 now = sched_clock();
-+ unsigned int score_se = calc_interactivity(now, se);
-+ int is_idle_task = cn_has_idle_policy(se);
-+
-+ se->next = NULL;
-+ se->prev = NULL;
-+
-+ if (likely(cfs_rq->head)) {
-+
-+ // start from tail
-+ iter = cfs_rq->tail;
-+
-+ /*
-+ * if this task has idle class, then
-+ * push it to the tail right away
-+ */
-+ if (is_idle_task)
-+ goto to_tail;
-+
-+ /* here we know that this task isn't idle class */
-+
-+ // does se have higher IS than iter?
-+ while (iter && entity_before_cached(now, score_se, iter) == -1) {
-+ next = iter;
-+ iter = iter->prev;
-+ }
-+
-+ // se in tail position
-+ if (iter == cfs_rq->tail) {
-+to_tail:
-+ cfs_rq->tail->next = se;
-+ se->prev = cfs_rq->tail;
-+
-+ cfs_rq->tail = se;
-+ }
-+ // else if neither head nor tail, insert se after iter
-+ else if (iter) {
-+ se->next = next;
-+ se->prev = iter;
-+
-+ iter->next = se;
-+ next->prev = se;
-+ }
-+ // insert se at head
-+ else {
-+ se->next = cfs_rq->head;
-+ cfs_rq->head->prev = se;
-+
-+ // lastly reset the head
-+ cfs_rq->head = se;
-+ }
-+ } else {
-+ // if empty rq
-+ cfs_rq->head = se;
-+ cfs_rq->tail = se;
-+ }
-+}
-+
-+static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
-+{
-+ struct cacule_node *se = &(_se->cacule_node);
-+
-+ // if only one se in rq
-+ if (cfs_rq->head == cfs_rq->tail) {
-+ cfs_rq->head = NULL;
-+ cfs_rq->tail = NULL;
-+
-+ } else if (se == cfs_rq->head) {
-+ // if it is the head
-+ cfs_rq->head = cfs_rq->head->next;
-+ cfs_rq->head->prev = NULL;
-+ } else if (se == cfs_rq->tail) {
-+ // if it is the tail
-+ cfs_rq->tail = cfs_rq->tail->prev;
-+ cfs_rq->tail->next = NULL;
-+ } else {
-+ // if in the middle
-+ struct cacule_node *prev = se->prev;
-+ struct cacule_node *next = se->next;
-+
-+ prev->next = next;
-+
-+ if (next)
-+ next->prev = prev;
-+ }
-+}
-+
-+struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
-+{
-+ return se_of(cfs_rq->head);
-+}
-+#else
- /*
- * Enqueue an entity into the rb-tree:
- */
-@@ -608,16 +832,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
- return __node_2_se(next);
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- #ifdef CONFIG_SCHED_DEBUG
- struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
- {
-+#ifdef CONFIG_CACULE_SCHED
-+ if (!cfs_rq->tail)
-+ return NULL;
-+
-+ return se_of(cfs_rq->tail);
-+#else
- struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
- if (!last)
- return NULL;
-
- return __node_2_se(last);
-+#endif /* CONFIG_CACULE_SCHED */
- }
-
- /**************************************************************
-@@ -712,6 +944,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- return slice;
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * We calculate the vruntime slice of a to-be-inserted task.
- *
-@@ -721,6 +954,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- return calc_delta_fair(sched_slice(cfs_rq, se), se);
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- #include "pelt.h"
- #ifdef CONFIG_SMP
-@@ -828,14 +1062,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
- }
- #endif /* CONFIG_SMP */
-
-+#ifdef CONFIG_CACULE_SCHED
-+static void normalize_lifetime(u64 now, struct sched_entity *se)
-+{
-+ struct cacule_node *cn = &se->cacule_node;
-+ u64 max_life_ns, life_time;
-+ s64 diff;
-+
-+ /*
-+ * left shift 20 bits is approximately = * 1000000
-+ * we don't need the precision of life time
-+ * Ex. for 30s, with left shift (20bits) == 31.457s
-+ */
-+ max_life_ns = ((u64) cacule_max_lifetime) << 20;
-+ life_time = now - cn->cacule_start_time;
-+ diff = life_time - max_life_ns;
-+
-+ if (diff > 0) {
-+ // multiply life_time by 1024 for more precision
-+ u64 old_hrrn_x = (life_time << 7) / ((cn->vruntime >> 3) | 1);
-+
-+ // reset life to half max_life (i.e ~15s)
-+ cn->cacule_start_time = now - (max_life_ns >> 1);
-+
-+ // avoid division by zero
-+ if (old_hrrn_x == 0) old_hrrn_x = 1;
-+
-+ // reset vruntime based on old hrrn ratio
-+ cn->vruntime = (max_life_ns << 9) / old_hrrn_x;
-+ }
-+}
-+#endif /* CONFIG_CACULE_SCHED */
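
The shifts in normalize_lifetime() trade precision for cheap arithmetic:
<< 20 multiplies by 1048576 rather than 1000000, so the configured maximum
lifetime runs about 4.9% long, matching the in-code 30 s -> 31.457 s
example. A quick check with the default 22000 ms:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ms        = 22000;             /* default cacule_max_lifetime */
	uint64_t approx_ns = ms << 20;          /* 23068672000 ns ~ 23.07 s    */
	uint64_t exact_ns  = ms * 1000000ULL;   /* 22000000000 ns = 22.00 s    */

	printf("approx: %.3f s\n", approx_ns / 1e9);
	printf("exact:  %.3f s\n", exact_ns / 1e9);
	return 0;
}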
-+
- /*
- * Update the current task's runtime statistics.
- */
- static void update_curr(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
-+#ifdef CONFIG_CACULE_SCHED
-+ u64 now = sched_clock();
-+ u64 delta_exec, delta_fair;
-+#else
- u64 now = rq_clock_task(rq_of(cfs_rq));
- u64 delta_exec;
-+#endif
-
- if (unlikely(!curr))
- return;
-@@ -852,8 +1123,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq->exec_clock, delta_exec);
-
-+#ifdef CONFIG_CACULE_SCHED
-+ delta_fair = calc_delta_fair(delta_exec, curr);
-+ curr->vruntime += delta_fair;
-+ curr->cacule_node.vruntime += delta_fair;
-+ normalize_lifetime(now, curr);
-+#else
- curr->vruntime += calc_delta_fair(delta_exec, curr);
- update_min_vruntime(cfs_rq);
-+#endif
-
- if (entity_is_task(curr)) {
- struct task_struct *curtask = task_of(curr);
-@@ -1021,7 +1299,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- static inline void
- update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
--
- if (!schedstat_enabled())
- return;
-
-@@ -1053,7 +1330,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- /*
- * We are starting a new run period:
- */
-+#ifdef CONFIG_CACULE_SCHED
-+ se->exec_start = sched_clock();
-+#else
- se->exec_start = rq_clock_task(rq_of(cfs_rq));
-+#endif
- }
-
- /**************************************************
-@@ -4122,7 +4403,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
- static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
--#ifdef CONFIG_SCHED_DEBUG
-+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
- s64 d = se->vruntime - cfs_rq->min_vruntime;
-
- if (d < 0)
-@@ -4133,6 +4414,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
- #endif
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- static void
- place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
- {
-@@ -4164,6 +4446,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
- /* ensure we never gain time by being placed backwards. */
- se->vruntime = max_vruntime(se->vruntime, vruntime);
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4222,18 +4505,23 @@ static inline bool cfs_bandwidth_used(void);
- static void
- enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-+#if !defined(CONFIG_CACULE_SCHED)
- bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
-+#endif
- bool curr = cfs_rq->curr == se;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * If we're the current task, we must renormalise before calling
- * update_curr().
- */
- if (renorm && curr)
- se->vruntime += cfs_rq->min_vruntime;
-+#endif
-
- update_curr(cfs_rq);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Otherwise, renormalise after, such that we're placed at the current
- * moment in time, instead of some random moment in the past. Being
-@@ -4242,6 +4530,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- */
- if (renorm && !curr)
- se->vruntime += cfs_rq->min_vruntime;
-+#endif
-
- /*
- * When enqueuing a sched_entity, we must:
-@@ -4256,8 +4545,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- update_cfs_group(se);
- account_entity_enqueue(cfs_rq, se);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- if (flags & ENQUEUE_WAKEUP)
- place_entity(cfs_rq, se, 0);
-+#endif
-
- check_schedstat_required();
- update_stats_enqueue(cfs_rq, se, flags);
-@@ -4278,6 +4569,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- check_enqueue_throttle(cfs_rq);
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- static void __clear_buddies_last(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
-@@ -4322,6 +4614,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
- if (cfs_rq->skip == se)
- __clear_buddies_skip(se);
- }
-+#endif /* !CONFIG_CACULE_SCHED */
-
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4346,13 +4639,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
- update_stats_dequeue(cfs_rq, se, flags);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- clear_buddies(cfs_rq, se);
-+#endif
-
- if (se != cfs_rq->curr)
- __dequeue_entity(cfs_rq, se);
- se->on_rq = 0;
- account_entity_dequeue(cfs_rq, se);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Normalize after update_curr(); which will also have moved
- * min_vruntime if @se is the one holding it back. But before doing
-@@ -4361,12 +4657,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- */
- if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
-+#endif
-
- /* return excess runtime on last dequeue */
- return_cfs_rq_runtime(cfs_rq);
-
- update_cfs_group(se);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Now advance min_vruntime if @se was the entity holding it back,
- * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4375,8 +4673,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- */
- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
- update_min_vruntime(cfs_rq);
-+#endif
- }
-
-+#ifdef CONFIG_CACULE_SCHED
-+/*
-+ * Preempt the current task with a newly woken task if needed:
-+ */
-+static void
-+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-+{
-+ // preempt if head is more interactive (lower score) than curr
-+ if (entity_before(sched_clock(), &curr->cacule_node, cfs_rq->head) == 1)
-+ resched_curr(rq_of(cfs_rq));
-+}
-+#else
- /*
- * Preempt the current task with a newly woken task if needed:
- */
-@@ -4416,6 +4727,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- if (delta > ideal_runtime)
- resched_curr(rq_of(cfs_rq));
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- static void
- set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4450,6 +4762,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- se->prev_sum_exec_runtime = se->sum_exec_runtime;
- }
-
-+#ifdef CONFIG_CACULE_SCHED
-+static struct sched_entity *
-+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-+{
-+ struct cacule_node *se = cfs_rq->head;
-+
-+ if (unlikely(!se))
-+ se = &curr->cacule_node;
-+ else if (unlikely(curr
-+ && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
-+ se = &curr->cacule_node;
-+
-+ return se_of(se);
-+}
-+#else
- static int
- wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4510,6 +4837,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
- return se;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5612,7 +5940,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- hrtick_update(rq);
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- static void set_next_buddy(struct sched_entity *se);
-+#endif
-
- /*
- * The dequeue_task method is called before nr_running is
-@@ -5644,12 +5974,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- if (cfs_rq->load.weight) {
- /* Avoid re-evaluating load for this entity: */
- se = parent_entity(se);
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Bias pick_next to pick a task from this cfs_rq, as
- * p is sleeping when it is within its sched_slice.
- */
- if (task_sleep && se && !throttled_hierarchy(cfs_rq))
- set_next_buddy(se);
-+#endif
- break;
- }
- flags |= DEQUEUE_SLEEP;
-@@ -5765,6 +6097,7 @@ static unsigned long capacity_of(int cpu)
- return cpu_rq(cpu)->cpu_capacity;
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- static void record_wakee(struct task_struct *p)
- {
- /*
-@@ -5811,6 +6144,7 @@ static int wake_wide(struct task_struct *p)
- return 0;
- return 1;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- /*
- * The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6513,6 +6847,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
- return min_t(unsigned long, util, capacity_orig_of(cpu));
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
- * to @dst_cpu.
-@@ -6762,6 +7097,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
- return -1;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-+
-+#ifdef CONFIG_CACULE_SCHED
-+static int
-+find_least_IS_cpu(struct task_struct *p)
-+{
-+ struct cfs_rq *cfs_rq;
-+ unsigned int max_IS = 0;
-+ unsigned int IS, IS_c, IS_h;
-+ struct sched_entity *curr_se;
-+ struct cacule_node *cn, *head;
-+ int cpu_i;
-+ int new_cpu = -1;
-+
-+ for_each_online_cpu(cpu_i) {
-+ if (!cpumask_test_cpu(cpu_i, p->cpus_ptr))
-+ continue;
-+
-+ cn = NULL;
-+ cfs_rq = &cpu_rq(cpu_i)->cfs;
-+
-+ curr_se = cfs_rq->curr;
-+ head = cfs_rq->head;
-+
-+ if (!curr_se && head)
-+ cn = head;
-+ else if (curr_se && !head)
-+ cn = &curr_se->cacule_node;
-+ else if (curr_se && head) {
-+ IS_c = calc_interactivity(sched_clock(), &curr_se->cacule_node);
-+ IS_h = calc_interactivity(sched_clock(), head);
-+
-+ IS = IS_c > IS_h? IS_c : IS_h;
-+ goto compare;
-+ }
-+
-+ if (!cn)
-+ return cpu_i;
-+
-+ IS = calc_interactivity(sched_clock(), cn);
-+
-+compare:
-+ if (IS > max_IS) {
-+ max_IS = IS;
-+ new_cpu = cpu_i;
-+ }
-+ }
-+
-+ return new_cpu;
-+}
-+#endif
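
find_least_IS_cpu() keeps the CPU whose most pressing entity carries the
numerically largest score; since a larger score means less interactive, the
waking interactive task is placed where it faces the least interactive
competition. The selection itself is a plain argmax; a sketch over an
invented per-CPU score table:

#include <stdio.h>

int main(void)
{
	unsigned int score[4] = { 120, 4000, 900, 2500 };  /* assumed per-CPU IS */
	unsigned int max_IS = 0;
	int cpu, new_cpu = -1;

	for (cpu = 0; cpu < 4; cpu++) {
		if (score[cpu] > max_IS) {
			max_IS = score[cpu];
			new_cpu = cpu;
		}
	}
	printf("placing task on CPU %d\n", new_cpu);       /* CPU 1, score 4000 */
	return 0;
}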
-
- /*
- * select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6786,6 +7172,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
- /* SD_flags and WF_flags share the first nibble */
- int sd_flag = wake_flags & 0xF;
-
-+#ifdef CONFIG_CACULE_SCHED
-+ struct sched_entity *se = &p->se;
-+
-+ if (!is_interactive(&se->cacule_node))
-+ goto cfs_way;
-+
-+ // check first if the prev cpu
-+ // has 0 tasks
-+ if (cpumask_test_cpu(prev_cpu, p->cpus_ptr) &&
-+ cpu_rq(prev_cpu)->cfs.nr_running == 0)
-+ return prev_cpu;
-+
-+ new_cpu = find_least_IS_cpu(p);
-+
-+ if (new_cpu != -1)
-+ return new_cpu;
-+
-+ new_cpu = prev_cpu;
-+cfs_way:
-+#else
- if (wake_flags & WF_TTWU) {
- record_wakee(p);
-
-@@ -6798,6 +7204,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
-
- want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- rcu_read_lock();
- for_each_domain(cpu, tmp) {
-@@ -6844,6 +7251,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
- */
- static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
- {
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * As blocked tasks retain absolute vruntime the migration needs to
- * deal with this by subtracting the old and adding the new
-@@ -6869,6 +7277,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
- se->vruntime -= min_vruntime;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- if (p->on_rq == TASK_ON_RQ_MIGRATING) {
- /*
-@@ -6914,6 +7323,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
- }
- #endif /* CONFIG_SMP */
-
-+#if !defined(CONFIG_CACULE_SCHED)
- static unsigned long wakeup_gran(struct sched_entity *se)
- {
- unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6992,6 +7402,7 @@ static void set_skip_buddy(struct sched_entity *se)
- for_each_sched_entity(se)
- cfs_rq_of(se)->skip = se;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- /*
- * Preempt the current task with a newly woken task if needed:
-@@ -7000,9 +7411,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- {
- struct task_struct *curr = rq->curr;
- struct sched_entity *se = &curr->se, *pse = &p->se;
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int scale = cfs_rq->nr_running >= sched_nr_latency;
- int next_buddy_marked = 0;
-+#endif /* CONFIG_CACULE_SCHED */
-
- if (unlikely(se == pse))
- return;
-@@ -7016,10 +7430,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
- return;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
- set_next_buddy(pse);
- next_buddy_marked = 1;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- /*
- * We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7049,6 +7465,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- find_matching_se(&se, &pse);
- update_curr(cfs_rq_of(se));
- BUG_ON(!pse);
-+
-+#ifdef CONFIG_CACULE_SCHED
-+ if (entity_before(sched_clock(), &se->cacule_node, &pse->cacule_node) == 1)
-+ goto preempt;
-+#else
- if (wakeup_preempt_entity(se, pse) == 1) {
- /*
- * Bias pick_next to pick the sched entity that is
-@@ -7058,11 +7479,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- set_next_buddy(pse);
- goto preempt;
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- return;
-
- preempt:
- resched_curr(rq);
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
-@@ -7077,6 +7501,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
-+#endif /* CONFIG_CACULE_SCHED */
- }
-
- struct task_struct *
-@@ -7251,7 +7676,10 @@ static void yield_task_fair(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- struct sched_entity *se = &curr->se;
-+#endif
-
- /*
- * Are we the only task in the tree?
-@@ -7259,7 +7687,9 @@ static void yield_task_fair(struct rq *rq)
- if (unlikely(rq->nr_running == 1))
- return;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- clear_buddies(cfs_rq, se);
-+#endif
-
- if (curr->policy != SCHED_BATCH) {
- update_rq_clock(rq);
-@@ -7275,7 +7705,9 @@ static void yield_task_fair(struct rq *rq)
- rq_clock_skip_update(rq);
- }
-
-+#if !defined(CONFIG_CACULE_SCHED)
- set_skip_buddy(se);
-+#endif
- }
-
- static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7286,8 +7718,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
- if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
- return false;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /* Tell the scheduler that we'd really like pse to run next. */
- set_next_buddy(se);
-+#endif
-
- yield_task_fair(rq);
-
-@@ -7515,6 +7949,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
- if (env->sd->flags & SD_SHARE_CPUCAPACITY)
- return 0;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /*
- * Buddy candidates are cache hot:
- */
-@@ -7522,6 +7957,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
- return 1;
-+#endif
-
- if (sysctl_sched_migration_cost == -1)
- return 1;
-@@ -10585,9 +11021,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
- if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
- return;
-
-+#if !defined(CONFIG_CACULE_SCHED)
- /* Will wake up very soon. No time for doing anything else*/
- if (this_rq->avg_idle < sysctl_sched_migration_cost)
- return;
-+#endif
-
- /* Don't need to update blocked load of idle CPUs*/
- if (!READ_ONCE(nohz.has_blocked) ||
-@@ -10655,7 +11093,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
- */
- rq_unpin_lock(this_rq, rf);
-
-- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-+ if (
-+#if !defined(CONFIG_CACULE_SCHED)
-+ this_rq->avg_idle < sysctl_sched_migration_cost ||
-+#endif
- !READ_ONCE(this_rq->rd->overload)) {
-
- rcu_read_lock();
-@@ -10823,11 +11264,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
- update_overutilized_status(task_rq(curr));
- }
-
-+#ifdef CONFIG_CACULE_SCHED
- /*
- * called on fork with the child task as argument from the parent's context
- * - child not yet on the tasklist
- * - preemption disabled
- */
-+static void task_fork_fair(struct task_struct *p)
-+{
-+ struct cfs_rq *cfs_rq;
-+ struct sched_entity *curr;
-+ struct rq *rq = this_rq();
-+ struct rq_flags rf;
-+
-+ rq_lock(rq, &rf);
-+ update_rq_clock(rq);
-+
-+ cfs_rq = task_cfs_rq(current);
-+ curr = cfs_rq->curr;
-+ if (curr)
-+ update_curr(cfs_rq);
-+
-+ rq_unlock(rq, &rf);
-+}
-+#else
- static void task_fork_fair(struct task_struct *p)
- {
- struct cfs_rq *cfs_rq;
-@@ -10858,6 +11318,7 @@ static void task_fork_fair(struct task_struct *p)
- se->vruntime -= cfs_rq->min_vruntime;
- rq_unlock(rq, &rf);
- }
-+#endif /* CONFIG_CACULE_SCHED */
-
- /*
- * Priority of the task has changed. Check to see if we preempt
-@@ -10976,6 +11437,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
- static void detach_task_cfs_rq(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- if (!vruntime_normalized(p)) {
-@@ -10986,6 +11449,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
- place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
- }
-+#endif
-
- detach_entity_cfs_rq(se);
- }
-@@ -10993,12 +11457,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
- static void attach_task_cfs_rq(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-+#endif
-
- attach_entity_cfs_rq(se);
-
-+#if !defined(CONFIG_CACULE_SCHED)
- if (!vruntime_normalized(p))
- se->vruntime += cfs_rq->min_vruntime;
-+#endif
- }
-
- static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -11054,13 +11523,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
- void init_cfs_rq(struct cfs_rq *cfs_rq)
- {
- cfs_rq->tasks_timeline = RB_ROOT_CACHED;
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- cfs_rq->min_vruntime = (u64)(-(1LL << 20));
- #ifndef CONFIG_64BIT
- cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
- #endif
-+#endif /* CONFIG_CACULE_SCHED */
-+
- #ifdef CONFIG_SMP
- raw_spin_lock_init(&cfs_rq->removed.lock);
- #endif
-+
-+#ifdef CONFIG_CACULE_SCHED
-+ cfs_rq->head = NULL;
-+ cfs_rq->tail = NULL;
-+#endif
- }
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index e4e4f47cee6a..0eb4fca83ffe 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -523,10 +523,13 @@ struct cfs_rq {
- unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
- u64 exec_clock;
-+
-+#if !defined(CONFIG_CACULE_SCHED)
- u64 min_vruntime;
- #ifndef CONFIG_64BIT
- u64 min_vruntime_copy;
- #endif
-+#endif /* CONFIG_CACULE_SCHED */
-
- struct rb_root_cached tasks_timeline;
-
-@@ -535,9 +538,15 @@ struct cfs_rq {
- * It is set to NULL otherwise (i.e when none are currently running).
- */
- struct sched_entity *curr;
-+#ifdef CONFIG_CACULE_SCHED
-+ struct cacule_node *head;
-+ struct cacule_node *tail;
-+
-+#else
- struct sched_entity *next;
- struct sched_entity *last;
- struct sched_entity *skip;
-+#endif // CONFIG_CACULE_SCHED
-
- #ifdef CONFIG_SCHED_DEBUG
- unsigned int nr_spread_over;
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 62fbd09b5dc1..8cbb8c5663d3 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-+#ifdef CONFIG_CACULE_SCHED
-+ {
-+ .procname = "sched_interactivity_factor",
-+ .data = &interactivity_factor,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+ {
-+ .procname = "sched_interactivity_threshold",
-+ .data = &interactivity_threshold,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+ {
-+ .procname = "sched_max_lifetime_ms",
-+ .data = &cacule_max_lifetime,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+#endif
- #ifdef CONFIG_SCHED_DEBUG
- {
- .procname = "sched_min_granularity_ns",
diff --git a/config b/config
index 3acca4d1d966..bd9337d50798 100644
--- a/config
+++ b/config
@@ -1,18 +1,16 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.12.17-hardened1 Kernel Configuration
+# Linux/x86 5.10.23-hardened1 Kernel Configuration
#
-CONFIG_CC_VERSION_TEXT="gcc (GCC) 11.1.0"
+CONFIG_CC_VERSION_TEXT="gcc (GCC) 10.2.0"
CONFIG_CC_IS_GCC=y
-CONFIG_GCC_VERSION=110100
+CONFIG_GCC_VERSION=100200
+CONFIG_LD_VERSION=236010000
CONFIG_CLANG_VERSION=0
-CONFIG_LD_IS_BFD=y
-CONFIG_LD_VERSION=23601
CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
CONFIG_CC_HAS_ASM_GOTO=y
-CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
CONFIG_CC_HAS_ASM_INLINE=y
CONFIG_IRQ_WORK=y
CONFIG_BUILDTIME_TABLE_SORT=y
@@ -105,7 +103,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_PREEMPT_COUNT=y
CONFIG_PREEMPTION=y
-CONFIG_PREEMPT_DYNAMIC=y
#
# CPU/Task time and stats accounting
@@ -318,6 +315,7 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
CONFIG_ZONE_DMA32=y
CONFIG_AUDIT_ARCH=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_HAVE_INTEL_TXT=y
CONFIG_X86_64_SMP=y
CONFIG_ARCH_SUPPORTS_UPROBES=y
@@ -352,12 +350,11 @@ CONFIG_PARAVIRT_SPINLOCKS=y
CONFIG_X86_HV_CALLBACK_VECTOR=y
CONFIG_XEN=y
CONFIG_XEN_PV=y
-CONFIG_XEN_512GB=y
CONFIG_XEN_PV_SMP=y
CONFIG_XEN_DOM0=y
CONFIG_XEN_PVHVM=y
CONFIG_XEN_PVHVM_SMP=y
-CONFIG_XEN_PVHVM_GUEST=y
+CONFIG_XEN_512GB=y
CONFIG_XEN_SAVE_RESTORE=y
# CONFIG_XEN_DEBUG_FS is not set
CONFIG_XEN_PVH=y
@@ -408,6 +405,7 @@ CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
CONFIG_X86_MCE_THRESHOLD=y
CONFIG_X86_MCE_INJECT=m
+CONFIG_X86_THERMAL_VECTOR=y
#
# Performance monitoring
@@ -460,7 +458,6 @@ CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
# CONFIG_X86_INTEL_TSX_MODE_ON is not set
CONFIG_X86_INTEL_TSX_MODE_AUTO=y
-CONFIG_X86_SGX=y
CONFIG_EFI=y
CONFIG_EFI_STUB=y
CONFIG_EFI_MIXED=y
@@ -468,7 +465,6 @@ CONFIG_EFI_MIXED=y
# CONFIG_HZ_250 is not set
CONFIG_HZ_300=y
# CONFIG_HZ_1000 is not set
-# CONFIG_HZ_2000 is not set
CONFIG_HZ=300
CONFIG_SCHED_HRTICK=y
# CONFIG_KEXEC is not set
@@ -535,7 +531,6 @@ CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
# CONFIG_ACPI_DEBUGGER is not set
CONFIG_ACPI_SPCR_TABLE=y
-CONFIG_ACPI_FPDT=y
CONFIG_ACPI_LPIT=y
CONFIG_ACPI_SLEEP=y
CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
@@ -556,7 +551,6 @@ CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_PLATFORM_PROFILE=m
CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
CONFIG_ACPI_TABLE_UPGRADE=y
# CONFIG_ACPI_DEBUG is not set
@@ -596,6 +590,7 @@ CONFIG_CHT_WC_PMIC_OPREGION=y
CONFIG_CHT_DC_TI_PMIC_OPREGION=y
CONFIG_TPS68470_PMIC_OPREGION=y
CONFIG_X86_PM_TIMER=y
+# CONFIG_SFI is not set
#
# CPU Frequency scaling
@@ -698,7 +693,7 @@ CONFIG_GOOGLE_VPD=m
# CONFIG_EFI_VARS is not set
CONFIG_EFI_ESRT=y
CONFIG_EFI_VARS_PSTORE=y
-CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
# CONFIG_EFI_FAKE_MEMMAP is not set
CONFIG_EFI_SOFT_RESERVE=y
CONFIG_EFI_RUNTIME_WRAPPERS=y
@@ -745,7 +740,6 @@ CONFIG_KVM=m
CONFIG_KVM_INTEL=m
CONFIG_KVM_AMD=m
CONFIG_KVM_AMD_SEV=y
-CONFIG_KVM_XEN=y
CONFIG_KVM_MMU_AUDIT=y
CONFIG_AS_AVX512=y
CONFIG_AS_SHA1_NI=y
@@ -757,6 +751,10 @@ CONFIG_AS_TPAUSE=y
#
CONFIG_HOTPLUG_SMT=y
CONFIG_GENERIC_ENTRY=y
+CONFIG_OPROFILE=m
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_STATIC_KEYS_SELFTEST is not set
@@ -808,20 +806,14 @@ CONFIG_HAVE_ARCH_SECCOMP=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
CONFIG_SECCOMP=y
CONFIG_SECCOMP_FILTER=y
-# CONFIG_SECCOMP_CACHE_DEBUG is not set
CONFIG_HAVE_ARCH_STACKLEAK=y
CONFIG_HAVE_STACKPROTECTOR=y
CONFIG_STACKPROTECTOR=y
CONFIG_STACKPROTECTOR_STRONG=y
-CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
-CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
-CONFIG_LTO_NONE=y
CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y
CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_MOVE_PUD=y
CONFIG_HAVE_MOVE_PMD=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
@@ -830,8 +822,6 @@ CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
CONFIG_HAVE_ARCH_SOFT_DIRTY=y
CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
-CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
CONFIG_HAVE_EXIT_THREAD=y
@@ -857,10 +847,7 @@ CONFIG_LOCK_EVENT_COUNTS=y
CONFIG_ARCH_HAS_MEM_ENCRYPT=y
CONFIG_HAVE_STATIC_CALL=y
CONFIG_HAVE_STATIC_CALL_INLINE=y
-CONFIG_HAVE_PREEMPT_DYNAMIC=y
CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
#
# GCOV-based kernel profiling
@@ -897,6 +884,7 @@ CONFIG_MODULE_COMPRESS=y
# CONFIG_MODULE_COMPRESS_GZIP is not set
CONFIG_MODULE_COMPRESS_XZ=y
# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_UNUSED_SYMBOLS=y
CONFIG_MODULES_TREE_LOOKUP=y
CONFIG_BLOCK=y
CONFIG_BLK_RQ_ALLOC_TIME=y
@@ -1056,17 +1044,18 @@ CONFIG_ZSMALLOC=y
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
-CONFIG_IDLE_PAGE_TRACKING=y
+# CONFIG_IDLE_PAGE_TRACKING is not set
CONFIG_ARCH_HAS_PTE_DEVMAP=y
CONFIG_ZONE_DEVICE=y
CONFIG_DEV_PAGEMAP_OPS=y
CONFIG_HMM_MIRROR=y
CONFIG_DEVICE_PRIVATE=y
CONFIG_VMAP_PFN=y
+CONFIG_FRAME_VECTOR=y
CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
CONFIG_ARCH_HAS_PKEYS=y
# CONFIG_PERCPU_STATS is not set
-# CONFIG_GUP_TEST is not set
+# CONFIG_GUP_BENCHMARK is not set
CONFIG_READ_ONLY_THP_FOR_FS=y
CONFIG_ARCH_HAS_PTE_SPECIAL=y
CONFIG_MAPPING_DIRTY_HELPERS=y
@@ -1291,7 +1280,6 @@ CONFIG_NF_DUP_NETDEV=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
-CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XTABLES=m
@@ -1434,7 +1422,6 @@ CONFIG_IP_VS_SH=m
CONFIG_IP_VS_MH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_TWOS=m
#
# IPVS SH scheduler
@@ -1589,7 +1576,6 @@ CONFIG_BRIDGE=m
CONFIG_BRIDGE_IGMP_SNOOPING=y
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE_MRP=y
-CONFIG_BRIDGE_CFM=y
CONFIG_HAVE_NET_DSA=y
CONFIG_NET_DSA=m
CONFIG_NET_DSA_TAG_8021Q=m
@@ -1597,21 +1583,17 @@ CONFIG_NET_DSA_TAG_AR9331=m
CONFIG_NET_DSA_TAG_BRCM_COMMON=m
CONFIG_NET_DSA_TAG_BRCM=m
CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
-CONFIG_NET_DSA_TAG_HELLCREEK=m
CONFIG_NET_DSA_TAG_GSWIP=m
-CONFIG_NET_DSA_TAG_DSA_COMMON=m
CONFIG_NET_DSA_TAG_DSA=m
CONFIG_NET_DSA_TAG_EDSA=m
CONFIG_NET_DSA_TAG_MTK=m
CONFIG_NET_DSA_TAG_KSZ=m
CONFIG_NET_DSA_TAG_RTL4_A=m
CONFIG_NET_DSA_TAG_OCELOT=m
-CONFIG_NET_DSA_TAG_OCELOT_8021Q=m
CONFIG_NET_DSA_TAG_QCA=m
CONFIG_NET_DSA_TAG_LAN9303=m
CONFIG_NET_DSA_TAG_SJA1105=m
CONFIG_NET_DSA_TAG_TRAILER=m
-CONFIG_NET_DSA_TAG_XRS700X=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
@@ -1752,7 +1734,9 @@ CONFIG_BATMAN_ADV_BLA=y
CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
+# CONFIG_BATMAN_ADV_DEBUGFS is not set
# CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_BATMAN_ADV_SYSFS=y
# CONFIG_BATMAN_ADV_TRACING is not set
CONFIG_OPENVSWITCH=m
CONFIG_OPENVSWITCH_GRE=m
@@ -1782,7 +1766,6 @@ CONFIG_NET_NCSI=y
CONFIG_NCSI_OEM_CMD_GET_MAC=y
CONFIG_RPS=y
CONFIG_RFS_ACCEL=y
-CONFIG_SOCK_RX_QUEUE_MAPPING=y
CONFIG_XPS=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_CGROUP_NET_CLASSID=y
@@ -1847,7 +1830,6 @@ CONFIG_CAN_CC770=m
CONFIG_CAN_CC770_PLATFORM=m
CONFIG_CAN_IFI_CANFD=m
CONFIG_CAN_M_CAN=m
-CONFIG_CAN_M_CAN_PCI=m
CONFIG_CAN_M_CAN_PLATFORM=m
CONFIG_CAN_M_CAN_TCAN4X5X=m
CONFIG_CAN_PEAK_PCIEFD=m
@@ -1899,7 +1881,7 @@ CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
-# CONFIG_BT_HS is not set
+CONFIG_BT_HS=y
CONFIG_BT_LE=y
CONFIG_BT_6LOWPAN=m
CONFIG_BT_LEDS=y
@@ -1989,6 +1971,8 @@ CONFIG_MAC80211_LEDS=y
# CONFIG_MAC80211_MESSAGE_TRACING is not set
# CONFIG_MAC80211_DEBUG_MENU is not set
CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
CONFIG_RFKILL=m
CONFIG_RFKILL_LEDS=y
CONFIG_RFKILL_INPUT=y
@@ -2017,7 +2001,6 @@ CONFIG_NFC_TRF7970A=m
CONFIG_NFC_MEI_PHY=m
CONFIG_NFC_SIM=m
CONFIG_NFC_PORT100=m
-CONFIG_NFC_VIRTUAL_NCI=m
CONFIG_NFC_FDP=m
CONFIG_NFC_FDP_I2C=m
CONFIG_NFC_PN544=m
@@ -2044,7 +2027,6 @@ CONFIG_NFC_NXP_NCI=m
CONFIG_NFC_NXP_NCI_I2C=m
CONFIG_NFC_S3FWRN5=m
CONFIG_NFC_S3FWRN5_I2C=m
-CONFIG_NFC_S3FWRN82_UART=m
CONFIG_NFC_ST95HF=m
# end of Near Field Communication (NFC) devices
@@ -2083,6 +2065,7 @@ CONFIG_PCIEASPM_DEFAULT=y
CONFIG_PCIE_PME=y
CONFIG_PCIE_DPC=y
CONFIG_PCIE_PTM=y
+# CONFIG_PCIE_BW is not set
CONFIG_PCIE_EDR=y
CONFIG_PCI_MSI=y
CONFIG_PCI_MSI_IRQ_DOMAIN=y
@@ -2147,9 +2130,6 @@ CONFIG_PCI_MESON=y
CONFIG_PCI_SW_SWITCHTEC=m
# end of PCI switch controller drivers
-CONFIG_CXL_BUS=m
-CONFIG_CXL_MEM=m
-# CONFIG_CXL_MEM_RAW_COMMANDS is not set
CONFIG_PCCARD=m
CONFIG_PCMCIA=m
CONFIG_PCMCIA_LOAD_CIS=y
@@ -2172,7 +2152,6 @@ CONFIG_PCCARD_NONSTATIC=y
#
# Generic Driver Options
#
-CONFIG_AUXILIARY_BUS=y
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -2220,7 +2199,6 @@ CONFIG_DMA_SHARED_BUFFER=y
#
CONFIG_MHI_BUS=m
# CONFIG_MHI_BUS_DEBUG is not set
-CONFIG_MHI_BUS_PCI_GENERIC=m
# end of Bus devices
CONFIG_CONNECTOR=y
@@ -2254,6 +2232,7 @@ CONFIG_MTD_BLOCK=m
# CONFIG_SSFDC is not set
# CONFIG_SM_FTL is not set
# CONFIG_MTD_OOPS is not set
+CONFIG_MTD_PSTORE=m
# CONFIG_MTD_SWAP is not set
# CONFIG_MTD_PARTITIONED_MASTER is not set
@@ -2267,8 +2246,8 @@ CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
# end of RAM/ROM/Flash chip drivers
@@ -2276,9 +2255,8 @@ CONFIG_MTD_ROM=m
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-# CONFIG_MTD_PHYSMAP is not set
# CONFIG_MTD_INTEL_VR_NOR is not set
-CONFIG_MTD_PLATRAM=m
+# CONFIG_MTD_PLATRAM is not set
# end of Mapping drivers for chip access
#
@@ -2304,7 +2282,10 @@ CONFIG_MTD_BLOCK2MTD=m
#
CONFIG_MTD_NAND_CORE=m
# CONFIG_MTD_ONENAND is not set
+CONFIG_MTD_NAND_ECC_SW_HAMMING=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
CONFIG_MTD_RAW_NAND=m
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
#
# Raw/parallel NAND flash controllers
@@ -2328,9 +2309,6 @@ CONFIG_MTD_NAND_NANDSIM=m
# ECC engine support
#
CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
-CONFIG_MTD_NAND_ECC_SW_BCH=y
# end of ECC engine support
# end of NAND
@@ -2340,14 +2318,7 @@ CONFIG_MTD_NAND_ECC_SW_BCH=y
# CONFIG_MTD_LPDDR is not set
# end of LPDDR & LPDDR2 PCM memory drivers
-CONFIG_MTD_SPI_NOR=m
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
-CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
-# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
-CONFIG_SPI_INTEL_SPI=m
-CONFIG_SPI_INTEL_SPI_PCI=m
-CONFIG_SPI_INTEL_SPI_PLATFORM=m
+# CONFIG_MTD_SPI_NOR is not set
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_WL_THRESHOLD=4096
CONFIG_MTD_UBI_BEB_LIMIT=20
@@ -2380,13 +2351,6 @@ CONFIG_CDROM=m
# CONFIG_PARIDE is not set
CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
CONFIG_ZRAM=m
-CONFIG_ZRAM_DEF_COMP_LZORLE=y
-# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
-# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
-# CONFIG_ZRAM_DEF_COMP_LZO is not set
-# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
-# CONFIG_ZRAM_DEF_COMP_842 is not set
-CONFIG_ZRAM_DEF_COMP="lzo-rle"
CONFIG_ZRAM_WRITEBACK=y
# CONFIG_ZRAM_MEMORY_TRACKING is not set
CONFIG_BLK_DEV_UMEM=m
@@ -2396,6 +2360,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
# CONFIG_DRBD_FAULT_INJECTION is not set
CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_SKD=m
CONFIG_BLK_DEV_SX8=m
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_COUNT=16
@@ -2499,8 +2464,6 @@ CONFIG_VMWARE_VMCI=m
CONFIG_GENWQE=m
CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
CONFIG_ECHO=m
-CONFIG_BCM_VK=m
-CONFIG_BCM_VK_TTY=y
CONFIG_MISC_ALCOR_PCI=m
CONFIG_MISC_RTSX_PCI=m
CONFIG_MISC_RTSX_USB=m
@@ -2618,6 +2581,7 @@ CONFIG_SCSI_SNIC=m
CONFIG_SCSI_DMX3191D=m
CONFIG_SCSI_FDOMAIN=m
CONFIG_SCSI_FDOMAIN_PCI=m
+CONFIG_SCSI_GDTH=m
CONFIG_SCSI_ISCI=m
CONFIG_SCSI_IPS=m
CONFIG_SCSI_INITIO=m
@@ -2803,14 +2767,12 @@ CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_MULTIPATH_HST=m
-CONFIG_DM_MULTIPATH_IOA=m
CONFIG_DM_DELAY=m
CONFIG_DM_DUST=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
-# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_SWITCH=m
CONFIG_DM_LOG_WRITES=m
@@ -2886,7 +2848,6 @@ CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_NET_VRF=m
CONFIG_VSOCKMON=m
-CONFIG_MHI_NET=m
CONFIG_SUNGEM_PHY=m
# CONFIG_ARCNET is not set
CONFIG_ATM_DRIVERS=y
@@ -2931,7 +2892,6 @@ CONFIG_B53_SRAB_DRIVER=m
CONFIG_B53_SERDES=m
CONFIG_NET_DSA_BCM_SF2=m
CONFIG_NET_DSA_LOOP=m
-CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m
# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
CONFIG_NET_DSA_MT7530=m
CONFIG_NET_DSA_MV88E6060=m
@@ -2942,6 +2902,7 @@ CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
CONFIG_NET_DSA_MV88E6XXX=m
+CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
CONFIG_NET_DSA_MV88E6XXX_PTP=y
CONFIG_NET_DSA_MSCC_SEVILLE=m
CONFIG_NET_DSA_AR9331=m
@@ -2949,9 +2910,6 @@ CONFIG_NET_DSA_SJA1105=m
CONFIG_NET_DSA_SJA1105_PTP=y
CONFIG_NET_DSA_SJA1105_TAS=y
CONFIG_NET_DSA_SJA1105_VL=y
-CONFIG_NET_DSA_XRS700X=m
-CONFIG_NET_DSA_XRS700X_I2C=m
-CONFIG_NET_DSA_XRS700X_MDIO=m
CONFIG_NET_DSA_QCA8K=m
CONFIG_NET_DSA_REALTEK_SMI=m
CONFIG_NET_DSA_SMSC_LAN9303=m
@@ -2997,6 +2955,8 @@ CONFIG_ATL1=m
CONFIG_ATL1E=m
CONFIG_ATL1C=m
CONFIG_ALX=m
+CONFIG_NET_VENDOR_AURORA=y
+CONFIG_AURORA_NB8800=m
CONFIG_NET_VENDOR_BROADCOM=y
CONFIG_B44=m
CONFIG_B44_PCI_AUTOSELECT=y
@@ -3138,8 +3098,6 @@ CONFIG_MLX5_FPGA_TLS=y
CONFIG_MLX5_TLS=y
CONFIG_MLX5_EN_TLS=y
CONFIG_MLX5_SW_STEERING=y
-CONFIG_MLX5_SF=y
-CONFIG_MLX5_SF_MANAGER=y
CONFIG_MLXSW_CORE=m
CONFIG_MLXSW_CORE_HWMON=y
CONFIG_MLXSW_CORE_THERMAL=y
@@ -3280,7 +3238,6 @@ CONFIG_WIZNET_W5300=m
CONFIG_WIZNET_BUS_ANY=y
CONFIG_WIZNET_W5100_SPI=m
CONFIG_NET_VENDOR_XILINX=y
-CONFIG_XILINX_EMACLITE=m
CONFIG_XILINX_AXI_EMAC=m
CONFIG_XILINX_LL_TEMAC=m
CONFIG_NET_VENDOR_XIRCOM=y
@@ -3428,7 +3385,6 @@ CONFIG_USB_SIERRA_NET=m
CONFIG_USB_VL600=m
CONFIG_USB_NET_CH9200=m
CONFIG_USB_NET_AQC111=m
-CONFIG_USB_RTL8153_ECM=m
CONFIG_WLAN=y
CONFIG_WLAN_VENDOR_ADMTEK=y
CONFIG_ADM8211=m
@@ -3616,7 +3572,6 @@ CONFIG_MT76_USB=m
CONFIG_MT76_SDIO=m
CONFIG_MT76x02_LIB=m
CONFIG_MT76x02_USB=m
-CONFIG_MT76_CONNAC_LIB=m
CONFIG_MT76x0_COMMON=m
CONFIG_MT76x0U=m
CONFIG_MT76x0E=m
@@ -3630,7 +3585,6 @@ CONFIG_MT7663_USB_SDIO_COMMON=m
CONFIG_MT7663U=m
CONFIG_MT7663S=m
CONFIG_MT7915E=m
-CONFIG_MT7921E=m
CONFIG_WLAN_VENDOR_MICROCHIP=y
CONFIG_WILC1000=m
CONFIG_WILC1000_SDIO=m
@@ -3732,6 +3686,15 @@ CONFIG_PCMCIA_WL3501=m
CONFIG_MAC80211_HWSIM=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_VIRT_WIFI=m
+
+#
+# WiMAX Wireless Broadband devices
+#
+CONFIG_WIMAX_I2400M=m
+CONFIG_WIMAX_I2400M_USB=m
+CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
+# end of WiMAX Wireless Broadband devices
+
# CONFIG_WAN is not set
CONFIG_IEEE802154_DRIVERS=m
CONFIG_IEEE802154_FAKELB=m
@@ -3785,6 +3748,7 @@ CONFIG_NVM_PBLK=m
CONFIG_INPUT=y
CONFIG_INPUT_LEDS=m
CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INPUT_MATRIXKMAP=m
@@ -3902,6 +3866,7 @@ CONFIG_JOYSTICK_FSIA6B=m
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=m
CONFIG_TABLET_USB_AIPTEK=m
+CONFIG_TABLET_USB_GTCO=m
CONFIG_TABLET_USB_HANWANG=m
CONFIG_TABLET_USB_KBTAB=m
CONFIG_TABLET_USB_PEGASUS=m
@@ -4048,7 +4013,6 @@ CONFIG_INPUT_PCF8574=m
CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
-CONFIG_INPUT_DA7280_HAPTICS=m
CONFIG_INPUT_DA9052_ONKEY=m
CONFIG_INPUT_DA9055_ONKEY=m
CONFIG_INPUT_DA9063_ONKEY=m
@@ -4167,11 +4131,11 @@ CONFIG_SERIAL_SC16IS7XX_CORE=m
CONFIG_SERIAL_SC16IS7XX=m
CONFIG_SERIAL_SC16IS7XX_I2C=y
CONFIG_SERIAL_SC16IS7XX_SPI=y
-# CONFIG_SERIAL_BCM63XX is not set
CONFIG_SERIAL_ALTERA_JTAGUART=m
CONFIG_SERIAL_ALTERA_UART=m
CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
+CONFIG_SERIAL_IFX6X60=m
CONFIG_SERIAL_ARC=m
CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
@@ -4189,6 +4153,8 @@ CONFIG_CYCLADES=m
CONFIG_CYZ_INTR=y
CONFIG_MOXA_INTELLIO=m
CONFIG_MOXA_SMARTIO=m
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
CONFIG_ISI=m
CONFIG_N_HDLC=m
@@ -4253,7 +4219,6 @@ CONFIG_TCG_TIS_CORE=m
CONFIG_TCG_TIS=m
CONFIG_TCG_TIS_SPI=m
CONFIG_TCG_TIS_SPI_CR50=y
-CONFIG_TCG_TIS_I2C_CR50=m
CONFIG_TCG_TIS_I2C_ATMEL=m
CONFIG_TCG_TIS_I2C_INFINEON=m
CONFIG_TCG_TIS_I2C_NUVOTON=m
@@ -4455,7 +4420,6 @@ CONFIG_PTP_1588_CLOCK_KVM=m
CONFIG_PTP_1588_CLOCK_IDT82P33=m
CONFIG_PTP_1588_CLOCK_IDTCM=m
CONFIG_PTP_1588_CLOCK_VMW=m
-CONFIG_PTP_1588_CLOCK_OCP=m
# end of PTP clock support
CONFIG_PINCTRL=y
@@ -4473,17 +4437,14 @@ CONFIG_PINCTRL_BAYTRAIL=y
CONFIG_PINCTRL_CHERRYVIEW=y
CONFIG_PINCTRL_LYNXPOINT=y
CONFIG_PINCTRL_INTEL=y
-CONFIG_PINCTRL_ALDERLAKE=y
CONFIG_PINCTRL_BROXTON=y
CONFIG_PINCTRL_CANNONLAKE=y
CONFIG_PINCTRL_CEDARFORK=y
CONFIG_PINCTRL_DENVERTON=y
-CONFIG_PINCTRL_ELKHARTLAKE=y
CONFIG_PINCTRL_EMMITSBURG=y
CONFIG_PINCTRL_GEMINILAKE=y
CONFIG_PINCTRL_ICELAKE=y
CONFIG_PINCTRL_JASPERLAKE=y
-CONFIG_PINCTRL_LAKEFIELD=y
CONFIG_PINCTRL_LEWISBURG=y
CONFIG_PINCTRL_SUNRISEPOINT=y
CONFIG_PINCTRL_TIGERLAKE=y
@@ -4504,6 +4465,7 @@ CONFIG_GPIOLIB_FASTPATH_LIMIT=512
CONFIG_GPIO_ACPI=y
CONFIG_GPIOLIB_IRQCHIP=y
# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_CDEV=y
CONFIG_GPIO_CDEV_V1=y
CONFIG_GPIO_GENERIC=m
@@ -4521,6 +4483,7 @@ CONFIG_GPIO_MB86S7X=m
CONFIG_GPIO_MENZ127=m
CONFIG_GPIO_SIOX=m
CONFIG_GPIO_VX855=m
+CONFIG_GPIO_XILINX=m
CONFIG_GPIO_AMD_FCH=m
# end of Memory mapped GPIO drivers
@@ -4563,6 +4526,7 @@ CONFIG_GPIO_KEMPLD=m
CONFIG_GPIO_LP3943=m
CONFIG_GPIO_LP873X=m
CONFIG_GPIO_MADERA=m
+CONFIG_GPIO_MSIC=y
CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_RC5T583=y
CONFIG_GPIO_TPS65086=m
@@ -4606,13 +4570,8 @@ CONFIG_GPIO_XRA1403=m
CONFIG_GPIO_VIPERBOARD=m
# end of USB GPIO expanders
-#
-# Virtual GPIO drivers
-#
CONFIG_GPIO_AGGREGATOR=m
CONFIG_GPIO_MOCKUP=m
-# end of Virtual GPIO drivers
-
CONFIG_W1=m
CONFIG_W1_CON=y
@@ -4702,7 +4661,6 @@ CONFIG_CHARGER_LP8788=m
CONFIG_CHARGER_GPIO=m
CONFIG_CHARGER_MANAGER=y
CONFIG_CHARGER_LT3651=m
-CONFIG_CHARGER_LTC4162L=m
CONFIG_CHARGER_MAX14577=m
CONFIG_CHARGER_MAX77693=m
CONFIG_CHARGER_MAX8997=m
@@ -4715,7 +4673,6 @@ CONFIG_CHARGER_BQ24735=m
CONFIG_CHARGER_BQ2515X=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_BQ25980=m
-CONFIG_CHARGER_BQ256XX=m
CONFIG_CHARGER_SMB347=m
CONFIG_CHARGER_TPS65090=m
CONFIG_BATTERY_GAUGE_LTC2941=m
@@ -4750,7 +4707,6 @@ CONFIG_SENSORS_ADT7411=m
CONFIG_SENSORS_ADT7462=m
CONFIG_SENSORS_ADT7470=m
CONFIG_SENSORS_ADT7475=m
-CONFIG_SENSORS_AHT10=m
CONFIG_SENSORS_AS370=m
CONFIG_SENSORS_ASC7621=m
CONFIG_SENSORS_AXI_FAN_CONTROL=m
@@ -4763,7 +4719,6 @@ CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_ASPEED=m
CONFIG_SENSORS_ATXP1=m
CONFIG_SENSORS_CORSAIR_CPRO=m
-CONFIG_SENSORS_CORSAIR_PSU=m
CONFIG_SENSORS_DRIVETEMP=m
CONFIG_SENSORS_DS620=m
CONFIG_SENSORS_DS1621=m
@@ -4796,7 +4751,6 @@ CONFIG_SENSORS_LTC2947=m
CONFIG_SENSORS_LTC2947_I2C=m
CONFIG_SENSORS_LTC2947_SPI=m
CONFIG_SENSORS_LTC2990=m
-CONFIG_SENSORS_LTC2992=m
CONFIG_SENSORS_LTC4151=m
CONFIG_SENSORS_LTC4215=m
CONFIG_SENSORS_LTC4222=m
@@ -4804,7 +4758,6 @@ CONFIG_SENSORS_LTC4245=m
CONFIG_SENSORS_LTC4260=m
CONFIG_SENSORS_LTC4261=m
CONFIG_SENSORS_MAX1111=m
-CONFIG_SENSORS_MAX127=m
CONFIG_SENSORS_MAX16065=m
CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_MAX1668=m
@@ -4820,7 +4773,6 @@ CONFIG_SENSORS_MAX31790=m
CONFIG_SENSORS_MCP3021=m
CONFIG_SENSORS_MLXREG_FAN=m
CONFIG_SENSORS_TC654=m
-CONFIG_SENSORS_TPS23861=m
CONFIG_SENSORS_MENF21BMC_HWMON=m
CONFIG_SENSORS_MR75203=m
CONFIG_SENSORS_ADCXX=m
@@ -4872,16 +4824,13 @@ CONFIG_SENSORS_MAX31785=m
CONFIG_SENSORS_MAX34440=m
CONFIG_SENSORS_MAX8688=m
CONFIG_SENSORS_MP2975=m
-CONFIG_SENSORS_PM6764TR=m
CONFIG_SENSORS_PXE1610=m
-CONFIG_SENSORS_Q54SJ108A2=m
CONFIG_SENSORS_TPS40422=m
CONFIG_SENSORS_TPS53679=m
CONFIG_SENSORS_UCD9000=m
CONFIG_SENSORS_UCD9200=m
CONFIG_SENSORS_XDPE122=m
CONFIG_SENSORS_ZL6100=m
-CONFIG_SENSORS_SBTSI=m
CONFIG_SENSORS_SHT15=m
CONFIG_SENSORS_SHT21=m
CONFIG_SENSORS_SHT3x=m
@@ -4961,7 +4910,6 @@ CONFIG_DEVFREQ_THERMAL=y
# Intel thermal drivers
#
CONFIG_INTEL_POWERCLAMP=m
-CONFIG_X86_THERMAL_VECTOR=y
CONFIG_X86_PKG_TEMP_THERMAL=m
CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
CONFIG_INTEL_SOC_DTS_THERMAL=m
@@ -4972,7 +4920,7 @@ CONFIG_INTEL_SOC_DTS_THERMAL=m
CONFIG_INT340X_THERMAL=m
CONFIG_ACPI_THERMAL_REL=m
CONFIG_INT3406_THERMAL=m
-CONFIG_PROC_THERMAL_MMIO_RAPL=m
+CONFIG_PROC_THERMAL_MMIO_RAPL=y
# end of ACPI INT340X thermal drivers
CONFIG_INTEL_BXT_PMIC_THERMAL=m
@@ -5142,8 +5090,8 @@ CONFIG_INTEL_SOC_PMIC_MRFLD=m
CONFIG_MFD_INTEL_LPSS=m
CONFIG_MFD_INTEL_LPSS_ACPI=m
CONFIG_MFD_INTEL_LPSS_PCI=m
+CONFIG_MFD_INTEL_MSIC=y
CONFIG_MFD_INTEL_PMC_BXT=m
-CONFIG_MFD_INTEL_PMT=m
CONFIG_MFD_IQS62X=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_MFD_KEMPLD=m
@@ -5235,6 +5183,7 @@ CONFIG_REGULATOR_88PM8607=m
CONFIG_REGULATOR_ACT8865=m
CONFIG_REGULATOR_AD5398=m
CONFIG_REGULATOR_AAT2870=m
+CONFIG_REGULATOR_AB3100=m
CONFIG_REGULATOR_ARIZONA_LDO1=m
CONFIG_REGULATOR_ARIZONA_MICSUPP=m
CONFIG_REGULATOR_AS3711=m
@@ -5283,6 +5232,7 @@ CONFIG_REGULATOR_PALMAS=m
CONFIG_REGULATOR_PCA9450=m
CONFIG_REGULATOR_PCAP=m
CONFIG_REGULATOR_PCF50633=m
+CONFIG_REGULATOR_PFUZE100=m
CONFIG_REGULATOR_PV88060=m
CONFIG_REGULATOR_PV88080=m
CONFIG_REGULATOR_PV88090=m
@@ -5656,7 +5606,6 @@ CONFIG_DVB_DDBRIDGE=m
CONFIG_DVB_SMIPCIE=m
CONFIG_DVB_NETUP_UNIDVB=m
CONFIG_VIDEO_IPU3_CIO2=m
-CONFIG_CIO2_BRIDGE=y
CONFIG_RADIO_ADAPTERS=y
CONFIG_RADIO_TEA575X=m
CONFIG_RADIO_SI470X=m
@@ -5774,7 +5723,7 @@ CONFIG_VIDEO_M52790=m
# Camera sensor devices
#
CONFIG_VIDEO_APTINA_PLL=m
-CONFIG_VIDEO_CCS_PLL=m
+CONFIG_VIDEO_SMIAPP_PLL=m
CONFIG_VIDEO_HI556=m
CONFIG_VIDEO_IMX214=m
CONFIG_VIDEO_IMX219=m
@@ -5783,14 +5732,12 @@ CONFIG_VIDEO_IMX274=m
CONFIG_VIDEO_IMX290=m
CONFIG_VIDEO_IMX319=m
CONFIG_VIDEO_IMX355=m
-CONFIG_VIDEO_OV02A10=m
CONFIG_VIDEO_OV2640=m
CONFIG_VIDEO_OV2659=m
CONFIG_VIDEO_OV2680=m
CONFIG_VIDEO_OV2685=m
CONFIG_VIDEO_OV2740=m
CONFIG_VIDEO_OV5647=m
-CONFIG_VIDEO_OV5648=m
CONFIG_VIDEO_OV6650=m
CONFIG_VIDEO_OV5670=m
CONFIG_VIDEO_OV5675=m
@@ -5801,10 +5748,8 @@ CONFIG_VIDEO_OV7640=m
CONFIG_VIDEO_OV7670=m
CONFIG_VIDEO_OV7740=m
CONFIG_VIDEO_OV8856=m
-CONFIG_VIDEO_OV8865=m
CONFIG_VIDEO_OV9640=m
CONFIG_VIDEO_OV9650=m
-CONFIG_VIDEO_OV9734=m
CONFIG_VIDEO_OV13858=m
CONFIG_VIDEO_VS6624=m
CONFIG_VIDEO_MT9M001=m
@@ -5819,15 +5764,13 @@ CONFIG_VIDEO_MT9V111=m
CONFIG_VIDEO_SR030PC30=m
CONFIG_VIDEO_NOON010PC30=m
CONFIG_VIDEO_M5MOLS=m
-CONFIG_VIDEO_MAX9271_LIB=m
CONFIG_VIDEO_RDACM20=m
-CONFIG_VIDEO_RDACM21=m
CONFIG_VIDEO_RJ54N1=m
CONFIG_VIDEO_S5K6AA=m
CONFIG_VIDEO_S5K6A3=m
CONFIG_VIDEO_S5K4ECGX=m
CONFIG_VIDEO_S5K5BAF=m
-CONFIG_VIDEO_CCS=m
+CONFIG_VIDEO_SMIAPP=m
CONFIG_VIDEO_ET8EK8=m
CONFIG_VIDEO_S5C73M3=m
# end of Camera sensor devices
@@ -6012,7 +5955,6 @@ CONFIG_DVB_AU8522=m
CONFIG_DVB_AU8522_DTV=m
CONFIG_DVB_AU8522_V4L=m
CONFIG_DVB_S5H1411=m
-CONFIG_DVB_MXL692=m
#
# ISDB-T (terrestrial) frontends
@@ -6085,6 +6027,7 @@ CONFIG_DRM_FBDEV_OVERALLOC=100
CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_DP_CEC=y
CONFIG_DRM_TTM=m
+CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
CONFIG_DRM_GEM_CMA_HELPER=y
@@ -6125,6 +6068,7 @@ CONFIG_DRM_AMD_ACP=y
#
CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_DCN=y
+CONFIG_DRM_AMD_DC_DCN3_0=y
CONFIG_DRM_AMD_DC_HDCP=y
CONFIG_DRM_AMD_DC_SI=y
# end of Display Engine Configuration
@@ -6158,6 +6102,7 @@ CONFIG_DRM_VMWGFX=m
CONFIG_DRM_VMWGFX_FBCON=y
CONFIG_DRM_GMA500=m
CONFIG_DRM_GMA600=y
+CONFIG_DRM_GMA3600=y
CONFIG_DRM_UDL=m
CONFIG_DRM_AST=m
CONFIG_DRM_MGAG200=m
@@ -6216,7 +6161,7 @@ CONFIG_FB_SYS_IMAGEBLIT=m
# CONFIG_FB_FOREIGN_ENDIAN is not set
CONFIG_FB_SYS_FOPS=m
CONFIG_FB_DEFERRED_IO=y
-CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_MODE_HELPERS is not set
CONFIG_FB_TILEBLITTING=y
#
@@ -6229,7 +6174,7 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
-CONFIG_FB_UVESA=m
+# CONFIG_FB_UVESA is not set
CONFIG_FB_VESA=y
CONFIG_FB_EFI=y
# CONFIG_FB_N411 is not set
@@ -6367,7 +6312,6 @@ CONFIG_SND_DEBUG=y
# CONFIG_SND_DEBUG_VERBOSE is not set
# CONFIG_SND_PCM_XRUN_DEBUG is not set
# CONFIG_SND_CTL_VALIDATION is not set
-# CONFIG_SND_JACK_INJECTION_DEBUG is not set
CONFIG_SND_VMASTER=y
CONFIG_SND_DMA_SGBUF=y
CONFIG_SND_SEQUENCER=m
@@ -6503,11 +6447,9 @@ CONFIG_SND_HDA_DSP_LOADER=y
CONFIG_SND_HDA_COMPONENT=y
CONFIG_SND_HDA_I915=y
CONFIG_SND_HDA_EXT_CORE=m
-CONFIG_SND_HDA_PREALLOC_SIZE=0
+CONFIG_SND_HDA_PREALLOC_SIZE=4096
CONFIG_SND_INTEL_NHLT=y
CONFIG_SND_INTEL_DSP_CONFIG=m
-CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
-CONFIG_SND_INTEL_BYT_PREFER_SOF=y
CONFIG_SND_SPI=y
CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
@@ -6545,9 +6487,6 @@ CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
CONFIG_SND_SOC_COMPRESS=y
CONFIG_SND_SOC_TOPOLOGY=y
CONFIG_SND_SOC_ACPI=m
-CONFIG_SND_SOC_ADI=m
-CONFIG_SND_SOC_ADI_AXI_I2S=m
-CONFIG_SND_SOC_ADI_AXI_SPDIF=m
CONFIG_SND_SOC_AMD_ACP=m
CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m
CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
@@ -6556,7 +6495,7 @@ CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
CONFIG_SND_SOC_AMD_RENOIR=m
CONFIG_SND_SOC_AMD_RENOIR_MACH=m
CONFIG_SND_ATMEL_SOC=m
-# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
+CONFIG_SND_BCM63XX_I2S_WHISTLER=m
CONFIG_SND_DESIGNWARE_I2S=m
CONFIG_SND_DESIGNWARE_PCM=y
@@ -6574,7 +6513,6 @@ CONFIG_SND_DESIGNWARE_PCM=y
# CONFIG_SND_SOC_FSL_SPDIF is not set
# CONFIG_SND_SOC_FSL_ESAI is not set
# CONFIG_SND_SOC_FSL_MICFIL is not set
-CONFIG_SND_SOC_FSL_XCVR=m
# CONFIG_SND_SOC_IMX_AUDMUX is not set
# end of SoC Audio for Freescale CPUs
@@ -6603,7 +6541,7 @@ CONFIG_SND_SOC_INTEL_CML_H=m
CONFIG_SND_SOC_INTEL_CML_LP=m
CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m
CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
-CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC=y
+# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set
CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
CONFIG_SND_SOC_INTEL_MACH=y
@@ -6614,7 +6552,6 @@ CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
-CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m
CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
@@ -6648,40 +6585,45 @@ CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH=m
CONFIG_SND_SOC_MTK_BTCVSD=m
CONFIG_SND_SOC_SOF_TOPLEVEL=y
-CONFIG_SND_SOC_SOF_PCI_DEV=m
CONFIG_SND_SOC_SOF_PCI=m
CONFIG_SND_SOC_SOF_ACPI=m
-CONFIG_SND_SOC_SOF_ACPI_DEV=m
# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
CONFIG_SND_SOC_SOF=m
CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
+CONFIG_SND_SOC_SOF_INTEL_ACPI=m
+CONFIG_SND_SOC_SOF_INTEL_PCI=m
CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
CONFIG_SND_SOC_SOF_INTEL_COMMON=m
-CONFIG_SND_SOC_SOF_BAYTRAIL=m
-# CONFIG_SND_SOC_SOF_BROADWELL is not set
+CONFIG_SND_SOC_SOF_MERRIFIELD_SUPPORT=y
CONFIG_SND_SOC_SOF_MERRIFIELD=m
-CONFIG_SND_SOC_SOF_INTEL_APL=m
+CONFIG_SND_SOC_SOF_APOLLOLAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_APOLLOLAKE=m
+CONFIG_SND_SOC_SOF_GEMINILAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_GEMINILAKE=m
-CONFIG_SND_SOC_SOF_INTEL_CNL=m
+CONFIG_SND_SOC_SOF_CANNONLAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_CANNONLAKE=m
+CONFIG_SND_SOC_SOF_COFFEELAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_COFFEELAKE=m
-CONFIG_SND_SOC_SOF_COMETLAKE=m
-CONFIG_SND_SOC_SOF_INTEL_ICL=m
+CONFIG_SND_SOC_SOF_ICELAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_ICELAKE=m
-CONFIG_SND_SOC_SOF_JASPERLAKE=m
-CONFIG_SND_SOC_SOF_INTEL_TGL=m
+CONFIG_SND_SOC_SOF_COMETLAKE=m
+CONFIG_SND_SOC_SOF_COMETLAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_COMETLAKE_LP_SUPPORT=y
+CONFIG_SND_SOC_SOF_TIGERLAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_TIGERLAKE=m
+CONFIG_SND_SOC_SOF_ELKHARTLAKE_SUPPORT=y
CONFIG_SND_SOC_SOF_ELKHARTLAKE=m
-CONFIG_SND_SOC_SOF_ALDERLAKE=m
+CONFIG_SND_SOC_SOF_JASPERLAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_JASPERLAKE=m
CONFIG_SND_SOC_SOF_HDA_COMMON=m
CONFIG_SND_SOC_SOF_HDA_LINK=y
CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
CONFIG_SND_SOC_SOF_HDA=m
+CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK=y
CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
CONFIG_SND_SOC_SOF_XTENSA=m
@@ -6695,18 +6637,14 @@ CONFIG_SND_SOC_XILINX_I2S=m
CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
CONFIG_SND_SOC_XILINX_SPDIF=m
CONFIG_SND_SOC_XTFPGA_I2S=m
+CONFIG_ZX_TDM=m
CONFIG_SND_SOC_I2C_AND_SPI=m
#
# CODEC drivers
#
-CONFIG_SND_SOC_ARIZONA=m
-CONFIG_SND_SOC_WM_ADSP=m
CONFIG_SND_SOC_AC97_CODEC=m
CONFIG_SND_SOC_ADAU_UTILS=m
-CONFIG_SND_SOC_ADAU1372=m
-CONFIG_SND_SOC_ADAU1372_I2C=m
-CONFIG_SND_SOC_ADAU1372_SPI=m
CONFIG_SND_SOC_ADAU1701=m
CONFIG_SND_SOC_ADAU17X1=m
CONFIG_SND_SOC_ADAU1761=m
@@ -6793,7 +6731,6 @@ CONFIG_SND_SOC_PCM3060_SPI=m
CONFIG_SND_SOC_PCM3168A=m
CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SOC_PCM3168A_SPI=m
-CONFIG_SND_SOC_PCM5102A=m
CONFIG_SND_SOC_PCM512x=m
CONFIG_SND_SOC_PCM512x_I2C=m
CONFIG_SND_SOC_PCM512x_SPI=m
@@ -6813,7 +6750,6 @@ CONFIG_SND_SOC_RT5631=m
CONFIG_SND_SOC_RT5640=m
CONFIG_SND_SOC_RT5645=m
CONFIG_SND_SOC_RT5651=m
-CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_RT5660=m
CONFIG_SND_SOC_RT5663=m
CONFIG_SND_SOC_RT5670=m
@@ -6834,7 +6770,7 @@ CONFIG_SND_SOC_SIGMADSP=m
CONFIG_SND_SOC_SIGMADSP_I2C=m
CONFIG_SND_SOC_SIGMADSP_REGMAP=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
-CONFIG_SND_SOC_SIMPLE_MUX=m
+CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SND_SOC_SSM2305=m
CONFIG_SND_SOC_SSM2602=m
@@ -6869,7 +6805,6 @@ CONFIG_SND_SOC_TSCS454=m
CONFIG_SND_SOC_UDA1334=m
CONFIG_SND_SOC_WCD9335=m
CONFIG_SND_SOC_WCD934X=m
-CONFIG_SND_SOC_WM5102=m
CONFIG_SND_SOC_WM8510=m
CONFIG_SND_SOC_WM8523=m
CONFIG_SND_SOC_WM8524=m
@@ -6901,17 +6836,12 @@ CONFIG_SND_SOC_MAX9759=m
CONFIG_SND_SOC_MT6351=m
CONFIG_SND_SOC_MT6358=m
CONFIG_SND_SOC_MT6660=m
-CONFIG_SND_SOC_NAU8315=m
CONFIG_SND_SOC_NAU8540=m
CONFIG_SND_SOC_NAU8810=m
CONFIG_SND_SOC_NAU8822=m
CONFIG_SND_SOC_NAU8824=m
CONFIG_SND_SOC_NAU8825=m
CONFIG_SND_SOC_TPA6130A2=m
-CONFIG_SND_SOC_LPASS_WSA_MACRO=m
-CONFIG_SND_SOC_LPASS_VA_MACRO=m
-CONFIG_SND_SOC_LPASS_RX_MACRO=m
-CONFIG_SND_SOC_LPASS_TX_MACRO=m
# end of CODEC drivers
CONFIG_SND_SIMPLE_CARD_UTILS=m
@@ -7012,8 +6942,6 @@ CONFIG_HID_PICOLCD_LCD=y
CONFIG_HID_PICOLCD_LEDS=y
CONFIG_HID_PICOLCD_CIR=y
CONFIG_HID_PLANTRONICS=m
-CONFIG_HID_PLAYSTATION=m
-CONFIG_PLAYSTATION_FF=y
CONFIG_HID_PRIMAX=m
CONFIG_HID_RETRODE=m
CONFIG_HID_ROCCAT=m
@@ -7045,7 +6973,7 @@ CONFIG_HID_ZEROPLUS=m
CONFIG_ZEROPLUS_FF=y
CONFIG_HID_ZYDACRON=m
CONFIG_HID_SENSOR_HUB=m
-CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set
CONFIG_HID_ALPS=m
CONFIG_HID_MCP2221=m
# end of Special HID drivers
@@ -7061,23 +6989,15 @@ CONFIG_USB_HIDDEV=y
#
# I2C HID support
#
-CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID=m
# end of I2C HID support
-CONFIG_I2C_HID_CORE=m
-
#
# Intel ISH HID support
#
CONFIG_INTEL_ISH_HID=m
CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
# end of Intel ISH HID support
-
-#
-# AMD SFH HID Support
-#
-CONFIG_AMD_SFH_HID=m
-# end of AMD SFH HID Support
# end of HID support
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
@@ -7180,11 +7100,7 @@ CONFIG_USBIP_VHCI_HC_PORTS=8
CONFIG_USBIP_VHCI_NR_HCS=1
CONFIG_USBIP_HOST=m
# CONFIG_USBIP_DEBUG is not set
-CONFIG_USB_CDNS_SUPPORT=m
-CONFIG_USB_CDNS_HOST=y
# CONFIG_USB_CDNS3 is not set
-CONFIG_USB_CDNSP_PCI=m
-CONFIG_USB_CDNSP_HOST=y
# CONFIG_USB_MUSB_HDRC is not set
# CONFIG_USB_DWC3 is not set
# CONFIG_USB_DWC2 is not set
@@ -7241,6 +7157,7 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_SYMBOL=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_WWAN=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
@@ -7250,7 +7167,6 @@ CONFIG_USB_SERIAL_WISHBONE=m
CONFIG_USB_SERIAL_SSU100=m
CONFIG_USB_SERIAL_QT2=m
CONFIG_USB_SERIAL_UPD78F0730=m
-CONFIG_USB_SERIAL_XR=m
CONFIG_USB_SERIAL_DEBUG=m
#
@@ -7336,7 +7252,6 @@ CONFIG_MMC_BLOCK=m
CONFIG_MMC_BLOCK_MINORS=8
CONFIG_SDIO_UART=m
CONFIG_MMC_TEST=m
-CONFIG_MMC_CRYPTO=y
#
# MMC/SD/SDIO Host Controller Drivers
@@ -7444,11 +7359,6 @@ CONFIG_LEDS_TPS6105X=m
CONFIG_LEDS_SGM3140=m
#
-# Flash and Torch LED drivers
-#
-CONFIG_LEDS_RT8515=m
-
-#
# LED Triggers
#
CONFIG_LEDS_TRIGGERS=y
@@ -7472,12 +7382,6 @@ CONFIG_LEDS_TRIGGER_PANIC=y
CONFIG_LEDS_TRIGGER_NETDEV=m
CONFIG_LEDS_TRIGGER_PATTERN=m
CONFIG_LEDS_TRIGGER_AUDIO=m
-CONFIG_LEDS_TRIGGER_TTY=m
-
-#
-# LED Blink
-#
-CONFIG_LEDS_BLINK=y
CONFIG_ACCESSIBILITY=y
CONFIG_A11Y_BRAILLE_CONSOLE=y
@@ -7546,6 +7450,7 @@ CONFIG_EDAC_LEGACY_SYSFS=y
CONFIG_EDAC_DECODE_MCE=m
CONFIG_EDAC_GHES=y
CONFIG_EDAC_AMD64=m
+# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set
CONFIG_EDAC_E752X=m
CONFIG_EDAC_I82975X=m
CONFIG_EDAC_I3000=m
@@ -7641,6 +7546,7 @@ CONFIG_RTC_DRV_DS1390=m
CONFIG_RTC_DRV_MAX6916=m
CONFIG_RTC_DRV_R9701=m
CONFIG_RTC_DRV_RX4581=m
+CONFIG_RTC_DRV_RX6110=m
CONFIG_RTC_DRV_RS5C348=m
CONFIG_RTC_DRV_MAX6902=m
CONFIG_RTC_DRV_PCF2123=m
@@ -7655,7 +7561,6 @@ CONFIG_RTC_DRV_DS3232_HWMON=y
CONFIG_RTC_DRV_PCF2127=m
CONFIG_RTC_DRV_RV3029C2=m
CONFIG_RTC_DRV_RV3029_HWMON=y
-CONFIG_RTC_DRV_RX6110=m
#
# Platform RTC drivers
@@ -7686,6 +7591,7 @@ CONFIG_RTC_DRV_V3020=m
CONFIG_RTC_DRV_WM831X=m
CONFIG_RTC_DRV_WM8350=m
CONFIG_RTC_DRV_PCF50633=m
+CONFIG_RTC_DRV_AB3100=m
CONFIG_RTC_DRV_CROS_EC=m
#
@@ -7713,19 +7619,18 @@ CONFIG_DMA_ACPI=y
CONFIG_ALTERA_MSGDMA=m
CONFIG_INTEL_IDMA64=m
CONFIG_INTEL_IDXD=m
-CONFIG_INTEL_IDXD_SVM=y
CONFIG_INTEL_IOATDMA=m
CONFIG_PLX_DMA=m
+CONFIG_XILINX_ZYNQMP_DPDMA=m
CONFIG_QCOM_HIDMA_MGMT=m
CONFIG_QCOM_HIDMA=m
CONFIG_DW_DMAC_CORE=y
-CONFIG_DW_DMAC=m
+CONFIG_DW_DMAC=y
CONFIG_DW_DMAC_PCI=y
CONFIG_DW_EDMA=m
CONFIG_DW_EDMA_PCIE=m
CONFIG_HSU_DMA=y
CONFIG_SF_PDMA=m
-CONFIG_INTEL_LDMA=y
#
# DMA Clients
@@ -7741,7 +7646,6 @@ CONFIG_SYNC_FILE=y
# CONFIG_SW_SYNC is not set
CONFIG_UDMABUF=y
# CONFIG_DMABUF_MOVE_NOTIFY is not set
-# CONFIG_DMABUF_DEBUG is not set
# CONFIG_DMABUF_SELFTESTS is not set
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_HEAPS_SYSTEM=y
@@ -7750,8 +7654,6 @@ CONFIG_DMABUF_HEAPS_CMA=y
CONFIG_DCA=m
CONFIG_AUXDISPLAY=y
-CONFIG_CHARLCD=m
-CONFIG_HD44780_COMMON=m
CONFIG_HD44780=m
CONFIG_KS0108=m
CONFIG_KS0108_PORT=0x378
@@ -7759,7 +7661,6 @@ CONFIG_KS0108_DELAY=2
CONFIG_CFAG12864B=m
CONFIG_CFAG12864B_RATE=20
CONFIG_IMG_ASCII_LCD=m
-CONFIG_LCD2S=m
CONFIG_PARPORT_PANEL=m
CONFIG_PANEL_PARPORT=0
CONFIG_PANEL_PROFILE=5
@@ -7768,6 +7669,7 @@ CONFIG_PANEL_PROFILE=5
# CONFIG_CHARLCD_BL_ON is not set
CONFIG_CHARLCD_BL_FLASH=y
CONFIG_PANEL=m
+CONFIG_CHARLCD=m
CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_UIO_PDRV_GENIRQ=m
@@ -7794,9 +7696,7 @@ CONFIG_IRQ_BYPASS_MANAGER=m
CONFIG_VIRT_DRIVERS=y
CONFIG_VBOXGUEST=m
CONFIG_NITRO_ENCLAVES=m
-CONFIG_ACRN_HSM=m
CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI_LIB=m
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_PCI_LEGACY=y
@@ -7810,7 +7710,6 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
CONFIG_VDPA=m
CONFIG_VDPA_SIM=m
-CONFIG_VDPA_SIM_NET=m
CONFIG_IFCVF=m
CONFIG_MLX5_VDPA=y
CONFIG_MLX5_VDPA_NET=m
@@ -7838,7 +7737,7 @@ CONFIG_HYPERV_BALLOON=m
#
CONFIG_XEN_BALLOON=y
CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
-CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
+CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512
CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
CONFIG_XEN_DEV_EVTCHN=m
CONFIG_XEN_BACKEND=y
@@ -8000,15 +7899,11 @@ CONFIG_KPC2000_SPI=m
CONFIG_KPC2000_I2C=m
CONFIG_KPC2000_DMA=m
CONFIG_QLGE=m
-CONFIG_WIMAX=m
-CONFIG_WIMAX_DEBUG_LEVEL=8
-CONFIG_WIMAX_I2400M=m
-CONFIG_WIMAX_I2400M_USB=m
-CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
CONFIG_WFX=m
CONFIG_X86_PLATFORM_DEVICES=y
CONFIG_ACPI_WMI=m
CONFIG_WMI_BMOF=m
+CONFIG_ALIENWARE_WMI=m
CONFIG_HUAWEI_WMI=m
CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
CONFIG_INTEL_WMI_THUNDERBOLT=m
@@ -8018,7 +7913,6 @@ CONFIG_XIAOMI_WMI=m
CONFIG_ACERHDF=m
CONFIG_ACER_WIRELESS=m
CONFIG_ACER_WMI=m
-CONFIG_AMD_PMC=m
CONFIG_APPLE_GMUX=m
CONFIG_ASUS_LAPTOP=m
CONFIG_ASUS_WIRELESS=m
@@ -8026,21 +7920,18 @@ CONFIG_ASUS_WMI=m
CONFIG_ASUS_NB_WMI=m
CONFIG_EEEPC_LAPTOP=m
CONFIG_EEEPC_WMI=m
-CONFIG_X86_PLATFORM_DRIVERS_DELL=y
-CONFIG_ALIENWARE_WMI=m
CONFIG_DCDBAS=m
-CONFIG_DELL_LAPTOP=m
-# CONFIG_DELL_RBU is not set
-CONFIG_DELL_RBTN=m
CONFIG_DELL_SMBIOS=m
CONFIG_DELL_SMBIOS_WMI=y
CONFIG_DELL_SMBIOS_SMM=y
+CONFIG_DELL_LAPTOP=m
+CONFIG_DELL_RBTN=m
+# CONFIG_DELL_RBU is not set
CONFIG_DELL_SMO8800=m
CONFIG_DELL_WMI=m
-CONFIG_DELL_WMI_AIO=m
CONFIG_DELL_WMI_DESCRIPTOR=m
+CONFIG_DELL_WMI_AIO=m
CONFIG_DELL_WMI_LED=m
-CONFIG_DELL_WMI_SYSMAN=m
CONFIG_AMILO_RFKILL=m
CONFIG_FUJITSU_LAPTOP=m
CONFIG_FUJITSU_TABLET=m
@@ -8065,6 +7956,10 @@ CONFIG_INTEL_INT0002_VGPIO=m
CONFIG_INTEL_MENLOW=m
CONFIG_INTEL_OAKTRAIL=m
CONFIG_INTEL_VBTN=m
+CONFIG_SURFACE3_WMI=m
+CONFIG_SURFACE_3_BUTTON=m
+CONFIG_SURFACE_3_POWER_OPREGION=m
+CONFIG_SURFACE_PRO3_BUTTON=m
CONFIG_MSI_LAPTOP=m
CONFIG_MSI_WMI=m
CONFIG_PCENGINES_APU2=m
@@ -8099,11 +7994,10 @@ CONFIG_INTEL_TURBO_MAX_3=y
CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
CONFIG_INTEL_BXTWC_PMIC_TMU=m
CONFIG_INTEL_CHTDC_TI_PWRBTN=m
+CONFIG_INTEL_MFLD_THERMAL=m
+CONFIG_INTEL_MID_POWER_BUTTON=m
CONFIG_INTEL_MRFLD_PWRBTN=m
CONFIG_INTEL_PMC_CORE=y
-CONFIG_INTEL_PMT_CLASS=m
-CONFIG_INTEL_PMT_TELEMETRY=m
-CONFIG_INTEL_PMT_CRASHLOG=m
CONFIG_INTEL_PUNIT_IPC=m
CONFIG_INTEL_SCU_IPC=y
CONFIG_INTEL_SCU=y
@@ -8138,18 +8032,6 @@ CONFIG_WILCO_EC_TELEMETRY=m
CONFIG_MELLANOX_PLATFORM=y
CONFIG_MLXREG_HOTPLUG=m
CONFIG_MLXREG_IO=m
-CONFIG_SURFACE_PLATFORMS=y
-CONFIG_SURFACE3_WMI=m
-CONFIG_SURFACE_3_BUTTON=m
-CONFIG_SURFACE_3_POWER_OPREGION=m
-CONFIG_SURFACE_ACPI_NOTIFY=m
-CONFIG_SURFACE_AGGREGATOR_CDEV=m
-CONFIG_SURFACE_GPE=m
-CONFIG_SURFACE_HOTPLUG=m
-CONFIG_SURFACE_PRO3_BUTTON=m
-CONFIG_SURFACE_AGGREGATOR=m
-CONFIG_SURFACE_AGGREGATOR_BUS=y
-# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set
CONFIG_HAVE_CLK=y
CONFIG_CLKDEV_LOOKUP=y
CONFIG_HAVE_CLK_PREPARE=y
@@ -8165,7 +8047,6 @@ CONFIG_COMMON_CLK_S2MPS11=m
CONFIG_CLK_TWL6040=m
CONFIG_COMMON_CLK_PALMAS=m
CONFIG_COMMON_CLK_PWM=m
-CONFIG_XILINX_VCU=m
CONFIG_HWSPINLOCK=y
#
@@ -8187,7 +8068,6 @@ CONFIG_IOMMU_SUPPORT=y
#
# Generic IOMMU Pagetable Support
#
-CONFIG_IOMMU_IO_PGTABLE=y
# end of Generic IOMMU Pagetable Support
# CONFIG_IOMMU_DEBUGFS is not set
@@ -8216,7 +8096,6 @@ CONFIG_REMOTEPROC_CDEV=y
#
CONFIG_RPMSG=m
CONFIG_RPMSG_CHAR=m
-CONFIG_RPMSG_NS=m
CONFIG_RPMSG_QCOM_GLINK=m
CONFIG_RPMSG_QCOM_GLINK_RPM=m
CONFIG_RPMSG_VIRTIO=m
@@ -8242,6 +8121,11 @@ CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
# end of Amlogic SoC drivers
#
+# Aspeed SoC drivers
+#
+# end of Aspeed SoC drivers
+
+#
# Broadcom SoC drivers
#
# end of Broadcom SoC drivers
@@ -8257,11 +8141,6 @@ CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
# end of i.MX SoC drivers
#
-# Enable LiteX SoC Builder specific drivers
-#
-# end of Enable LiteX SoC Builder specific drivers
-
-#
# Qualcomm SoC drivers
#
CONFIG_QCOM_QMI_HELPERS=m
@@ -8272,6 +8151,7 @@ CONFIG_SOC_TI=y
#
# Xilinx SoC drivers
#
+CONFIG_XILINX_VCU=m
# end of Xilinx SoC drivers
# end of SOC (System On Chip) specific Drivers
@@ -8314,9 +8194,7 @@ CONFIG_EXTCON_RT8973A=m
CONFIG_EXTCON_SM5502=m
CONFIG_EXTCON_USB_GPIO=m
CONFIG_EXTCON_USBC_CROS_EC=m
-CONFIG_EXTCON_USBC_TUSB320=m
CONFIG_MEMORY=y
-CONFIG_FPGA_DFL_EMIF=m
CONFIG_IIO=m
CONFIG_IIO_BUFFER=y
CONFIG_IIO_BUFFER_CB=m
@@ -8400,6 +8278,8 @@ CONFIG_AD7887=m
CONFIG_AD7923=m
CONFIG_AD7949=m
CONFIG_AD799X=m
+CONFIG_AD9467=m
+CONFIG_ADI_AXI_ADC=m
CONFIG_AXP20X_ADC=m
CONFIG_AXP288_ADC=m
CONFIG_CC10001_ADC=m
@@ -8423,7 +8303,6 @@ CONFIG_MAX9611=m
CONFIG_MCP320X=m
CONFIG_MCP3422=m
CONFIG_MCP3911=m
-CONFIG_MEDIATEK_MT6360_ADC=m
CONFIG_MEN_Z188_ADC=m
CONFIG_MP2629_ADC=m
CONFIG_NAU7802=m
@@ -8521,7 +8400,6 @@ CONFIG_AD5755=m
CONFIG_AD5758=m
CONFIG_AD5761=m
CONFIG_AD5764=m
-CONFIG_AD5766=m
CONFIG_AD5770R=m
CONFIG_AD5791=m
CONFIG_AD7303=m
@@ -8722,7 +8600,6 @@ CONFIG_SENSORS_HMC5843_SPI=m
CONFIG_SENSORS_RM3100=m
CONFIG_SENSORS_RM3100_I2C=m
CONFIG_SENSORS_RM3100_SPI=m
-CONFIG_YAMAHA_YAS530=m
# end of Magnetometer sensors
#
@@ -8750,7 +8627,6 @@ CONFIG_IIO_SYSFS_TRIGGER=m
# Linear and angular position sensors
#
CONFIG_IQS624_POS=m
-CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m
# end of Linear and angular position sensors
#
@@ -8855,7 +8731,6 @@ CONFIG_NTB_MSI=y
CONFIG_NTB_AMD=m
CONFIG_NTB_IDT=m
CONFIG_NTB_INTEL=m
-CONFIG_NTB_EPF=m
CONFIG_NTB_SWITCHTEC=m
# CONFIG_NTB_PINGPONG is not set
# CONFIG_NTB_TOOL is not set
@@ -8868,7 +8743,6 @@ CONFIG_PWM_SYSFS=y
# CONFIG_PWM_DEBUG is not set
CONFIG_PWM_CRC=y
CONFIG_PWM_CROS_EC=m
-CONFIG_PWM_DWC=m
CONFIG_PWM_IQS620A=m
CONFIG_PWM_LP3943=m
CONFIG_PWM_LPSS=m
@@ -8910,8 +8784,6 @@ CONFIG_POWERCAP=y
CONFIG_INTEL_RAPL_CORE=m
CONFIG_INTEL_RAPL=m
CONFIG_IDLE_INJECT=y
-CONFIG_DTPM=y
-CONFIG_DTPM_CPU=y
CONFIG_MCB=m
CONFIG_MCB_PCI=m
CONFIG_MCB_LPC=m
@@ -8926,7 +8798,6 @@ CONFIG_RAS_CEC=y
# CONFIG_RAS_CEC_DEBUG is not set
CONFIG_USB4=m
# CONFIG_USB4_DEBUGFS_WRITE is not set
-# CONFIG_USB4_DMA_TEST is not set
#
# Android
@@ -8954,7 +8825,6 @@ CONFIG_DEV_DAX_PMEM_COMPAT=m
CONFIG_NVMEM=y
CONFIG_NVMEM_SYSFS=y
CONFIG_RAVE_SP_EEPROM=m
-CONFIG_NVMEM_RMEM=m
#
# HW tracing support
@@ -8992,7 +8862,6 @@ CONFIG_FPGA_DFL_FME_MGR=m
CONFIG_FPGA_DFL_FME_BRIDGE=m
CONFIG_FPGA_DFL_FME_REGION=m
CONFIG_FPGA_DFL_AFU=m
-CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
CONFIG_FPGA_DFL_PCI=m
CONFIG_TEE=m
@@ -9081,11 +8950,11 @@ CONFIG_F2FS_FS_XATTR=y
CONFIG_F2FS_FS_POSIX_ACL=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_CHECK_FS=y
+# CONFIG_F2FS_IO_TRACE is not set
# CONFIG_F2FS_FAULT_INJECTION is not set
CONFIG_F2FS_FS_COMPRESSION=y
CONFIG_F2FS_FS_LZO=y
CONFIG_F2FS_FS_LZ4=y
-CONFIG_F2FS_FS_LZ4HC=y
CONFIG_F2FS_FS_ZSTD=y
CONFIG_F2FS_FS_LZORLE=y
CONFIG_ZONEFS_FS=m
@@ -9127,7 +8996,6 @@ CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_OVERLAY_FS_INDEX=y
CONFIG_OVERLAY_FS_XINO_AUTO=y
CONFIG_OVERLAY_FS_METACOPY=y
-# CONFIG_OVERLAY_FS_UNPRIVILEGED is not set
#
# Caches
@@ -9252,7 +9120,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
# CONFIG_ROMFS_BACKED_BY_BOTH is not set
CONFIG_ROMFS_ON_BLOCK=y
CONFIG_PSTORE=y
-CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
CONFIG_PSTORE_DEFLATE_COMPRESS=m
CONFIG_PSTORE_LZO_COMPRESS=m
CONFIG_PSTORE_LZ4_COMPRESS=m
@@ -9270,6 +9137,11 @@ CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
# CONFIG_PSTORE_PMSG is not set
# CONFIG_PSTORE_FTRACE is not set
CONFIG_PSTORE_RAM=m
+CONFIG_PSTORE_ZONE=m
+CONFIG_PSTORE_BLK=m
+CONFIG_PSTORE_BLK_BLKDEV=""
+CONFIG_PSTORE_BLK_KMSG_SIZE=64
+CONFIG_PSTORE_BLK_MAX_REASON=2
# CONFIG_SYSV_FS is not set
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
@@ -9318,7 +9190,6 @@ CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
-CONFIG_NFS_V4_2_SSC_HELPER=m
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_BACKCHANNEL=y
@@ -9340,7 +9211,6 @@ CONFIG_CIFS_DEBUG=y
# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_SWN_UPCALL=y
# CONFIG_CIFS_SMB_DIRECT is not set
CONFIG_CIFS_FSCACHE=y
CONFIG_CODA_FS=m
@@ -9451,8 +9321,8 @@ CONFIG_SECURITY_TOMOYO=y
CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
-CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/usr/bin/tomoyo-init"
-CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/usr/lib/systemd/systemd"
+CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
+CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set
CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_APPARMOR_HASH=y
@@ -9540,6 +9410,7 @@ CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_SIMD=m
+CONFIG_CRYPTO_GLUE_HELPER_X86=m
CONFIG_CRYPTO_ENGINE=m
#
@@ -9611,7 +9482,10 @@ CONFIG_CRYPTO_POLY1305_X86_64=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA1_SSSE3=m
CONFIG_CRYPTO_SHA256_SSSE3=m
@@ -9621,6 +9495,7 @@ CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_STREEBOG=m
+CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
@@ -9645,6 +9520,7 @@ CONFIG_CRYPTO_CAST6_AVX_X86_64=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_DES3_EDE_X86_64=m
CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_CHACHA20=m
CONFIG_CRYPTO_CHACHA20_X86_64=m
CONFIG_CRYPTO_SERPENT=m
@@ -9726,7 +9602,6 @@ CONFIG_CRYPTO_DEV_QAT=m
CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
CONFIG_CRYPTO_DEV_QAT_C3XXX=m
CONFIG_CRYPTO_DEV_QAT_C62X=m
-CONFIG_CRYPTO_DEV_QAT_4XXX=m
CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
CONFIG_CRYPTO_DEV_QAT_C62XVF=m
@@ -9757,8 +9632,6 @@ CONFIG_SYSTEM_TRUSTED_KEYS=""
CONFIG_SECONDARY_TRUSTED_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
-CONFIG_SYSTEM_REVOCATION_LIST=y
-CONFIG_SYSTEM_REVOCATION_KEYS=""
# end of Certificates for signature checking
CONFIG_BINARY_PRINTF=y
@@ -9807,7 +9680,7 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
CONFIG_LZ4_COMPRESS=y
-CONFIG_LZ4HC_COMPRESS=y
+CONFIG_LZ4HC_COMPRESS=m
CONFIG_LZ4_DECOMPRESS=y
CONFIG_ZSTD_COMPRESS=y
CONFIG_ZSTD_DECOMPRESS=y
@@ -9831,7 +9704,6 @@ CONFIG_GENERIC_ALLOCATOR=y
CONFIG_REED_SOLOMON=m
CONFIG_REED_SOLOMON_ENC8=y
CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_BCH=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
@@ -9863,7 +9735,6 @@ CONFIG_CMA_SIZE_SEL_MBYTES=y
# CONFIG_CMA_SIZE_SEL_MAX is not set
CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_API_DEBUG is not set
-# CONFIG_DMA_MAP_BENCHMARK is not set
CONFIG_SGL_ALLOC=y
CONFIG_CHECK_SIGNATURE=y
CONFIG_CPU_RMAP=y
@@ -9933,6 +9804,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
# Compile-time checks and compiler options
#
# CONFIG_DEBUG_INFO is not set
+CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=2048
CONFIG_STRIP_ASM_SYMS=y
# CONFIG_READABLE_ASM is not set
@@ -9960,8 +9832,6 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
# CONFIG_UBSAN is not set
CONFIG_HAVE_ARCH_KCSAN=y
-CONFIG_HAVE_KCSAN_COMPILER=y
-# CONFIG_KCSAN is not set
# end of Generic Kernel Debugging Instruments
CONFIG_DEBUG_KERNEL=y
@@ -9995,22 +9865,14 @@ CONFIG_DEBUG_VM=y
# CONFIG_DEBUG_VM_PGFLAGS is not set
# CONFIG_DEBUG_VM_PGTABLE is not set
CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-CONFIG_DEBUG_VIRTUAL=y
+# CONFIG_DEBUG_VIRTUAL is not set
CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
-# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
CONFIG_HAVE_ARCH_KASAN=y
CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
CONFIG_CC_HAS_KASAN_GENERIC=y
CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
# CONFIG_KASAN is not set
-CONFIG_HAVE_ARCH_KFENCE=y
-CONFIG_KFENCE=y
-CONFIG_KFENCE_STATIC_KEYS=y
-CONFIG_KFENCE_SAMPLE_INTERVAL=0
-CONFIG_KFENCE_NUM_OBJECTS=255
-CONFIG_KFENCE_STRESS_TEST_FAULTS=0
# end of Memory Debugging
CONFIG_DEBUG_SHIRQ=y
@@ -10069,7 +9931,6 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y
# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
# end of Lock Debugging (spinlocks, mutexes, etc...)
-# CONFIG_DEBUG_IRQFLAGS is not set
CONFIG_STACKTRACE=y
# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -10109,17 +9970,16 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_HAVE_FENTRY=y
-CONFIG_HAVE_OBJTOOL_MCOUNT=y
CONFIG_HAVE_C_RECORDMCOUNT=y
CONFIG_TRACER_MAX_TRACE=y
CONFIG_TRACE_CLOCK=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
@@ -10151,17 +10011,14 @@ CONFIG_DYNAMIC_EVENTS=y
CONFIG_PROBE_EVENTS=y
# CONFIG_BPF_KPROBE_OVERRIDE is not set
CONFIG_FTRACE_MCOUNT_RECORD=y
-CONFIG_FTRACE_MCOUNT_USE_CC=y
CONFIG_SYNTH_EVENTS=y
# CONFIG_HIST_TRIGGERS is not set
# CONFIG_TRACE_EVENT_INJECT is not set
# CONFIG_TRACEPOINT_BENCHMARK is not set
# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_TRACE_EVAL_MAP_FILE is not set
-# CONFIG_FTRACE_RECORD_RECURSION is not set
# CONFIG_FTRACE_STARTUP_TEST is not set
# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set
# CONFIG_MMIOTRACE_TEST is not set
# CONFIG_PREEMPTIRQ_DELAY_TEST is not set
# CONFIG_SYNTH_EVENT_GEN_TEST is not set
@@ -10246,7 +10103,6 @@ CONFIG_TEST_KSTRTOX=y
# CONFIG_TEST_UDELAY is not set
# CONFIG_TEST_STATIC_KEYS is not set
# CONFIG_TEST_KMOD is not set
-# CONFIG_TEST_DEBUG_VIRTUAL is not set
# CONFIG_TEST_MEMCAT_P is not set
# CONFIG_TEST_OBJAGG is not set
# CONFIG_TEST_STACKINIT is not set