author     P.Jung  2021-08-30 09:43:19 +0000
committer  P.Jung  2021-08-30 09:43:19 +0000
commit     3ce87b282494da40184a6335c989dcc87fd714ca (patch)
tree       63571fbd4c630c78b60d52b3d6fe5ac7e10ff0da
parent     ca92096b441b30c06018840207ae99b016576879 (diff)
download   aur-3ce87b282494da40184a6335c989dcc87fd714ca.tar.gz
5.13.13
-rw-r--r--  .SRCINFO                                              16
-rw-r--r--  PKGBUILD                                              12
-rw-r--r--  cacule-5.13.patch (renamed from cacule-5.10.patch)   685
-rw-r--r--  config                                               278
4 files changed, 374 insertions, 617 deletions
diff --git a/.SRCINFO b/.SRCINFO
index 46c9b5c82d4b..987a5a10b262 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
- pkgver = 5.10.61.hardened1
+ pkgver = 5.13.13.hardened1
pkgrel = 1
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
@@ -20,16 +20,16 @@ pkgbase = linux-hardened-cacule
makedepends = graphviz
makedepends = imagemagick
options = !strip
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.10.61.tar.xz
- source = https://github.com/anthraxx/linux-hardened/releases/download/5.10.61-hardened1/linux-hardened-5.10.61-hardened1.patch
- source = cacule-5.10.patch
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.13.13.tar.xz
+ source = https://github.com/anthraxx/linux-hardened/releases/download/5.13.13-hardened1/linux-hardened-5.13.13-hardened1.patch
+ source = cacule-5.13.patch
source = cpu-patches.patch
source = config
- sha256sums = 82eae38cc5cd11dd6aaac91c02ff0d006c7bafd6d4cf5c6a791930820a3a91d1
- sha256sums = c0a15212df86b0f0432fe8f7a08dd2a28a6508b32c008fb2aaee5ba3133fe641
- sha256sums = 3d4a0602425000d18162fdd45c6f13dd1c5ef78ef3b5b7f19365a8b0cf030c3a
+ sha256sums = 5531d2200c7923c377ef2e0fb7fc44d892e3dabf302961d790c9bd1df3c83434
+ sha256sums = 839ec0e3dfe575e742314b6c959671d5265740eebc9d7a9797f1ae8ffe9564e5
+ sha256sums = 2c2851ea35a8e8628caf2caf674736af0547c737652737d6a8aebf677ae92e5e
sha256sums = 4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a
- sha256sums = 4f3152a8b04c56e1a3a823cb1afbece7bca8205493548ecfe7979d8555c22340
+ sha256sums = 49a34dfc8ee7663a8a20c614f086e16ec70e8822db27a91050fd6ffebf87a650
pkgname = linux-hardened-cacule
pkgdesc = The Security-Hardened Linux with the cacule scheduler kernel and modules
diff --git a/PKGBUILD b/PKGBUILD
index eea98fbc86e4..9f8cfc78b039 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -6,7 +6,7 @@
pkgbase=linux-hardened-cacule
-pkgver=5.10.61.hardened1
+pkgver=5.13.13.hardened1
pkgrel=1
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
@@ -22,15 +22,15 @@ _srctag=${pkgver%.*}-${pkgver##*.}
source=(
https://www.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/${_srcname}.tar.xz
https://github.com/anthraxx/linux-hardened/releases/download/${_srctag}/linux-hardened-${_srctag}.patch
- cacule-5.10.patch
+ cacule-5.13.patch
cpu-patches.patch
config # the main kernel config file
)
-sha256sums=('82eae38cc5cd11dd6aaac91c02ff0d006c7bafd6d4cf5c6a791930820a3a91d1'
- 'c0a15212df86b0f0432fe8f7a08dd2a28a6508b32c008fb2aaee5ba3133fe641'
- '3d4a0602425000d18162fdd45c6f13dd1c5ef78ef3b5b7f19365a8b0cf030c3a'
+sha256sums=('5531d2200c7923c377ef2e0fb7fc44d892e3dabf302961d790c9bd1df3c83434'
+ '839ec0e3dfe575e742314b6c959671d5265740eebc9d7a9797f1ae8ffe9564e5'
+ '2c2851ea35a8e8628caf2caf674736af0547c737652737d6a8aebf677ae92e5e'
'4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a'
- '4f3152a8b04c56e1a3a823cb1afbece7bca8205493548ecfe7979d8555c22340')
+ '49a34dfc8ee7663a8a20c614f086e16ec70e8822db27a91050fd6ffebf87a650')
export KBUILD_BUILD_HOST=archlinux
export KBUILD_BUILD_USER=$pkgbase
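Bumping a kernel package like this touches pkgver, the renamed scheduler patch, all five sha256sums, and the regenerated .SRCINFO in one commit. With standard Arch tooling the checksum and .SRCINFO churn above can be reproduced mechanically, for example:

    updpkgsums                          # from pacman-contrib: fetch sources, rewrite sha256sums=()
    makepkg --printsrcinfo > .SRCINFO   # regenerate .SRCINFO from the updated PKGBUILD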
diff --git a/cacule-5.10.patch b/cacule-5.13.patch
index eb5486e6a4e9..100bdcb0c5f6 100644
--- a/cacule-5.10.patch
+++ b/cacule-5.13.patch
@@ -1,16 +1,16 @@
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index d4b32cc32bb7..2788c5bbd870 100644
+index 68b21395a743..3f4b9c6911be 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
+@@ -1088,6 +1088,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
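The sysctl documented in the hunk above is the factor m in CacULE's ULE-style interactivity score: the score weighs how long a task has slept against how long it has actually run (CacULE reuses vruntime to hold real runtime). A minimal sketch of that calculation, simplified from the CacULE design rather than copied out of this patch:

    /* Lower score = more interactive. interactivity_factor is the sysctl
     * above; the fair.c hunk further down defaults it to 32768. Sketch only. */
    static unsigned int calc_interactivity(u64 now, struct cacule_node *cn)
    {
        u64 lifetime = now - cn->cacule_start_time;
        u64 run      = cn->vruntime | 1;     /* |1 avoids dividing by zero */
        u64 sleep    = 1;

        if (lifetime > run)
            sleep = (lifetime - run) | 1;

        if (sleep >= run)                    /* mostly sleeping: small score */
            return interactivity_factor / (sleep / run);

        /* mostly running: score lands above the factor, less interactive */
        return interactivity_factor / (run / sleep) + interactivity_factor;
    }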
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -96,13 +96,13 @@ index 000000000000..82b0847c468a
+ idle timer scheduler in order to avoid to get into priority
+ inversion problems which would deadlock the machine.
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 2660ee4b08ad..b54f0660cc86 100644
+index 32813c345115..0dc06f09715f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -456,10 +456,23 @@ struct sched_statistics {
+@@ -458,10 +458,23 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -122,15 +122,15 @@ index 2660ee4b08ad..b54f0660cc86 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
+
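The hunk above cuts the new structure off after its first field. Pieced together from the rest of this patch (cacule_start_time, last_run and vruntime all appear in later hunks), the node looks roughly like this hedged reconstruction, not a verbatim copy:

    struct cacule_node {
        struct cacule_node *next;              /* list-based runqueue, no rbtree */
        struct cacule_node *prev;              /* assumed; only 'next' is visible above */
        u64                 cacule_start_time; /* set in wake_up_new_task() */
        u64                 last_run;          /* refreshed in update_curr() */
        u64                 vruntime;          /* real runtime; MSB doubles as YIELD_MARK */
    };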
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..e79ca8c67a70 100644
+index db2c0f34aaaf..a0ef2748ee6e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,16 @@ extern unsigned int sysctl_sched_min_granularity;
+@@ -32,6 +32,16 @@ extern unsigned int sysctl_sched_latency;
+ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
- extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
+extern unsigned int interactivity_factor;
+extern unsigned int cacule_max_lifetime;
@@ -145,13 +145,13 @@ index 3c31ba88aca5..e79ca8c67a70 100644
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
diff --git a/init/Kconfig b/init/Kconfig
-index fc4c9f416fad..ff0e446221da 100644
+index a61c92066c2e..427593be8c5a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -825,6 +825,51 @@ config UCLAMP_BUCKETS_COUNT
-
+@@ -834,6 +834,51 @@ config UCLAMP_BUCKETS_COUNT
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -164,14 +164,14 @@ index fc4c9f416fad..ff0e446221da 100644
+
+config CACULE_RDB
+ bool "RDB (Response Driven Balancer)"
-+ default n
++ default y
+ depends on CACULE_SCHED
+ help
+ This is an experimental load balancer for CacULE. It is a lightweight
+ load balancer which is a replacement of CFS load balancer. It migrates
+ tasks based on their interactivity scores.
+
-+ If unsure, say N.
++ If unsure, say Y here.
+
+config RDB_INTERVAL
+ int "RDB load balancer interval"
@@ -200,7 +200,7 @@ index fc4c9f416fad..ff0e446221da 100644
#
# For architectures that want to enable the support for NUMA-affine scheduler
# balancing logic:
-@@ -1208,6 +1253,7 @@ config SCHED_AUTOGROUP
+@@ -1231,6 +1276,7 @@ config SCHED_AUTOGROUP
select CGROUPS
select CGROUP_SCHED
select FAIR_GROUP_SCHED
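For orientation, a .config built from these defaults would be expected to carry something like the fragment below (illustrative; the CONFIG_RDB_INTERVAL value is a guess since that option's text is truncated above, and the shipped config file has the final say):

    CONFIG_CACULE_SCHED=y
    CONFIG_CACULE_RDB=y
    CONFIG_RDB_INTERVAL=19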
@@ -215,37 +215,37 @@ index 38ef6d06888e..865f8dbddca8 100644
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
+ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
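For scale, HZ=2000 halves the tick period of the usual HZ=1000 desktop choice: 1,000,000,000 ns / 2000 = 500,000 ns, so a scheduler tick every 0.5 ms, bought at some extra interrupt overhead. Illustrative fragment:

    CONFIG_HZ_2000=y
    CONFIG_HZ=2000    # tick period = 1 s / 2000 = 0.5 ms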
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 679562d2f55d..b3c4594eb320 100644
+index e5858999b54d..c326d30424f9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -72,6 +72,10 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
+@@ -82,6 +82,10 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
*/
unsigned int sysctl_sched_rt_period = 1000000;
-
+
+#ifdef CONFIG_CACULE_SCHED
+int __read_mostly cacule_yield = 1;
+#endif
+
__read_mostly int scheduler_running;
-
+
/*
-@@ -3068,6 +3072,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -3578,6 +3582,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
@@ -255,12 +255,12 @@ index 679562d2f55d..b3c4594eb320 100644
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3352,6 +3361,10 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3863,6 +3872,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -268,7 +268,7 @@ index 679562d2f55d..b3c4594eb320 100644
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -4053,7 +4066,9 @@ static void sched_tick_remote(struct work_struct *work)
+@@ -4674,7 +4687,9 @@ static void sched_tick_remote(struct work_struct *work)
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr;
struct rq_flags rf;
@@ -276,29 +276,29 @@ index 679562d2f55d..b3c4594eb320 100644
u64 delta;
+#endif
int os;
-
+
/*
-@@ -4073,6 +4088,7 @@ static void sched_tick_remote(struct work_struct *work)
-
+@@ -4694,6 +4709,7 @@ static void sched_tick_remote(struct work_struct *work)
+
update_rq_clock(rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!is_idle_task(curr)) {
/*
* Make sure the next tick runs within a reasonable
-@@ -4081,6 +4097,8 @@ static void sched_tick_remote(struct work_struct *work)
+@@ -4702,6 +4718,8 @@ static void sched_tick_remote(struct work_struct *work)
delta = rq_clock_task(rq) - curr->se.exec_start;
WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
}
+#endif
+
curr->sched_class->task_tick(rq, curr, 0);
-
+
calc_load_nohz_remote(rq);
-@@ -6092,6 +6110,13 @@ static void do_sched_yield(void)
+@@ -6958,6 +6976,13 @@ static void do_sched_yield(void)
struct rq_flags rf;
struct rq *rq;
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct task_struct *curr = current;
+ struct cacule_node *cn = &curr->se.cacule_node;
@@ -307,29 +307,29 @@ index 679562d2f55d..b3c4594eb320 100644
+ cn->vruntime |= YIELD_MARK;
+#endif
rq = this_rq_lock_irq(&rf);
-
+
schedstat_inc(rq->yld_count);
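YIELD_MARK is defined near the end of this patch, in the sched.h hunk, as the top bit of a u64 (0x8000000000000000ULL). Setting it inflates the yielding task's vruntime so it compares as least interactive until YIELD_UNMARK strips the bit again at the next pick. The pair in isolation, as a sketch:

    #define YIELD_MARK   0x8000000000000000ULL  /* set MSB: sort behind everyone */
    #define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL  /* clear MSB: restore true runtime */

    static inline void cacule_mark_yielded(struct cacule_node *cn)
    {
        cn->vruntime |= YIELD_MARK;             /* done in do_sched_yield() above */
    }

    static inline void cacule_clear_yield(struct cacule_node *cn)
    {
        cn->vruntime &= YIELD_UNMARK;           /* hedged: done when picked next */
    }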
-@@ -7066,6 +7091,14 @@ void __init sched_init(void)
+@@ -8115,6 +8140,14 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
+#ifdef CONFIG_CACULE_RDB
-+ printk(KERN_INFO "CacULE CPU scheduler (RDB) v5.10-r3 by Hamad Al Marri.");
++ printk(KERN_INFO "CacULE CPU scheduler (RDB) v5.13-r3 by Hamad Al Marri.");
+#else
-+ printk(KERN_INFO "CacULE CPU scheduler v5.10-r3 by Hamad Al Marri.");
++ printk(KERN_INFO "CacULE CPU scheduler v5.13-r3 by Hamad Al Marri.");
+#endif
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
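Once the built kernel boots, the banner added above is the quickest way to confirm which CacULE variant is running, e.g. (output abridged):

    $ dmesg | grep -i cacule
    CacULE CPU scheduler (RDB) v5.13-r3 by Hamad Al Marri.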
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 70a578272436..506c0512610c 100644
+index c5aacbd492a1..adb021b7da8a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+@@ -560,8 +560,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -342,7 +342,7 @@ index 70a578272436..506c0512610c 100644
struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
-@@ -576,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+@@ -582,21 +585,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
last = __pick_last_entity(cfs_rq);
if (last)
max_vruntime = last->vruntime;
@@ -371,7 +371,7 @@ index 70a578272436..506c0512610c 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 262b02d75007..1dc6f346111c 100644
+index 7dd0d859d95b..4aa5fced8f69 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,9 +19,24 @@
@@ -384,7 +384,7 @@ index 262b02d75007..1dc6f346111c 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
+
+#ifdef CONFIG_CACULE_SCHED
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
+unsigned int __read_mostly interactivity_factor = 32768;
@@ -402,19 +402,19 @@ index 262b02d75007..1dc6f346111c 100644
@@ -82,7 +97,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
+
+#ifdef CONFIG_CACULE_SCHED
+const_debug unsigned int sysctl_sched_migration_cost = 200000UL;
+#else
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif
-
+
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
-@@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+@@ -263,6 +282,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -426,7 +426,7 @@ index 262b02d75007..1dc6f346111c 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +539,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -522,7 +549,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -435,9 +435,9 @@ index 262b02d75007..1dc6f346111c 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +595,223 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
- cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
- #endif
+@@ -585,7 +612,223 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
+ {
+ return entity_before(__node_2_se(a), __node_2_se(b));
}
+#endif /* CONFIG_CACULE_SCHED */
+
@@ -532,7 +532,7 @@ index 262b02d75007..1dc6f346111c 100644
+
+ return task_has_idle_policy(task_of(se));
+}
-
++
+/*
+ * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
+ * otherwise return -1
@@ -598,7 +598,7 @@ index 262b02d75007..1dc6f346111c 100644
+ }
+}
+#endif
-+
+
+/*
+ * Enqueue an entity
+ */
@@ -659,12 +659,12 @@ index 262b02d75007..1dc6f346111c 100644
/*
* Enqueue an entity into the rb-tree:
*/
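The comparison helper whose comment appears above ("Does se have lower interactivity score value...") is what replaces vruntime ordering: entities are ranked by score instead of an rb-tree key. Its shape, as a hedged sketch building on the calc_interactivity() sketch earlier, not the patch's verbatim body:

    /* Return 1 if se should run before curr (lower score wins), else -1. */
    static inline int entity_before_cacule(u64 now, struct cacule_node *curr,
                                           struct cacule_node *se)
    {
        if (calc_interactivity(now, se) < calc_interactivity(now, curr))
            return 1;

        return -1;
    }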
-@@ -626,16 +869,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
- return rb_entry(next, struct sched_entity, run_node);
+@@ -618,16 +861,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
+ return __node_2_se(next);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -675,35 +675,35 @@ index 262b02d75007..1dc6f346111c 100644
+ return se_of(cfs_rq->tail);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
- return rb_entry(last, struct sched_entity, run_node);
+
+ return __node_2_se(last);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -730,6 +981,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -717,6 +968,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -739,6 +991,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -726,6 +978,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -846,14 +1099,55 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -833,14 +1086,55 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -753,13 +753,13 @@ index 262b02d75007..1dc6f346111c 100644
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
+#endif
-
+
if (unlikely(!curr))
return;
-@@ -870,8 +1164,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -857,8 +1151,16 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ curr->cacule_node.last_run = now;
+ delta_fair = calc_delta_fair(delta_exec, curr);
@@ -770,18 +770,18 @@ index 262b02d75007..1dc6f346111c 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
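normalize_lifetime(), whose body the outer diff elides further up, keeps that arithmetic bounded: once a task has lived longer than cacule_max_lifetime (22000 ms by default, per the fair.c hunk above), its start time is pulled forward and its vruntime rescaled in proportion. A hedged sketch of the rescaling idea, assuming div64_u64 from linux/math64.h; the patch's actual body differs in detail:

    static void normalize_lifetime(u64 now, struct sched_entity *se)
    {
        struct cacule_node *cn = &se->cacule_node;
        u64 max_life_ns = (u64)cacule_max_lifetime * NSEC_PER_MSEC;
        u64 life = now - cn->cacule_start_time;

        if (life > max_life_ns) {
            /* cap the lifetime and shrink vruntime by the same ratio */
            cn->cacule_start_time = now - max_life_ns;
            cn->vruntime = div64_u64(cn->vruntime * max_life_ns, life);
        }
    }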
-@@ -1030,7 +1332,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1026,7 +1328,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1062,7 +1363,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1058,7 +1359,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
@@ -792,34 +792,34 @@ index 262b02d75007..1dc6f346111c 100644
se->exec_start = rq_clock_task(rq_of(cfs_rq));
+#endif
}
-
+
/**************************************************
-@@ -4129,7 +4435,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4178,7 +4484,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4140,6 +4446,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4189,6 +4495,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4171,6 +4478,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4220,6 +4527,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4229,18 +4537,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4278,18 +4586,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -827,7 +827,7 @@ index 262b02d75007..1dc6f346111c 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -836,87 +836,87 @@ index 262b02d75007..1dc6f346111c 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4249,6 +4562,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4298,6 +4611,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4263,8 +4577,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4312,8 +4626,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4285,6 +4601,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4334,6 +4650,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4329,6 +4646,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4378,6 +4695,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4353,13 +4671,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4402,13 +4720,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4368,12 +4689,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4417,12 +4738,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4382,8 +4705,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4431,8 +4754,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
@@ -934,18 +934,18 @@ index 262b02d75007..1dc6f346111c 100644
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4423,6 +4761,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4472,6 +4810,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4457,6 +4796,31 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4506,6 +4845,31 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -973,32 +973,32 @@ index 262b02d75007..1dc6f346111c 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4517,6 +4881,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4566,6 +4930,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
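With the rb-tree gone, the CacULE pick_next_entity() declared above reduces to a scan of the cfs_rq->head list for the best score. A hedged sketch (the patch's version also folds in the YIELD_MARK ordering and the currently running entity):

    static struct sched_entity *
    cacule_pick_best(struct cfs_rq *cfs_rq, u64 now)
    {
        struct cacule_node *cn, *best = cfs_rq->head;

        if (!best)
            return NULL;

        for (cn = best->next; cn; cn = cn->next)
            if (calc_interactivity(now, cn) < calc_interactivity(now, best))
                best = cn;

        return se_of(best);    /* se_of() is introduced by an earlier hunk */
    }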
-
-@@ -5608,9 +5973,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5666,9 +6031,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
assert_list_leaf_cfs_rq(rq);
-
+
hrtick_update(rq);
+
+#ifdef CONFIG_CACULE_RDB
+ update_IS(rq);
+#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5642,12 +6013,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5700,12 +6071,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -1013,7 +1013,7 @@ index 262b02d75007..1dc6f346111c 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5679,6 +6052,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5737,6 +6110,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
dequeue_throttle:
util_est_update(&rq->cfs, p, task_sleep);
hrtick_update(rq);
@@ -1022,57 +1022,57 @@ index 262b02d75007..1dc6f346111c 100644
+ update_IS(rq);
+#endif
}
-
+
#ifdef CONFIG_SMP
-@@ -5763,6 +6140,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5821,6 +6198,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5809,6 +6187,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5867,6 +6245,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6485,6 +6864,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6569,6 +6948,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6718,6 +7098,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6823,6 +7203,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6740,6 +7121,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
- int want_affine = 0;
- int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
-
+@@ -6847,6 +7228,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+ /* SD_flags and WF_flags share the first nibble */
+ int sd_flag = wake_flags & 0xF;
+
+#if !defined(CONFIG_CACULE_SCHED)
- if (sd_flag & SD_BALANCE_WAKE) {
+ if (wake_flags & WF_TTWU) {
record_wakee(p);
-
-@@ -6752,6 +7134,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
-
+
+@@ -6859,6 +7241,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6799,6 +7182,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6905,6 +7288,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -1080,31 +1080,31 @@ index 262b02d75007..1dc6f346111c 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6824,6 +7208,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6930,6 +7314,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6869,6 +7254,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6975,6 +7360,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6947,6 +7333,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -7053,6 +7439,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6955,9 +7342,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7061,9 +7448,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -1114,23 +1114,23 @@ index 262b02d75007..1dc6f346111c 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6971,10 +7361,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7077,10 +7467,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7004,6 +7396,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7110,6 +7502,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1142,14 +1142,14 @@ index 262b02d75007..1dc6f346111c 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -7013,11 +7410,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7119,11 +7516,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1157,55 +1157,55 @@ index 262b02d75007..1dc6f346111c 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7032,6 +7432,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7138,6 +7538,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7093,6 +7494,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7199,6 +7600,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
-
+
+ /*
+ * Here we picked a sched_entity starting from
+ * the same group of curr, but the task could
+ * be a child of the selected sched_entity.
+ */
p = task_of(se);
-
+
/*
-@@ -7103,6 +7509,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7209,6 +7615,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
if (prev != p) {
struct sched_entity *pse = &prev->se;
-
+
+ /* while se and pse are not in the same group */
while (!(cfs_rq = is_same_group(se, pse))) {
int se_depth = se->depth;
int pse_depth = pse->depth;
-@@ -7117,6 +7524,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7223,6 +7630,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
}
}
-
+
+ /* Here we reached the point were both
+ * sched_entities are in the same group.
+ */
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
}
-@@ -7127,6 +7537,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7233,6 +7643,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
if (prev)
put_prev_task(rq, prev);
-
+
+ /* Going down the hierarchy */
do {
se = pick_next_entity(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
-@@ -7136,6 +7547,14 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7242,6 +7653,15 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
p = task_of(se);
-
+
done: __maybe_unused;
+#ifdef CONFIG_CACULE_SCHED
+ if (prev)
@@ -1214,13 +1214,14 @@ index 262b02d75007..1dc6f346111c 100644
+#ifdef CONFIG_CACULE_RDB
+ update_IS(rq);
+#endif
++
+#endif
#ifdef CONFIG_SMP
/*
* Move the next running task to the front of
-@@ -7153,6 +7572,11 @@ done: __maybe_unused;
+@@ -7259,6 +7679,11 @@ done: __maybe_unused;
return p;
-
+
idle:
+#ifdef CONFIG_CACULE_RDB
+ WRITE_ONCE(rq->max_IS_score, ~0);
@@ -1229,8 +1230,8 @@ index 262b02d75007..1dc6f346111c 100644
+
if (!rf)
return NULL;
-
-@@ -7206,7 +7630,10 @@ static void yield_task_fair(struct rq *rq)
+
+@@ -7312,7 +7737,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1238,274 +1239,270 @@ index 262b02d75007..1dc6f346111c 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7214,7 +7641,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7320,7 +7748,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7230,7 +7659,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7336,7 +7766,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7241,8 +7672,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7347,8 +7779,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7451,6 +7884,7 @@ struct lb_env {
+
+@@ -7556,6 +7990,7 @@ struct lb_env {
struct list_head tasks;
};
-
+
+#if !defined(CONFIG_CACULE_RDB)
/*
* Is this task likely cache-hot:
*/
-@@ -7470,6 +7904,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7575,6 +8010,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7477,6 +7912,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7582,6 +8018,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -7854,6 +8290,7 @@ static void attach_tasks(struct lb_env *env)
-
+@@ -7975,6 +8412,7 @@ static void attach_tasks(struct lb_env *env)
+
rq_unlock(env->dst_rq, &rf);
}
+#endif
-
+
#ifdef CONFIG_NO_HZ_COMMON
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
-@@ -7899,6 +8336,7 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
+@@ -8024,6 +8462,7 @@ static inline void update_blocked_load_tick(struct rq *rq) {}
static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
#endif
-
+
+#if !defined(CONFIG_CACULE_RDB)
static bool __update_blocked_others(struct rq *rq, bool *done)
{
const struct sched_class *curr_class;
-@@ -7924,6 +8362,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
-
+@@ -8049,9 +8488,11 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
+
return decayed;
}
+#endif
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-
-@@ -7944,6 +8383,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
- return true;
- }
-
+
+#if !defined(CONFIG_CACULE_RDB)
static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq, *pos;
-@@ -7983,6 +8423,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
-
+@@ -8091,6 +8532,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
+
return decayed;
}
+#endif
-
+
/*
* Compute the hierarchical load factor for cfs_rq and all its ascendants.
-@@ -8049,6 +8490,7 @@ static unsigned long task_h_load(struct task_struct *p)
+@@ -8157,6 +8599,7 @@ static unsigned long task_h_load(struct task_struct *p)
}
#endif
-
+
+#if !defined(CONFIG_CACULE_RDB)
static void update_blocked_averages(int cpu)
{
bool decayed = false, done = true;
-@@ -8066,6 +8508,7 @@ static void update_blocked_averages(int cpu)
+@@ -8175,6 +8618,7 @@ static void update_blocked_averages(int cpu)
cpufreq_update_util(rq, 0);
rq_unlock_irqrestore(rq, &rf);
}
+#endif
-
+
/********** Helpers for find_busiest_group ************************/
-
-@@ -8400,7 +8843,9 @@ static bool update_nohz_stats(struct rq *rq, bool force)
- if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
- return true;
-
-+#if !defined(CONFIG_CACULE_RDB)
- update_blocked_averages(cpu);
-+#endif
-
- return rq->has_blocked_load;
- #else
-@@ -9211,6 +9656,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+
+@@ -9278,6 +9722,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* different in groups.
*/
-
+
+#if !defined(CONFIG_CACULE_RDB)
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
-@@ -9476,6 +9922,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
-
+@@ -9546,6 +9991,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+
return busiest;
}
+#endif
-
+
/*
* Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
-@@ -9495,6 +9942,7 @@ asym_active_balance(struct lb_env *env)
- sched_asym_prefer(env->dst_cpu, env->src_cpu);
+@@ -9582,6 +10028,7 @@ imbalanced_active_balance(struct lb_env *env)
+ return 0;
}
-
+
+#if !defined(CONFIG_CACULE_RDB)
- static inline bool
- voluntary_active_balance(struct lb_env *env)
+ static int need_active_balance(struct lb_env *env)
{
-@@ -9843,6 +10291,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ struct sched_domain *sd = env->sd;
+@@ -9914,6 +10361,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
out:
return ld_moved;
}
+#endif
-
+
static inline unsigned long
get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
-@@ -9881,6 +10330,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
+@@ -9952,6 +10400,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
*next_balance = next;
}
-
+
+#if !defined(CONFIG_CACULE_RDB)
/*
* active_load_balance_cpu_stop is run by the CPU stopper. It pushes
* running tasks off the busiest CPU onto idle CPUs. It requires at
-@@ -9972,6 +10422,7 @@ static int active_load_balance_cpu_stop(void *data)
+@@ -10037,6 +10486,7 @@ static int active_load_balance_cpu_stop(void *data)
}
-
+
static DEFINE_SPINLOCK(balancing);
+#endif
-
+
/*
* Scale the max load_balance interval with the number of CPUs in the system.
-@@ -9982,6 +10433,7 @@ void update_max_interval(void)
+@@ -10047,6 +10497,7 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}
-
+
+#if !defined(CONFIG_CACULE_RDB)
/*
* It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so.
-@@ -10087,6 +10539,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
- #endif
- }
+@@ -10139,6 +10590,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+ rq->next_balance = next_balance;
+
}
+#endif
-
+
static inline int on_null_domain(struct rq *rq)
{
-@@ -10116,6 +10569,7 @@ static inline int find_new_ilb(void)
+@@ -10172,6 +10624,7 @@ static inline int find_new_ilb(void)
return nr_cpu_ids;
}
-
+
+#if !defined(CONFIG_CACULE_RDB)
/*
* Kick a CPU to do the nohz balancing, if it is time for it. We pick any
* idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
-@@ -10266,6 +10720,7 @@ static void nohz_balancer_kick(struct rq *rq)
+@@ -10322,6 +10775,7 @@ static void nohz_balancer_kick(struct rq *rq)
if (flags)
kick_ilb(flags);
}
+#endif /* CONFIG_CACULE_RDB */
-
+
static void set_cpu_sd_state_busy(int cpu)
{
-@@ -10373,6 +10828,7 @@ void nohz_balance_enter_idle(int cpu)
- WRITE_ONCE(nohz.has_blocked, 1);
- }
-
+@@ -10442,11 +10896,17 @@ static bool update_nohz_stats(struct rq *rq)
+ if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
+ return true;
+
+#if !defined(CONFIG_CACULE_RDB)
+ update_blocked_averages(cpu);
++#endif
+
+ return rq->has_blocked_load;
+ }
+
++#ifdef CONFIG_CACULE_RDB
++static int idle_try_pull_any(struct cfs_rq *cfs_rq);
++#endif
++
/*
* Internal function that runs load balance for all idle cpus. The load balance
* can be a simple update of blocked load or a complete load balance with
-@@ -10442,6 +10898,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
-
+@@ -10516,7 +10976,11 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ rq_unlock_irqrestore(rq, &rf);
+
if (flags & NOHZ_BALANCE_KICK)
++#if !defined(CONFIG_CACULE_RDB)
rebalance_domains(rq, CPU_IDLE);
-+
++#else
++ idle_try_pull_any(&rq->cfs);
++#endif
}
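Here the RDB build swaps the whole rebalance_domains() pass for idle_try_pull_any(): instead of equalizing load, an idle CPU pulls one eligible task from another runqueue. A hedged sketch of that shape, using can_migrate_task() and the rq->to_migrate_task field this patch adds (locking and error paths elided; the candidate being cached by update_IS() is an assumption):

    static int idle_try_pull_any(struct cfs_rq *cfs_rq)
    {
        struct rq *this_rq = rq_of(cfs_rq);
        int this_cpu = cpu_of(this_rq), cpu;

        for_each_online_cpu(cpu) {
            struct rq *src_rq = cpu_rq(cpu);
            struct task_struct *p = READ_ONCE(src_rq->to_migrate_task);

            if (src_rq == this_rq || !p)
                continue;

            if (!can_migrate_task(p, this_cpu, src_rq))
                continue;

            deactivate_task(src_rq, p, 0);   /* detach from the busy rq */
            set_task_cpu(p, this_cpu);
            activate_task(this_rq, p, 0);    /* attach to this idle rq  */
            return 1;
        }

        return 0;
    }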
-
+
if (time_after(next_balance, rq->next_balance)) {
-@@ -10458,6 +10915,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
- if (likely(update_next_balance))
- nohz.next_balance = next_balance;
-
+@@ -10542,6 +11006,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ WRITE_ONCE(nohz.has_blocked, 1);
+ }
+
+#if !defined(CONFIG_CACULE_RDB)
- /* Newly idle CPU doesn't need an update */
- if (idle != CPU_NEWLY_IDLE) {
- update_blocked_averages(this_cpu);
-@@ -10466,6 +10924,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
-
- if (flags & NOHZ_BALANCE_KICK)
- rebalance_domains(this_rq, CPU_IDLE);
-+#endif
-
- WRITE_ONCE(nohz.next_blocked,
- now + msecs_to_jiffies(LOAD_AVG_PERIOD));
-@@ -10513,9 +10972,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
+ /*
+ * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
+ * rebalancing for all the cpus for whom scheduler ticks are stopped.
+@@ -10562,6 +11027,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+
+ return true;
+ }
++#endif
+
+ /*
+ * Check if we need to run the ILB for updating blocked load before entering
+@@ -10592,9 +11058,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Will wake up very soon. No time for doing anything else*/
if (this_rq->avg_idle < sysctl_sched_migration_cost)
return;
+#endif
-
+
/* Don't need to update blocked load of idle CPUs*/
if (!READ_ONCE(nohz.has_blocked) ||
-@@ -10533,18 +10994,146 @@ static void nohz_newidle_balance(struct rq *this_rq)
- kick_ilb(NOHZ_STATS_KICK);
- raw_spin_lock(&this_rq->lock);
+@@ -10609,6 +11077,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
}
-+#endif
-
+
#else /* !CONFIG_NO_HZ_COMMON */
+#if !defined(CONFIG_CACULE_RDB)
static inline void nohz_balancer_kick(struct rq *rq) { }
-
+
static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
- {
- return false;
+@@ -10617,8 +11086,134 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
}
-+#endif
-
+
static inline void nohz_newidle_balance(struct rq *this_rq) { }
++#endif
+
#endif /* CONFIG_NO_HZ_COMMON */
-
+
+#ifdef CONFIG_CACULE_RDB
+static int
+can_migrate_task(struct task_struct *p, int dst_cpu, struct rq *src_rq)
@@ -1514,7 +1511,7 @@ index 262b02d75007..1dc6f346111c 100644
+ return 0;
+
+ /* Disregard pcpu kthreads; they are where they need to be. */
-+ if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++ if (kthread_is_per_cpu(p))
+ return 0;
+
+ if (!cpumask_test_cpu(dst_cpu, p->cpus_ptr))
@@ -1631,9 +1628,9 @@ index 262b02d75007..1dc6f346111c 100644
+}
+
/*
- * idle_balance is called by schedule() if this_cpu is about to become
+ * newidle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
-@@ -10555,6 +11144,109 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
+@@ -10629,6 +11224,111 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
* > 0 - success, new (fair) tasks present
*/
static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
@@ -1733,6 +1730,8 @@ index 262b02d75007..1dc6f346111c 100644
+
+ if (pulled_task)
+ this_rq->idle_stamp = 0;
++ else
++ nohz_newidle_balance(this_rq);
+
+ rq_repin_lock(this_rq, rf);
+
@@ -1743,20 +1742,20 @@ index 262b02d75007..1dc6f346111c 100644
{
unsigned long next_balance = jiffies + HZ;
int this_cpu = this_rq->cpu;
-@@ -10583,7 +11275,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -10657,7 +11357,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
*/
rq_unpin_lock(this_rq, rf);
-
+
- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+ if (
+#if !defined(CONFIG_CACULE_SCHED)
+ this_rq->avg_idle < sysctl_sched_migration_cost ||
+#endif
!READ_ONCE(this_rq->rd->overload)) {
-
+
rcu_read_lock();
-@@ -10705,6 +11400,217 @@ void trigger_load_balance(struct rq *rq)
-
+@@ -10782,6 +11485,217 @@ void trigger_load_balance(struct rq *rq)
+
nohz_balancer_kick(rq);
}
+#endif
@@ -1970,24 +1969,24 @@ index 262b02d75007..1dc6f346111c 100644
+ }
+}
+#endif
-
+
static void rq_online_fair(struct rq *rq)
{
-@@ -10741,6 +11647,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10818,6 +11732,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
-
+
+#ifdef CONFIG_CACULE_RDB
+ update_IS(rq);
+#endif
+
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
-
-@@ -10748,11 +11658,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+
+@@ -10825,11 +11743,28 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1997,7 +1996,6 @@ index 262b02d75007..1dc6f346111c 100644
+ static void task_fork_fair(struct task_struct *p)
+{
+ struct cfs_rq *cfs_rq;
-+ struct sched_entity *curr;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
+
@@ -2005,8 +2003,7 @@ index 262b02d75007..1dc6f346111c 100644
+ update_rq_clock(rq);
+
+ cfs_rq = task_cfs_rq(current);
-+ curr = cfs_rq->curr;
-+ if (curr)
++ if (cfs_rq->curr)
+ update_curr(cfs_rq);
+
+ rq_unlock(rq, &rf);
@@ -2015,32 +2012,32 @@ index 262b02d75007..1dc6f346111c 100644
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10783,6 +11712,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10860,6 +11795,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10901,6 +11831,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10978,6 +11914,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10911,6 +11843,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10988,6 +11926,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10918,12 +11851,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10995,12 +11934,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -2048,17 +2045,17 @@ index 262b02d75007..1dc6f346111c 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10979,13 +11917,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -11056,13 +12000,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -2079,26 +2076,26 @@ index 262b02d75007..1dc6f346111c 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -11310,7 +12257,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
+@@ -11387,7 +12340,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
+#if !defined(CONFIG_CACULE_RDB)
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
+#endif
-
+
#ifdef CONFIG_NO_HZ_COMMON
nohz.next_balance = jiffies;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 39112ac7ab34..5881814c7e1c 100644
+index 35f7efed75c4..6ab803743b40 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -158,6 +158,11 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
+@@ -159,6 +159,11 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
*/
#define RUNTIME_INF ((u64)~0ULL)
-
+
+#ifdef CONFIG_CACULE_SCHED
+#define YIELD_MARK 0x8000000000000000ULL
+#define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL
@@ -2107,9 +2104,9 @@ index 39112ac7ab34..5881814c7e1c 100644
static inline int idle_policy(int policy)
{
return policy == SCHED_IDLE;
-@@ -524,10 +529,13 @@ struct cfs_rq {
+@@ -525,10 +530,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -2118,10 +2115,10 @@ index 39112ac7ab34..5881814c7e1c 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
-@@ -536,9 +544,14 @@ struct cfs_rq {
+
+@@ -537,9 +545,14 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr;
@@ -2133,13 +2130,13 @@ index 39112ac7ab34..5881814c7e1c 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
-@@ -933,6 +946,11 @@ struct rq {
+@@ -943,6 +956,11 @@ struct rq {
struct rt_rq rt;
struct dl_rq dl;
-
+
+#ifdef CONFIG_CACULE_RDB
+ unsigned int max_IS_score;
+ struct task_struct *to_migrate_task;
@@ -2149,10 +2146,10 @@ index 39112ac7ab34..5881814c7e1c 100644
/* list of leaf cfs_rq on this CPU: */
struct list_head leaf_cfs_rq_list;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index b9306d2bb426..20f07aa87b8e 100644
+index d4a78e08f6d8..d85615ec6cb9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,59 @@ static struct ctl_table kern_table[] = {
+@@ -1736,6 +1736,59 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -2209,6 +2206,6 @@ index b9306d2bb426..20f07aa87b8e 100644
+ .extra2 = &one_ul,
+ },
+#endif
- #ifdef CONFIG_SCHED_DEBUG
+ #ifdef CONFIG_SCHEDSTATS
{
- .procname = "sched_min_granularity_ns",
+ .procname = "sched_schedstats",
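At runtime the new entries land under /proc/sys/kernel next to the stock scheduler knobs. Illustrative usage; the first name is documented in the kernel.rst hunk at the top of this patch, the second is a hedged guess since the table rows above are truncated:

    # sysctl kernel.sched_interactivity_factor=32768
    # sysctl kernel.sched_max_lifetime_ms=22000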
diff --git a/config b/config
index de70b855af85..7b5e165e9656 100644
--- a/config
+++ b/config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.10.59 Kernel Configuration
+# Linux/x86 5.13.13-hardened1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 11.1.0"
CONFIG_CC_IS_GCC=y
@@ -64,7 +64,6 @@ CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_INJECTION=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_CHIP=y
CONFIG_IRQ_DOMAIN=y
@@ -114,10 +113,8 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_JIT_DEFAULT_ON=y
-# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
-CONFIG_USERMODE_DRIVER=y
-CONFIG_BPF_PRELOAD=y
-CONFIG_BPF_PRELOAD_UMD=m
+CONFIG_BPF_UNPRIV_DEFAULT_OFF=y
+# CONFIG_BPF_PRELOAD is not set
CONFIG_BPF_LSM=y
# end of BPF subsystem
@@ -227,8 +224,6 @@ CONFIG_PID_NS=y
CONFIG_NET_NS=y
# CONFIG_CHECKPOINT_RESTORE is not set
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_CACULE_SCHED=y
-รค CONFIG_CACULE_RDB is not set
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
@@ -271,19 +266,11 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_IO_URING=y
CONFIG_ADVISE_SYSCALLS=y
-CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
-CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
-CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_BPF_JIT_DEFAULT_ON=y
-# CONFIG_BPF_PRELOAD is not set
# CONFIG_USERFAULTFD is not set
CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
CONFIG_KCMP=y
@@ -306,6 +293,7 @@ CONFIG_SLUB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SLAB_CANARY=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
CONFIG_SLUB_CPU_PARTIAL=y
CONFIG_SYSTEM_DATA_VERIFICATION=y
@@ -440,8 +428,6 @@ CONFIG_PERF_EVENTS_INTEL_CSTATE=m
CONFIG_PERF_EVENTS_AMD_POWER=m
# end of Performance monitoring
-CONFIG_X86_16BIT=y
-CONFIG_X86_ESPFIX64=y
CONFIG_X86_VSYSCALL_EMULATION=y
CONFIG_X86_IOPL_IOPERM=y
CONFIG_I8K=m
@@ -465,7 +451,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
# CONFIG_ARCH_MEMORY_PROBE is not set
-CONFIG_ARCH_PROC_KCORE_TEXT=y
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
CONFIG_X86_PMEM_LEGACY_DEVICE=y
CONFIG_X86_PMEM_LEGACY=m
@@ -497,10 +482,7 @@ CONFIG_HZ=300
CONFIG_SCHED_HRTICK=y
# CONFIG_KEXEC is not set
# CONFIG_KEXEC_FILE is not set
-CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
-# CONFIG_KEXEC_SIG is not set
CONFIG_CRASH_DUMP=y
-CONFIG_KEXEC_JUMP=y
CONFIG_PHYSICAL_START=0x1000000
CONFIG_RELOCATABLE=y
CONFIG_RANDOMIZE_BASE=y
@@ -531,13 +513,10 @@ CONFIG_USE_PERCPU_NUMA_NODE_ID=y
#
# Power management and ACPI options
#
-CONFIG_ARCH_HIBERNATION_HEADER=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
CONFIG_HIBERNATE_CALLBACKS=y
# CONFIG_HIBERNATION is not set
-CONFIG_HIBERNATION_SNAPSHOT_DEV=y
-CONFIG_PM_STD_PARTITION=""
CONFIG_PM_SLEEP=y
CONFIG_PM_SLEEP_SMP=y
# CONFIG_PM_AUTOSLEEP is not set
@@ -725,7 +704,6 @@ CONFIG_GOOGLE_VPD=m
CONFIG_EFI_ESRT=y
CONFIG_EFI_VARS_PSTORE=y
CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
-CONFIG_EFI_RUNTIME_MAP=y
# CONFIG_EFI_FAKE_MEMMAP is not set
CONFIG_EFI_SOFT_RESERVE=y
CONFIG_EFI_RUNTIME_WRAPPERS=y
@@ -783,8 +761,6 @@ CONFIG_AS_TPAUSE=y
#
# General architecture-dependent options
#
-CONFIG_CRASH_CORE=y
-CONFIG_KEXEC_CORE=y
CONFIG_HOTPLUG_SMT=y
CONFIG_GENERIC_ENTRY=y
CONFIG_KPROBES=y
@@ -878,7 +854,7 @@ CONFIG_COMPAT_32BIT_TIME=y
CONFIG_HAVE_ARCH_VMAP_STACK=y
CONFIG_VMAP_STACK=y
CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
-# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
@@ -925,7 +901,7 @@ CONFIG_MODULE_SIG_ALL=y
# CONFIG_MODULE_SIG_SHA384 is not set
CONFIG_MODULE_SIG_SHA512=y
CONFIG_MODULE_SIG_HASH="sha512"
-CONFIG_MODULE_COMPRESS=y
+# CONFIG_MODULE_COMPRESS_NONE is not set
# CONFIG_MODULE_COMPRESS_GZIP is not set
# CONFIG_MODULE_COMPRESS_XZ is not set
CONFIG_MODULE_COMPRESS_ZSTD=y
@@ -1076,7 +1052,6 @@ CONFIG_CMA=y
# CONFIG_CMA_DEBUGFS is not set
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
-CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
@@ -1262,8 +1237,6 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_NETLINK_OSF=m
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_LOG_SYSLOG=m
CONFIG_NETFILTER_CONNCOUNT=m
CONFIG_NF_CONNTRACK_MARK=y
@@ -1604,13 +1577,6 @@ CONFIG_BRIDGE_EBT_NFLOG=m
# CONFIG_BPFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
-# CONFIG_SCTP_DBG_OBJCNT is not set
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
-CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
-CONFIG_SCTP_COOKIE_HMAC_MD5=y
-CONFIG_SCTP_COOKIE_HMAC_SHA1=y
-CONFIG_INET_SCTP_DIAG=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -1669,9 +1635,6 @@ CONFIG_VLAN_8021Q_MVRP=y
CONFIG_LLC=m
CONFIG_LLC2=m
# CONFIG_ATALK is not set
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
CONFIG_PHONET=m
@@ -2832,7 +2795,6 @@ CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
-CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
# CONFIG_BCACHE_DEBUG is not set
# CONFIG_BCACHE_CLOSURES_DEBUG is not set
@@ -3506,13 +3468,11 @@ CONFIG_ATH5K=m
CONFIG_ATH5K_PCI=y
CONFIG_ATH9K_HW=m
CONFIG_ATH9K_COMMON=m
-CONFIG_ATH9K_COMMON_DEBUG=y
CONFIG_ATH9K_BTCOEX_SUPPORT=y
CONFIG_ATH9K=m
CONFIG_ATH9K_PCI=y
CONFIG_ATH9K_AHB=y
# CONFIG_ATH9K_DEBUGFS is not set
-# CONFIG_ATH9K_STATION_STATISTICS is not set
CONFIG_ATH9K_DYNACK=y
CONFIG_ATH9K_WOW=y
CONFIG_ATH9K_RFKILL=y
@@ -3522,10 +3482,8 @@ CONFIG_ATH9K_PCI_NO_EEPROM=m
CONFIG_ATH9K_HTC=m
# CONFIG_ATH9K_HTC_DEBUGFS is not set
CONFIG_ATH9K_HWRNG=y
-CONFIG_ATH9K_COMMON_SPECTRAL=y
CONFIG_CARL9170=m
CONFIG_CARL9170_LEDS=y
-CONFIG_CARL9170_DEBUGFS=y
CONFIG_CARL9170_WPC=y
# CONFIG_CARL9170_HWRNG is not set
CONFIG_ATH6KL=m
@@ -3545,7 +3503,6 @@ CONFIG_ATH10K_SDIO=m
CONFIG_ATH10K_USB=m
# CONFIG_ATH10K_DEBUG is not set
# CONFIG_ATH10K_DEBUGFS is not set
-# CONFIG_ATH10K_SPECTRAL is not set
# CONFIG_ATH10K_TRACING is not set
CONFIG_WCN36XX=m
# CONFIG_WCN36XX_DEBUGFS is not set
@@ -3553,9 +3510,7 @@ CONFIG_ATH11K=m
CONFIG_ATH11K_AHB=m
CONFIG_ATH11K_PCI=m
# CONFIG_ATH11K_DEBUG is not set
-# CONFIG_ATH11K_DEBUGFS is not set
# CONFIG_ATH11K_TRACING is not set
-CONFIG_ATH11K_SPECTRAL=y
CONFIG_WLAN_VENDOR_ATMEL=y
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
@@ -3624,7 +3579,6 @@ CONFIG_IWL3945=m
# iwl3945 / iwl4965 Debugging Options
#
# CONFIG_IWLEGACY_DEBUG is not set
-# CONFIG_IWLEGACY_DEBUGFS is not set
# end of iwl3945 / iwl4965 Debugging Options
CONFIG_IWLWIFI=m
@@ -3638,7 +3592,6 @@ CONFIG_IWLWIFI_OPMODE_MODULAR=y
# Debugging Options
#
# CONFIG_IWLWIFI_DEBUG is not set
-# CONFIG_IWLWIFI_DEBUGFS is not set
# CONFIG_IWLWIFI_DEVICE_TRACING is not set
# end of Debugging Options
@@ -3738,7 +3691,6 @@ CONFIG_RT2X00_LIB=m
CONFIG_RT2X00_LIB_FIRMWARE=y
CONFIG_RT2X00_LIB_CRYPTO=y
CONFIG_RT2X00_LIB_LEDS=y
-CONFIG_RT2X00_LIB_DEBUGFS=y
# CONFIG_RT2X00_DEBUG is not set
CONFIG_WLAN_VENDOR_REALTEK=y
CONFIG_RTL8180=m
@@ -4320,7 +4272,6 @@ CONFIG_IPWIRELESS=m
CONFIG_MWAVE=m
# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
CONFIG_NVRAM=m
CONFIG_RAW_DRIVER=m
CONFIG_MAX_RAW_DEVS=8192
@@ -5412,7 +5363,6 @@ CONFIG_REGULATOR_WM8994=m
CONFIG_RC_CORE=m
CONFIG_RC_MAP=m
CONFIG_LIRC=y
-CONFIG_BPF_LIRC_MODE2=y
CONFIG_RC_DECODERS=y
CONFIG_IR_NEC_DECODER=m
CONFIG_IR_RC5_DECODER=m
@@ -5500,11 +5450,6 @@ CONFIG_VIDEOBUF_VMALLOC=m
# Media controller options
#
CONFIG_MEDIA_CONTROLLER_DVB=y
-CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
-
-#
-# Please notice that the enabled Media controller Request API is EXPERIMENTAL
-#
# end of Media controller options
#
@@ -5801,7 +5746,6 @@ CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
# CONFIG_SMS_SIANO_DEBUGFS is not set
-CONFIG_VIDEO_V4L2_TPG=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_CAFE_CCIC=m
CONFIG_VIDEO_CADENCE=y
@@ -5816,15 +5760,6 @@ CONFIG_DVB_PLATFORM_DRIVERS=y
# MMC/SDIO DVB adapters
#
CONFIG_SMS_SDIO_DRV=m
-CONFIG_V4L_TEST_DRIVERS=y
-CONFIG_VIDEO_VIMC=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIDEO_VIVID_CEC=y
-CONFIG_VIDEO_VIVID_MAX_DEVS=64
-CONFIG_VIDEO_VIM2M=m
-CONFIG_VIDEO_VICODEC=m
-CONFIG_DVB_TEST_DRIVERS=y
-CONFIG_DVB_VIDTV=m
#
# FireWire (IEEE 1394) Adapters
@@ -6169,11 +6104,6 @@ CONFIG_DVB_HELENE=m
#
CONFIG_DVB_CXD2099=m
CONFIG_DVB_SP2=m
-
-#
-# Tools to develop new frontends
-#
-CONFIG_DVB_DUMMY_FE=m
# end of Media ancillary drivers
#
@@ -7048,7 +6978,7 @@ CONFIG_SND_SYNTH_EMUX=m
CONFIG_SND_XEN_FRONTEND=m
CONFIG_SND_VIRTIO=m
CONFIG_AC97_BUS=m
-#
+
#
# HID support
#
@@ -7317,64 +7247,17 @@ CONFIG_USBIP_VHCI_HCD=m
CONFIG_USBIP_VHCI_HC_PORTS=8
CONFIG_USBIP_VHCI_NR_HCS=1
CONFIG_USBIP_HOST=m
-CONFIG_USBIP_VUDC=m
# CONFIG_USBIP_DEBUG is not set
CONFIG_USB_CDNS_SUPPORT=m
CONFIG_USB_CDNS_HOST=y
# CONFIG_USB_CDNS3 is not set
-# CONFIG_USB_CDNS3_GADGET is not set
-# CONFIG_USB_CDNS3_HOST is not set
-# CONFIG_USB_CDNS3_PCI_WRAP is not set
CONFIG_USB_CDNSP_PCI=m
-CONFIG_USB_CDNSP_GADGET=y
CONFIG_USB_CDNSP_HOST=y
# CONFIG_USB_MUSB_HDRC is not set
-# CONFIG_USB_MUSB_HOST is not set
-# CONFIG_USB_MUSB_GADGET is not set
-# CONFIG_USB_MUSB_DUAL_ROLE is not set
-
-#
-# Platform Glue Layer
-#
-
-#
-# MUSB DMA mode
-#
-# CONFIG_MUSB_PIO_ONLY is not set
# CONFIG_USB_DWC3 is not set
-# CONFIG_USB_DWC3_ULPI is not set
-# CONFIG_USB_DWC3_HOST is not set
-# CONFIG_USB_DWC3_GADGET is not set
-# CONFIG_USB_DWC3_DUAL_ROLE is not set
-
-#
-# Platform Glue Driver Support
-#
-# CONFIG_USB_DWC3_PCI is not set
-# CONFIG_USB_DWC3_HAPS is not set
# CONFIG_USB_DWC2 is not set
-# CONFIG_USB_DWC2_HOST is not set
-
-#
-# Gadget/Dual-role mode requires USB Gadget support to be enabled
-#
-# CONFIG_USB_DWC2_PERIPHERAL is not set
-# CONFIG_USB_DWC2_DUAL_ROLE is not set
-# CONFIG_USB_DWC2_PCI is not set
-# CONFIG_USB_DWC2_DEBUG is not set
-# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
# CONFIG_USB_CHIPIDEA is not set
-# CONFIG_USB_CHIPIDEA_UDC is not set
-# CONFIG_USB_CHIPIDEA_HOST is not set
-# CONFIG_USB_CHIPIDEA_PCI is not set
-# CONFIG_USB_CHIPIDEA_MSM is not set
-# CONFIG_USB_CHIPIDEA_GENERIC is not set
# CONFIG_USB_ISP1760 is not set
-CONFIG_USB_ISP1760_HCD=y
-CONFIG_USB_ISP1761_UDC=y
-# CONFIG_USB_ISP1760_HOST_ROLE is not set
-# CONFIG_USB_ISP1760_GADGET_ROLE is not set
-CONFIG_USB_ISP1760_DUAL_ROLE=y
#
# USB port drivers
@@ -7485,114 +7368,6 @@ CONFIG_USB_ISP1301=m
# end of USB Physical Layer drivers
# CONFIG_USB_GADGET is not set
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-# CONFIG_USB_GADGET_VBUS_DRAW is not set
-# CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS is not set
-# CONFIG_U_SERIAL_CONSOLE is not set
-
-#
-# USB Peripheral Controller
-#
-CONFIG_USB_FOTG210_UDC=m
-CONFIG_USB_GR_UDC=m
-CONFIG_USB_R8A66597=m
-CONFIG_USB_PXA27X=m
-CONFIG_USB_MV_UDC=m
-CONFIG_USB_MV_U3D=m
-CONFIG_USB_SNP_CORE=m
-CONFIG_USB_M66592=m
-CONFIG_USB_BDC_UDC=m
-CONFIG_USB_AMD5536UDC=m
-CONFIG_USB_NET2272=m
-# CONFIG_USB_NET2272_DMA is not set
-CONFIG_USB_NET2280=m
-CONFIG_USB_GOKU=m
-CONFIG_USB_EG20T=m
-CONFIG_USB_MAX3420_UDC=m
-CONFIG_USB_DUMMY_HCD=m
-# end of USB Peripheral Controller
-
-CONFIG_USB_LIBCOMPOSITE=m
-CONFIG_USB_F_ACM=m
-CONFIG_USB_F_SS_LB=m
-CONFIG_USB_U_SERIAL=m
-CONFIG_USB_U_ETHER=m
-CONFIG_USB_U_AUDIO=m
-CONFIG_USB_F_SERIAL=m
-CONFIG_USB_F_OBEX=m
-CONFIG_USB_F_NCM=m
-CONFIG_USB_F_ECM=m
-CONFIG_USB_F_PHONET=m
-CONFIG_USB_F_EEM=m
-CONFIG_USB_F_SUBSET=m
-CONFIG_USB_F_RNDIS=m
-CONFIG_USB_F_MASS_STORAGE=m
-CONFIG_USB_F_FS=m
-CONFIG_USB_F_UAC1=m
-CONFIG_USB_F_UAC1_LEGACY=m
-CONFIG_USB_F_UAC2=m
-CONFIG_USB_F_UVC=m
-CONFIG_USB_F_MIDI=m
-CONFIG_USB_F_HID=m
-CONFIG_USB_F_PRINTER=m
-CONFIG_USB_F_TCM=m
-CONFIG_USB_CONFIGFS=m
-CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_OBEX=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
-CONFIG_USB_CONFIGFS_ECM_SUBSET=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_EEM=y
-CONFIG_USB_CONFIGFS_PHONET=y
-CONFIG_USB_CONFIGFS_MASS_STORAGE=y
-CONFIG_USB_CONFIGFS_F_LB_SS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_UAC1=y
-CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
-CONFIG_USB_CONFIGFS_F_UAC2=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
-CONFIG_USB_CONFIGFS_F_UVC=y
-CONFIG_USB_CONFIGFS_F_PRINTER=y
-CONFIG_USB_CONFIGFS_F_TCM=y
-
-#
-# USB Gadget precomposed configurations
-#
-CONFIG_USB_ZERO=m
-CONFIG_USB_AUDIO=m
-# CONFIG_GADGET_UAC1 is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_G_NCM=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FUNCTIONFS=m
-CONFIG_USB_FUNCTIONFS_ETH=y
-CONFIG_USB_FUNCTIONFS_RNDIS=y
-CONFIG_USB_FUNCTIONFS_GENERIC=y
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_USB_GADGET_TARGET=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_USB_G_PRINTER=m
-CONFIG_USB_CDC_COMPOSITE=m
-CONFIG_USB_G_NOKIA=m
-CONFIG_USB_G_ACM_MS=m
-CONFIG_USB_G_MULTI=m
-CONFIG_USB_G_MULTI_RNDIS=y
-CONFIG_USB_G_MULTI_CDC=y
-CONFIG_USB_G_HID=m
-CONFIG_USB_G_DBGP=m
-# CONFIG_USB_G_DBGP_PRINTK is not set
-CONFIG_USB_G_DBGP_SERIAL=y
-CONFIG_USB_G_WEBCAM=m
-CONFIG_USB_RAW_GADGET=m
-# end of USB Gadget precomposed configurations
-
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
@@ -9200,7 +8975,6 @@ CONFIG_PHY_PXA_28NM_USB2=m
CONFIG_PHY_CPCAP_USB=m
CONFIG_PHY_QCOM_USB_HS=m
CONFIG_PHY_QCOM_USB_HSIC=m
-CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_TUSB1210=m
CONFIG_PHY_INTEL_LGM_EMMC=m
# end of PHY Subsystem
@@ -9363,10 +9137,8 @@ CONFIG_XFS_ONLINE_REPAIR=y
# CONFIG_XFS_WARN is not set
# CONFIG_XFS_DEBUG is not set
CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_OCFS2_FS_O2CB=m
-CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
CONFIG_OCFS2_FS_STATS=y
CONFIG_OCFS2_DEBUG_MASKLOG=y
# CONFIG_OCFS2_DEBUG_FS is not set
@@ -9430,6 +9202,7 @@ CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_OVERLAY_FS_INDEX=y
CONFIG_OVERLAY_FS_XINO_AUTO=y
CONFIG_OVERLAY_FS_METACOPY=y
+# CONFIG_OVERLAY_FS_UNPRIVILEGED is not set
#
# Caches
@@ -9475,7 +9248,6 @@ CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
# CONFIG_PROC_VMCORE is not set
-# CONFIG_PROC_VMCORE_DEVICE_DUMP is not set
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_PROC_CHILDREN=y
@@ -9604,7 +9376,6 @@ CONFIG_NFS_V4_SECURITY_LABEL=y
CONFIG_NFS_FSCACHE=y
# CONFIG_NFS_USE_LEGACY_DNS is not set
CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFS_DEBUG=y
# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
# CONFIG_NFS_V4_2_READ_PLUS is not set
CONFIG_NFSD=m
@@ -9623,7 +9394,7 @@ CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
-CONFIG_NFS_V4_2_SSC_HELPER=m
+CONFIG_NFS_V4_2_SSC_HELPER=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_BACKCHANNEL=y
@@ -9639,10 +9410,8 @@ CONFIG_CEPH_FS_SECURITY_LABEL=y
CONFIG_CIFS=m
# CONFIG_CIFS_STATS2 is not set
# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set
-# CONFIG_CIFS_WEAK_PW_HASH is not set
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DEBUG=y
# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
@@ -9711,7 +9480,6 @@ CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
-# CONFIG_DLM_DEBUG is not set
CONFIG_UNICODE=y
# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
CONFIG_IO_WQ=y
@@ -9728,6 +9496,8 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_DH_OPERATIONS=y
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY_DMESG_RESTRICT=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY_TIOCSTI_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
@@ -9747,7 +9517,6 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
# CONFIG_SECURITY_SELINUX_DISABLE is not set
CONFIG_SECURITY_SELINUX_DEVELOP=y
CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0
CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
CONFIG_SECURITY_SMACK=y
@@ -9782,7 +9551,7 @@ CONFIG_SECURITY_LANDLOCK=y
# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
# CONFIG_DEFAULT_SECURITY_APPARMOR is not set
CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_LSM="landlock,lockdown,yama,bpf,apparmor"
+CONFIG_LSM="landlock,lockdown,yama,bpf"
#
# Kernel hardening options
@@ -10113,10 +9882,10 @@ CONFIG_842_COMPRESS=m
CONFIG_842_DECOMPRESS=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_COMPRESS=m
CONFIG_LZO_DECOMPRESS=y
CONFIG_LZ4_COMPRESS=y
-CONFIG_LZ4HC_COMPRESS=y
+CONFIG_LZ4HC_COMPRESS=m
CONFIG_LZ4_DECOMPRESS=y
CONFIG_ZSTD_COMPRESS=y
CONFIG_ZSTD_DECOMPRESS=y
@@ -10243,21 +10012,13 @@ CONFIG_DEBUG_BUGVERBOSE=y
# Compile-time checks and compiler options
#
# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_INFO_REDUCED is not set
-# CONFIG_DEBUG_INFO_COMPRESSED is not set
-# CONFIG_DEBUG_INFO_SPLIT is not set
-# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
-# CONFIG_DEBUG_INFO_DWARF4 is not set
-# CONFIG_DEBUG_INFO_BTF is not set
-CONFIG_PAHOLE_HAS_SPLIT_BTF=y
-# CONFIG_DEBUG_INFO_BTF_MODULES is not set
-# CONFIG_GDB_SCRIPTS is not set
CONFIG_FRAME_WARN=2048
CONFIG_STRIP_ASM_SYMS=y
# CONFIG_READABLE_ASM is not set
# CONFIG_HEADERS_INSTALL is not set
# CONFIG_DEBUG_SECTION_MISMATCH is not set
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+# CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE is not set
CONFIG_STACK_VALIDATION=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# end of Compile-time checks and compiler options
@@ -10326,9 +10087,10 @@ CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
CONFIG_HAVE_ARCH_KFENCE=y
CONFIG_KFENCE=y
CONFIG_KFENCE_STATIC_KEYS=y
-CONFIG_KFENCE_SAMPLE_INTERVAL=0
+CONFIG_KFENCE_SAMPLE_INTERVAL=100
CONFIG_KFENCE_NUM_OBJECTS=255
CONFIG_KFENCE_STRESS_TEST_FAULTS=0
+CONFIG_KFENCE_BUG_ON_DATA_CORRUPTION=y
# end of Memory Debugging
CONFIG_DEBUG_SHIRQ=y
@@ -10448,6 +10210,7 @@ CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
+CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
# CONFIG_IRQSOFF_TRACER is not set
@@ -10470,7 +10233,6 @@ CONFIG_PROBE_EVENTS=y
# CONFIG_BPF_KPROBE_OVERRIDE is not set
CONFIG_FTRACE_MCOUNT_RECORD=y
CONFIG_FTRACE_MCOUNT_USE_CC=y
-CONFIG_TRACING_MAP=y
CONFIG_SYNTH_EVENTS=y
# CONFIG_HIST_TRIGGERS is not set
# CONFIG_TRACE_EVENT_INJECT is not set
@@ -10485,19 +10247,16 @@ CONFIG_SYNTH_EVENTS=y
# CONFIG_PREEMPTIRQ_DELAY_TEST is not set
# CONFIG_SYNTH_EVENT_GEN_TEST is not set
# CONFIG_KPROBE_EVENT_GEN_TEST is not set
-# CONFIG_HIST_TRIGGERS_DEBUG is not set
# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
# CONFIG_SAMPLES is not set
CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
# CONFIG_STRICT_DEVMEM is not set
-# CONFIG_IO_STRICT_DEVMEM is not set
#
# x86 Debugging
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
-CONFIG_EARLY_PRINTK_USB=y
# CONFIG_X86_VERBOSE_BOOTUP is not set
CONFIG_EARLY_PRINTK=y
# CONFIG_EARLY_PRINTK_DBGP is not set
@@ -10569,6 +10328,7 @@ CONFIG_TEST_KSTRTOX=y
# CONFIG_TEST_UDELAY is not set
# CONFIG_TEST_STATIC_KEYS is not set
# CONFIG_TEST_KMOD is not set
+# CONFIG_TEST_DEBUG_VIRTUAL is not set
# CONFIG_TEST_MEMCAT_P is not set
# CONFIG_TEST_OBJAGG is not set
# CONFIG_TEST_STACKINIT is not set