author      ptr1337    2021-07-01 20:17:57 +0000
committer   ptr1337    2021-07-01 20:17:57 +0000
commit      e9d5f500e24f0ae2397cc01f466ce3a009eaba4d (patch)
tree        eae2a255abf361b99e43942ea21f5bbfa9595a61
parent      498e6d69f3da3a6f0ffe1b7f4944dfe2100f3faf (diff)
download    aur-e9d5f500e24f0ae2397cc01f466ce3a009eaba4d.tar.gz
5.12.14
-rw-r--r--   .SRCINFO             14
-rw-r--r--   PKGBUILD             10
-rw-r--r--   cacule-5.12.patch   224
-rw-r--r--   config               19
4 files changed, 138 insertions, 129 deletions
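
This commit is a routine version bump of the linux-hardened-cacule AUR package from 5.12.13-hardened1 to 5.12.14-hardened1: the PKGBUILD and .SRCINFO move to the new kernel and hardened-patch sources, the bundled cacule-5.12.patch is regenerated, and the kernel config is refreshed. A minimal sketch of fetching the package at this commit and building it is shown below; the clone URL is assumed from the usual AUR naming convention for the pkgbase and is not stated in the diff itself.

# Fetch the AUR repository and check out this commit (URL assumed from the
# standard https://aur.archlinux.org/<pkgbase>.git pattern).
git clone https://aur.archlinux.org/linux-hardened-cacule.git
cd linux-hardened-cacule
git checkout e9d5f500e24f0ae2397cc01f466ce3a009eaba4d
# Build and install; makepkg pulls the makedepends listed in .SRCINFO.
makepkg -si
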
diff --git a/.SRCINFO b/.SRCINFO
index 1342ba0d6797..5f41be3919f2 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
- pkgver = 5.12.13.hardened1
+ pkgver = 5.12.14.hardened1
pkgrel = 1
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
@@ -19,16 +19,16 @@ pkgbase = linux-hardened-cacule
makedepends = graphviz
makedepends = imagemagick
options = !strip
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.12.13.tar.xz
- source = https://github.com/anthraxx/linux-hardened/releases/download/5.12.13-hardened1/linux-hardened-5.12.13-hardened1.patch
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.12.14.tar.xz
+ source = https://github.com/anthraxx/linux-hardened/releases/download/5.12.14-hardened1/linux-hardened-5.12.14-hardened1.patch
source = cacule-5.12.patch
source = cpu-patches.patch
source = config
- sha256sums = af485fcde5635981e6713b547cc8904a7f6e74e5ffb784cc08781fa5999dd255
- sha256sums = e0fc0e22756f0f17e59aee6031c239015450ff79834869254504bf61e4054fba
- sha256sums = a2b820c815823f850eb251c746f525c9b8600b805a92790604ee2ca36006065b
+ sha256sums = 90ca3b98088f5d9af097067e04b195ecf0d4fe167bcfaca8a97b142bccb27dac
+ sha256sums = 5b88d78aca0f73936bf9a4cfbf8406db923c8bc3a1296ec0c950ae220788bf30
+ sha256sums = 912786eae40b7993ca04ef3eb86e6f03c95d60749819cb2c75260b63c978989c
sha256sums = 4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a
- sha256sums = 63aa00dae9642074b77df5d321eafdc9b0e21471a7c6df5c9bb8e5a18580797c
+ sha256sums = e283e40e436ab48ae5a1d9def8460dd46da30d5844b81c7d5e2d660c2d2a31d9
pkgname = linux-hardened-cacule
pkgdesc = The Security-Hardened Linux with the cacule scheduler kernel and modules
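
.SRCINFO only mirrors what the PKGBUILD declares, so the hunks above track the PKGBUILD changes below one for one. It is a generated file rather than one edited by hand; after bumping pkgver and the checksums in the PKGBUILD, it is typically refreshed with the standard makepkg invocation:

# Regenerate .SRCINFO from the updated PKGBUILD before committing.
makepkg --printsrcinfo > .SRCINFO
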
diff --git a/PKGBUILD b/PKGBUILD
index 7828cbf0bfe2..0b38ea522ec2 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -6,7 +6,7 @@
pkgbase=linux-hardened-cacule
-pkgver=5.12.13.hardened1
+pkgver=5.12.14.hardened1
pkgrel=1
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
@@ -26,11 +26,11 @@ source=(
cpu-patches.patch
config # the main kernel config file
)
-sha256sums=('af485fcde5635981e6713b547cc8904a7f6e74e5ffb784cc08781fa5999dd255'
- 'e0fc0e22756f0f17e59aee6031c239015450ff79834869254504bf61e4054fba'
- 'a2b820c815823f850eb251c746f525c9b8600b805a92790604ee2ca36006065b'
+sha256sums=('90ca3b98088f5d9af097067e04b195ecf0d4fe167bcfaca8a97b142bccb27dac'
+ '5b88d78aca0f73936bf9a4cfbf8406db923c8bc3a1296ec0c950ae220788bf30'
+ '912786eae40b7993ca04ef3eb86e6f03c95d60749819cb2c75260b63c978989c'
'4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a'
- '63aa00dae9642074b77df5d321eafdc9b0e21471a7c6df5c9bb8e5a18580797c')
+ 'e283e40e436ab48ae5a1d9def8460dd46da30d5844b81c7d5e2d660c2d2a31d9')
export KBUILD_BUILD_HOST=archlinux
export KBUILD_BUILD_USER=$pkgbase
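
The five sha256sums entries correspond position for position to the five source entries: kernel tarball, hardened patch, cacule-5.12.patch, cpu-patches.patch, and config. Only the first, second, third, and fifth change here, matching the files actually touched by this release; cpu-patches.patch keeps its old checksum. A sketch of how such an array is usually refreshed, assuming the pacman-contrib tooling is available:

# updpkgsums (from pacman-contrib) rewrites the sha256sums=() array in place;
# makepkg -g merely prints the new values for manual pasting.
updpkgsums
# or:
makepkg -g
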
diff --git a/cacule-5.12.patch b/cacule-5.12.patch
index 0c3e79377aba..a74c39f5a621 100644
--- a/cacule-5.12.patch
+++ b/cacule-5.12.patch
@@ -5,12 +5,12 @@ index 1d56a6b73a4e..4d55ff02310c 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -102,7 +102,7 @@ index edc01bcefbfd..9e16c9dd3d78 100644
@@ -458,10 +458,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -121,7 +121,7 @@ index edc01bcefbfd..9e16c9dd3d78 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
+
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3c31ba88aca5..4cf162341ab8 100644
--- a/include/linux/sched/sysctl.h
@@ -129,7 +129,7 @@ index 3c31ba88aca5..4cf162341ab8 100644
@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
+extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
@@ -144,9 +144,9 @@ index 5f5c776ef192..92330b5d8897 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -830,6 +830,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -176,19 +176,19 @@ index 38ef6d06888e..865f8dbddca8 100644
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
+ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -205,12 +205,12 @@ index 814200541f8f..353f88cd05ca 100644
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3840,6 +3845,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -221,20 +221,20 @@ index 814200541f8f..353f88cd05ca 100644
@@ -8094,6 +8103,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
+ printk(KERN_INFO "CacULE CPU scheduler v5.12-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 9e0a915e6eb8..77ac9cd82113 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -289,23 +289,23 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
+
@@ -82,7 +86,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
+
+#ifdef CONFIG_CACULE_SCHED
+const_debug unsigned int sysctl_sched_migration_cost = 200000UL;
+#else
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif
-
+
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
@@ -113,6 +121,17 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
@@ -318,12 +318,12 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+#endif
+
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
@@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -446,7 +446,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+
+ return -1;
+}
-
+
+/*
+ * Enqueue an entity
+ */
@@ -550,11 +550,11 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
* Enqueue an entity into the rb-tree:
*/
@@ -608,16 +832,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+
return __node_2_se(next);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -565,19 +565,19 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+ return se_of(cfs_rq->tail);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return __node_2_se(last);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
@@ -712,6 +944,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
@@ -587,13 +587,13 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
@@ -828,14 +1062,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -639,13 +639,13 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
+#endif
-
+
if (unlikely(!curr))
return;
@@ -852,8 +1123,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
@@ -655,7 +655,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
@@ -1021,7 +1299,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -665,7 +665,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
-
if (!schedstat_enabled())
return;
-
+
@@ -1053,7 +1330,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
@@ -676,21 +676,21 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
se->exec_start = rq_clock_task(rq_of(cfs_rq));
+#endif
}
-
+
/**************************************************
@@ -4122,7 +4403,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
@@ -4133,6 +4414,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
@@ -700,9 +700,9 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
+
@@ -4222,18 +4505,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -711,7 +711,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -720,9 +720,9 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
@@ -732,24 +732,24 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
@@ -4256,8 +4545,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
@@ -4278,6 +4569,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
@@ -759,22 +759,22 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
+
@@ -4346,13 +4639,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
@@ -784,12 +784,12 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
@@ -800,7 +800,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+/*
+ * Preempt the current task with a newly woken task if needed:
@@ -821,13 +821,13 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4450,6 +4762,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -845,23 +845,23 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
+
@@ -4510,6 +4837,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
+
@@ -5612,7 +5940,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
@@ -5644,12 +5974,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -882,7 +882,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
@@ -5765,6 +6097,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
@@ -892,19 +892,19 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
return 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
@@ -6513,6 +6847,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
@@ -6762,6 +7097,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -958,13 +958,13 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
@@ -6786,6 +7172,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
+
@@ -987,13 +987,13 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+#else
if (wake_flags & WF_TTWU) {
record_wakee(p);
-
+
@@ -6798,6 +7204,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
-
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
@@ -6844,6 +7251,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
@@ -1005,17 +1005,17 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
@@ -6869,6 +7277,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
@@ -6914,6 +7323,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
@@ -1025,7 +1025,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
@@ -7000,9 +7411,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
@@ -1038,20 +1038,20 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
@@ -7016,10 +7430,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
@@ -7049,6 +7465,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
@@ -1071,9 +1071,9 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1082,12 +1082,12 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
@@ -7077,6 +7501,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
@@ -7251,7 +7676,10 @@ static void yield_task_fair(struct rq *rq)
{
@@ -1097,44 +1097,44 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
@@ -7259,7 +7687,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
@@ -7275,7 +7705,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
@@ -7286,8 +7718,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
+
@@ -7515,6 +7949,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
@@ -1144,37 +1144,37 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
@@ -10585,9 +11021,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Will wake up very soon. No time for doing anything else*/
if (this_rq->avg_idle < sysctl_sched_migration_cost)
return;
+#endif
-
+
/* Don't need to update blocked load of idle CPUs*/
if (!READ_ONCE(nohz.has_blocked) ||
@@ -10655,7 +11093,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
*/
rq_unpin_lock(this_rq, rf);
-
+
- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+ if (
+#if !defined(CONFIG_CACULE_SCHED)
+ this_rq->avg_idle < sysctl_sched_migration_cost ||
+#endif
!READ_ONCE(this_rq->rd->overload)) {
-
+
rcu_read_lock();
@@ -10823,11 +11264,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1207,7 +1207,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
@@ -10976,6 +11437,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
@@ -1217,14 +1217,14 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
@@ -10986,6 +11449,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
@@ -10993,12 +11457,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
@@ -1235,15 +1235,15 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
@@ -11054,13 +11523,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
@@ -1266,7 +1266,7 @@ index 47fcc3fe9dc5..c0a60cc8d9ce 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e4e4f47cee6a..0eb4fca83ffe 100644
@@ -1274,7 +1274,7 @@ index e4e4f47cee6a..0eb4fca83ffe 100644
+++ b/kernel/sched/sched.h
@@ -523,10 +523,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1283,9 +1283,9 @@ index e4e4f47cee6a..0eb4fca83ffe 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
+
@@ -535,9 +538,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
@@ -1299,7 +1299,7 @@ index e4e4f47cee6a..0eb4fca83ffe 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
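
Beyond the scheduler code itself, the regenerated patch documents a runtime tunable, sched_interactivity_factor, and exports interactivity_factor and interactivity_threshold through include/linux/sched/sysctl.h. A minimal sketch of inspecting the knob on a kernel built with CONFIG_CACULE_SCHED=y follows; the exact sysctl name under the kernel namespace is an assumption based on the documentation hunk above, and the written value is only an example.

# Read the current interactivity factor (name assumed from the sysctl
# documentation added by the patch; requires a CacULE-enabled kernel).
sysctl kernel.sched_interactivity_factor
# Write an example value; choose your own after consulting the CacULE docs.
sysctl -w kernel.sched_interactivity_factor=32768
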
diff --git a/config b/config
index 18f8f4a624b5..5bac2fcc3f78 100644
--- a/config
+++ b/config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.12.13-hardened1 Kernel Configuration
+# Linux/x86 5.12.14-hardened1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 11.1.0"
CONFIG_CC_IS_GCC=y
@@ -181,7 +181,7 @@ CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_CACULE_SCHED=y
+CONFIG_CACULE_SCHED=y
# CONFIG_RT_GROUP_SCHED is not set
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_PIDS=y
@@ -468,6 +468,7 @@ CONFIG_EFI_MIXED=y
# CONFIG_HZ_250 is not set
CONFIG_HZ_300=y
# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_2000 is not set
CONFIG_HZ=300
CONFIG_SCHED_HRTICK=y
# CONFIG_KEXEC is not set
@@ -2268,7 +2269,7 @@ CONFIG_MTD_MAP_BANK_WIDTH_4=y
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
CONFIG_MTD_RAM=m
-# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
# end of RAM/ROM/Flash chip drivers
@@ -2340,7 +2341,14 @@ CONFIG_MTD_NAND_ECC_SW_BCH=y
# CONFIG_MTD_LPDDR is not set
# end of LPDDR & LPDDR2 PCM memory drivers
-# CONFIG_MTD_SPI_NOR is not set
+CONFIG_MTD_SPI_NOR=m
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
+# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
+CONFIG_SPI_INTEL_SPI=m
+CONFIG_SPI_INTEL_SPI_PCI=m
+CONFIG_SPI_INTEL_SPI_PLATFORM=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_WL_THRESHOLD=4096
CONFIG_MTD_UBI_BEB_LIMIT=20
@@ -7710,7 +7718,6 @@ CONFIG_INTEL_IDXD=m
CONFIG_INTEL_IDXD_SVM=y
CONFIG_INTEL_IOATDMA=m
CONFIG_PLX_DMA=m
-CONFIG_XILINX_ZYNQMP_DPDMA=m
CONFIG_QCOM_HIDMA_MGMT=m
CONFIG_QCOM_HIDMA=m
CONFIG_DW_DMAC_CORE=y
@@ -9757,6 +9764,8 @@ CONFIG_SYSTEM_TRUSTED_KEYS=""
CONFIG_SECONDARY_TRUSTED_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
+CONFIG_SYSTEM_REVOCATION_LIST=y
+CONFIG_SYSTEM_REVOCATION_KEYS=""
# end of Certificates for signature checking
CONFIG_BINARY_PRINTF=y
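
Once the resulting kernel is booted, the scheduler's presence can be checked against two artifacts visible in this diff: the printk added to kernel/sched/core.c and the CONFIG_CACULE_SCHED=y line in the config. A small sketch, assuming the kernel also exposes its config via /proc/config.gz (which requires CONFIG_IKCONFIG_PROC and is not shown in this diff):

# The banner comes from the printk added in sched_init() above.
dmesg | grep -i cacule
# Only works when the running kernel was built with CONFIG_IKCONFIG_PROC=y.
zgrep CONFIG_CACULE_SCHED /proc/config.gz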