author     ptr1337    2021-05-13 09:41:23 +0200
committer  ptr1337    2021-05-13 09:41:23 +0200
commit     2a8481058c11c6e078527010d55d44d7a7281e6b (patch)
tree       aff14a75df23d7bb10581f8223752736385b1679
parent     becf67e5f30667ace3519a94f6d1eb9071323497 (diff)
download   aur-2a8481058c11c6e078527010d55d44d7a7281e6b.tar.gz
5.11.20, cacule updated, config updated, cpu patch
-rw-r--r--  .SRCINFO            10
-rw-r--r--  PKGBUILD            10
-rw-r--r--  cacule-5.11.patch  466
-rw-r--r--  config               5
-rw-r--r--  cpu-patches.patch   49
5 files changed, 207 insertions, 333 deletions
diff --git a/.SRCINFO b/.SRCINFO
index 1e210a1ad0b1..f4f9f764b3a6 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
pkgver = 5.11.20.hardened1
- pkgrel = 1
+ pkgrel = 2
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
license = GPL2
@@ -22,8 +22,8 @@ pkgbase = linux-hardened-cacule
source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.20.tar.xz
source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.20.tar.sign
source = https://github.com/anthraxx/linux-hardened/releases/download/5.11.20-hardened1/linux-hardened-5.11.20-hardened1.patch
- source = cpu-patches.patch
source = cacule-5.11.patch
+ source = cpu-patches.patch
source = config
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
@@ -31,9 +31,9 @@ pkgbase = linux-hardened-cacule
sha256sums = 73faeea560dc4fe1f9ee17af4b8d72446192e536979460d66ed6d0cd03bf0797
sha256sums = SKIP
sha256sums = aefaba9c76209bc8c80392ba20917eca55206e101da4a2e2f27fa2dc53164e1a
- sha256sums = cb22d1ab70ca801e0b8b89bcf98bf4eb911667c5bd93a7914efb5995da96af75
- sha256sums = 94bc3f303f69863d5cbc9c64e24862b4948a32756d7167f13e261fabd15c0f66
- sha256sums = 0b1c504b9da36c2048e82363c6fbd41f9ed362ab656c9b648d143aee6055f026
+ sha256sums = 5670d4f198be1d907aa1941b648ace4e8da1726be223e3208ee4285b396f48cb
+ sha256sums = 3fcac4b300dfd7dd0c092033db413949e82811985b97cb4f7dc826791511fc34
+ sha256sums = 9c7918c51a41f3e192aa7546f546537f4f04341be17127a5ec87ce7dcf75555e
pkgname = linux-hardened-cacule
pkgdesc = The Security-Hardened Linux with the cacule scheduler kernel and modules
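
.SRCINFO is generated from the PKGBUILD rather than edited by hand, so the reordered source entries and refreshed checksums above simply mirror the PKGBUILD hunks below. A minimal sketch of the usual regeneration step (standard makepkg usage, not taken from this commit):

    $ makepkg --printsrcinfo > .SRCINFO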
diff --git a/PKGBUILD b/PKGBUILD
index 6f7388795ff3..7b01aa38363e 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -7,7 +7,7 @@
pkgbase=linux-hardened-cacule
pkgver=5.11.20.hardened1
-pkgrel=1
+pkgrel=2
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
arch=(x86_64)
@@ -22,8 +22,8 @@ _srctag=${pkgver%.*}-${pkgver##*.}
source=(
https://www.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/${_srcname}.tar.{xz,sign}
https://github.com/anthraxx/linux-hardened/releases/download/5.11.20-hardened1/linux-hardened-5.11.20-hardened1.patch
- cpu-patches.patch
cacule-5.11.patch
+ cpu-patches.patch
config # the main kernel config file
)
validpgpkeys=(
@@ -34,9 +34,9 @@ validpgpkeys=(
sha256sums=('73faeea560dc4fe1f9ee17af4b8d72446192e536979460d66ed6d0cd03bf0797'
'SKIP'
'aefaba9c76209bc8c80392ba20917eca55206e101da4a2e2f27fa2dc53164e1a'
- 'cb22d1ab70ca801e0b8b89bcf98bf4eb911667c5bd93a7914efb5995da96af75'
- '94bc3f303f69863d5cbc9c64e24862b4948a32756d7167f13e261fabd15c0f66'
- '0b1c504b9da36c2048e82363c6fbd41f9ed362ab656c9b648d143aee6055f026')
+ '5670d4f198be1d907aa1941b648ace4e8da1726be223e3208ee4285b396f48cb'
+ '3fcac4b300dfd7dd0c092033db413949e82811985b97cb4f7dc826791511fc34'
+ '9c7918c51a41f3e192aa7546f546537f4f04341be17127a5ec87ce7dcf75555e')
export KBUILD_BUILD_HOST=archlinux
export KBUILD_BUILD_USER=$pkgbase
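
makepkg pairs sha256sums entries with source entries by position, which is why swapping cacule-5.11.patch and cpu-patches.patch in source requires the same swap in sha256sums, and why the three trailing hashes change along with the updated patch and config contents. A sketch of refreshing the array automatically, assuming updpkgsums from pacman-contrib is available (makepkg -g prints the same hashes for manual pasting):

    $ updpkgsums                          # rewrite sha256sums in the PKGBUILD in place
    $ makepkg --printsrcinfo > .SRCINFO   # then regenerate .SRCINFO as above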
diff --git a/cacule-5.11.patch b/cacule-5.11.patch
index 7f6ccd2f9422..449f37825465 100644
--- a/cacule-5.11.patch
+++ b/cacule-5.11.patch
@@ -5,12 +5,12 @@ index 1d56a6b73a4e..4d55ff02310c 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -96,13 +96,13 @@ index 000000000000..82b0847c468a
+ idle timer scheduler in order to avoid to get into priority
+ inversion problems which would deadlock the machine.
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 6e3a5eeec509..f5a4fc49286f 100644
+index 6e3a5eeec509..97103c8a91b2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -450,10 +450,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -121,34 +121,19 @@ index 6e3a5eeec509..f5a4fc49286f 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
-@@ -872,6 +884,12 @@ struct task_struct {
- struct list_head sibling;
- struct task_struct *group_leader;
-
-+#ifdef CONFIG_CACULE_SCHED
-+ u64 fork_start_win_stamp;
-+ unsigned int nr_forks_per_time;
-+ int is_fake_interactive;
-+#endif
-+
- /*
- * 'ptraced' is the list of tasks this task is using ptrace() on.
- *
+
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..20c85c808485 100644
+index 3c31ba88aca5..4cf162341ab8 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,14 @@ extern unsigned int sysctl_sched_min_granularity;
+@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
+extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
+extern unsigned int cacule_max_lifetime;
-+extern unsigned int fake_interactive_decay_time;
-+extern unsigned int nr_fork_threshold;
+#endif
+
enum sched_tunable_scaling {
@@ -159,9 +144,9 @@ index a3d27421de8f..d0cfdf6e9bed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -824,6 +824,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -191,66 +176,41 @@ index 38ef6d06888e..c8cf984c294e 100644
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
+ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
-diff --git a/kernel/exit.c b/kernel/exit.c
-index 04029e35e69a..9dfd515104db 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -667,6 +667,17 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
- write_lock_irq(&tasklist_lock);
- forget_original_parent(tsk, &dead);
-
-+#ifdef CONFIG_CACULE_SCHED
-+ p = tsk->parent;
-+ if (p) {
-+ if (p->nr_forks_per_time)
-+ p->nr_forks_per_time--;
-+
-+ if (p->is_fake_interactive)
-+ p->is_fake_interactive--;
-+ }
-+#endif
-+
- if (group_dead)
- kill_orphaned_pgrp(tsk->group_leader, NULL);
-
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index f0056507a373..9ecce53ddcc1 100644
+index f0056507a373..c29107de2ce0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3555,6 +3555,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -3555,6 +3555,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.vruntime = 0;
-+ p->fork_start_win_stamp = 0;
-+ p->nr_forks_per_time = 0;
-+ p->is_fake_interactive = 0;
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3840,6 +3848,10 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3840,6 +3845,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -258,23 +218,23 @@ index f0056507a373..9ecce53ddcc1 100644
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -7727,6 +7739,10 @@ void __init sched_init(void)
+@@ -7727,6 +7736,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
+ printk(KERN_INFO "CacULE CPU scheduler v5.11-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2357921580f9..3adc9ee2bcfc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -535,8 +535,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -316,7 +276,7 @@ index 2357921580f9..3adc9ee2bcfc 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index bbc78794224a..9e035ed1e746 100644
+index bbc78794224a..ffb6e862561d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -329,25 +289,23 @@ index bbc78794224a..9e035ed1e746 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
-@@ -113,6 +117,13 @@ int __weak arch_asym_cpu_priority(int cpu)
+
+@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
+unsigned int __read_mostly interactivity_factor = 32768;
+unsigned int __read_mostly interactivity_threshold = 1000;
-+unsigned int __read_mostly fake_interactive_decay_time = 1000; // in ms
-+unsigned int __read_mostly nr_fork_threshold = 3;
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +264,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -359,7 +317,7 @@ index bbc78794224a..9e035ed1e746 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +531,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -368,53 +326,18 @@ index bbc78794224a..9e035ed1e746 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +587,183 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +585,141 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
+#endif /* CONFIG_CACULE_SCHED */
+
+#ifdef CONFIG_CACULE_SCHED
-+static inline unsigned int is_fake_interactive(struct cacule_node *cn)
-+{
-+ struct sched_entity *se = se_of(cn);
-+ struct task_struct *parent = NULL;
-+ struct cfs_rq *cfs_rq;
-+ u64 win_time = fake_interactive_decay_time * 1000000ULL;
-+ u64 now = sched_clock();
-+
-+ while (!parent) {
-+ if (entity_is_task(se)) {
-+ parent = task_of(se)->parent;
-+ break;
-+ }
-+
-+ cfs_rq = group_cfs_rq(se);
-+
-+ if (!cfs_rq->head && !cfs_rq->curr)
-+ return 0;
-+
-+ if (cfs_rq->head)
-+ se = se_of(cfs_rq->head);
-+ else if (cfs_rq->curr)
-+ se = cfs_rq->curr;
-+ }
-+
-+ if (parent->is_fake_interactive
-+ && (now - parent->fork_start_win_stamp > win_time))
-+ {
-+ parent->fork_start_win_stamp = now;
-+ parent->is_fake_interactive--;
-+ }
-+
-+ return parent->is_fake_interactive;
-+}
-+
+static unsigned int
+calc_interactivity(u64 now, struct cacule_node *se)
+{
+ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
-+ unsigned int score_se, fake_interactivity;
++ unsigned int score_se;
+
+ /*
+ * in case of vruntime==0, logical OR with 1 would
@@ -434,10 +357,6 @@ index bbc78794224a..9e035ed1e746 100644
+ else
+ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
+
-+ fake_interactivity = is_fake_interactive(se);
-+ if (fake_interactivity)
-+ score_se += (_2m * fake_interactivity) + 1;
-+
+ return score_se;
+}
+
@@ -446,9 +365,6 @@ index bbc78794224a..9e035ed1e746 100644
+ if (se_of(cn)->vruntime == 0)
+ return 0;
+
-+ if (is_fake_interactive(cn))
-+ return 0;
-+
+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
+}
+
@@ -466,7 +382,7 @@ index bbc78794224a..9e035ed1e746 100644
+
+ return -1;
+}
-
+
+/*
+ * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
+ * otherwise return -1
@@ -552,12 +468,12 @@ index bbc78794224a..9e035ed1e746 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +821,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+@@ -626,16 +777,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
return rb_entry(next, struct sched_entity, run_node);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -573,35 +489,35 @@ index bbc78794224a..9e035ed1e746 100644
+ return se_of(cn);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return rb_entry(last, struct sched_entity, run_node);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -720,6 +928,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +884,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +938,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +894,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,14 +1046,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,14 +1002,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -644,13 +560,13 @@ index bbc78794224a..9e035ed1e746 100644
- u64 delta_exec;
+ u64 now = sched_clock();
+ u64 delta_exec, delta_fair;
-
+
if (unlikely(!curr))
return;
-@@ -860,8 +1102,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -860,8 +1058,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
@@ -660,52 +576,52 @@ index bbc78794224a..9e035ed1e746 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-@@ -1029,7 +1278,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1029,7 +1234,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1061,7 +1309,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1061,7 +1265,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
- se->exec_start = rq_clock_task(rq_of(cfs_rq));
+ se->exec_start = sched_clock();
}
-
+
/**************************************************
-@@ -4115,7 +4363,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4115,7 +4319,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4126,6 +4374,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4126,6 +4330,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4157,6 +4406,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4157,6 +4362,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4215,18 +4465,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4215,18 +4421,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -713,7 +629,7 @@ index bbc78794224a..9e035ed1e746 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -722,87 +638,87 @@ index bbc78794224a..9e035ed1e746 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4235,6 +4490,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4235,6 +4446,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4249,8 +4505,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4249,8 +4461,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4271,6 +4529,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4271,6 +4485,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4315,6 +4574,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4315,6 +4530,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4339,13 +4599,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4339,13 +4555,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4354,12 +4617,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4354,12 +4573,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4368,8 +4633,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4368,8 +4589,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
@@ -821,18 +737,18 @@ index bbc78794224a..9e035ed1e746 100644
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4409,6 +4690,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4409,6 +4646,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4443,6 +4725,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4443,6 +4681,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -864,44 +780,26 @@ index bbc78794224a..9e035ed1e746 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4503,6 +4814,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4503,6 +4770,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5605,7 +5917,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5605,7 +5873,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5619,6 +5933,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- int task_sleep = flags & DEQUEUE_SLEEP;
- int idle_h_nr_running = task_has_idle_policy(p);
- bool was_sched_idle = sched_idle_rq(rq);
-+#ifdef CONFIG_CACULE_SCHED
-+ struct task_struct *parent = p->parent;
-+
-+ if (task_sleep && parent) {
-+ if (parent->nr_forks_per_time)
-+ parent->nr_forks_per_time--;
-+
-+ if (parent->is_fake_interactive)
-+ parent->is_fake_interactive--;
-+ }
-+#endif
-
- util_est_dequeue(&rq->cfs, p);
-
-@@ -5637,12 +5962,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5637,12 +5907,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -916,32 +814,32 @@ index bbc78794224a..9e035ed1e746 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5758,6 +6085,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5758,6 +6030,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5804,6 +6132,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5804,6 +6077,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6484,6 +6813,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6484,6 +6758,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6717,6 +7047,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6717,6 +6992,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -995,13 +893,13 @@ index bbc78794224a..9e035ed1e746 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6741,6 +7122,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -6741,6 +7067,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
+
@@ -1024,16 +922,16 @@ index bbc78794224a..9e035ed1e746 100644
+#else
if (wake_flags & WF_TTWU) {
record_wakee(p);
-
-@@ -6753,6 +7154,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
-
+
+@@ -6753,6 +7099,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6799,6 +7201,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6799,6 +7146,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -1041,31 +939,31 @@ index bbc78794224a..9e035ed1e746 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6824,6 +7227,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6824,6 +7172,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6869,6 +7273,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6869,6 +7218,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6947,6 +7352,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6947,6 +7297,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6955,9 +7361,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6955,9 +7306,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -1075,23 +973,23 @@ index bbc78794224a..9e035ed1e746 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6971,10 +7380,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6971,10 +7325,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7004,6 +7415,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7004,6 +7360,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1103,14 +1001,14 @@ index bbc78794224a..9e035ed1e746 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -7013,11 +7429,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7013,11 +7374,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1118,15 +1016,15 @@ index bbc78794224a..9e035ed1e746 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7032,6 +7451,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7032,6 +7396,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7206,7 +7626,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7206,7 +7571,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1134,60 +1032,60 @@ index bbc78794224a..9e035ed1e746 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7214,7 +7637,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7214,7 +7582,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7230,7 +7655,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7230,7 +7600,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7241,8 +7668,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7241,8 +7613,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7470,6 +7899,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+
+@@ -7470,6 +7844,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7477,6 +7907,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7477,6 +7852,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10765,11 +11196,38 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10765,11 +11141,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1200,8 +1098,6 @@ index bbc78794224a..9e035ed1e746 100644
+ struct sched_entity *curr;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
-+ struct task_struct *parent = p->parent;
-+ u64 now = sched_clock();
+
+ rq_lock(rq, &rf);
+ update_rq_clock(rq);
@@ -1212,43 +1108,37 @@ index bbc78794224a..9e035ed1e746 100644
+ update_curr(cfs_rq);
+
+ rq_unlock(rq, &rf);
-+
-+ parent->fork_start_win_stamp = now;
-+ if (parent->nr_forks_per_time >= nr_fork_threshold)
-+ parent->is_fake_interactive++;
-+
-+ parent->nr_forks_per_time++;
+}
+#else
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10800,6 +11258,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10800,6 +11195,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10912,6 +11371,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10912,6 +11308,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10922,6 +11383,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10922,6 +11320,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10929,12 +11391,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10929,12 +11328,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1256,17 +1146,17 @@ index bbc78794224a..9e035ed1e746 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10990,13 +11457,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10990,13 +11394,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1287,7 +1177,7 @@ index bbc78794224a..9e035ed1e746 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 282a6bbaacd7..a3b7316dd537 100644
@@ -1295,7 +1185,7 @@ index 282a6bbaacd7..a3b7316dd537 100644
+++ b/kernel/sched/sched.h
@@ -516,10 +516,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1304,9 +1194,9 @@ index 282a6bbaacd7..a3b7316dd537 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
+
@@ -528,9 +531,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
@@ -1320,14 +1210,14 @@ index 282a6bbaacd7..a3b7316dd537 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 62fbd09b5dc1..5f10c51fa7df 100644
+index 62fbd09b5dc1..8cbb8c5663d3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,43 @@ static struct ctl_table kern_table[] = {
+@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -1353,20 +1243,6 @@ index 62fbd09b5dc1..5f10c51fa7df 100644
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
-+ {
-+ .procname = "sched_fake_interactive_decay_time_ms",
-+ .data = &fake_interactive_decay_time,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+ {
-+ .procname = "sched_nr_fork_threshold",
-+ .data = &nr_fork_threshold,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
+#endif
#ifdef CONFIG_SCHED_DEBUG
{
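
For orientation, the score computed by calc_interactivity() above can be written out. This is a reconstruction from the visible hunks (the u >= v branch is not shown in this excerpt and is an assumption), with m the interactivity factor (default 32768), l the node's lifetime (now minus cacule_start_time), v its vruntime, and u = l - v its accumulated sleep time:

    \[
      \mathrm{score} =
      \begin{cases}
        m / (u / v)      & \text{if } u \ge v \quad \text{(mostly sleeping: low score)} \\
        2m - m / (v / u) & \text{otherwise}   \quad \text{(mostly running: high score)}
      \end{cases}
    \]

Lower scores win: is_interactive() treats a task as interactive when its score falls below interactivity_threshold (default 1000), and normalize_lifetime() rescales v once l exceeds cacule_max_lifetime (default 22000 ms) so long-lived tasks do not dominate.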
diff --git a/config b/config
index 5d928e7a6a7a..af6d4df89121 100644
--- a/config
+++ b/config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.11.18-hardened1 Kernel Configuration
+# Linux/x86 5.11.20-hardened1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 10.2.0"
CONFIG_CC_IS_GCC=y
@@ -369,8 +369,7 @@ CONFIG_ACRN_GUEST=y
# CONFIG_MPSC is not set
# CONFIG_MCORE2 is not set
# CONFIG_MATOM is not set
-# CONFIG_GENERIC_CPU
-CONFIG_MZEN2=y
+CONFIG_GENERIC_CPU=y
CONFIG_X86_INTERNODE_CACHE_SHIFT=6
CONFIG_X86_L1_CACHE_SHIFT=6
CONFIG_X86_TSC=y
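
The config change above drops the machine-specific CONFIG_MZEN2 (Zen 2) in favour of CONFIG_GENERIC_CPU, so the prebuilt kernel runs on any x86_64 CPU rather than only Zen 2 parts, at the cost of Zen-tuned code generation. A sketch of applying the same switch to an existing .config with the kernel's scripts/config helper (option names as defined by cpu-patches.patch below; assumes a patched source tree):

    $ scripts/config --file .config -d MZEN2 -e GENERIC_CPU
    $ make olddefconfig    # re-resolve dependent options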
diff --git a/cpu-patches.patch b/cpu-patches.patch
index 49478e8a0947..b5b4bafb48cb 100644
--- a/cpu-patches.patch
+++ b/cpu-patches.patch
@@ -15,8 +15,8 @@ index 814fe0d34..872b9cf59 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -157,7 +157,7 @@ config MPENTIUM4
-
-
+
+
config MK6
- bool "K6/K6-II/K6-III"
+ bool "AMD K6/K6-II/K6-III"
@@ -25,7 +25,7 @@ index 814fe0d34..872b9cf59 100644
Select this for an AMD K6-family processor. Enables use of
@@ -165,7 +165,7 @@ config MK6
flags to GCC.
-
+
config MK7
- bool "Athlon/Duron/K7"
+ bool "AMD Athlon/Duron/K7"
@@ -34,7 +34,7 @@ index 814fe0d34..872b9cf59 100644
Select this for an AMD Athlon K7-family processor. Enables use of
@@ -173,12 +173,98 @@ config MK7
flags to GCC.
-
+
config MK8
- bool "Opteron/Athlon64/Hammer/K8"
+ bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -42,7 +42,7 @@ index 814fe0d34..872b9cf59 100644
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
Enables use of some extended instructions, and passes appropriate
optimization flags to GCC.
-
+
+config MK8SSE3
+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
+ help
@@ -134,17 +134,17 @@ index 814fe0d34..872b9cf59 100644
depends on X86_32
@@ -270,7 +356,7 @@ config MPSC
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
+
config MCORE2
- bool "Core 2/newer Xeon"
+ bool "Intel Core 2"
help
-
+
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
@@ -278,6 +364,8 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
-
+
+ Enables -march=core2
+
config MATOM
@@ -153,7 +153,7 @@ index 814fe0d34..872b9cf59 100644
@@ -287,6 +375,182 @@ config MATOM
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
-
+
+config MNEHALEM
+ bool "Intel Nehalem"
+ select X86_P6_NOP
@@ -336,7 +336,7 @@ index 814fe0d34..872b9cf59 100644
@@ -294,6 +558,50 @@ config GENERIC_CPU
Generic x86-64 CPU.
Run equally well on all x86-64 CPUs.
-
+
+config GENERIC_CPU2
+ bool "Generic-x86-64-v2"
+ depends on GCC_VERSION > 110000
@@ -382,7 +382,7 @@ index 814fe0d34..872b9cf59 100644
+ Enables -march=native
+
endchoice
-
+
config X86_GENERIC
@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
config X86_L1_CACHE_SHIFT
@@ -392,19 +392,19 @@ index 814fe0d34..872b9cf59 100644
+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
default "4" if MELAN || M486SX || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
+
@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
-
+
config X86_INTEL_USERCOPY
def_bool y
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
-
+
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
+
config X86_USE_3DNOW
def_bool y
@@ -360,26 +668,26 @@ config X86_USE_3DNOW
@@ -413,24 +413,24 @@ index 814fe0d34..872b9cf59 100644
depends on X86_64
- depends on (MCORE2 || MPENTIUM4 || MPSC)
+ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
-
+
config X86_TSC
def_bool y
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
-
+
config X86_CMPXCHG64
def_bool y
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
+ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
+
# this should be set for all -march=.. options where the compiler
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
-
+
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
@@ -438,7 +438,7 @@ index 814fe0d34..872b9cf59 100644
+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
default "5" if X86_32 && X86_CMPXCHG64
default "4"
-
+
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 9a85eae37..facf9a278 100644
--- a/arch/x86/Makefile
@@ -496,7 +496,7 @@ index 9a85eae37..facf9a278 100644
+ cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
KBUILD_CFLAGS += $(cflags-y)
-
+
diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
index 75884d2cd..4e6a08d4c 100644
--- a/arch/x86/include/asm/vermagic.h
@@ -581,7 +581,7 @@ index 75884d2cd..4e6a08d4c 100644
#elif defined CONFIG_MELAN
#define MODULE_PROC_FAMILY "ELAN "
#elif defined CONFIG_MCRUSOE
---
+--
2.31.1.305.gd1b10fc6d8
@@ -600,13 +600,12 @@ index 5f5c776ef..c0f7ef4b4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1316,7 +1316,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
-
+
config CC_OPTIMIZE_FOR_PERFORMANCE_O3
bool "Optimize more for performance (-O3)"
- depends on ARC
help
Choosing this option will pass "-O3" to your compiler to optimize
the kernel yet more for performance.
---
+--
2.31.1.305.gd1b10fc6d8
-
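
The added GENERIC_CPU2..GENERIC_CPU4 choices guard -march=x86-64-v2/v3/v4 behind GCC_VERSION > 110000 (GCC 11 introduced the x86-64 ISA levels) and wrap the flag in cc-option, so an older compiler silently falls back. A rough standalone equivalent of that probe, assuming gcc on PATH (this mirrors what cc-option does; it is not a command from the patch):

    $ gcc -Werror -march=x86-64-v2 -c -x c /dev/null -o /dev/null \
          && echo 'x86-64-v2 accepted'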