author      ptr1337    2021-05-14 23:51:38 +0200
committer   ptr1337    2021-05-14 23:51:38 +0200
commit      6a9fd84c216c9cc27de719cc13e3e283792af159 (patch)
tree        3cf3dfa7f69e7f6e9506c2b69d2b74f8fdcb24c4
parent      2a8481058c11c6e078527010d55d44d7a7281e6b (diff)
download    aur-6a9fd84c216c9cc27de719cc13e3e283792af159.tar.gz
new-cacule, kernel updated
-rw-r--r--   .SRCINFO             16
-rw-r--r--   PKGBUILD             12
-rw-r--r--   cacule-5.11.patch   413
3 files changed, 227 insertions, 214 deletions
diff --git a/.SRCINFO b/.SRCINFO
index f4f9f764b3a6..d00b9551dc2c 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
- pkgver = 5.11.20.hardened1
- pkgrel = 2
+ pkgver = 5.11.21.hardened1
+ pkgrel = 1
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
license = GPL2
@@ -19,19 +19,19 @@ pkgbase = linux-hardened-cacule
makedepends = graphviz
makedepends = imagemagick
options = !strip
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.20.tar.xz
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.20.tar.sign
- source = https://github.com/anthraxx/linux-hardened/releases/download/5.11.20-hardened1/linux-hardened-5.11.20-hardened1.patch
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.21.tar.xz
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.21.tar.sign
+ source = https://github.com/anthraxx/linux-hardened/releases/download/5.11.21-hardened1/linux-hardened-5.11.21-hardened1.patch
source = cacule-5.11.patch
source = cpu-patches.patch
source = config
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
validpgpkeys = E240B57E2C4630BA768E2F26FC1B547C8D8172C8
- sha256sums = 73faeea560dc4fe1f9ee17af4b8d72446192e536979460d66ed6d0cd03bf0797
+ sha256sums = 366ba5bb00be28b604aac630c4f64301063892f27353b299177c396af0ad877f
sha256sums = SKIP
- sha256sums = aefaba9c76209bc8c80392ba20917eca55206e101da4a2e2f27fa2dc53164e1a
- sha256sums = 5670d4f198be1d907aa1941b648ace4e8da1726be223e3208ee4285b396f48cb
+ sha256sums = 6f5431afc0a3ea2df1132eca91161e8db143bfc338c59e98627f5d782b7aff20
+ sha256sums = cbea4c6603c8a0f8e8cc9db6014bcbaceb36bb8805ff14e47099926d37c7ad4d
sha256sums = 3fcac4b300dfd7dd0c092033db413949e82811985b97cb4f7dc826791511fc34
sha256sums = 9c7918c51a41f3e192aa7546f546537f4f04341be17127a5ec87ce7dcf75555e
diff --git a/PKGBUILD b/PKGBUILD
index 7b01aa38363e..ca65b1277c60 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -6,8 +6,8 @@
pkgbase=linux-hardened-cacule
-pkgver=5.11.20.hardened1
-pkgrel=2
+pkgver=5.11.21.hardened1
+pkgrel=1
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
arch=(x86_64)
@@ -21,7 +21,7 @@ _srcname=linux-${pkgver%.*}
_srctag=${pkgver%.*}-${pkgver##*.}
source=(
https://www.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/${_srcname}.tar.{xz,sign}
- https://github.com/anthraxx/linux-hardened/releases/download/5.11.20-hardened1/linux-hardened-5.11.20-hardened1.patch
+ https://github.com/anthraxx/linux-hardened/releases/download/5.11.21-hardened1/linux-hardened-5.11.21-hardened1.patch
cacule-5.11.patch
cpu-patches.patch
config # the main kernel config file
@@ -31,10 +31,10 @@ validpgpkeys=(
'647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
'E240B57E2C4630BA768E2F26FC1B547C8D8172C8' # Levente Polyak
)
-sha256sums=('73faeea560dc4fe1f9ee17af4b8d72446192e536979460d66ed6d0cd03bf0797'
+sha256sums=('366ba5bb00be28b604aac630c4f64301063892f27353b299177c396af0ad877f'
'SKIP'
- 'aefaba9c76209bc8c80392ba20917eca55206e101da4a2e2f27fa2dc53164e1a'
- '5670d4f198be1d907aa1941b648ace4e8da1726be223e3208ee4285b396f48cb'
+ '6f5431afc0a3ea2df1132eca91161e8db143bfc338c59e98627f5d782b7aff20'
+ 'cbea4c6603c8a0f8e8cc9db6014bcbaceb36bb8805ff14e47099926d37c7ad4d'
'3fcac4b300dfd7dd0c092033db413949e82811985b97cb4f7dc826791511fc34'
'9c7918c51a41f3e192aa7546f546537f4f04341be17127a5ec87ce7dcf75555e')
diff --git a/cacule-5.11.patch b/cacule-5.11.patch
index 449f37825465..e1848f0209ae 100644
--- a/cacule-5.11.patch
+++ b/cacule-5.11.patch
@@ -5,12 +5,12 @@ index 1d56a6b73a4e..4d55ff02310c 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
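
The hunk above documents the new sched_interactivity_factor knob, which supplies the value *m* used by the ULE-style interactivity score referenced in Figure 1 of the linked paper. The following is a rough standalone C illustration of that scoring idea, not the kernel's calc_interactivity(); the function name, parameters, and exact arithmetic are simplified assumptions. Lower scores indicate a more interactive, sleep-heavy task.

/*
 * Illustrative only: a userspace approximation of an ULE-style
 * interactivity score, where "m" plays the role of
 * sched_interactivity_factor. Sleep-heavy tasks score below m,
 * CPU-bound tasks score between m and roughly 2*m.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int interactivity_score(uint64_t run_ns, uint64_t sleep_ns,
                                        unsigned int m)
{
	/* force the low bit to 1 to avoid division by zero */
	run_ns |= 1;
	sleep_ns |= 1;

	if (sleep_ns >= run_ns)
		return m / (unsigned int)(sleep_ns / run_ns);     /* roughly 0 .. m  */
	else
		return m + m / (unsigned int)(run_ns / sleep_ns); /* roughly m .. 2m */
}

int main(void)
{
	unsigned int m = 32768; /* matches the interactivity_factor default set later in this patch */

	printf("mostly sleeping: %u\n", interactivity_score(1000, 9000, m));
	printf("balanced:        %u\n", interactivity_score(5000, 5000, m));
	printf("mostly running:  %u\n", interactivity_score(9000, 1000, m));
	return 0;
}

With the default factor of 32768, a task that sleeps far more than it runs scores well below 32768 in this sketch, while a CPU-bound task scores above it, which is the behaviour the tunable is meant to shape.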
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -102,7 +102,7 @@ index 6e3a5eeec509..97103c8a91b2 100644
@@ -450,10 +450,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -121,7 +121,7 @@ index 6e3a5eeec509..97103c8a91b2 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
+
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3c31ba88aca5..4cf162341ab8 100644
--- a/include/linux/sched/sysctl.h
@@ -129,7 +129,7 @@ index 3c31ba88aca5..4cf162341ab8 100644
@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
+extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
@@ -144,9 +144,9 @@ index a3d27421de8f..d0cfdf6e9bed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -824,6 +824,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -170,25 +170,25 @@ index a3d27421de8f..d0cfdf6e9bed 100644
This option optimizes the scheduler for common desktop workloads by
automatically creating and populating task groups. This separation
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
-index 38ef6d06888e..c8cf984c294e 100644
+index 38ef6d06888e..865f8dbddca8 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
-+ bool "2000 HZ"
++ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -205,12 +205,12 @@ index f0056507a373..c29107de2ce0 100644
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3840,6 +3845,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -221,20 +221,20 @@ index f0056507a373..c29107de2ce0 100644
@@ -7727,6 +7736,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
+ printk(KERN_INFO "CacULE CPU scheduler v5.11-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 2357921580f9..3adc9ee2bcfc 100644
+index 2357921580f9..86bd2a41f57a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -535,8 +535,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -243,7 +243,7 @@ index 2357921580f9..3adc9ee2bcfc 100644
+#if !defined(CONFIG_CACULE_SCHED)
+ min_vruntime, rq0_min_vruntime, spread0,
+#endif
-+ spread;
++ spread;
struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
@@ -276,7 +276,7 @@ index 2357921580f9..3adc9ee2bcfc 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index bbc78794224a..ffb6e862561d 100644
+index bbc78794224a..a3c1b7581c7a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -289,23 +289,23 @@ index bbc78794224a..ffb6e862561d 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
+
@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
+unsigned int __read_mostly interactivity_factor = 32768;
+unsigned int __read_mostly interactivity_threshold = 1000;
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -326,7 +326,7 @@ index bbc78794224a..ffb6e862561d 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +585,141 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +585,170 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
@@ -382,7 +382,7 @@ index bbc78794224a..ffb6e862561d 100644
+
+ return -1;
+}
-
++
+/*
+ * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
+ * otherwise return -1
@@ -412,18 +412,47 @@ index bbc78794224a..ffb6e862561d 100644
+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
+{
+ struct cacule_node *se = &(_se->cacule_node);
++ struct cacule_node *iter, *next = NULL;
++ u64 now = sched_clock();
++ unsigned int score_se = calc_interactivity(now, se);
+
+ se->next = NULL;
+ se->prev = NULL;
+
+ if (likely(cfs_rq->head)) {
-+ // insert se at head
-+ se->next = cfs_rq->head;
-+ cfs_rq->head->prev = se;
+
-+ // lastly reset the head
-+ cfs_rq->head = se;
++ // start from tail
++ iter = cfs_rq->tail;
++
++ // does se have higher IS than iter?
++ while (iter && entity_before_cached(now, score_se, iter) == -1) {
++ next = iter;
++ iter = iter->prev;
++ }
++
++ // se in tail position
++ if (iter == cfs_rq->tail) {
++ cfs_rq->tail->next = se;
++ se->prev = cfs_rq->tail;
++
++ cfs_rq->tail = se;
++ }
++ // else if not head no tail, insert se after iter
++ else if (iter) {
++ se->next = next;
++ se->prev = iter;
+
++ iter->next = se;
++ next->prev = se;
++ }
++ // insert se at head
++ else {
++ se->next = cfs_rq->head;
++ cfs_rq->head->prev = se;
++
++ // lastly reset the head
++ cfs_rq->head = se;
++ }
+ } else {
+ // if empty rq
+ cfs_rq->head = se;
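
The hunk above replaces the old head-only insertion in __enqueue_entity() with an ordered insert that walks from cfs_rq->tail toward the head and places the entity by interactivity score, keeping the most interactive entity at the head. Below is a minimal userspace C sketch of that insertion; the struct and field names are simplified assumptions, as is the convention (drawn from the surrounding hunks) that a lower score sorts earlier.

/*
 * Illustrative only: a minimal model of the tail-first sorted insert
 * performed by the updated __enqueue_entity() hunk. The list is kept
 * in ascending score order, so the head holds the lowest (most
 * interactive) score.
 */
#include <stddef.h>

struct node {
	struct node *prev, *next;
	unsigned int score;        /* stands in for the interactivity score */
};

struct queue {
	struct node *head, *tail;  /* mirrors cfs_rq->head / cfs_rq->tail */
};

static void enqueue_sorted(struct queue *q, struct node *n)
{
	struct node *iter, *next = NULL;

	n->prev = n->next = NULL;

	if (!q->head) {            /* empty queue */
		q->head = q->tail = n;
		return;
	}

	/* walk from the tail toward the head while n scores better (lower) */
	iter = q->tail;
	while (iter && n->score < iter->score) {
		next = iter;
		iter = iter->prev;
	}

	if (iter == q->tail) {     /* n belongs at the tail */
		iter->next = n;
		n->prev = iter;
		q->tail = n;
	} else if (iter) {         /* n goes between iter and next */
		n->prev = iter;
		n->next = next;
		iter->next = n;
		next->prev = n;
	} else {                   /* n beats every queued node: new head */
		n->next = q->head;
		q->head->prev = n;
		q->head = n;
	}
}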
@@ -454,7 +483,7 @@ index bbc78794224a..ffb6e862561d 100644
+ struct cacule_node *next = se->next;
+
+ prev->next = next;
-+
+
+ if (next)
+ next->prev = prev;
+ }
@@ -468,56 +497,51 @@ index bbc78794224a..ffb6e862561d 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +777,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+@@ -626,16 +806,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
return rb_entry(next, struct sched_entity, run_node);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
+#ifdef CONFIG_CACULE_SCHED
-+ struct cacule_node *cn = cfs_rq->head;
-+
-+ if (!cn)
++ if (!cfs_rq->tail)
+ return NULL;
+
-+ while (cn->next)
-+ cn = cn->next;
-+
-+ return se_of(cn);
++ return se_of(cfs_rq->tail);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return rb_entry(last, struct sched_entity, run_node);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -720,6 +884,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +908,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +894,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +918,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,14 +1002,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,14 +1026,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -556,17 +580,20 @@ index bbc78794224a..ffb6e862561d 100644
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
-- u64 now = rq_clock_task(rq_of(cfs_rq));
-- u64 delta_exec;
++#ifdef CONFIG_CACULE_SCHED
+ u64 now = sched_clock();
+ u64 delta_exec, delta_fair;
-
++#else
+ u64 now = rq_clock_task(rq_of(cfs_rq));
+ u64 delta_exec;
++#endif
+
if (unlikely(!curr))
return;
-@@ -860,8 +1058,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -860,8 +1087,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
@@ -576,52 +603,55 @@ index bbc78794224a..ffb6e862561d 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-@@ -1029,7 +1234,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1029,7 +1263,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1061,7 +1265,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1061,7 +1294,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
-- se->exec_start = rq_clock_task(rq_of(cfs_rq));
++#ifdef CONFIG_CACULE_SCHED
+ se->exec_start = sched_clock();
++#else
+ se->exec_start = rq_clock_task(rq_of(cfs_rq));
++#endif
}
-
+
/**************************************************
-@@ -4115,7 +4319,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4115,7 +4352,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4126,6 +4330,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4126,6 +4363,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4157,6 +4362,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4157,6 +4395,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4215,18 +4421,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4215,18 +4454,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -629,7 +659,7 @@ index bbc78794224a..ffb6e862561d 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -638,91 +668,88 @@ index bbc78794224a..ffb6e862561d 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4235,6 +4446,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4235,6 +4479,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4249,8 +4461,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4249,8 +4494,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4271,6 +4485,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4271,6 +4518,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4315,6 +4530,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4315,6 +4563,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4339,13 +4555,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4339,13 +4588,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4354,12 +4573,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4354,12 +4606,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4368,8 +4589,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4368,8 +4622,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
-+static struct sched_entity *
-+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
-+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
@@ -730,49 +757,35 @@ index bbc78794224a..ffb6e862561d 100644
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
+ // does head have higher IS than curr
-+ if (pick_next_entity(cfs_rq, curr) != curr)
++ if (entity_before(sched_clock(), &curr->cacule_node, cfs_rq->head) == 1)
+ resched_curr(rq_of(cfs_rq));
+}
+#else
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4409,6 +4646,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4409,6 +4676,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4443,6 +4681,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4443,6 +4711,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
-+ struct cacule_node *next, *se = cfs_rq->head;
-+ u64 now = sched_clock();
-+ unsigned int score_se;
++ struct cacule_node *se = cfs_rq->head;
+
+ if (unlikely(!se))
-+ return curr;
-+
-+ score_se = calc_interactivity(now, se);
-+
-+ next = se->next;
-+ while (next) {
-+ if (entity_before_cached(now, score_se, next) == 1) {
-+ se = next;
-+ score_se = calc_interactivity(now, se);
-+ }
-+
-+ next = next->next;
-+ }
-+
-+ if (unlikely(curr && entity_before_cached(now, score_se, &curr->cacule_node) == 1))
++ se = &curr->cacule_node;
++ else if (unlikely(curr
++ && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
+ se = &curr->cacule_node;
+
+ return se_of(se);
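
Because enqueue now keeps the queue sorted, the reworked pick_next_entity() above drops the previous full-list scan and only compares the head with the currently running entity. A minimal sketch of that selection follows, again with simplified types and the assumed lower-score-is-more-interactive convention; it is not the kernel code itself.

/*
 * Illustrative only: the shape of the reworked pick_next_entity().
 * With the queue sorted at enqueue time, picking the next entity is a
 * single head/current comparison rather than a scan.
 */
struct cn { struct cn *prev, *next; unsigned int score; };
struct rq_sketch { struct cn *head, *tail; };

static struct cn *pick_next(struct rq_sketch *rq, struct cn *curr)
{
	struct cn *best = rq->head;

	if (!best)                             /* nothing queued: keep curr */
		return curr;

	if (curr && curr->score < best->score) /* curr still beats the best queued */
		return curr;

	return best;                           /* otherwise the head runs next */
}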
@@ -780,26 +793,26 @@ index bbc78794224a..ffb6e862561d 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4503,6 +4770,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4503,6 +4786,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5605,7 +5873,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5605,7 +5889,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5637,12 +5907,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5637,12 +5923,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -814,32 +827,32 @@ index bbc78794224a..ffb6e862561d 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5758,6 +6030,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5758,6 +6046,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5804,6 +6077,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5804,6 +6093,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6484,6 +6758,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6484,6 +6774,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6717,6 +6992,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6717,6 +7008,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -893,13 +906,13 @@ index bbc78794224a..ffb6e862561d 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6741,6 +7067,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -6741,6 +7083,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
+
@@ -922,16 +935,16 @@ index bbc78794224a..ffb6e862561d 100644
+#else
if (wake_flags & WF_TTWU) {
record_wakee(p);
-
-@@ -6753,6 +7099,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
-
+
+@@ -6753,6 +7115,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6799,6 +7146,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6799,6 +7162,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -939,31 +952,31 @@ index bbc78794224a..ffb6e862561d 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6824,6 +7172,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6824,6 +7188,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6869,6 +7218,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6869,6 +7234,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6947,6 +7297,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6947,6 +7313,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6955,9 +7306,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6955,9 +7322,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -973,23 +986,23 @@ index bbc78794224a..ffb6e862561d 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6971,10 +7325,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6971,10 +7341,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7004,6 +7360,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7004,6 +7376,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1001,14 +1014,14 @@ index bbc78794224a..ffb6e862561d 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -7013,11 +7374,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7013,11 +7390,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1016,15 +1029,15 @@ index bbc78794224a..ffb6e862561d 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7032,6 +7396,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7032,6 +7412,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7206,7 +7571,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7206,7 +7587,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1032,60 +1045,60 @@ index bbc78794224a..ffb6e862561d 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7214,7 +7582,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7214,7 +7598,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7230,7 +7600,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7230,7 +7616,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7241,8 +7613,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7241,8 +7629,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7470,6 +7844,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+
+@@ -7470,6 +7860,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7477,6 +7852,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7477,6 +7868,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10765,11 +11141,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10765,11 +11157,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1113,32 +1126,32 @@ index bbc78794224a..ffb6e862561d 100644
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10800,6 +11195,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10800,6 +11211,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10912,6 +11308,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10912,6 +11324,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10922,6 +11320,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10922,6 +11336,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10929,12 +11328,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10929,12 +11344,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1146,17 +1159,17 @@ index bbc78794224a..ffb6e862561d 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10990,13 +11394,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10990,13 +11410,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1177,7 +1190,7 @@ index bbc78794224a..ffb6e862561d 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 282a6bbaacd7..a3b7316dd537 100644
@@ -1185,7 +1198,7 @@ index 282a6bbaacd7..a3b7316dd537 100644
+++ b/kernel/sched/sched.h
@@ -516,10 +516,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1194,9 +1207,9 @@ index 282a6bbaacd7..a3b7316dd537 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
+
@@ -528,9 +531,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
@@ -1210,7 +1223,7 @@ index 282a6bbaacd7..a3b7316dd537 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c