author     ptr1337  2021-05-09 18:00:42 +0200
committer  ptr1337  2021-05-09 18:00:42 +0200
commit     1120f0fc15f1a735ce359445e7225abaa88fd3a7 (patch)
tree       315698cbb2c7956d79d8efa51a286390b1ef87bc
parent     ac875bc1709c35f089fa83b6818db0466dd2a332 (diff)
download   aur-1120f0fc15f1a735ce359445e7225abaa88fd3a7.tar.gz
cacule rev2 patches, kernel update
-rw-r--r--  .SRCINFO            15
-rw-r--r--  PKGBUILD            10
-rw-r--r--  cacule-5.11.patch  714
3 files changed, 403 insertions, 336 deletions
diff --git a/.SRCINFO b/.SRCINFO
index c0a65b8ba98f..a6115192837d 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-hardened-cacule
pkgdesc = Security-Hardened Linux with the cacule scheduler
- pkgver = 5.11.18.hardened1
+ pkgver = 5.11.19.hardened1
pkgrel = 1
url = https://github.com/anthraxx/linux-hardened
arch = x86_64
@@ -19,18 +19,18 @@ pkgbase = linux-hardened-cacule
makedepends = graphviz
makedepends = imagemagick
options = !strip
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.18.tar.xz
- source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.18.tar.sign
- source = https://github.com/anthraxx/linux-hardened/releases/download/5.11.18-hardened1/linux-hardened-5.11.18-hardened1.patch
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.19.tar.xz
+ source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.11.19.tar.sign
+ source = https://github.com/anthraxx/linux-hardened/releases/download/5.11.19-hardened1/linux-hardened-5.11.19-hardened1.patch
source = cacule-5.11.patch
source = config
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
validpgpkeys = E240B57E2C4630BA768E2F26FC1B547C8D8172C8
- sha256sums = 8b070bbdd66eced489cbfcf842d540f50742df10f7b221e10998ab57284fcc6f
+ sha256sums = 5aee19ad466b5bbbde642077f42bfaafff4e612296bdd7946faa01d917472b4b
sha256sums = SKIP
- sha256sums = cdb101c77aa305824819fb344ad048fca005c6f6898befe92d9506cf3634e768
- sha256sums = 302a771ca3f2b2c8b9f1d9aed7afe222ef770f2b5ce6c5bb743e9d4ca5645115
+ sha256sums = 6e8491ffdfb350fa37fa6a3cac31603fc2f3517c8f2ca8d43f54461bc8ab9f99
+ sha256sums = 94bc3f303f69863d5cbc9c64e24862b4948a32756d7167f13e261fabd15c0f66
sha256sums = b8d0a96303d908269f5b96ba21cc97fbc948f099fd7dd01e6c91fce095f6b861
pkgname = linux-hardened-cacule
@@ -46,4 +46,3 @@ pkgname = linux-hardened-cacule
pkgname = linux-hardened-cacule-headers
pkgdesc = Headers and scripts for building modules for the Security-Hardened Linux with the cacule scheduler kernel
-
diff --git a/PKGBUILD b/PKGBUILD
index a02c80f7d04a..970dea38c7ee 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -6,7 +6,7 @@
pkgbase=linux-hardened-cacule
-pkgver=5.11.18.hardened1
+pkgver=5.11.19.hardened1
pkgrel=1
pkgdesc='Security-Hardened Linux with the cacule scheduler'
url='https://github.com/anthraxx/linux-hardened'
@@ -21,7 +21,7 @@ _srcname=linux-${pkgver%.*}
_srctag=${pkgver%.*}-${pkgver##*.}
source=(
https://www.kernel.org/pub/linux/kernel/v${pkgver%%.*}.x/${_srcname}.tar.{xz,sign}
- https://github.com/anthraxx/linux-hardened/releases/download/5.11.18-hardened1/linux-hardened-5.11.18-hardened1.patch
+ https://github.com/anthraxx/linux-hardened/releases/download/5.11.19-hardened1/linux-hardened-5.11.19-hardened1.patch
cacule-5.11.patch
config # the main kernel config file
)
@@ -30,10 +30,10 @@ validpgpkeys=(
'647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
'E240B57E2C4630BA768E2F26FC1B547C8D8172C8' # Levente Polyak
)
-sha256sums=('8b070bbdd66eced489cbfcf842d540f50742df10f7b221e10998ab57284fcc6f'
+sha256sums=('5aee19ad466b5bbbde642077f42bfaafff4e612296bdd7946faa01d917472b4b'
'SKIP'
- 'cdb101c77aa305824819fb344ad048fca005c6f6898befe92d9506cf3634e768'
- '302a771ca3f2b2c8b9f1d9aed7afe222ef770f2b5ce6c5bb743e9d4ca5645115'
+ '6e8491ffdfb350fa37fa6a3cac31603fc2f3517c8f2ca8d43f54461bc8ab9f99'
+ '94bc3f303f69863d5cbc9c64e24862b4948a32756d7167f13e261fabd15c0f66'
'b8d0a96303d908269f5b96ba21cc97fbc948f099fd7dd01e6c91fce095f6b861')
export KBUILD_BUILD_HOST=archlinux
diff --git a/cacule-5.11.patch b/cacule-5.11.patch
index bc5b28ce267e..7f6ccd2f9422 100644
--- a/cacule-5.11.patch
+++ b/cacule-5.11.patch
@@ -5,12 +5,12 @@ index 1d56a6b73a4e..4d55ff02310c 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -96,13 +96,13 @@ index 000000000000..82b0847c468a
+ idle timer scheduler in order to avoid to get into priority
+ inversion problems which would deadlock the machine.
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 6e3a5eeec509..e5da9a62fe4e 100644
+index 6e3a5eeec509..f5a4fc49286f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -450,16 +450,29 @@ struct sched_statistics {
+@@ -450,10 +450,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -118,30 +118,37 @@ index 6e3a5eeec509..e5da9a62fe4e 100644
struct rb_node run_node;
+#ifdef CONFIG_CACULE_SCHED
+ struct cacule_node cacule_node;
-+#else
-+ u64 vruntime;
+#endif
struct list_head group_node;
unsigned int on_rq;
-
- u64 exec_start;
- u64 sum_exec_runtime;
-- u64 vruntime;
- u64 prev_sum_exec_runtime;
-
- u64 nr_migrations;
+
+@@ -872,6 +884,12 @@ struct task_struct {
+ struct list_head sibling;
+ struct task_struct *group_leader;
+
++#ifdef CONFIG_CACULE_SCHED
++ u64 fork_start_win_stamp;
++ unsigned int nr_forks_per_time;
++ int is_fake_interactive;
++#endif
++
+ /*
+ * 'ptraced' is the list of tasks this task is using ptrace() on.
+ *
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..cb819c3d86f3 100644
+index 3c31ba88aca5..20c85c808485 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
+@@ -31,6 +31,14 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
-+extern int interactivity_factor;
++extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
-+extern int cacule_max_lifetime;
++extern unsigned int cacule_max_lifetime;
++extern unsigned int fake_interactive_decay_time;
++extern unsigned int nr_fork_threshold;
+#endif
+
enum sched_tunable_scaling {
@@ -152,9 +159,9 @@ index a3d27421de8f..d0cfdf6e9bed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -824,6 +824,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -177,28 +184,73 @@ index a3d27421de8f..d0cfdf6e9bed 100644
help
This option optimizes the scheduler for common desktop workloads by
automatically creating and populating task groups. This separation
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 38ef6d06888e..c8cf984c294e 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -46,6 +46,9 @@ choice
+ 1000 Hz is the preferred choice for desktop systems and other
+ systems requiring fast interactive responses to events.
+
++ config HZ_2000
++ bool "2000 HZ"
++
+ endchoice
+
+ config HZ
+@@ -54,6 +57,7 @@ config HZ
+ default 250 if HZ_250
+ default 300 if HZ_300
+ default 1000 if HZ_1000
++ default 2000 if HZ_2000
+
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 04029e35e69a..9dfd515104db 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -667,6 +667,17 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
+ write_lock_irq(&tasklist_lock);
+ forget_original_parent(tsk, &dead);
+
++#ifdef CONFIG_CACULE_SCHED
++ p = tsk->parent;
++ if (p) {
++ if (p->nr_forks_per_time)
++ p->nr_forks_per_time--;
++
++ if (p->is_fake_interactive)
++ p->is_fake_interactive--;
++ }
++#endif
++
+ if (group_dead)
+ kill_orphaned_pgrp(tsk->group_leader, NULL);
+
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index f0056507a373..4d8a3b232ae9 100644
+index f0056507a373..9ecce53ddcc1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3554,7 +3554,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->se.sum_exec_runtime = 0;
+@@ -3555,6 +3555,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.vruntime = 0;
-+#else
- p->se.vruntime = 0;
++ p->fork_start_win_stamp = 0;
++ p->nr_forks_per_time = 0;
++ p->is_fake_interactive = 0;
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3840,6 +3846,10 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3840,6 +3848,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -206,78 +258,39 @@ index f0056507a373..4d8a3b232ae9 100644
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -7727,6 +7737,10 @@ void __init sched_init(void)
+@@ -7727,6 +7739,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
-+ printk(KERN_INFO "CacULE CPU scheduler v5.11 by Hamad Al Marri.");
++ printk(KERN_INFO "CacULE CPU scheduler v5.11-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 2357921580f9..fb4ef69724c3 100644
+index 2357921580f9..3adc9ee2bcfc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -439,7 +439,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
- return;
-
- PN(se->exec_start);
-+#ifdef CONFIG_CACULE_SCHED
-+ PN(se->cacule_node.vruntime);
-+#else
- PN(se->vruntime);
-+#endif
- PN(se->sum_exec_runtime);
-
- if (schedstat_enabled()) {
-@@ -493,7 +497,11 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
-
- SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
- p->comm, task_pid_nr(p),
-+#ifdef CONFIG_CACULE_SCHED
-+ SPLIT_NS(p->se.cacule_node.vruntime),
-+#else
- SPLIT_NS(p->se.vruntime),
-+#endif
- (long long)(p->nvcsw + p->nivcsw),
- p->prio);
-
-@@ -535,8 +543,12 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+@@ -535,8 +535,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
- spread, rq0_min_vruntime, spread0;
-+ s64 MIN_vruntime = -1,
++ s64 MIN_vruntime = -1, max_vruntime = -1,
+#if !defined(CONFIG_CACULE_SCHED)
-+ min_vruntime, rq0_min_vruntime,
-+ spread0,
++ min_vruntime, rq0_min_vruntime, spread0,
+#endif
-+ max_vruntime = -1, spread;
++ spread;
struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
-@@ -553,25 +565,41 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rb_first_cached(&cfs_rq->tasks_timeline))
-+#ifdef CONFIG_CACULE_SCHED
-+ MIN_vruntime = (__pick_first_entity(cfs_rq))->cacule_node.vruntime;
-+#else
- MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
-+#endif
-+
+@@ -557,21 +560,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
last = __pick_last_entity(cfs_rq);
if (last)
-+#ifdef CONFIG_CACULE_SCHED
-+ max_vruntime = last->cacule_node.vruntime;
-+#else
max_vruntime = last->vruntime;
-+#endif
-+
+#if !defined(CONFIG_CACULE_SCHED)
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
@@ -302,20 +315,8 @@ index 2357921580f9..fb4ef69724c3 100644
SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
-@@ -928,7 +956,11 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
- #define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
-
- PN(se.exec_start);
-+#ifdef CONFIG_CACULE_SCHED
-+ PN(se.cacule_node.vruntime);
-+#else
- PN(se.vruntime);
-+#endif
- PN(se.sum_exec_runtime);
-
- nr_switches = p->nvcsw + p->nivcsw;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index bbc78794224a..06e9701a3027 100644
+index bbc78794224a..9e035ed1e746 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -328,23 +329,25 @@ index bbc78794224a..06e9701a3027 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
-@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
+
+@@ -113,6 +117,13 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
-+int __read_mostly cacule_max_lifetime = 22000; // in ms
-+int __read_mostly interactivity_factor = 32768;
-+unsigned int __read_mostly interactivity_threshold = 20480;
++unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
++unsigned int __read_mostly interactivity_factor = 32768;
++unsigned int __read_mostly interactivity_threshold = 1000;
++unsigned int __read_mostly fake_interactive_decay_time = 1000; // in ms
++unsigned int __read_mostly nr_fork_threshold = 3;
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+@@ -253,6 +264,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -356,7 +359,7 @@ index bbc78794224a..06e9701a3027 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +531,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -365,42 +368,85 @@ index bbc78794224a..06e9701a3027 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +585,169 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +587,183 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
+#endif /* CONFIG_CACULE_SCHED */
+
+#ifdef CONFIG_CACULE_SCHED
++static inline unsigned int is_fake_interactive(struct cacule_node *cn)
++{
++ struct sched_entity *se = se_of(cn);
++ struct task_struct *parent = NULL;
++ struct cfs_rq *cfs_rq;
++ u64 win_time = fake_interactive_decay_time * 1000000ULL;
++ u64 now = sched_clock();
++
++ while (!parent) {
++ if (entity_is_task(se)) {
++ parent = task_of(se)->parent;
++ break;
++ }
++
++ cfs_rq = group_cfs_rq(se);
++
++ if (!cfs_rq->head && !cfs_rq->curr)
++ return 0;
++
++ if (cfs_rq->head)
++ se = se_of(cfs_rq->head);
++ else if (cfs_rq->curr)
++ se = cfs_rq->curr;
++ }
++
++ if (parent->is_fake_interactive
++ && (now - parent->fork_start_win_stamp > win_time))
++ {
++ parent->fork_start_win_stamp = now;
++ parent->is_fake_interactive--;
++ }
++
++ return parent->is_fake_interactive;
++}
++
+static unsigned int
+calc_interactivity(u64 now, struct cacule_node *se)
+{
-+ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor;
-+ unsigned int score_se;
++ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
++ unsigned int score_se, fake_interactivity;
+
+ /*
+ * in case of vruntime==0, logical OR with 1 would
+ * make sure that the least sig. bit is 1
+ */
+ l_se = now - se->cacule_start_time;
-+ vr_se = se->vruntime | 1;
-+ u64_factor = interactivity_factor;
++ vr_se = se->vruntime | 1;
++ u64_factor_m = interactivity_factor;
++ _2m = u64_factor_m << 1;
+
+ /* safety check */
+ if (likely(l_se > vr_se))
+ sleep_se = (l_se - vr_se) | 1;
+
+ if (sleep_se >= vr_se)
-+ score_se = u64_factor / (sleep_se / vr_se);
++ score_se = u64_factor_m / (sleep_se / vr_se);
+ else
-+ score_se = (u64_factor << 1) - (u64_factor / (vr_se / sleep_se));
++ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
++
++ fake_interactivity = is_fake_interactive(se);
++ if (fake_interactivity)
++ score_se += (_2m * fake_interactivity) + 1;
+
+ return score_se;
+}
+
+static inline int is_interactive(struct cacule_node *cn)
+{
-+ if (cn->vruntime == 0)
++ if (se_of(cn)->vruntime == 0)
++ return 0;
++
++ if (is_fake_interactive(cn))
+ return 0;
+
+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
@@ -420,7 +466,7 @@ index bbc78794224a..06e9701a3027 100644
+
+ return -1;
+}
-+
+
+/*
+ * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
+ * otherwise return -1
@@ -443,54 +489,25 @@ index bbc78794224a..06e9701a3027 100644
+
+ return -1;
+}
-
++
+/*
+ * Enqueue an entity
+ */
+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
+{
+ struct cacule_node *se = &(_se->cacule_node);
-+ struct cacule_node *iter, *next = NULL;
-+ u64 now = sched_clock();
-+ unsigned int score_se = calc_interactivity(now, se);
+
+ se->next = NULL;
+ se->prev = NULL;
+
+ if (likely(cfs_rq->head)) {
-+
-+ // start from tail
-+ iter = cfs_rq->tail;
-+
-+ // does se have higher IS than iter?
-+ while (iter && entity_before_cached(now, score_se, iter) == -1) {
-+ next = iter;
-+ iter = iter->prev;
-+ }
-+
-+ // se in tail position
-+ if (iter == cfs_rq->tail) {
-+ cfs_rq->tail->next = se;
-+ se->prev = cfs_rq->tail;
-+
-+ cfs_rq->tail = se;
-+ }
-+ // else if not head no tail, insert se after iter
-+ else if (iter) {
-+ se->next = next;
-+ se->prev = iter;
-+
-+ iter->next = se;
-+ next->prev = se;
-+ }
+ // insert se at head
-+ else {
-+ se->next = cfs_rq->head;
-+ cfs_rq->head->prev = se;
++ se->next = cfs_rq->head;
++ cfs_rq->head->prev = se;
++
++ // lastly reset the head
++ cfs_rq->head = se;
+
-+ // lastly reset the head
-+ cfs_rq->head = se;
-+ }
+ } else {
+ // if empty rq
+ cfs_rq->head = se;
@@ -535,12 +552,12 @@ index bbc78794224a..06e9701a3027 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +805,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+@@ -626,16 +821,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
return rb_entry(next, struct sched_entity, run_node);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -556,39 +573,39 @@ index bbc78794224a..06e9701a3027 100644
+ return se_of(cn);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return rb_entry(last, struct sched_entity, run_node);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -720,6 +912,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +928,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +922,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +938,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,13 +1030,49 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,14 +1046,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
-+ struct cacule_node *cn;
++ struct cacule_node *cn = &se->cacule_node;
+ u64 max_life_ns, life_time;
+ s64 diff;
+
@@ -598,25 +615,21 @@ index bbc78794224a..06e9701a3027 100644
+ * Ex. for 30s, with left shift (20bits) == 31.457s
+ */
+ max_life_ns = ((u64) cacule_max_lifetime) << 20;
++ life_time = now - cn->cacule_start_time;
++ diff = life_time - max_life_ns;
+
-+ for_each_sched_entity(se) {
-+ cn = &se->cacule_node;
-+ life_time = now - cn->cacule_start_time;
-+ diff = life_time - max_life_ns;
++ if (diff > 0) {
++ // multiply life_time by 1024 for more precision
++ u64 old_hrrn_x = (life_time << 7) / ((cn->vruntime >> 3) | 1);
+
-+ if (unlikely(diff > 0)) {
-+ // multiply life_time by 8 for more precision
-+ u64 old_hrrn_x8 = life_time / ((cn->vruntime >> 3) | 1);
++ // reset life to half max_life (i.e ~15s)
++ cn->cacule_start_time = now - (max_life_ns >> 1);
+
-+ // reset life to half max_life (i.e ~15s)
-+ cn->cacule_start_time = now - (max_life_ns >> 1);
++ // avoid division by zero
++ if (old_hrrn_x == 0) old_hrrn_x = 1;
+
-+ // avoid division by zero
-+ if (old_hrrn_x8 == 0) old_hrrn_x8 = 1;
-+
-+ // reset vruntime based on old hrrn ratio
-+ cn->vruntime = (max_life_ns << 2) / old_hrrn_x8;
-+ }
++ // reset vruntime based on old hrrn ratio
++ cn->vruntime = (max_life_ns << 9) / old_hrrn_x;
+ }
+}
+#endif /* CONFIG_CACULE_SCHED */
@@ -628,76 +641,71 @@ index bbc78794224a..06e9701a3027 100644
{
struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_clock_task(rq_of(cfs_rq));
+- u64 delta_exec;
+ u64 now = sched_clock();
- u64 delta_exec;
-
++ u64 delta_exec, delta_fair;
+
if (unlikely(!curr))
-@@ -860,13 +1090,22 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ return;
+@@ -860,8 +1102,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
-+ curr->cacule_node.vruntime += calc_delta_fair(delta_exec, curr);
++ delta_fair = calc_delta_fair(delta_exec, curr);
++ curr->vruntime += delta_fair;
++ curr->cacule_node.vruntime += delta_fair;
+ normalize_lifetime(now, curr);
+#else
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-
-+#ifdef CONFIG_CACULE_SCHED
-+ trace_sched_stat_runtime(curtask, delta_exec, curr->cacule_node.vruntime);
-+#else
- trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
-+#endif
- cgroup_account_cputime(curtask, delta_exec);
- account_group_exec_runtime(curtask, delta_exec);
- }
-@@ -1029,7 +1268,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1029,7 +1278,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1061,7 +1299,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1061,7 +1309,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
- se->exec_start = rq_clock_task(rq_of(cfs_rq));
+ se->exec_start = sched_clock();
}
-
+
/**************************************************
-@@ -4115,7 +4353,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4115,7 +4363,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4126,6 +4364,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4126,6 +4374,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4157,6 +4396,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4157,6 +4406,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4215,18 +4455,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4215,18 +4465,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -705,7 +713,7 @@ index bbc78794224a..06e9701a3027 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -714,126 +722,141 @@ index bbc78794224a..06e9701a3027 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4235,6 +4480,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4235,6 +4490,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4249,8 +4495,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4249,8 +4505,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4271,6 +4519,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4271,6 +4529,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4315,6 +4564,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4315,6 +4574,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4339,13 +4589,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4339,13 +4599,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4354,12 +4607,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4354,12 +4617,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4368,8 +4623,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4368,8 +4633,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
++static struct sched_entity *
++pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
++
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
-+ u64 now = sched_clock();
-+
+ // does head have higher IS than curr
-+ if (entity_before(now, &curr->cacule_node, cfs_rq->head) == 1)
++ if (pick_next_entity(cfs_rq, curr) != curr)
+ resched_curr(rq_of(cfs_rq));
+}
+#else
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4409,6 +4679,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4409,6 +4690,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4443,6 +4714,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4443,6 +4725,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
-+ struct cacule_node *se = cfs_rq->head;
++ struct cacule_node *next, *se = cfs_rq->head;
++ u64 now = sched_clock();
++ unsigned int score_se;
+
+ if (unlikely(!se))
-+ se = &curr->cacule_node;
-+ else if (unlikely(curr
-+ && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
++ return curr;
++
++ score_se = calc_interactivity(now, se);
++
++ next = se->next;
++ while (next) {
++ if (entity_before_cached(now, score_se, next) == 1) {
++ se = next;
++ score_se = calc_interactivity(now, se);
++ }
++
++ next = next->next;
++ }
++
++ if (unlikely(curr && entity_before_cached(now, score_se, &curr->cacule_node) == 1))
+ se = &curr->cacule_node;
+
+ return se_of(se);
@@ -841,26 +864,44 @@ index bbc78794224a..06e9701a3027 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4503,6 +4789,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4503,6 +4814,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5605,7 +5892,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5605,7 +5917,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5637,12 +5926,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5619,6 +5933,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ int task_sleep = flags & DEQUEUE_SLEEP;
+ int idle_h_nr_running = task_has_idle_policy(p);
+ bool was_sched_idle = sched_idle_rq(rq);
++#ifdef CONFIG_CACULE_SCHED
++ struct task_struct *parent = p->parent;
++
++ if (task_sleep && parent) {
++ if (parent->nr_forks_per_time)
++ parent->nr_forks_per_time--;
++
++ if (parent->is_fake_interactive)
++ parent->is_fake_interactive--;
++ }
++#endif
+
+ util_est_dequeue(&rq->cfs, p);
+
+@@ -5637,12 +5962,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -875,32 +916,32 @@ index bbc78794224a..06e9701a3027 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5758,6 +6049,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5758,6 +6085,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5804,6 +6096,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5804,6 +6132,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
-+#endif
-
++#endif /* CONFIG_CACULE_SCHED */
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6484,6 +6777,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6484,6 +6813,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6717,6 +7011,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6717,6 +7047,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -954,23 +995,28 @@ index bbc78794224a..06e9701a3027 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6741,6 +7086,21 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -6741,6 +7122,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
-+ unsigned int autogroup_enabled = 0;
+
+ if (!is_interactive(&se->cacule_node))
+ goto cfs_way;
+
++ // check first if the prev cpu
++ // has 0 tasks
++ if (cpumask_test_cpu(prev_cpu, p->cpus_ptr) &&
++ cpu_rq(prev_cpu)->cfs.nr_running == 0)
++ return prev_cpu;
++
+ new_cpu = find_least_IS_cpu(p);
+
-+ if (likely(new_cpu != -1))
++ if (new_cpu != -1)
+ return new_cpu;
+
+ new_cpu = prev_cpu;
@@ -978,16 +1024,16 @@ index bbc78794224a..06e9701a3027 100644
+#else
if (wake_flags & WF_TTWU) {
record_wakee(p);
-
-@@ -6753,6 +7113,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
-
+
+@@ -6753,6 +7154,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6799,6 +7160,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6799,6 +7201,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -995,31 +1041,31 @@ index bbc78794224a..06e9701a3027 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6824,6 +7186,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6824,6 +7227,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6869,6 +7232,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6869,6 +7273,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6947,6 +7311,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6947,6 +7352,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6955,9 +7320,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6955,9 +7361,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -1029,23 +1075,23 @@ index bbc78794224a..06e9701a3027 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6971,10 +7339,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6971,10 +7380,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7004,6 +7374,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7004,6 +7415,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1057,14 +1103,14 @@ index bbc78794224a..06e9701a3027 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -7013,11 +7388,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7013,11 +7429,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1072,15 +1118,15 @@ index bbc78794224a..06e9701a3027 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7032,6 +7410,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7032,6 +7451,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7206,7 +7585,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7206,7 +7626,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1088,60 +1134,60 @@ index bbc78794224a..06e9701a3027 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7214,7 +7596,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7214,7 +7637,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7230,7 +7614,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7230,7 +7655,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7241,8 +7627,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7241,8 +7668,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7470,6 +7858,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+
+@@ -7470,6 +7899,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7477,6 +7866,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7477,6 +7907,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10765,11 +11155,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10765,11 +11196,38 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1154,6 +1200,8 @@ index bbc78794224a..06e9701a3027 100644
+ struct sched_entity *curr;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
++ struct task_struct *parent = p->parent;
++ u64 now = sched_clock();
+
+ rq_lock(rq, &rf);
+ update_rq_clock(rq);
@@ -1164,37 +1212,43 @@ index bbc78794224a..06e9701a3027 100644
+ update_curr(cfs_rq);
+
+ rq_unlock(rq, &rf);
++
++ parent->fork_start_win_stamp = now;
++ if (parent->nr_forks_per_time >= nr_fork_threshold)
++ parent->is_fake_interactive++;
++
++ parent->nr_forks_per_time++;
+}
+#else
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10800,6 +11209,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10800,6 +11258,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10912,6 +11322,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10912,6 +11371,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10922,6 +11334,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10922,6 +11383,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10929,12 +11342,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10929,12 +11391,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1202,17 +1256,17 @@ index bbc78794224a..06e9701a3027 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10990,13 +11408,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10990,13 +11457,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1233,7 +1287,7 @@ index bbc78794224a..06e9701a3027 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 282a6bbaacd7..a3b7316dd537 100644
@@ -1241,7 +1295,7 @@ index 282a6bbaacd7..a3b7316dd537 100644
+++ b/kernel/sched/sched.h
@@ -516,10 +516,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1250,9 +1304,9 @@ index 282a6bbaacd7..a3b7316dd537 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
+
@@ -528,9 +531,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
@@ -1266,14 +1320,14 @@ index 282a6bbaacd7..a3b7316dd537 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 62fbd09b5dc1..a0bf55bbb3a7 100644
+index 62fbd09b5dc1..5f10c51fa7df 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
+@@ -1659,6 +1659,43 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -1281,7 +1335,7 @@ index 62fbd09b5dc1..a0bf55bbb3a7 100644
+ {
+ .procname = "sched_interactivity_factor",
+ .data = &interactivity_factor,
-+ .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
@@ -1295,7 +1349,21 @@ index 62fbd09b5dc1..a0bf55bbb3a7 100644
+ {
+ .procname = "sched_max_lifetime_ms",
+ .data = &cacule_max_lifetime,
-+ .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_fake_interactive_decay_time_ms",
++ .data = &fake_interactive_decay_time,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_nr_fork_threshold",
++ .data = &nr_fork_threshold,
++ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },