author     ptr1337   2021-05-13 20:44:35 +0200
committer  ptr1337   2021-05-13 20:44:35 +0200
commit     62be3fe171e2fde69b82cfa3c6978d8e66de50e3 (patch)
tree       94ad394b98c9ba997776be7fa79e971faaef93e8
parent     ba07b813841074e2dc926a5333aab7a3a493f8c8 (diff)
download   aur-62be3fe171e2fde69b82cfa3c6978d8e66de50e3.tar.gz
cacule updated, 5.10.36
-rw-r--r--   .SRCINFO                        12
-rw-r--r--   PKGBUILD                        12
-rw-r--r--   cacule-32bit-converter.patch     6
-rw-r--r--   cacule-5.10.patch              254
4 files changed, 80 insertions, 204 deletions
diff --git a/.SRCINFO b/.SRCINFO
index d2dc33a017ec..3af69c24584c 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = linux-raspberrypi4-cacule
pkgdesc = Raspberry Pi 4 lts Kernel with the cacule scheduler, aarch64 and armv7
- pkgver = 5.10.35
- pkgrel = 3
+ pkgver = 5.10.36
+ pkgrel = 1
url = http://www.kernel.org/
arch = armv7h
arch = aarch64
@@ -13,26 +13,26 @@ pkgbase = linux-raspberrypi4-cacule
makedepends = bc
makedepends = git
options = !strip
- source = https://github.com/raspberrypi/linux/archive/53a5ac4935c500d32bfc465551cc5107e091c09c.tar.gz
+ source = https://github.com/raspberrypi/linux/archive/25d90363263a24fbfc7e74cbd2950ff08d47e108.tar.gz
source = cmdline.txt
source = linux.preset
source = 60-linux.hook
source = 90-linux.hook
source = 0001-Make-proc-cpuinfo-consistent-on-arm64-and-arm.patch
source = cacule-5.10.patch
- md5sums = 86644ce5432d4e6f92322ad1c2ae0628
+ md5sums = 5cc1c06b917f1326427cab7f607523db
md5sums = 31c02f4518d46deb5f0c2ad1f8b083cd
md5sums = 86d4a35722b5410e3b29fc92dae15d4b
md5sums = ce6c81ad1ad1f8b333fd6077d47abdaf
md5sums = 441ec084c47cddc53e592fb0cbce4edf
md5sums = f66a7ea3feb708d398ef57e4da4815e9
- md5sums = 2b524cf468b920f6fb87204b6901de05
+ md5sums = bdeecd1cb24baab80b8487b8ab6f7246
source_armv7h = config
source_armv7h = config.txt
source_armv7h = cacule-32bit-converter.patch
md5sums_armv7h = 869c9f4314d92c14a45128c8af56b663
md5sums_armv7h = 9669d916a5929a2eedbd64477f83d99e
- md5sums_armv7h = ff19fef26cbb90c2c41a3868a4ef458c
+ md5sums_armv7h = 9359279d43630751e06121347a64deae
source_aarch64 = config8
source_aarch64 = config8.txt
md5sums_aarch64 = 42bd8ecdb82ed46972e8d6c184cb0f45
diff --git a/PKGBUILD b/PKGBUILD
index 37cb6d7ebbfd..869205aafda3 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -7,12 +7,12 @@
buildarch=12
pkgbase=linux-raspberrypi4-cacule
-_commit=53a5ac4935c500d32bfc465551cc5107e091c09c
+_commit=25d90363263a24fbfc7e74cbd2950ff08d47e108
_srcname=linux-${_commit}
_kernelname=${pkgbase#linux}
_desc="Raspberry Pi 4 with the cacule scheduler"
-pkgver=5.10.35
-pkgrel=3
+pkgver=5.10.36
+pkgrel=1
pkgdesc="Raspberry Pi 4 lts Kernel with the cacule scheduler, aarch64 and armv7"
arch=('armv7h' 'aarch64')
url="http://www.kernel.org/"
@@ -29,16 +29,16 @@ source=("https://github.com/raspberrypi/linux/archive/${_commit}.tar.gz"
)
source_armv7h=('config' 'config.txt' 'cacule-32bit-converter.patch')
source_aarch64=('config8' 'config8.txt')
-md5sums=('86644ce5432d4e6f92322ad1c2ae0628'
+md5sums=('5cc1c06b917f1326427cab7f607523db'
'31c02f4518d46deb5f0c2ad1f8b083cd'
'86d4a35722b5410e3b29fc92dae15d4b'
'ce6c81ad1ad1f8b333fd6077d47abdaf'
'441ec084c47cddc53e592fb0cbce4edf'
'f66a7ea3feb708d398ef57e4da4815e9'
- '2b524cf468b920f6fb87204b6901de05')
+ 'bdeecd1cb24baab80b8487b8ab6f7246')
md5sums_armv7h=('869c9f4314d92c14a45128c8af56b663'
'9669d916a5929a2eedbd64477f83d99e'
- 'ff19fef26cbb90c2c41a3868a4ef458c')
+ '9359279d43630751e06121347a64deae')
md5sums_aarch64=('42bd8ecdb82ed46972e8d6c184cb0f45'
'9669d916a5929a2eedbd64477f83d99e')
diff --git a/cacule-32bit-converter.patch b/cacule-32bit-converter.patch
index ac277f113762..66709f3810ee 100644
--- a/cacule-32bit-converter.patch
+++ b/cacule-32bit-converter.patch
@@ -23,7 +23,7 @@ index c99fc326ec24..71c27133c53c 100644
@@ -602,6 +604,7 @@ calc_interactivity(u64 now, struct cacule_node *se)
{
u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
- unsigned int score_se, fake_interactivity;
+ unsigned int score_se;
+ u32 r_se_rem;
/*
@@ -48,8 +48,8 @@ index c99fc326ec24..71c27133c53c 100644
+ ), &r_se_rem)
+ );
- fake_interactivity = is_fake_interactive(se);
- if (fake_interactivity)
+ return score_se;
+ }
@@ -1041,6 +1054,7 @@ static void normalize_lifetime(u64 now, struct sched_entity *se)
struct cacule_node *cn;
u64 max_life_ns, life_time;
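
For context, the 32-bit converter patch above exists because ARMv7 has no native 64-bit integer division: the plain "/" operations in calc_interactivity() have to go through the kernel's div_u64_rem() helper, which is what the nested div_u64_rem(..., &r_se_rem) calls in the hunk do. The following is a minimal userspace sketch of that scoring path, assuming the variable roles from the upstream CacULE patch and a stand-in for div_u64_rem(); it is an illustration of the formula, not a verbatim kernel excerpt.

/*
 * Userspace sketch of CacULE's interactivity score using the 32-bit
 * friendly division pattern. Assumptions: variable roles follow the
 * upstream CacULE calc_interactivity(); div_u64_rem() is re-implemented
 * here since this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel helper: u64 dividend, u32 divisor */
static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *rem)
{
	*rem = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

static unsigned int calc_interactivity(uint64_t now, uint64_t start_time,
					uint64_t vruntime, unsigned int factor)
{
	/* OR with 1 keeps every divisor non-zero, as the patch comments note */
	uint64_t l_se = now - start_time;   /* lifetime of the entity */
	uint64_t vr_se = vruntime | 1;      /* time actually spent running */
	uint64_t sleep_se = 1;              /* time spent sleeping */
	uint64_t u64_factor_m = factor;
	uint64_t _2m = (uint64_t)factor << 1;
	uint32_t rem;

	if (l_se > vr_se)
		sleep_se = (l_se - vr_se) | 1;

	/* mostly-sleeping tasks score in [0, factor], CPU hogs in [factor, 2*factor] */
	if (sleep_se >= vr_se)
		return (unsigned int)div_u64_rem(u64_factor_m,
			(uint32_t)div_u64_rem(sleep_se, (uint32_t)vr_se, &rem), &rem);

	return (unsigned int)(_2m - div_u64_rem(u64_factor_m,
		(uint32_t)div_u64_rem(vr_se, (uint32_t)sleep_se, &rem), &rem));
}

int main(void)
{
	/* ran 10ms of a 1s lifetime: low score, i.e. interactive */
	printf("sleeper: %u\n", calc_interactivity(1000000000ULL, 0, 10000000ULL, 32768));
	/* ran 990ms of a 1s lifetime: high score, i.e. CPU bound */
	printf("hog:     %u\n", calc_interactivity(1000000000ULL, 0, 990000000ULL, 32768));
	return 0;
}

Lower scores mean more interactive; is_interactive() in cacule-5.10.patch below simply compares this score against interactivity_threshold.
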
diff --git a/cacule-5.10.patch b/cacule-5.10.patch
index 4ca76155284c..ba2f9ba8c72e 100644
--- a/cacule-5.10.patch
+++ b/cacule-5.10.patch
@@ -96,7 +96,7 @@ index 000000000000..82b0847c468a
+ idle timer scheduler in order to avoid to get into priority
+ inversion problems which would deadlock the machine.
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 76cd21fa5501..75dd669e5e8b 100644
+index 76cd21fa5501..0abad9f1247a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -448,10 +448,22 @@ struct sched_statistics {
@@ -122,24 +122,11 @@ index 76cd21fa5501..75dd669e5e8b 100644
struct list_head group_node;
unsigned int on_rq;
-@@ -858,6 +870,12 @@ struct task_struct {
- struct list_head sibling;
- struct task_struct *group_leader;
-
-+#ifdef CONFIG_CACULE_SCHED
-+ u64 fork_start_win_stamp;
-+ unsigned int nr_forks_per_time;
-+ int is_fake_interactive;
-+#endif
-+
- /*
- * 'ptraced' is the list of tasks this task is using ptrace() on.
- *
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..20c85c808485 100644
+index 3c31ba88aca5..4cf162341ab8 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,14 @@ extern unsigned int sysctl_sched_min_granularity;
+@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
@@ -147,8 +134,6 @@ index 3c31ba88aca5..20c85c808485 100644
+extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
+extern unsigned int cacule_max_lifetime;
-+extern unsigned int fake_interactive_decay_time;
-+extern unsigned int nr_fork_threshold;
+#endif
+
enum sched_tunable_scaling {
@@ -206,48 +191,23 @@ index 38ef6d06888e..c8cf984c294e 100644
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
-diff --git a/kernel/exit.c b/kernel/exit.c
-index d13d67fc5f4e..cdd5d05a8af6 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -653,6 +653,17 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
- write_lock_irq(&tasklist_lock);
- forget_original_parent(tsk, &dead);
-
-+#ifdef CONFIG_CACULE_SCHED
-+ p = tsk->parent;
-+ if (p) {
-+ if (p->nr_forks_per_time)
-+ p->nr_forks_per_time--;
-+
-+ if (p->is_fake_interactive)
-+ p->is_fake_interactive--;
-+ }
-+#endif
-+
- if (group_dead)
- kill_orphaned_pgrp(tsk->group_leader, NULL);
-
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3a150445e0cb..ede4e99ba7ab 100644
+index 3a150445e0cb..4abd45eebdb5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3068,6 +3068,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -3068,6 +3068,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.vruntime = 0;
-+ p->fork_start_win_stamp = 0;
-+ p->nr_forks_per_time = 0;
-+ p->is_fake_interactive = 0;
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3352,6 +3360,10 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3352,6 +3357,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
@@ -258,7 +218,7 @@ index 3a150445e0cb..ede4e99ba7ab 100644
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -7066,6 +7078,10 @@ void __init sched_init(void)
+@@ -7066,6 +7075,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
@@ -316,7 +276,7 @@ index 2357921580f9..3adc9ee2bcfc 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 348605306027..a7a57e97b098 100644
+index 348605306027..a440f1619e3b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -330,7 +290,7 @@ index 348605306027..a7a57e97b098 100644
*/
#include "sched.h"
-@@ -113,6 +117,13 @@ int __weak arch_asym_cpu_priority(int cpu)
+@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
@@ -339,12 +299,10 @@ index 348605306027..a7a57e97b098 100644
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
+unsigned int __read_mostly interactivity_factor = 32768;
+unsigned int __read_mostly interactivity_threshold = 1000;
-+unsigned int __read_mostly fake_interactive_decay_time = 1000; // in ms
-+unsigned int __read_mostly nr_fork_threshold = 3;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +264,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
const struct sched_class fair_sched_class;
@@ -359,7 +317,7 @@ index 348605306027..a7a57e97b098 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +531,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -368,53 +326,18 @@ index 348605306027..a7a57e97b098 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +587,183 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +585,141 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
+#endif /* CONFIG_CACULE_SCHED */
+
+#ifdef CONFIG_CACULE_SCHED
-+static inline unsigned int is_fake_interactive(struct cacule_node *cn)
-+{
-+ struct sched_entity *se = se_of(cn);
-+ struct task_struct *parent = NULL;
-+ struct cfs_rq *cfs_rq;
-+ u64 win_time = fake_interactive_decay_time * 1000000ULL;
-+ u64 now = sched_clock();
-+
-+ while (!parent) {
-+ if (entity_is_task(se)) {
-+ parent = task_of(se)->parent;
-+ break;
-+ }
-+
-+ cfs_rq = group_cfs_rq(se);
-+
-+ if (!cfs_rq->head && !cfs_rq->curr)
-+ return 0;
-+
-+ if (cfs_rq->head)
-+ se = se_of(cfs_rq->head);
-+ else if (cfs_rq->curr)
-+ se = cfs_rq->curr;
-+ }
-+
-+ if (parent->is_fake_interactive
-+ && (now - parent->fork_start_win_stamp > win_time))
-+ {
-+ parent->fork_start_win_stamp = now;
-+ parent->is_fake_interactive--;
-+ }
-+
-+ return parent->is_fake_interactive;
-+}
-+
+static unsigned int
+calc_interactivity(u64 now, struct cacule_node *se)
+{
+ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
-+ unsigned int score_se, fake_interactivity;
++ unsigned int score_se;
+
+ /*
+ * in case of vruntime==0, logical OR with 1 would
@@ -434,10 +357,6 @@ index 348605306027..a7a57e97b098 100644
+ else
+ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
+
-+ fake_interactivity = is_fake_interactive(se);
-+ if (fake_interactivity)
-+ score_se += (_2m * fake_interactivity) + 1;
-+
+ return score_se;
+}
+
@@ -446,9 +365,6 @@ index 348605306027..a7a57e97b098 100644
+ if (se_of(cn)->vruntime == 0)
+ return 0;
+
-+ if (is_fake_interactive(cn))
-+ return 0;
-+
+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
+}
+
@@ -552,7 +468,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +821,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+@@ -626,16 +777,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
return rb_entry(next, struct sched_entity, run_node);
}
@@ -582,7 +498,7 @@ index 348605306027..a7a57e97b098 100644
}
/**************************************************************
-@@ -720,6 +928,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +884,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
@@ -590,7 +506,7 @@ index 348605306027..a7a57e97b098 100644
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +938,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +894,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
@@ -598,7 +514,7 @@ index 348605306027..a7a57e97b098 100644
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,14 +1046,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,14 +1002,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
@@ -647,7 +563,7 @@ index 348605306027..a7a57e97b098 100644
if (unlikely(!curr))
return;
-@@ -860,8 +1102,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -860,8 +1058,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
@@ -663,7 +579,7 @@ index 348605306027..a7a57e97b098 100644
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-@@ -1020,7 +1269,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1020,7 +1225,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -671,7 +587,7 @@ index 348605306027..a7a57e97b098 100644
if (!schedstat_enabled())
return;
-@@ -1052,7 +1300,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -1052,7 +1256,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
@@ -680,7 +596,7 @@ index 348605306027..a7a57e97b098 100644
}
/**************************************************
-@@ -4104,7 +4352,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+@@ -4104,7 +4308,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -689,7 +605,7 @@ index 348605306027..a7a57e97b098 100644
s64 d = se->vruntime - cfs_rq->min_vruntime;
if (d < 0)
-@@ -4115,6 +4363,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4115,6 +4319,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
@@ -697,7 +613,7 @@ index 348605306027..a7a57e97b098 100644
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4146,6 +4395,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4146,6 +4351,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
@@ -705,7 +621,7 @@ index 348605306027..a7a57e97b098 100644
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-@@ -4204,18 +4454,23 @@ static inline bool cfs_bandwidth_used(void);
+@@ -4204,18 +4410,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -729,7 +645,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4224,6 +4479,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4224,6 +4435,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
@@ -737,7 +653,7 @@ index 348605306027..a7a57e97b098 100644
/*
* When enqueuing a sched_entity, we must:
-@@ -4238,8 +4494,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4238,8 +4450,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
@@ -748,7 +664,7 @@ index 348605306027..a7a57e97b098 100644
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4260,6 +4518,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4260,6 +4474,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
@@ -756,7 +672,7 @@ index 348605306027..a7a57e97b098 100644
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4304,6 +4563,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4304,6 +4519,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
@@ -764,7 +680,7 @@ index 348605306027..a7a57e97b098 100644
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -4328,13 +4588,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4328,13 +4544,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_stats_dequeue(cfs_rq, se, flags);
@@ -781,7 +697,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4343,12 +4606,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4343,12 +4562,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
@@ -796,7 +712,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4357,8 +4622,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4357,8 +4578,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
@@ -821,7 +737,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4398,6 +4679,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4398,6 +4635,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
@@ -829,7 +745,7 @@ index 348605306027..a7a57e97b098 100644
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4432,6 +4714,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4432,6 +4670,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
@@ -865,7 +781,7 @@ index 348605306027..a7a57e97b098 100644
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-@@ -4492,6 +4803,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4492,6 +4759,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return se;
}
@@ -873,7 +789,7 @@ index 348605306027..a7a57e97b098 100644
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -5585,7 +5897,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5585,7 +5853,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
@@ -883,25 +799,7 @@ index 348605306027..a7a57e97b098 100644
/*
* The dequeue_task method is called before nr_running is
-@@ -5599,6 +5913,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- int task_sleep = flags & DEQUEUE_SLEEP;
- int idle_h_nr_running = task_has_idle_policy(p);
- bool was_sched_idle = sched_idle_rq(rq);
-+#ifdef CONFIG_CACULE_SCHED
-+ struct task_struct *parent = p->parent;
-+
-+ if (task_sleep && parent) {
-+ if (parent->nr_forks_per_time)
-+ parent->nr_forks_per_time--;
-+
-+ if (parent->is_fake_interactive)
-+ parent->is_fake_interactive--;
-+ }
-+#endif
-
- util_est_dequeue(&rq->cfs, p);
-
-@@ -5617,12 +5942,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5617,12 +5887,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -916,7 +814,7 @@ index 348605306027..a7a57e97b098 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5738,6 +6065,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5738,6 +6010,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
@@ -924,7 +822,7 @@ index 348605306027..a7a57e97b098 100644
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5784,6 +6112,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5784,6 +6057,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
@@ -932,7 +830,7 @@ index 348605306027..a7a57e97b098 100644
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6460,6 +6789,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6460,6 +6734,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
@@ -940,7 +838,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6693,6 +7023,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+@@ -6693,6 +6968,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
return -1;
}
@@ -998,7 +896,7 @@ index 348605306027..a7a57e97b098 100644
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6715,6 +7096,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -6715,6 +7041,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
int want_affine = 0;
int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
@@ -1025,7 +923,7 @@ index 348605306027..a7a57e97b098 100644
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
-@@ -6727,6 +7128,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -6727,6 +7073,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
@@ -1033,7 +931,7 @@ index 348605306027..a7a57e97b098 100644
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6774,6 +7176,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6774,6 +7121,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -1041,7 +939,7 @@ index 348605306027..a7a57e97b098 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6799,6 +7202,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+@@ -6799,6 +7147,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
se->vruntime -= min_vruntime;
}
@@ -1049,7 +947,7 @@ index 348605306027..a7a57e97b098 100644
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6844,6 +7248,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6844,6 +7193,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
@@ -1057,7 +955,7 @@ index 348605306027..a7a57e97b098 100644
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6922,6 +7327,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6922,6 +7272,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
@@ -1065,7 +963,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6930,9 +7336,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6930,9 +7281,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -1078,7 +976,7 @@ index 348605306027..a7a57e97b098 100644
if (unlikely(se == pse))
return;
-@@ -6946,10 +7355,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6946,10 +7300,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
@@ -1091,7 +989,7 @@ index 348605306027..a7a57e97b098 100644
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -6979,6 +7390,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6979,6 +7335,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1103,7 +1001,7 @@ index 348605306027..a7a57e97b098 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -6988,11 +7404,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6988,11 +7349,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
@@ -1118,7 +1016,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7007,6 +7426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7007,6 +7371,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
@@ -1126,7 +1024,7 @@ index 348605306027..a7a57e97b098 100644
}
struct task_struct *
-@@ -7181,7 +7601,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7181,7 +7546,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1137,7 +1035,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Are we the only task in the tree?
-@@ -7189,7 +7612,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7189,7 +7557,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
@@ -1147,7 +1045,7 @@ index 348605306027..a7a57e97b098 100644
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7205,7 +7630,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7205,7 +7575,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
@@ -1157,7 +1055,7 @@ index 348605306027..a7a57e97b098 100644
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7216,8 +7643,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7216,8 +7588,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
@@ -1168,7 +1066,7 @@ index 348605306027..a7a57e97b098 100644
yield_task_fair(rq);
-@@ -7445,6 +7874,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7445,6 +7819,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
@@ -1176,7 +1074,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Buddy candidates are cache hot:
*/
-@@ -7452,6 +7882,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7452,6 +7827,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
@@ -1184,7 +1082,7 @@ index 348605306027..a7a57e97b098 100644
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10720,11 +11151,38 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10720,11 +11096,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
@@ -1200,8 +1098,6 @@ index 348605306027..a7a57e97b098 100644
+ struct sched_entity *curr;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
-+ struct task_struct *parent = p->parent;
-+ u64 now = sched_clock();
+
+ rq_lock(rq, &rf);
+ update_rq_clock(rq);
@@ -1212,18 +1108,12 @@ index 348605306027..a7a57e97b098 100644
+ update_curr(cfs_rq);
+
+ rq_unlock(rq, &rf);
-+
-+ parent->fork_start_win_stamp = now;
-+ if (parent->nr_forks_per_time >= nr_fork_threshold)
-+ parent->is_fake_interactive++;
-+
-+ parent->nr_forks_per_time++;
+}
+#else
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10755,6 +11213,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10755,6 +11150,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
@@ -1231,7 +1121,7 @@ index 348605306027..a7a57e97b098 100644
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10867,6 +11326,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10867,6 +11263,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1240,7 +1130,7 @@ index 348605306027..a7a57e97b098 100644
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (!vruntime_normalized(p)) {
-@@ -10877,6 +11338,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10877,6 +11275,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
@@ -1248,7 +1138,7 @@ index 348605306027..a7a57e97b098 100644
detach_entity_cfs_rq(se);
}
-@@ -10884,12 +11346,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10884,12 +11283,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1266,7 +1156,7 @@ index 348605306027..a7a57e97b098 100644
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10945,13 +11412,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10945,13 +11349,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1324,10 +1214,10 @@ index fac1b121d113..7d9d59cee2d2 100644
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index b9306d2bb426..1ccdf97188bd 100644
+index b9306d2bb426..2103ee43de9e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,43 @@ static struct ctl_table kern_table[] = {
+@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -1353,20 +1243,6 @@ index b9306d2bb426..1ccdf97188bd 100644
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
-+ {
-+ .procname = "sched_fake_interactive_decay_time_ms",
-+ .data = &fake_interactive_decay_time,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+ {
-+ .procname = "sched_nr_fork_threshold",
-+ .data = &nr_fork_threshold,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
+#endif
#ifdef CONFIG_SCHED_DEBUG
{
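
Once a kernel built from this PKGBUILD is booted, the tunables registered in the kern_table hunk above appear under /proc/sys/kernel/. A minimal sketch of reading one of them follows; the exact procname is an assumption inferred from the sched_* naming visible in the removed entries, so check sysctl -a on the running kernel before relying on it.

/* Sketch: read a CacULE tunable exposed through the sysctl table above.
 * Assumption: the knob is named sched_interactivity_factor, matching the
 * interactivity_factor extern declared in include/linux/sched/sysctl.h. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_interactivity_factor";
	FILE *f = fopen(path, "r");
	unsigned int value;

	if (!f) {
		perror(path); /* e.g. kernel built without CONFIG_CACULE_SCHED */
		return 1;
	}
	if (fscanf(f, "%u", &value) == 1)
		printf("%s = %u\n", path, value);
	fclose(f);
	return 0;
}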