Diffstat (limited to 'cacule-5.10.patch')
-rw-r--r--  cacule-5.10.patch | 587
1 file changed, 355 insertions(+), 232 deletions(-)
diff --git a/cacule-5.10.patch b/cacule-5.10.patch
index 5867f49b2258..4ca76155284c 100644
--- a/cacule-5.10.patch
+++ b/cacule-5.10.patch
@@ -5,12 +5,12 @@ index d4b32cc32bb7..2788c5bbd870 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
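(Aside, not part of the patch: the fair.c hunks further down define the score this factor tunes. With lifetime l and vruntime vr, a task scores m/(sleep/vr) when its sleep time (l - vr) dominates, and 2m - m/(vr/sleep) otherwise, so lower scores mean more interactive. A minimal standalone sketch of that calculation, with the fake-interactive term added in this revision left out; the helper name is illustrative.)

    #include <stdint.h>

    /* Mirrors calc_interactivity() from the kernel/sched/fair.c hunks below. */
    static uint64_t cacule_score(uint64_t life_ns, uint64_t vruntime_ns,
                                 uint64_t m /* interactivity_factor */)
    {
            uint64_t vr = vruntime_ns | 1;        /* keep divisors non-zero */
            uint64_t sleep = 1;

            if (life_ns > vr)
                    sleep = (life_ns - vr) | 1;   /* time spent off the CPU */

            if (sleep >= vr)                      /* mostly sleeping: low score */
                    return m / (sleep / vr);

            return (m << 1) - m / (vr / sleep);   /* mostly running: approaches 2*m */
    }

(With the default m = 32768, a task that has slept three times as long as it has run scores 32768 / 3 = 10922, while a purely CPU-bound task approaches 2m = 65536.)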
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -96,13 +96,13 @@ index 000000000000..82b0847c468a
+ idle timer scheduler in order to avoid to get into priority
+ inversion problems which would deadlock the machine.
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 76cd21fa5501..0abad9f1247a 100644
+index 76cd21fa5501..75dd669e5e8b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -448,10 +448,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -121,19 +121,34 @@ index 76cd21fa5501..0abad9f1247a 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
+
+@@ -858,6 +870,12 @@ struct task_struct {
+ struct list_head sibling;
+ struct task_struct *group_leader;
+
++#ifdef CONFIG_CACULE_SCHED
++ u64 fork_start_win_stamp;
++ unsigned int nr_forks_per_time;
++ int is_fake_interactive;
++#endif
++
+ /*
+ * 'ptraced' is the list of tasks this task is using ptrace() on.
+ *
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..cb819c3d86f3 100644
+index 3c31ba88aca5..20c85c808485 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
-@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
+@@ -31,6 +31,14 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
-+extern int interactivity_factor;
++extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
-+extern int cacule_max_lifetime;
++extern unsigned int cacule_max_lifetime;
++extern unsigned int fake_interactive_decay_time;
++extern unsigned int nr_fork_threshold;
+#endif
+
enum sched_tunable_scaling {
@@ -144,9 +159,9 @@ index fc4c9f416fad..16676cfd11d7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -825,6 +825,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -176,43 +191,66 @@ index 38ef6d06888e..c8cf984c294e 100644
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
+ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
+diff --git a/kernel/exit.c b/kernel/exit.c
+index d13d67fc5f4e..cdd5d05a8af6 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -653,6 +653,17 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
+ write_lock_irq(&tasklist_lock);
+ forget_original_parent(tsk, &dead);
+
++#ifdef CONFIG_CACULE_SCHED
++ p = tsk->parent;
++ if (p) {
++ if (p->nr_forks_per_time)
++ p->nr_forks_per_time--;
++
++ if (p->is_fake_interactive)
++ p->is_fake_interactive--;
++ }
++#endif
++
+ if (group_dead)
+ kill_orphaned_pgrp(tsk->group_leader, NULL);
+
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3a150445e0cb..75f80beab9b7 100644
+index 3a150445e0cb..ede4e99ba7ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3067,7 +3067,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->se.sum_exec_runtime = 0;
+@@ -3068,6 +3068,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
+ p->se.vruntime = 0;
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.vruntime = 0;
-+#else
- p->se.vruntime = 0;
++ p->fork_start_win_stamp = 0;
++ p->nr_forks_per_time = 0;
++ p->is_fake_interactive = 0;
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3352,6 +3358,10 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3352,6 +3360,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -220,22 +258,36 @@ index 3a150445e0cb..75f80beab9b7 100644
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -7066,6 +7076,10 @@ void __init sched_init(void)
+@@ -7066,6 +7078,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
-+ printk(KERN_INFO "CacULE CPU scheduler v5.10 by Hamad Al Marri.");
++ printk(KERN_INFO "CacULE CPU scheduler v5.10-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 2357921580f9..ac08a7ced508 100644
+index 2357921580f9..3adc9ee2bcfc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -557,21 +557,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+@@ -535,8 +535,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+
+ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ {
+- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
+- spread, rq0_min_vruntime, spread0;
++ s64 MIN_vruntime = -1, max_vruntime = -1,
++#if !defined(CONFIG_CACULE_SCHED)
++ min_vruntime, rq0_min_vruntime, spread0,
++#endif
++ spread;
+ struct rq *rq = cpu_rq(cpu);
+ struct sched_entity *last;
+ unsigned long flags;
+@@ -557,21 +560,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
last = __pick_last_entity(cfs_rq);
if (last)
max_vruntime = last->vruntime;
@@ -264,7 +316,7 @@ index 2357921580f9..ac08a7ced508 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 348605306027..e9681f0fb831 100644
+index 348605306027..a7a57e97b098 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -277,23 +329,25 @@ index 348605306027..e9681f0fb831 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
-@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
+
+@@ -113,6 +117,13 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
-+int __read_mostly cacule_max_lifetime = 22000; // in ms
-+int __read_mostly interactivity_factor = 32768;
-+unsigned int __read_mostly interactivity_threshold = 20480;
++unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
++unsigned int __read_mostly interactivity_factor = 32768;
++unsigned int __read_mostly interactivity_threshold = 1000;
++unsigned int __read_mostly fake_interactive_decay_time = 1000; // in ms
++unsigned int __read_mostly nr_fork_threshold = 3;
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+@@ -253,6 +264,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -305,7 +359,7 @@ index 348605306027..e9681f0fb831 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +531,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -314,35 +368,75 @@ index 348605306027..e9681f0fb831 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +585,169 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +587,183 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
+#endif /* CONFIG_CACULE_SCHED */
+
+#ifdef CONFIG_CACULE_SCHED
++static inline unsigned int is_fake_interactive(struct cacule_node *cn)
++{
++ struct sched_entity *se = se_of(cn);
++ struct task_struct *parent = NULL;
++ struct cfs_rq *cfs_rq;
++ u64 win_time = fake_interactive_decay_time * 1000000ULL;
++ u64 now = sched_clock();
++
++ while (!parent) {
++ if (entity_is_task(se)) {
++ parent = task_of(se)->parent;
++ break;
++ }
++
++ cfs_rq = group_cfs_rq(se);
++
++ if (!cfs_rq->head && !cfs_rq->curr)
++ return 0;
++
++ if (cfs_rq->head)
++ se = se_of(cfs_rq->head);
++ else if (cfs_rq->curr)
++ se = cfs_rq->curr;
++ }
++
++ if (parent->is_fake_interactive
++ && (now - parent->fork_start_win_stamp > win_time))
++ {
++ parent->fork_start_win_stamp = now;
++ parent->is_fake_interactive--;
++ }
++
++ return parent->is_fake_interactive;
++}
++
+static unsigned int
+calc_interactivity(u64 now, struct cacule_node *se)
+{
-+ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor;
-+ unsigned int score_se;
++ u64 l_se, vr_se, sleep_se = 1ULL, u64_factor_m, _2m;
++ unsigned int score_se, fake_interactivity;
+
+ /*
+ * in case of vruntime==0, logical OR with 1 would
+ * make sure that the least sig. bit is 1
+ */
+ l_se = now - se->cacule_start_time;
-+ vr_se = se->vruntime | 1;
-+ u64_factor = interactivity_factor;
++ vr_se = se->vruntime | 1;
++ u64_factor_m = interactivity_factor;
++ _2m = u64_factor_m << 1;
+
+ /* safety check */
+ if (likely(l_se > vr_se))
+ sleep_se = (l_se - vr_se) | 1;
+
+ if (sleep_se >= vr_se)
-+ score_se = u64_factor / (sleep_se / vr_se);
++ score_se = u64_factor_m / (sleep_se / vr_se);
+ else
-+ score_se = (u64_factor << 1) - (u64_factor / (vr_se / sleep_se));
++ score_se = _2m - (u64_factor_m / (vr_se / sleep_se));
++
++ fake_interactivity = is_fake_interactive(se);
++ if (fake_interactivity)
++ score_se += (_2m * fake_interactivity) + 1;
+
+ return score_se;
+}
@@ -352,6 +446,9 @@ index 348605306027..e9681f0fb831 100644
+ if (se_of(cn)->vruntime == 0)
+ return 0;
+
++ if (is_fake_interactive(cn))
++ return 0;
++
+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
+}
+
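(Aside, a sketch of how the r2 additions above combine under the defaults in this revision, not code from the patch; the helper name and defines are illustrative: calc_interactivity() adds (2*m * level) + 1 per inherited fake-interactive level, and is_interactive() also bails out early for such tasks, so a single flagged ancestor pushes a task far past interactivity_threshold.)

    /* Assumed defaults from this revision of the patch. */
    #define CACULE_FACTOR     32768u   /* interactivity_factor */
    #define CACULE_THRESHOLD   1000u   /* interactivity_threshold */

    static int interactive_sketch(unsigned int base_score, unsigned int fake_levels)
    {
            unsigned int score = base_score;

            if (fake_levels)           /* penalty applied in calc_interactivity() */
                    score += (2u * CACULE_FACTOR) * fake_levels + 1u;

            return score < CACULE_THRESHOLD;   /* comparison in is_interactive() */
    }

(Even a task whose base score is 0 lands at 65537 with one fake-interactive level, so it is never classified as interactive.)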
@@ -369,7 +466,7 @@ index 348605306027..e9681f0fb831 100644
+
+ return -1;
+}
-+
+
+/*
+ * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
+ * otherwise return -1
@@ -399,47 +496,18 @@ index 348605306027..e9681f0fb831 100644
+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
+{
+ struct cacule_node *se = &(_se->cacule_node);
-+ struct cacule_node *iter, *next = NULL;
-+ u64 now = sched_clock();
-+ unsigned int score_se = calc_interactivity(now, se);
+
+ se->next = NULL;
+ se->prev = NULL;
+
+ if (likely(cfs_rq->head)) {
-+
-+ // start from tail
-+ iter = cfs_rq->tail;
-+
-+ // does se have higher IS than iter?
-+ while (iter && entity_before_cached(now, score_se, iter) == -1) {
-+ next = iter;
-+ iter = iter->prev;
-+ }
-+
-+ // se in tail position
-+ if (iter == cfs_rq->tail) {
-+ cfs_rq->tail->next = se;
-+ se->prev = cfs_rq->tail;
-+
-+ cfs_rq->tail = se;
-+ }
-+ // else if not head no tail, insert se after iter
-+ else if (iter) {
-+ se->next = next;
-+ se->prev = iter;
-+
-+ iter->next = se;
-+ next->prev = se;
-+ }
+ // insert se at head
-+ else {
-+ se->next = cfs_rq->head;
-+ cfs_rq->head->prev = se;
++ se->next = cfs_rq->head;
++ cfs_rq->head->prev = se;
++
++ // lastly reset the head
++ cfs_rq->head = se;
+
-+ // lastly reset the head
-+ cfs_rq->head = se;
-+ }
+ } else {
+ // if empty rq
+ cfs_rq->head = se;
@@ -470,7 +538,7 @@ index 348605306027..e9681f0fb831 100644
+ struct cacule_node *next = se->next;
+
+ prev->next = next;
-
++
+ if (next)
+ next->prev = prev;
+ }
@@ -484,12 +552,12 @@ index 348605306027..e9681f0fb831 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +805,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+@@ -626,16 +821,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
return rb_entry(next, struct sched_entity, run_node);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -505,35 +573,35 @@ index 348605306027..e9681f0fb831 100644
+ return se_of(cn);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return rb_entry(last, struct sched_entity, run_node);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -720,6 +912,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +928,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +922,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +938,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,14 +1030,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,14 +1046,46 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -576,13 +644,13 @@ index 348605306027..e9681f0fb831 100644
- u64 delta_exec;
+ u64 now = sched_clock();
+ u64 delta_exec, delta_fair;
-
+
if (unlikely(!curr))
return;
-@@ -860,8 +1086,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -860,8 +1102,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
@@ -592,52 +660,52 @@ index 348605306027..e9681f0fb831 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-@@ -1020,7 +1253,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1020,7 +1269,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1052,7 +1284,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1052,7 +1300,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
- se->exec_start = rq_clock_task(rq_of(cfs_rq));
+ se->exec_start = sched_clock();
}
-
+
/**************************************************
-@@ -4104,7 +4336,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4104,7 +4352,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4115,6 +4347,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4115,6 +4363,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4146,6 +4379,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4146,6 +4395,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4204,18 +4438,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4204,18 +4454,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -645,7 +713,7 @@ index 348605306027..e9681f0fb831 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -654,126 +722,141 @@ index 348605306027..e9681f0fb831 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4224,6 +4463,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4224,6 +4479,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4238,8 +4478,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4238,8 +4494,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4260,6 +4502,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4260,6 +4518,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4304,6 +4547,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4304,6 +4563,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4328,13 +4572,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4328,13 +4588,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4343,12 +4590,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4343,12 +4606,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4357,8 +4606,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4357,8 +4622,24 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
++static struct sched_entity *
++pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr);
++
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
-+ u64 now = sched_clock();
-+
+ // does head have higher IS than curr
-+ if (entity_before(now, &curr->cacule_node, cfs_rq->head) == 1)
++ if (pick_next_entity(cfs_rq, curr) != curr)
+ resched_curr(rq_of(cfs_rq));
+}
+#else
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4398,6 +4662,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4398,6 +4679,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4432,6 +4697,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4432,6 +4714,35 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
-+ struct cacule_node *se = cfs_rq->head;
++ struct cacule_node *next, *se = cfs_rq->head;
++ u64 now = sched_clock();
++ unsigned int score_se;
+
+ if (unlikely(!se))
-+ se = &curr->cacule_node;
-+ else if (unlikely(curr
-+ && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
++ return curr;
++
++ score_se = calc_interactivity(now, se);
++
++ next = se->next;
++ while (next) {
++ if (entity_before_cached(now, score_se, next) == 1) {
++ se = next;
++ score_se = calc_interactivity(now, se);
++ }
++
++ next = next->next;
++ }
++
++ if (unlikely(curr && entity_before_cached(now, score_se, &curr->cacule_node) == 1))
+ se = &curr->cacule_node;
+
+ return se_of(se);
@@ -781,26 +864,44 @@ index 348605306027..e9681f0fb831 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4492,6 +4772,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4492,6 +4803,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5585,7 +5866,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5585,7 +5897,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5617,12 +5900,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5599,6 +5913,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ int task_sleep = flags & DEQUEUE_SLEEP;
+ int idle_h_nr_running = task_has_idle_policy(p);
+ bool was_sched_idle = sched_idle_rq(rq);
++#ifdef CONFIG_CACULE_SCHED
++ struct task_struct *parent = p->parent;
++
++ if (task_sleep && parent) {
++ if (parent->nr_forks_per_time)
++ parent->nr_forks_per_time--;
++
++ if (parent->is_fake_interactive)
++ parent->is_fake_interactive--;
++ }
++#endif
+
+ util_est_dequeue(&rq->cfs, p);
+
+@@ -5617,12 +5942,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -815,32 +916,32 @@ index 348605306027..e9681f0fb831 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5738,6 +6023,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5738,6 +6065,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5784,6 +6070,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5784,6 +6112,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
-+#endif
-
++#endif /* CONFIG_CACULE_SCHED */
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6460,6 +6747,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6460,6 +6789,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6693,6 +6981,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6693,6 +7023,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -894,13 +995,13 @@ index 348605306027..e9681f0fb831 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6715,6 +7054,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -6715,6 +7096,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
int want_affine = 0;
int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
+
@@ -915,7 +1016,7 @@ index 348605306027..e9681f0fb831 100644
+
+ new_cpu = find_least_IS_cpu(p);
+
-+ if (likely(new_cpu != -1))
++ if (new_cpu != -1)
+ return new_cpu;
+
+ new_cpu = prev_cpu;
@@ -923,16 +1024,16 @@ index 348605306027..e9681f0fb831 100644
+#else
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
-
-@@ -6727,6 +7086,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
-
+
+@@ -6727,6 +7128,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6774,6 +7134,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6774,6 +7176,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -940,31 +1041,31 @@ index 348605306027..e9681f0fb831 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6799,6 +7160,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6799,6 +7202,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6844,6 +7206,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6844,6 +7248,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6922,6 +7285,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6922,6 +7327,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6930,9 +7294,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6930,9 +7336,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -974,23 +1075,23 @@ index 348605306027..e9681f0fb831 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6946,10 +7313,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6946,10 +7355,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -6979,6 +7348,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6979,6 +7390,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1002,14 +1103,14 @@ index 348605306027..e9681f0fb831 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -6988,11 +7362,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6988,11 +7404,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1017,15 +1118,15 @@ index 348605306027..e9681f0fb831 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7007,6 +7384,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7007,6 +7426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7181,7 +7559,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7181,7 +7601,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1033,60 +1134,60 @@ index 348605306027..e9681f0fb831 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7189,7 +7570,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7189,7 +7612,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7205,7 +7588,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7205,7 +7630,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7216,8 +7601,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7216,8 +7643,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7445,6 +7832,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+
+@@ -7445,6 +7874,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7452,6 +7840,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7452,6 +7882,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10720,11 +11109,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10720,11 +11151,38 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1099,6 +1200,8 @@ index 348605306027..e9681f0fb831 100644
+ struct sched_entity *curr;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
++ struct task_struct *parent = p->parent;
++ u64 now = sched_clock();
+
+ rq_lock(rq, &rf);
+ update_rq_clock(rq);
@@ -1109,37 +1212,43 @@ index 348605306027..e9681f0fb831 100644
+ update_curr(cfs_rq);
+
+ rq_unlock(rq, &rf);
++
++ parent->fork_start_win_stamp = now;
++ if (parent->nr_forks_per_time >= nr_fork_threshold)
++ parent->is_fake_interactive++;
++
++ parent->nr_forks_per_time++;
+}
+#else
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10755,6 +11163,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10755,6 +11213,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10867,6 +11276,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10867,6 +11326,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10877,6 +11288,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10877,6 +11338,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10884,12 +11296,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10884,12 +11346,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1147,17 +1256,17 @@ index 348605306027..e9681f0fb831 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10945,13 +11362,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10945,13 +11412,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1178,7 +1287,7 @@ index 348605306027..e9681f0fb831 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fac1b121d113..7d9d59cee2d2 100644
@@ -1186,7 +1295,7 @@ index fac1b121d113..7d9d59cee2d2 100644
+++ b/kernel/sched/sched.h
@@ -517,10 +517,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1195,9 +1304,9 @@ index fac1b121d113..7d9d59cee2d2 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
+
@@ -529,9 +532,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
@@ -1211,14 +1320,14 @@ index fac1b121d113..7d9d59cee2d2 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index b9306d2bb426..8b3c772eb458 100644
+index b9306d2bb426..1ccdf97188bd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
+@@ -1659,6 +1659,43 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -1226,7 +1335,7 @@ index b9306d2bb426..8b3c772eb458 100644
+ {
+ .procname = "sched_interactivity_factor",
+ .data = &interactivity_factor,
-+ .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
@@ -1240,7 +1349,21 @@ index b9306d2bb426..8b3c772eb458 100644
+ {
+ .procname = "sched_max_lifetime_ms",
+ .data = &cacule_max_lifetime,
-+ .maxlen = sizeof(int),
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_fake_interactive_decay_time_ms",
++ .data = &fake_interactive_decay_time,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
++ .procname = "sched_nr_fork_threshold",
++ .data = &nr_fork_threshold,
++ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },