path: root/cacule-5.10.patch
author     ptr1337  2021-06-19 13:58:48 +0200
committer  ptr1337  2021-06-19 13:58:48 +0200
commit     162facef51cd051251dc76ffd77467990e5b2329 (patch)
tree       d2d2f523aa1e612a91ad93da40ad01d76e5bf121 /cacule-5.10.patch
parent     86dc972c1115133d981b5beddc146cbc2c456044 (diff)
download   aur-linux-raspberrypi4-cacule.tar.gz

5.10.44
Diffstat (limited to 'cacule-5.10.patch')
-rw-r--r--  cacule-5.10.patch  |  382
1 file changed, 212 insertions(+), 170 deletions(-)
diff --git a/cacule-5.10.patch b/cacule-5.10.patch
index 31f04ae85d2e..c873c5b2f1ae 100644
--- a/cacule-5.10.patch
+++ b/cacule-5.10.patch
@@ -5,12 +5,12 @@ index d4b32cc32bb7..2788c5bbd870 100644
@@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
requirements for EAS but you do not want to use it, change
this value to 0.
-
+
+sched_interactivity_factor (CacULE scheduler only)
+==================================================
+Sets the value *m* for interactivity score calculations. See
+Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
-
+
sched_schedstats
================
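
For reference, the score that sched_interactivity_factor tunes is the ULE interactivity score from the paper cited above: a task that sleeps more than it runs scores below m, a task that runs more than it sleeps scores above m, and lower scores mean more interactive. A minimal sketch of the calculation (CacULE derives the sleep and run times from the task's lifetime and its vruntime; the helper below is illustrative, not the patch's exact code):

/*
 * ULE-style interactivity score, as tuned by m
 * (sched_interactivity_factor). Lower is more interactive.
 * Kernel types assumed: u64 from <linux/types.h>.
 * Assumes sleep_ns and run_ns are both nonzero.
 */
static unsigned int interactivity_score(u64 sleep_ns, u64 run_ns,
					unsigned int m)
{
	if (sleep_ns >= run_ns)
		return m / (sleep_ns / run_ns);	/* mostly sleeping: (0, m] */

	return m + m / (run_ns / sleep_ns);	/* mostly running: (m, 2m] */
}

With the default factor of 32768 (set in the kernel/sched/fair.c hunk below), a mostly-sleeping task scores near 0 while a CPU-bound one approaches 65536; the factor is adjustable at runtime as kernel.sched_interactivity_factor.
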
diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
@@ -102,7 +102,7 @@ index 76cd21fa5501..0abad9f1247a 100644
@@ -448,10 +448,22 @@ struct sched_statistics {
#endif
};
-
+
+#ifdef CONFIG_CACULE_SCHED
+struct cacule_node {
+ struct cacule_node* next;
@@ -121,7 +121,7 @@ index 76cd21fa5501..0abad9f1247a 100644
+#endif
struct list_head group_node;
unsigned int on_rq;
-
+
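
The cacule_node added above is embedded directly in struct sched_entity, so fair-class code can map a node back to its owning entity with the usual container_of() idiom. That is what the se_of() helper added to kernel/sched/fair.c further down does; a sketch, assuming the member is named cacule_node as in the wake_up_new_task() hunk below:

/* Recover the sched_entity that embeds a given cacule_node. */
static inline struct sched_entity *se_of(struct cacule_node *cn)
{
	return container_of(cn, struct sched_entity, cacule_node);
}
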
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3c31ba88aca5..4cf162341ab8 100644
--- a/include/linux/sched/sysctl.h
@@ -129,7 +129,7 @@ index 3c31ba88aca5..4cf162341ab8 100644
@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
-
+
+#ifdef CONFIG_CACULE_SCHED
+extern unsigned int interactivity_factor;
+extern unsigned int interactivity_threshold;
@@ -144,9 +144,9 @@ index fc4c9f416fad..16676cfd11d7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -825,6 +825,17 @@ config UCLAMP_BUCKETS_COUNT
-
+
endmenu
-
+
+config CACULE_SCHED
+ bool "CacULE CPU scheduler"
+ default y
@@ -176,23 +176,23 @@ index 38ef6d06888e..865f8dbddca8 100644
@@ -46,6 +46,9 @@ choice
1000 Hz is the preferred choice for desktop systems and other
systems requiring fast interactive responses to events.
-
+
+ config HZ_2000
+ bool "2000 HZ"
+
endchoice
-
+
config HZ
@@ -54,6 +57,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 1000 if HZ_1000
+ default 2000 if HZ_2000
-
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3a150445e0cb..4abd45eebdb5 100644
+index 57b236251884..6ad7f6f33ed5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3068,6 +3068,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -205,12 +205,12 @@ index 3a150445e0cb..4abd45eebdb5 100644
+#endif
+
INIT_LIST_HEAD(&p->se.group_node);
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3352,6 +3357,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
@@ -221,20 +221,20 @@ index 3a150445e0cb..4abd45eebdb5 100644
@@ -7066,6 +7075,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-
+
+#ifdef CONFIG_CACULE_SCHED
+ printk(KERN_INFO "CacULE CPU scheduler v5.10-r2 by Hamad Al Marri.");
+#endif
+
wait_bit_init();
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 2357921580f9..86bd2a41f57a 100644
+index 6264584b51c2..bd3901a849c4 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -535,8 +535,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
-
+@@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -247,7 +247,7 @@ index 2357921580f9..86bd2a41f57a 100644
struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
-@@ -557,21 +560,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+@@ -576,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
last = __pick_last_entity(cfs_rq);
if (last)
max_vruntime = last->vruntime;
@@ -276,7 +276,7 @@ index 2357921580f9..86bd2a41f57a 100644
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 348605306027..8c6e30328aeb 100644
+index 1ad0e52487f6..b3ae01a34323 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -289,23 +289,41 @@ index 348605306027..8c6e30328aeb 100644
+ * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
*/
#include "sched.h"
-
-@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
+
+@@ -82,7 +86,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
+ unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+
++#ifdef CONFIG_CACULE_SCHED
++const_debug unsigned int sysctl_sched_migration_cost = 200000UL;
++#else
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
++#endif
+
+ int sched_thermal_decay_shift;
+ static int __init setup_sched_thermal_decay_shift(char *str)
+@@ -113,6 +121,17 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
-
+
+#endif
+#ifdef CONFIG_CACULE_SCHED
+unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms
+unsigned int __read_mostly interactivity_factor = 32768;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++unsigned int __read_mostly interactivity_threshold = 0;
++#else
+unsigned int __read_mostly interactivity_threshold = 1000;
++#endif
++
#endif
-
+
#ifdef CONFIG_CFS_BANDWIDTH
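
Note the asymmetric defaults above: under CONFIG_FAIR_GROUP_SCHED the threshold starts at 0, which is_interactive() further down treats as "classification disabled" (it returns 0 whenever the threshold is unset); without group scheduling, any task scoring below 1000 counts as interactive.
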
-@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
-
+@@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+
const struct sched_class fair_sched_class;
-
+
+
+#ifdef CONFIG_CACULE_SCHED
+static inline struct sched_entity *se_of(struct cacule_node *cn)
@@ -317,7 +335,7 @@ index 348605306027..8c6e30328aeb 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +539,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -326,7 +344,7 @@ index 348605306027..8c6e30328aeb 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +585,170 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +595,170 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
@@ -362,7 +380,7 @@ index 348605306027..8c6e30328aeb 100644
+
+static inline int is_interactive(struct cacule_node *cn)
+{
-+ if (se_of(cn)->vruntime == 0)
++ if (!interactivity_threshold || se_of(cn)->vruntime == 0)
+ return 0;
+
+ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
@@ -405,7 +423,7 @@ index 348605306027..8c6e30328aeb 100644
+
+ return -1;
+}
-+
+
+/*
+ * Enqueue an entity
+ */
@@ -483,7 +501,7 @@ index 348605306027..8c6e30328aeb 100644
+ struct cacule_node *next = se->next;
+
+ prev->next = next;
-
++
+ if (next)
+ next->prev = prev;
+ }
@@ -497,12 +515,12 @@ index 348605306027..8c6e30328aeb 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +806,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
-
+@@ -626,16 +816,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+
return rb_entry(next, struct sched_entity, run_node);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
@@ -513,35 +531,35 @@ index 348605306027..8c6e30328aeb 100644
+ return se_of(cfs_rq->tail);
+#else
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
-
+
if (!last)
return NULL;
-
+
return rb_entry(last, struct sched_entity, run_node);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
/**************************************************************
-@@ -720,6 +908,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -730,6 +928,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +918,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -739,6 +938,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,14 +1026,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -846,14 +1046,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
-
+
+#ifdef CONFIG_CACULE_SCHED
+static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
@@ -587,13 +605,13 @@ index 348605306027..8c6e30328aeb 100644
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
+#endif
-
+
if (unlikely(!curr))
return;
-@@ -860,8 +1087,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -870,8 +1107,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
@@ -603,18 +621,18 @@ index 348605306027..8c6e30328aeb 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
+#endif
-
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
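
In the CacULE branch of update_curr() above, delta_fair is added straight onto the entity's vruntime, with no min_vruntime to renormalize against; the normalize_lifetime() helper declared in the earlier hunk then caps the tracked lifetime at cacule_max_lifetime (22000 ms by default, per the fair.c hunk above) while keeping the task's run-time-to-lifetime ratio, so the interactivity score reflects recent behaviour rather than the whole task history. A simplified sketch of that mechanism (the patch's real code uses shift-based fixed-point arithmetic; this shows only the idea):

/*
 * Simplified sketch of CacULE lifetime normalization. Once a
 * task outlives max_lifetime_ns, restart its window at half the
 * maximum and shrink vruntime by the same factor, preserving the
 * run/lifetime ratio that the interactivity score is based on.
 */
static void normalize_lifetime_sketch(u64 now, u64 max_lifetime_ns,
				      struct sched_entity *se)
{
	struct cacule_node *cn = &se->cacule_node;
	u64 life = now - cn->cacule_start_time;

	if (life <= max_lifetime_ns)
		return;

	cn->cacule_start_time = now - max_lifetime_ns / 2;
	/* note: a real version must avoid u64 overflow here */
	se->vruntime = se->vruntime * (max_lifetime_ns / 2) / life;
}
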
-@@ -1020,7 +1254,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -1030,7 +1274,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
if (!schedstat_enabled())
return;
-
-@@ -1052,7 +1285,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+
+@@ -1062,7 +1305,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
@@ -624,34 +642,34 @@ index 348605306027..8c6e30328aeb 100644
se->exec_start = rq_clock_task(rq_of(cfs_rq));
+#endif
}
-
+
/**************************************************
-@@ -4104,7 +4341,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
-
+@@ -4123,7 +4370,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
s64 d = se->vruntime - cfs_rq->min_vruntime;
-
+
if (d < 0)
-@@ -4115,6 +4352,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4134,6 +4381,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4146,6 +4384,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4165,6 +4413,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-
-@@ -4204,18 +4443,23 @@ static inline bool cfs_bandwidth_used(void);
+
+@@ -4223,18 +4472,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -659,7 +677,7 @@ index 348605306027..8c6e30328aeb 100644
bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
+#endif
bool curr = cfs_rq->curr == se;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* If we're the current task, we must renormalise before calling
@@ -668,87 +686,87 @@ index 348605306027..8c6e30328aeb 100644
if (renorm && curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
update_curr(cfs_rq);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4224,6 +4468,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4243,6 +4497,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+#endif
-
+
/*
* When enqueuing a sched_entity, we must:
-@@ -4238,8 +4483,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4257,8 +4512,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+#endif
-
+
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4260,6 +4507,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4279,6 +4536,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4304,6 +4552,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4323,6 +4581,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
+#endif /* !CONFIG_CACULE_SCHED */
-
+
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -4328,13 +4577,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
+
+@@ -4347,13 +4606,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+
update_stats_dequeue(cfs_rq, se, flags);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4343,12 +4595,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4362,12 +4624,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
+#endif
-
+
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
-
+
update_cfs_group(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4357,8 +4611,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4376,8 +4640,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
+#endif
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+/*
+ * Preempt the current task with a newly woken task if needed:
@@ -764,18 +782,18 @@ index 348605306027..8c6e30328aeb 100644
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4398,6 +4665,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4417,6 +4694,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4432,6 +4700,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4451,6 +4729,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-
+
+#ifdef CONFIG_CACULE_SCHED
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -793,26 +811,26 @@ index 348605306027..8c6e30328aeb 100644
+#else
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
-@@ -4492,6 +4775,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-
+
+@@ -4511,6 +4804,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+
return se;
}
+#endif /* CONFIG_CACULE_SCHED */
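
The CacULE pick_next_entity() above (its body is elided by this diff) replaces the CFS "leftmost rbtree node" pick: the run queue is now a linked list, so selection walks it and takes the node with the lowest, i.e. best, interactivity score. A hedged sketch of that loop, written in terms of the calc_interactivity() helper this patch adds:

/*
 * Sketch: scan the CacULE list from the head and return the node
 * with the best (lowest) interactivity score. The patch's real
 * pick_next_entity() additionally has to consider cfs_rq->curr.
 */
static struct cacule_node *pick_best_node(struct cacule_node *head, u64 now)
{
	struct cacule_node *cn, *best = head;
	unsigned int score, best_score;

	if (!head)
		return NULL;

	best_score = calc_interactivity(now, head);
	for (cn = head->next; cn; cn = cn->next) {
		score = calc_interactivity(now, cn);
		if (score < best_score) {
			best = cn;
			best_score = score;
		}
	}

	return best;
}
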
-
+
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-
-@@ -5585,7 +5869,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+@@ -5604,7 +5898,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void set_next_buddy(struct sched_entity *se);
+#endif
-
+
/*
* The dequeue_task method is called before nr_running is
-@@ -5617,12 +5903,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5636,12 +5932,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -827,32 +845,32 @@ index 348605306027..8c6e30328aeb 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5738,6 +6026,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5757,6 +6055,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5784,6 +6073,7 @@ static int wake_wide(struct task_struct *p)
+@@ -5803,6 +6102,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
-@@ -6460,6 +6750,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6479,6 +6779,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6693,6 +6984,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
-
+@@ -6712,6 +7013,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
@@ -906,13 +924,13 @@ index 348605306027..8c6e30328aeb 100644
+ return new_cpu;
+}
+#endif
-
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6715,6 +7057,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+@@ -6734,6 +7086,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
int want_affine = 0;
int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
-
+
+#ifdef CONFIG_CACULE_SCHED
+ struct sched_entity *se = &p->se;
+
@@ -935,16 +953,16 @@ index 348605306027..8c6e30328aeb 100644
+#else
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
-
-@@ -6727,6 +7089,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
-
+
+@@ -6746,6 +7118,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
+
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6774,6 +7137,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6793,6 +7166,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -952,31 +970,31 @@ index 348605306027..8c6e30328aeb 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6799,6 +7163,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
-
+@@ -6818,6 +7192,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
-@@ -6844,6 +7209,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6863,6 +7238,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-
+
+#if !defined(CONFIG_CACULE_SCHED)
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6922,6 +7288,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6941,6 +7317,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6930,9 +7297,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6949,9 +7326,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -986,23 +1004,23 @@ index 348605306027..8c6e30328aeb 100644
int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
+#endif /* CONFIG_CACULE_SCHED */
-
+
if (unlikely(se == pse))
return;
-@@ -6946,10 +7316,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6965,10 +7345,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -6979,6 +7351,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6998,6 +7380,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1014,14 +1032,14 @@ index 348605306027..8c6e30328aeb 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -6988,11 +7365,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7007,11 +7394,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
return;
-
+
preempt:
resched_curr(rq);
+
@@ -1029,15 +1047,15 @@ index 348605306027..8c6e30328aeb 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7007,6 +7387,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
-
+@@ -7026,6 +7416,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
+#endif /* CONFIG_CACULE_SCHED */
}
-
+
struct task_struct *
-@@ -7181,7 +7562,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7200,7 +7591,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1045,60 +1063,84 @@ index 348605306027..8c6e30328aeb 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct sched_entity *se = &curr->se;
+#endif
-
+
/*
* Are we the only task in the tree?
-@@ -7189,7 +7573,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7208,7 +7602,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
clear_buddies(cfs_rq, se);
+#endif
-
+
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7205,7 +7591,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7224,7 +7620,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
-
+
+#if !defined(CONFIG_CACULE_SCHED)
set_skip_buddy(se);
+#endif
}
-
+
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7216,8 +7604,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7235,8 +7633,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/* Tell the scheduler that we'd really like pse to run next. */
set_next_buddy(se);
+#endif
-
+
yield_task_fair(rq);
-
-@@ -7445,6 +7835,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+
+@@ -7464,6 +7864,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
-
+
+#if !defined(CONFIG_CACULE_SCHED)
/*
* Buddy candidates are cache hot:
*/
-@@ -7452,6 +7843,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7471,6 +7872,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
+#endif
-
+
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -10720,11 +11112,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10507,9 +10909,11 @@ static void nohz_newidle_balance(struct rq *this_rq)
+ if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
+ return;
+
++#if !defined(CONFIG_CACULE_SCHED)
+ /* Will wake up very soon. No time for doing anything else*/
+ if (this_rq->avg_idle < sysctl_sched_migration_cost)
+ return;
++#endif
+
+ /* Don't need to update blocked load of idle CPUs*/
+ if (!READ_ONCE(nohz.has_blocked) ||
+@@ -10577,7 +10981,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+ */
+ rq_unpin_lock(this_rq, rf);
+
+- if (this_rq->avg_idle < sysctl_sched_migration_cost ||
++ if (
++#if !defined(CONFIG_CACULE_SCHED)
++ this_rq->avg_idle < sysctl_sched_migration_cost ||
++#endif
+ !READ_ONCE(this_rq->rd->overload)) {
+
+ rcu_read_lock();
+@@ -10742,11 +11149,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
-
+
+#ifdef CONFIG_CACULE_SCHED
/*
* called on fork with the child task as argument from the parent's context
@@ -1126,32 +1168,32 @@ index 348605306027..8c6e30328aeb 100644
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10755,6 +11166,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10777,6 +11203,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
+#endif /* CONFIG_CACULE_SCHED */
-
+
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10867,6 +11279,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -10895,6 +11322,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+
if (!vruntime_normalized(p)) {
-@@ -10877,6 +11291,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10905,6 +11334,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
-
+
detach_entity_cfs_rq(se);
}
-@@ -10884,12 +11299,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10912,12 +11342,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
@@ -1159,17 +1201,17 @@ index 348605306027..8c6e30328aeb 100644
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+#endif
-
+
attach_entity_cfs_rq(se);
-
+
+#if !defined(CONFIG_CACULE_SCHED)
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
}
-
+
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10945,13 +11365,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10973,13 +11408,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -1190,15 +1232,15 @@ index 348605306027..8c6e30328aeb 100644
+ cfs_rq->tail = NULL;
+#endif
}
-
+
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index fac1b121d113..7d9d59cee2d2 100644
+index fdebfcbdfca9..e7fd62e89154 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -517,10 +517,13 @@ struct cfs_rq {
+@@ -524,10 +524,13 @@ struct cfs_rq {
unsigned int idle_h_nr_running; /* SCHED_IDLE */
-
+
u64 exec_clock;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -1207,10 +1249,10 @@ index fac1b121d113..7d9d59cee2d2 100644
u64 min_vruntime_copy;
#endif
+#endif /* CONFIG_CACULE_SCHED */
-
+
struct rb_root_cached tasks_timeline;
-
-@@ -529,9 +532,15 @@ struct cfs_rq {
+
+@@ -536,9 +539,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr;
@@ -1223,7 +1265,7 @@ index fac1b121d113..7d9d59cee2d2 100644
struct sched_entity *last;
struct sched_entity *skip;
+#endif // CONFIG_CACULE_SCHED
-
+
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c