| Field | Value | Date |
|---|---|---|
| author | P.Jung | 2021-08-26 14:39:29 +0000 |
| committer | P.Jung | 2021-08-26 14:39:29 +0000 |
| commit | 7dc01293d9c88235194e38948231b9de6da50cc2 (patch) | |
| tree | f5a36689116011a9541813cdaa5879dd058eba1f | |
| parent | 2d48310cbcfce2d0aac23a6945065d40353c9961 (diff) | |
| download | aur-7dc01293d9c88235194e38948231b9de6da50cc2.tar.gz | |
5.13.13
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | .SRCINFO | 12 |
| -rw-r--r-- | PKGBUILD | 8 |
| -rw-r--r-- | cacule-5.10.patch | 360 |

3 files changed, 190 insertions, 190 deletions
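The change itself is a routine version bump: `pkgver` moves from 5.10.59.hardened1 to 5.10.60.hardened1, the two upstream source URLs and the first three sha256sums are updated in lockstep, and .SRCINFO is regenerated from the PKGBUILD. Below is a minimal sketch of how such a bump is typically prepared; the maintainer's exact workflow is not recorded in the commit, and `updpkgsums` comes from the pacman-contrib package:

```sh
# Hypothetical maintenance steps for this kind of bump (not part of the commit).
cd linux-hardened-cacule

# Edit pkgver in the PKGBUILD (5.10.59.hardened1 -> 5.10.60.hardened1), then
# regenerate the sha256sums=() array for the new tarball and hardened patch.
updpkgsums

# Regenerate .SRCINFO so the AUR web interface shows the new pkgver/sources.
makepkg --printsrcinfo > .SRCINFO

# Optionally confirm the package still builds before pushing.
makepkg --syncdeps --cleanbuild
```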
diff --git a/.SRCINFO b/.SRCINFO
@@ -1,6 +1,6 @@
 pkgbase = linux-hardened-cacule
 	pkgdesc = Security-Hardened Linux with the cacule scheduler
-	pkgver = 5.10.59.hardened1
+	pkgver = 5.10.60.hardened1
 	pkgrel = 1
 	url = https://github.com/anthraxx/linux-hardened
 	arch = x86_64
@@ -20,14 +20,14 @@ pkgbase = linux-hardened-cacule
 	makedepends = graphviz
 	makedepends = imagemagick
 	options = !strip
-	source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.10.59.tar.xz
-	source = https://github.com/anthraxx/linux-hardened/releases/download/5.10.59-hardened1/linux-hardened-5.10.59-hardened1.patch
+	source = https://www.kernel.org/pub/linux/kernel/v5.x/linux-5.10.60.tar.xz
+	source = https://github.com/anthraxx/linux-hardened/releases/download/5.10.60-hardened1/linux-hardened-5.10.60-hardened1.patch
 	source = cacule-5.10.patch
 	source = cpu-patches.patch
 	source = config
-	sha256sums = 333cadc15f23e2060bb9701dbd9c23cfb196c528cded797329a0c369c2b6ea80
-	sha256sums = 535f3557bf7f43339c87f19b39b3be0628867fc5fd8a92dbed1bf1ac060031ec
-	sha256sums = 82662e54c8a660775284a73b0fed0f849903770c0a7c8b18c317d28b00a16a55
+	sha256sums = 696ff7753f6c7c5123dbcb0a22d693cb358c760c61a76649531b6a207155f78d
+	sha256sums = 083a7e07b89fb7212eee92c84e849f6c36586b6af8875816b194bf77bd08cc42
+	sha256sums = 3d4a0602425000d18162fdd45c6f13dd1c5ef78ef3b5b7f19365a8b0cf030c3a
 	sha256sums = 4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a
 	sha256sums = 4f3152a8b04c56e1a3a823cb1afbece7bca8205493548ecfe7979d8555c22340

diff --git a/PKGBUILD b/PKGBUILD
@@ -6,7 +6,7 @@
 pkgbase=linux-hardened-cacule
-pkgver=5.10.59.hardened1
+pkgver=5.10.60.hardened1
 pkgrel=1
 pkgdesc='Security-Hardened Linux with the cacule scheduler'
 url='https://github.com/anthraxx/linux-hardened'
@@ -26,9 +26,9 @@ source=(
   cpu-patches.patch
   config             # the main kernel config file
 )
-sha256sums=('333cadc15f23e2060bb9701dbd9c23cfb196c528cded797329a0c369c2b6ea80'
-            '535f3557bf7f43339c87f19b39b3be0628867fc5fd8a92dbed1bf1ac060031ec'
-            '82662e54c8a660775284a73b0fed0f849903770c0a7c8b18c317d28b00a16a55'
+sha256sums=('696ff7753f6c7c5123dbcb0a22d693cb358c760c61a76649531b6a207155f78d'
+            '083a7e07b89fb7212eee92c84e849f6c36586b6af8875816b194bf77bd08cc42'
+            '3d4a0602425000d18162fdd45c6f13dd1c5ef78ef3b5b7f19365a8b0cf030c3a'
             '4f22a6e4e5fe6f3bb39ca39073fa812eb9c0dbb3ac9cec64ed0a90d06b54d32a'
             '4f3152a8b04c56e1a3a823cb1afbece7bca8205493548ecfe7979d8555c22340')

diff --git a/cacule-5.10.patch b/cacule-5.10.patch
index dca091638e40..eb5486e6a4e9 100644
--- a/cacule-5.10.patch
+++ b/cacule-5.10.patch
@@ -5,12 +5,12 @@ index d4b32cc32bb7..2788c5bbd870 100644 @@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the requirements for EAS but you do not want to use it, change this value to 0. - + +sched_interactivity_factor (CacULE scheduler only) +================================================== +Sets the value *m* for interactivity score calculations.
See +Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf - + sched_schedstats ================ diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst @@ -102,7 +102,7 @@ index 2660ee4b08ad..b54f0660cc86 100644 @@ -456,10 +456,23 @@ struct sched_statistics { #endif }; - + +#ifdef CONFIG_CACULE_SCHED +struct cacule_node { + struct cacule_node* next; @@ -122,7 +122,7 @@ index 2660ee4b08ad..b54f0660cc86 100644 +#endif struct list_head group_node; unsigned int on_rq; - + diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 3c31ba88aca5..e79ca8c67a70 100644 --- a/include/linux/sched/sysctl.h @@ -130,7 +130,7 @@ index 3c31ba88aca5..e79ca8c67a70 100644 @@ -31,6 +31,16 @@ extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; - + +#ifdef CONFIG_CACULE_SCHED +extern unsigned int interactivity_factor; +extern unsigned int cacule_max_lifetime; @@ -145,13 +145,13 @@ index 3c31ba88aca5..e79ca8c67a70 100644 SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, diff --git a/init/Kconfig b/init/Kconfig -index fc4c9f416fad..e93632e5b7fc 100644 +index fc4c9f416fad..ff0e446221da 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -825,6 +825,51 @@ config UCLAMP_BUCKETS_COUNT - + endmenu - + +config CACULE_SCHED + bool "CacULE CPU scheduler" + default y @@ -164,14 +164,14 @@ index fc4c9f416fad..e93632e5b7fc 100644 + +config CACULE_RDB + bool "RDB (Response Driven Balancer)" -+ default y ++ default n + depends on CACULE_SCHED + help + This is an experimental load balancer for CacULE. It is a lightweight + load balancer which is a replacement of CFS load balancer. It migrates + tasks based on their interactivity scores. + -+ If unsure, say Y here. ++ If unsure, say N. + +config RDB_INTERVAL + int "RDB load balancer interval" @@ -215,19 +215,19 @@ index 38ef6d06888e..865f8dbddca8 100644 @@ -46,6 +46,9 @@ choice 1000 Hz is the preferred choice for desktop systems and other systems requiring fast interactive responses to events. 
- + + config HZ_2000 + bool "2000 HZ" + endchoice - + config HZ @@ -54,6 +57,7 @@ config HZ default 250 if HZ_250 default 300 if HZ_300 default 1000 if HZ_1000 + default 2000 if HZ_2000 - + config SCHED_HRTICK def_bool HIGH_RES_TIMERS diff --git a/kernel/sched/core.c b/kernel/sched/core.c @@ -237,13 +237,13 @@ index 679562d2f55d..b3c4594eb320 100644 @@ -72,6 +72,10 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; */ unsigned int sysctl_sched_rt_period = 1000000; - + +#ifdef CONFIG_CACULE_SCHED +int __read_mostly cacule_yield = 1; +#endif + __read_mostly int scheduler_running; - + /* @@ -3068,6 +3072,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->se.prev_sum_exec_runtime = 0; @@ -255,12 +255,12 @@ index 679562d2f55d..b3c4594eb320 100644 +#endif + INIT_LIST_HEAD(&p->se.group_node); - + #ifdef CONFIG_FAIR_GROUP_SCHED @@ -3352,6 +3361,10 @@ void wake_up_new_task(struct task_struct *p) update_rq_clock(rq); post_init_entity_util_avg(p); - + +#ifdef CONFIG_CACULE_SCHED + p->se.cacule_node.cacule_start_time = sched_clock(); +#endif @@ -276,12 +276,12 @@ index 679562d2f55d..b3c4594eb320 100644 u64 delta; +#endif int os; - + /* @@ -4073,6 +4088,7 @@ static void sched_tick_remote(struct work_struct *work) - + update_rq_clock(rq); - + +#if !defined(CONFIG_CACULE_SCHED) if (!is_idle_task(curr)) { /* @@ -293,12 +293,12 @@ index 679562d2f55d..b3c4594eb320 100644 +#endif + curr->sched_class->task_tick(rq, curr, 0); - + calc_load_nohz_remote(rq); @@ -6092,6 +6110,13 @@ static void do_sched_yield(void) struct rq_flags rf; struct rq *rq; - + +#ifdef CONFIG_CACULE_SCHED + struct task_struct *curr = current; + struct cacule_node *cn = &curr->se.cacule_node; @@ -307,12 +307,12 @@ index 679562d2f55d..b3c4594eb320 100644 + cn->vruntime |= YIELD_MARK; +#endif rq = this_rq_lock_irq(&rf); - + schedstat_inc(rq->yld_count); @@ -7066,6 +7091,14 @@ void __init sched_init(void) BUG_ON(&dl_sched_class + 1 != &stop_sched_class); #endif - + +#ifdef CONFIG_CACULE_SCHED +#ifdef CONFIG_CACULE_RDB + printk(KERN_INFO "CacULE CPU scheduler (RDB) v5.10-r3 by Hamad Al Marri."); @@ -322,14 +322,14 @@ index 679562d2f55d..b3c4594eb320 100644 +#endif + wait_bit_init(); - + #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 70a578272436..506c0512610c 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -554,8 +554,11 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) - + void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { - s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, @@ -371,7 +371,7 @@ index 70a578272436..506c0512610c 100644 cfs_rq->nr_spread_over); SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 262b02d75007..cf3ae2a8568b 100644 +index 262b02d75007..1dc6f346111c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -19,9 +19,24 @@ @@ -384,7 +384,7 @@ index 262b02d75007..cf3ae2a8568b 100644 + * (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com> */ #include "sched.h" - + +#ifdef CONFIG_CACULE_SCHED +unsigned int __read_mostly cacule_max_lifetime = 22000; // in ms +unsigned int __read_mostly interactivity_factor = 32768; @@ -402,19 +402,19 @@ index 262b02d75007..cf3ae2a8568b 100644 @@ -82,7 +97,11 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; unsigned int sysctl_sched_wakeup_granularity = 1000000UL; static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; - + +#ifdef 
CONFIG_CACULE_SCHED +const_debug unsigned int sysctl_sched_migration_cost = 200000UL; +#else const_debug unsigned int sysctl_sched_migration_cost = 500000UL; +#endif - + int sched_thermal_decay_shift; static int __init setup_sched_thermal_decay_shift(char *str) @@ -253,6 +272,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight - + const struct sched_class fair_sched_class; - + + +#ifdef CONFIG_CACULE_SCHED +static inline struct sched_entity *se_of(struct cacule_node *cn) @@ -532,7 +532,7 @@ index 262b02d75007..cf3ae2a8568b 100644 + + return task_has_idle_policy(task_of(se)); +} - + +/* + * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1, + * otherwise return -1 @@ -660,11 +660,11 @@ index 262b02d75007..cf3ae2a8568b 100644 * Enqueue an entity into the rb-tree: */ @@ -626,16 +869,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se) - + return rb_entry(next, struct sched_entity, run_node); } +#endif /* CONFIG_CACULE_SCHED */ - + #ifdef CONFIG_SCHED_DEBUG struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) { @@ -675,19 +675,19 @@ index 262b02d75007..cf3ae2a8568b 100644 + return se_of(cfs_rq->tail); +#else struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); - + if (!last) return NULL; - + return rb_entry(last, struct sched_entity, run_node); +#endif /* CONFIG_CACULE_SCHED */ } - + /************************************************************** @@ -730,6 +981,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) return slice; } - + +#if !defined(CONFIG_CACULE_SCHED) /* * We calculate the vruntime slice of a to-be-inserted task. @@ -697,13 +697,13 @@ index 262b02d75007..cf3ae2a8568b 100644 return calc_delta_fair(sched_slice(cfs_rq, se), se); } +#endif /* CONFIG_CACULE_SCHED */ - + #include "pelt.h" #ifdef CONFIG_SMP @@ -846,14 +1099,55 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq) } #endif /* CONFIG_SMP */ - + +#ifdef CONFIG_CACULE_SCHED +static void normalize_lifetime(u64 now, struct sched_entity *se) +{ @@ -753,13 +753,13 @@ index 262b02d75007..cf3ae2a8568b 100644 u64 now = rq_clock_task(rq_of(cfs_rq)); u64 delta_exec; +#endif - + if (unlikely(!curr)) return; @@ -870,8 +1164,16 @@ static void update_curr(struct cfs_rq *cfs_rq) curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq->exec_clock, delta_exec); - + +#ifdef CONFIG_CACULE_SCHED + curr->cacule_node.last_run = now; + delta_fair = calc_delta_fair(delta_exec, curr); @@ -770,7 +770,7 @@ index 262b02d75007..cf3ae2a8568b 100644 curr->vruntime += calc_delta_fair(delta_exec, curr); update_min_vruntime(cfs_rq); +#endif - + if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); @@ -1030,7 +1332,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) @@ -780,7 +780,7 @@ index 262b02d75007..cf3ae2a8568b 100644 - if (!schedstat_enabled()) return; - + @@ -1062,7 +1363,12 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * We are starting a new run period: @@ -792,21 +792,21 @@ index 262b02d75007..cf3ae2a8568b 100644 se->exec_start = rq_clock_task(rq_of(cfs_rq)); +#endif } - + /************************************************** @@ -4129,7 +4435,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} - + static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) { -#ifdef CONFIG_SCHED_DEBUG +#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED) s64 d = 
se->vruntime - cfs_rq->min_vruntime; - + if (d < 0) @@ -4140,6 +4446,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) #endif } - + +#if !defined(CONFIG_CACULE_SCHED) static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) @@ -816,9 +816,9 @@ index 262b02d75007..cf3ae2a8568b 100644 se->vruntime = max_vruntime(se->vruntime, vruntime); } +#endif /* CONFIG_CACULE_SCHED */ - + static void check_enqueue_throttle(struct cfs_rq *cfs_rq); - + @@ -4229,18 +4537,23 @@ static inline bool cfs_bandwidth_used(void); static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) @@ -827,7 +827,7 @@ index 262b02d75007..cf3ae2a8568b 100644 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); +#endif bool curr = cfs_rq->curr == se; - + +#if !defined(CONFIG_CACULE_SCHED) /* * If we're the current task, we must renormalise before calling @@ -836,9 +836,9 @@ index 262b02d75007..cf3ae2a8568b 100644 if (renorm && curr) se->vruntime += cfs_rq->min_vruntime; +#endif - + update_curr(cfs_rq); - + +#if !defined(CONFIG_CACULE_SCHED) /* * Otherwise, renormalise after, such that we're placed at the current @@ -848,24 +848,24 @@ index 262b02d75007..cf3ae2a8568b 100644 if (renorm && !curr) se->vruntime += cfs_rq->min_vruntime; +#endif - + /* * When enqueuing a sched_entity, we must: @@ -4263,8 +4577,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) update_cfs_group(se); account_entity_enqueue(cfs_rq, se); - + +#if !defined(CONFIG_CACULE_SCHED) if (flags & ENQUEUE_WAKEUP) place_entity(cfs_rq, se, 0); +#endif - + check_schedstat_required(); update_stats_enqueue(cfs_rq, se, flags); @@ -4285,6 +4601,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) check_enqueue_throttle(cfs_rq); } - + +#if !defined(CONFIG_CACULE_SCHED) static void __clear_buddies_last(struct sched_entity *se) { @@ -875,22 +875,22 @@ index 262b02d75007..cf3ae2a8568b 100644 __clear_buddies_skip(se); } +#endif /* !CONFIG_CACULE_SCHED */ - + static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); - + @@ -4353,13 +4671,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - + update_stats_dequeue(cfs_rq, se, flags); - + +#if !defined(CONFIG_CACULE_SCHED) clear_buddies(cfs_rq, se); +#endif - + if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); se->on_rq = 0; account_entity_dequeue(cfs_rq, se); - + +#if !defined(CONFIG_CACULE_SCHED) /* * Normalize after update_curr(); which will also have moved @@ -900,12 +900,12 @@ index 262b02d75007..cf3ae2a8568b 100644 if (!(flags & DEQUEUE_SLEEP)) se->vruntime -= cfs_rq->min_vruntime; +#endif - + /* return excess runtime on last dequeue */ return_cfs_rq_runtime(cfs_rq); - + update_cfs_group(se); - + +#if !defined(CONFIG_CACULE_SCHED) /* * Now advance min_vruntime if @se was the entity holding it back, @@ -916,7 +916,7 @@ index 262b02d75007..cf3ae2a8568b 100644 update_min_vruntime(cfs_rq); +#endif } - + +#ifdef CONFIG_CACULE_SCHED +static struct sched_entity * +pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr); @@ -939,13 +939,13 @@ index 262b02d75007..cf3ae2a8568b 100644 resched_curr(rq_of(cfs_rq)); } +#endif /* CONFIG_CACULE_SCHED */ - + static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) @@ -4457,6 +4796,31 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) se->prev_sum_exec_runtime = se->sum_exec_runtime; } - + +#ifdef CONFIG_CACULE_SCHED +static struct sched_entity * 
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) @@ -973,29 +973,29 @@ index 262b02d75007..cf3ae2a8568b 100644 +#else static int wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); - + @@ -4517,6 +4881,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) - + return se; } +#endif /* CONFIG_CACULE_SCHED */ - + static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); - + @@ -5608,9 +5973,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) assert_list_leaf_cfs_rq(rq); - + hrtick_update(rq); + +#ifdef CONFIG_CACULE_RDB + update_IS(rq); +#endif } - + +#if !defined(CONFIG_CACULE_SCHED) static void set_next_buddy(struct sched_entity *se); +#endif - + /* * The dequeue_task method is called before nr_running is @@ -5642,12 +6013,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) @@ -1022,12 +1022,12 @@ index 262b02d75007..cf3ae2a8568b 100644 + update_IS(rq); +#endif } - + #ifdef CONFIG_SMP @@ -5763,6 +6140,7 @@ static unsigned long capacity_of(int cpu) return cpu_rq(cpu)->cpu_capacity; } - + +#if !defined(CONFIG_CACULE_SCHED) static void record_wakee(struct task_struct *p) { @@ -1037,39 +1037,39 @@ index 262b02d75007..cf3ae2a8568b 100644 return 1; } +#endif /* CONFIG_CACULE_SCHED */ - + /* * The purpose of wake_affine() is to quickly determine on which CPU we can run @@ -6485,6 +6864,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p) return min_t(unsigned long, util, capacity_orig_of(cpu)); } - + +#if !defined(CONFIG_CACULE_SCHED) /* * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) * to @dst_cpu. @@ -6718,6 +7098,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) - + return -1; } +#endif /* CONFIG_CACULE_SCHED */ - + /* * select_task_rq_fair: Select target runqueue for the waking task in domains @@ -6740,6 +7121,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f int want_affine = 0; int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); - + +#if !defined(CONFIG_CACULE_SCHED) if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); - + @@ -6752,6 +7134,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f - + want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); } +#endif /* CONFIG_CACULE_SCHED */ - + rcu_read_lock(); for_each_domain(cpu, tmp) { @@ -6799,6 +7182,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se); @@ -1081,17 +1081,17 @@ index 262b02d75007..cf3ae2a8568b 100644 * As blocked tasks retain absolute vruntime the migration needs to * deal with this by subtracting the old and adding the new @@ -6824,6 +7208,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) - + se->vruntime -= min_vruntime; } +#endif /* CONFIG_CACULE_SCHED */ - + if (p->on_rq == TASK_ON_RQ_MIGRATING) { /* @@ -6869,6 +7254,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } #endif /* CONFIG_SMP */ - + +#if !defined(CONFIG_CACULE_SCHED) static unsigned long wakeup_gran(struct sched_entity *se) { @@ -1101,7 +1101,7 @@ index 262b02d75007..cf3ae2a8568b 100644 cfs_rq_of(se)->skip = se; } +#endif /* CONFIG_CACULE_SCHED */ - + /* * Preempt the current task with a newly woken task if needed: @@ -6955,9 +7342,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ @@ -1114,20 +1114,20 @@ index 262b02d75007..cf3ae2a8568b 100644 int scale = cfs_rq->nr_running >= 
sched_nr_latency; int next_buddy_marked = 0; +#endif /* CONFIG_CACULE_SCHED */ - + if (unlikely(se == pse)) return; @@ -6971,10 +7361,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) return; - + +#if !defined(CONFIG_CACULE_SCHED) if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { set_next_buddy(pse); next_buddy_marked = 1; } +#endif /* CONFIG_CACULE_SCHED */ - + /* * We can come here with TIF_NEED_RESCHED already set from new task @@ -7004,6 +7396,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ @@ -1147,9 +1147,9 @@ index 262b02d75007..cf3ae2a8568b 100644 goto preempt; } +#endif /* CONFIG_CACULE_SCHED */ - + return; - + preempt: resched_curr(rq); + @@ -1158,29 +1158,29 @@ index 262b02d75007..cf3ae2a8568b 100644 * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved @@ -7032,6 +7432,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ - + if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) set_last_buddy(se); +#endif /* CONFIG_CACULE_SCHED */ } - + struct task_struct * @@ -7093,6 +7494,11 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf cfs_rq = group_cfs_rq(se); } while (cfs_rq); - + + /* + * Here we picked a sched_entity starting from + * the same group of curr, but the task could + * be a child of the selected sched_entity. + */ p = task_of(se); - + /* @@ -7103,6 +7509,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf if (prev != p) { struct sched_entity *pse = &prev->se; - + + /* while se and pse are not in the same group */ while (!(cfs_rq = is_same_group(se, pse))) { int se_depth = se->depth; @@ -1188,7 +1188,7 @@ index 262b02d75007..cf3ae2a8568b 100644 @@ -7117,6 +7524,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf } } - + + /* Here we reached the point were both + * sched_entities are in the same group. + */ @@ -1198,14 +1198,14 @@ index 262b02d75007..cf3ae2a8568b 100644 @@ -7127,6 +7537,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf if (prev) put_prev_task(rq, prev); - + + /* Going down the hierarchy */ do { se = pick_next_entity(cfs_rq, NULL); set_next_entity(cfs_rq, se); @@ -7136,6 +7547,14 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf p = task_of(se); - + done: __maybe_unused; +#ifdef CONFIG_CACULE_SCHED + if (prev) @@ -1220,7 +1220,7 @@ index 262b02d75007..cf3ae2a8568b 100644 * Move the next running task to the front of @@ -7153,6 +7572,11 @@ done: __maybe_unused; return p; - + idle: +#ifdef CONFIG_CACULE_RDB + WRITE_ONCE(rq->max_IS_score, ~0); @@ -1229,7 +1229,7 @@ index 262b02d75007..cf3ae2a8568b 100644 + if (!rf) return NULL; - + @@ -7206,7 +7630,10 @@ static void yield_task_fair(struct rq *rq) { struct task_struct *curr = rq->curr; @@ -1238,44 +1238,44 @@ index 262b02d75007..cf3ae2a8568b 100644 +#if !defined(CONFIG_CACULE_SCHED) struct sched_entity *se = &curr->se; +#endif - + /* * Are we the only task in the tree? 
@@ -7214,7 +7641,9 @@ static void yield_task_fair(struct rq *rq) if (unlikely(rq->nr_running == 1)) return; - + +#if !defined(CONFIG_CACULE_SCHED) clear_buddies(cfs_rq, se); +#endif - + if (curr->policy != SCHED_BATCH) { update_rq_clock(rq); @@ -7230,7 +7659,9 @@ static void yield_task_fair(struct rq *rq) rq_clock_skip_update(rq); } - + +#if !defined(CONFIG_CACULE_SCHED) set_skip_buddy(se); +#endif } - + static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) @@ -7241,8 +7672,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) return false; - + +#if !defined(CONFIG_CACULE_SCHED) /* Tell the scheduler that we'd really like pse to run next. */ set_next_buddy(se); +#endif - + yield_task_fair(rq); - + @@ -7451,6 +7884,7 @@ struct lb_env { struct list_head tasks; }; - + +#if !defined(CONFIG_CACULE_RDB) /* * Is this task likely cache-hot: @@ -1283,7 +1283,7 @@ index 262b02d75007..cf3ae2a8568b 100644 @@ -7470,6 +7904,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env) if (env->sd->flags & SD_SHARE_CPUCAPACITY) return 0; - + +#if !defined(CONFIG_CACULE_SCHED) /* * Buddy candidates are cache hot: @@ -1293,53 +1293,53 @@ index 262b02d75007..cf3ae2a8568b 100644 &p->se == cfs_rq_of(&p->se)->last)) return 1; +#endif - + if (sysctl_sched_migration_cost == -1) return 1; @@ -7854,6 +8290,7 @@ static void attach_tasks(struct lb_env *env) - + rq_unlock(env->dst_rq, &rf); } +#endif - + #ifdef CONFIG_NO_HZ_COMMON static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) @@ -7899,6 +8336,7 @@ static inline bool others_have_blocked(struct rq *rq) { return false; } static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} #endif - + +#if !defined(CONFIG_CACULE_RDB) static bool __update_blocked_others(struct rq *rq, bool *done) { const struct sched_class *curr_class; @@ -7924,6 +8362,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done) - + return decayed; } +#endif - + #ifdef CONFIG_FAIR_GROUP_SCHED - + @@ -7944,6 +8383,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) return true; } - + +#if !defined(CONFIG_CACULE_RDB) static bool __update_blocked_fair(struct rq *rq, bool *done) { struct cfs_rq *cfs_rq, *pos; @@ -7983,6 +8423,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done) - + return decayed; } +#endif - + /* * Compute the hierarchical load factor for cfs_rq and all its ascendants. @@ -8049,6 +8490,7 @@ static unsigned long task_h_load(struct task_struct *p) } #endif - + +#if !defined(CONFIG_CACULE_RDB) static void update_blocked_averages(int cpu) { @@ -1349,39 +1349,39 @@ index 262b02d75007..cf3ae2a8568b 100644 rq_unlock_irqrestore(rq, &rf); } +#endif - + /********** Helpers for find_busiest_group ************************/ - + @@ -8400,7 +8843,9 @@ static bool update_nohz_stats(struct rq *rq, bool force) if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) return true; - + +#if !defined(CONFIG_CACULE_RDB) update_blocked_averages(cpu); +#endif - + return rq->has_blocked_load; #else @@ -9211,6 +9656,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * different in groups. */ - + +#if !defined(CONFIG_CACULE_RDB) /** * find_busiest_group - Returns the busiest group within the sched_domain * if there is an imbalance. @@ -9476,6 +9922,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, - + return busiest; } +#endif - + /* * Max backoff if we encounter pinned tasks. 
Pretty arbitrary value, but @@ -9495,6 +9942,7 @@ asym_active_balance(struct lb_env *env) sched_asym_prefer(env->dst_cpu, env->src_cpu); } - + +#if !defined(CONFIG_CACULE_RDB) static inline bool voluntary_active_balance(struct lb_env *env) @@ -1391,29 +1391,29 @@ index 262b02d75007..cf3ae2a8568b 100644 return ld_moved; } +#endif - + static inline unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) @@ -9881,6 +10330,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance) *next_balance = next; } - + +#if !defined(CONFIG_CACULE_RDB) /* * active_load_balance_cpu_stop is run by the CPU stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. It requires at @@ -9972,6 +10422,7 @@ static int active_load_balance_cpu_stop(void *data) } - + static DEFINE_SPINLOCK(balancing); +#endif - + /* * Scale the max load_balance interval with the number of CPUs in the system. @@ -9982,6 +10433,7 @@ void update_max_interval(void) max_load_balance_interval = HZ*num_online_cpus()/10; } - + +#if !defined(CONFIG_CACULE_RDB) /* * It checks each scheduling domain to see if it is due to be balanced, @@ -1423,13 +1423,13 @@ index 262b02d75007..cf3ae2a8568b 100644 } } +#endif - + static inline int on_null_domain(struct rq *rq) { @@ -10116,6 +10569,7 @@ static inline int find_new_ilb(void) return nr_cpu_ids; } - + +#if !defined(CONFIG_CACULE_RDB) /* * Kick a CPU to do the nohz balancing, if it is time for it. We pick any @@ -1439,72 +1439,73 @@ index 262b02d75007..cf3ae2a8568b 100644 kick_ilb(flags); } +#endif /* CONFIG_CACULE_RDB */ - + static void set_cpu_sd_state_busy(int cpu) { @@ -10373,6 +10828,7 @@ void nohz_balance_enter_idle(int cpu) WRITE_ONCE(nohz.has_blocked, 1); } - + +#if !defined(CONFIG_CACULE_RDB) /* * Internal function that runs load balance for all idle cpus. The load balance * can be a simple update of blocked load or a complete load balance with @@ -10442,6 +10898,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, - + if (flags & NOHZ_BALANCE_KICK) rebalance_domains(rq, CPU_IDLE); + } - + if (time_after(next_balance, rq->next_balance)) { @@ -10458,6 +10915,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, if (likely(update_next_balance)) nohz.next_balance = next_balance; - + +#if !defined(CONFIG_CACULE_RDB) /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); @@ -10466,6 +10924,7 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, - + if (flags & NOHZ_BALANCE_KICK) rebalance_domains(this_rq, CPU_IDLE); +#endif - + WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD)); @@ -10513,9 +10972,11 @@ static void nohz_newidle_balance(struct rq *this_rq) if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) return; - + +#if !defined(CONFIG_CACULE_SCHED) /* Will wake up very soon. 
No time for doing anything else*/ if (this_rq->avg_idle < sysctl_sched_migration_cost) return; +#endif - + /* Don't need to update blocked load of idle CPUs*/ if (!READ_ONCE(nohz.has_blocked) || -@@ -10533,8 +10994,10 @@ static void nohz_newidle_balance(struct rq *this_rq) +@@ -10533,18 +10994,146 @@ static void nohz_newidle_balance(struct rq *this_rq) kick_ilb(NOHZ_STATS_KICK); raw_spin_lock(&this_rq->lock); } +#endif - + #else /* !CONFIG_NO_HZ_COMMON */ +#if !defined(CONFIG_CACULE_RDB) static inline void nohz_balancer_kick(struct rq *rq) { } - + static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) -@@ -10543,8 +11006,134 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle + { + return false; } - - static inline void nohz_newidle_balance(struct rq *this_rq) { } +#endif + + static inline void nohz_newidle_balance(struct rq *this_rq) { } + #endif /* CONFIG_NO_HZ_COMMON */ - + +#ifdef CONFIG_CACULE_RDB +static int +can_migrate_task(struct task_struct *p, int dst_cpu, struct rq *src_rq) @@ -1745,17 +1746,17 @@ index 262b02d75007..cf3ae2a8568b 100644 @@ -10583,7 +11275,10 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) */ rq_unpin_lock(this_rq, rf); - + - if (this_rq->avg_idle < sysctl_sched_migration_cost || + if ( +#if !defined(CONFIG_CACULE_SCHED) + this_rq->avg_idle < sysctl_sched_migration_cost || +#endif !READ_ONCE(this_rq->rd->overload)) { - + rcu_read_lock(); @@ -10705,6 +11400,217 @@ void trigger_load_balance(struct rq *rq) - + nohz_balancer_kick(rq); } +#endif @@ -1969,24 +1970,24 @@ index 262b02d75007..cf3ae2a8568b 100644 + } +} +#endif - + static void rq_online_fair(struct rq *rq) { @@ -10741,6 +11647,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) entity_tick(cfs_rq, se, queued); } - + +#ifdef CONFIG_CACULE_RDB + update_IS(rq); +#endif + if (static_branch_unlikely(&sched_numa_balancing)) task_tick_numa(rq, curr); - + @@ -10748,11 +11658,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) update_overutilized_status(task_rq(curr)); } - + +#ifdef CONFIG_CACULE_SCHED /* * called on fork with the child task as argument from the parent's context @@ -2019,7 +2020,7 @@ index 262b02d75007..cf3ae2a8568b 100644 rq_unlock(rq, &rf); } +#endif /* CONFIG_CACULE_SCHED */ - + /* * Priority of the task has changed. 
Check to see if we preempt @@ -10901,6 +11831,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se) @@ -2029,14 +2030,14 @@ index 262b02d75007..cf3ae2a8568b 100644 + +#if !defined(CONFIG_CACULE_SCHED) struct cfs_rq *cfs_rq = cfs_rq_of(se); - + if (!vruntime_normalized(p)) { @@ -10911,6 +11843,7 @@ static void detach_task_cfs_rq(struct task_struct *p) place_entity(cfs_rq, se, 0); se->vruntime -= cfs_rq->min_vruntime; } +#endif - + detach_entity_cfs_rq(se); } @@ -10918,12 +11851,17 @@ static void detach_task_cfs_rq(struct task_struct *p) @@ -2047,15 +2048,15 @@ index 262b02d75007..cf3ae2a8568b 100644 +#if !defined(CONFIG_CACULE_SCHED) struct cfs_rq *cfs_rq = cfs_rq_of(se); +#endif - + attach_entity_cfs_rq(se); - + +#if !defined(CONFIG_CACULE_SCHED) if (!vruntime_normalized(p)) se->vruntime += cfs_rq->min_vruntime; +#endif } - + static void switched_from_fair(struct rq *rq, struct task_struct *p) @@ -10979,13 +11917,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) void init_cfs_rq(struct cfs_rq *cfs_rq) @@ -2078,7 +2079,7 @@ index 262b02d75007..cf3ae2a8568b 100644 + cfs_rq->tail = NULL; +#endif } - + #ifdef CONFIG_FAIR_GROUP_SCHED @@ -11310,7 +12257,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) __init void init_sched_fair_class(void) @@ -2087,7 +2088,7 @@ index 262b02d75007..cf3ae2a8568b 100644 +#if !defined(CONFIG_CACULE_RDB) open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); +#endif - + #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h @@ -2097,7 +2098,7 @@ index 39112ac7ab34..5881814c7e1c 100644 @@ -158,6 +158,11 @@ extern void call_trace_sched_update_nr_running(struct rq *rq, int count); */ #define RUNTIME_INF ((u64)~0ULL) - + +#ifdef CONFIG_CACULE_SCHED +#define YIELD_MARK 0x8000000000000000ULL +#define YIELD_UNMARK 0x7FFFFFFFFFFFFFFFULL @@ -2108,7 +2109,7 @@ index 39112ac7ab34..5881814c7e1c 100644 return policy == SCHED_IDLE; @@ -524,10 +529,13 @@ struct cfs_rq { unsigned int idle_h_nr_running; /* SCHED_IDLE */ - + u64 exec_clock; + +#if !defined(CONFIG_CACULE_SCHED) @@ -2117,9 +2118,9 @@ index 39112ac7ab34..5881814c7e1c 100644 u64 min_vruntime_copy; #endif +#endif /* CONFIG_CACULE_SCHED */ - + struct rb_root_cached tasks_timeline; - + @@ -536,9 +544,14 @@ struct cfs_rq { * It is set to NULL otherwise (i.e when none are currently running). */ @@ -2132,13 +2133,13 @@ index 39112ac7ab34..5881814c7e1c 100644 struct sched_entity *last; struct sched_entity *skip; +#endif // CONFIG_CACULE_SCHED - + #ifdef CONFIG_SCHED_DEBUG unsigned int nr_spread_over; @@ -933,6 +946,11 @@ struct rq { struct rt_rq rt; struct dl_rq dl; - + +#ifdef CONFIG_CACULE_RDB + unsigned int max_IS_score; + struct task_struct *to_migrate_task; @@ -2211,4 +2212,3 @@ index b9306d2bb426..20f07aa87b8e 100644 #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", - |
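For anyone building the package, the carried cacule-5.10.patch leaves a few runtime traces that are easy to check: the `sched_init()` hunk prints a "CacULE CPU scheduler" banner at boot, the Kconfig hunks add `CACULE_SCHED`, `CACULE_RDB`, and `HZ_2000`, and the admin-guide hunk documents a `sched_interactivity_factor` sysctl whose default of 32768 is set in kernel/sched/fair.c. A short verification sketch follows; it assumes the sysctl is registered under `kernel.*` as the documentation hunk implies, and that the kernel config enables `CONFIG_IKCONFIG_PROC` so /proc/config.gz exists:

```sh
# After booting the linux-hardened-cacule kernel:

# The printk added in the kernel/sched/core.c hunk reports which variant is
# active (plain CacULE, or CacULE with the RDB load balancer).
dmesg | grep -i cacule

# Confirm the new Kconfig symbols made it into the running kernel's config
# (requires CONFIG_IKCONFIG_PROC; otherwise check the package's `config` file).
zgrep -E 'CONFIG_CACULE_(SCHED|RDB)|CONFIG_HZ_2000' /proc/config.gz

# The interactivity factor *m* from the sysctl documentation hunk;
# 32768 is the default set in kernel/sched/fair.c.
cat /proc/sys/kernel/sched_interactivity_factor
```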