author      ptr1337    2021-05-01 13:39:34 +0200
committer   ptr1337    2021-05-01 13:39:34 +0200
commit      7e2ffc2d1f36c4278c8f420369bacb4aaf5ddc6c (patch)
tree        7dd37d58227169d6662eec7b19faa87eb1e1f3ee
parent      987507ede1ab0c18e4f98b77b070f7293f48eba7 (diff)
download    aur-7e2ffc2d1f36c4278c8f420369bacb4aaf5ddc6c.tar.gz
updated cacule patch, config changes
-rw-r--r--   .SRCINFO                        15
-rw-r--r--   PKGBUILD                        14
-rw-r--r--   cacule-32bit-converter.patch     8
-rw-r--r--   cacule-5.11.patch             1389
-rw-r--r--   config                           5
-rw-r--r--   config8                          5
6 files changed, 188 insertions, 1248 deletions
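This change set bumps the pinned raspberrypi/linux commit, moves pkgver from 5.11.15 to 5.11.17, replaces cacule-5.11.patch with a revision that drops the experimental RDB (Response Driven Balancer) pieces and cacule_harsh_mode, and refreshes every affected md5sum. The commit does not say how the checksums were regenerated; a rough sketch of the usual workflow (updpkgsums comes from pacman-contrib):

    $ cd linux-raspberrypi4-cacule-stable
    $ updpkgsums      # rewrite the md5sums arrays in the PKGBUILD in place
    $ makepkg -g      # or: print fresh integrity checks and paste them by hand

The arch-specific arrays (md5sums_armv7h, md5sums_aarch64) only cover sources fetched for that architecture, so they may need a run per target arch, or with CARCH set accordingly.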
diff --git a/.SRCINFO b/.SRCINFO
index 1b61dfbfc6fe..eb54cd60438d 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = linux-raspberrypi4-cacule-stable
pkgdesc = Raspberry Pi 4 Kernel with the cacule schedeuler, aarch64 and armv7
- pkgver = 5.11.15
+ pkgver = 5.11.17
pkgrel = 1
url = http://www.kernel.org/
arch = armv7h
@@ -13,31 +13,32 @@ pkgbase = linux-raspberrypi4-cacule-stable
makedepends = bc
makedepends = git
options = !strip
- source = https://github.com/raspberrypi/linux/archive/4c6a570ea7d7586ff17a0166a222de88d0ea7050.tar.gz
+ source = https://github.com/raspberrypi/linux/archive/e99921bb4319ce35ce2e9841a51c4fbb6fb9cf2c.tar.gz
source = cmdline.txt
source = linux.preset
source = 60-linux.hook
source = 90-linux.hook
source = 0001-Make-proc-cpuinfo-consistent-on-arm64-and-arm.patch
source = cacule-5.11.patch
- md5sums = 4dbbc81c46a9f467dcba755575eac72b
+ md5sums = 25cf66285e262398acb1adb5c2e5311c
md5sums = 31c02f4518d46deb5f0c2ad1f8b083cd
md5sums = 86d4a35722b5410e3b29fc92dae15d4b
md5sums = ce6c81ad1ad1f8b333fd6077d47abdaf
md5sums = 441ec084c47cddc53e592fb0cbce4edf
md5sums = f66a7ea3feb708d398ef57e4da4815e9
- md5sums = b85d9c75a137a4278537386ca274da9d
+ md5sums = 8585be816a07e27a86cd739c1d89cc7b
source_armv7h = config
source_armv7h = config.txt
source_armv7h = cacule-32bit-converter.patch
- md5sums_armv7h = 0ce3d9c05a72b33a1b98ecbc64870eb1
+ md5sums_armv7h = 5697f0e23f8329047cfce076f0e904b4
md5sums_armv7h = 9669d916a5929a2eedbd64477f83d99e
- md5sums_armv7h = 02808e3fb2f6b142e0cd9f1ae50a8d46
+ md5sums_armv7h = 60020b476ade77049c832f55fc0bea1f
source_aarch64 = config8
source_aarch64 = config8.txt
- md5sums_aarch64 = 4ac3dea5e9b48c55a48e881dc5151423
+ md5sums_aarch64 = c1fa6fc261864237ecdcd4daae0920be
md5sums_aarch64 = 9669d916a5929a2eedbd64477f83d99e
pkgname = linux-raspberrypi4-cacule-stable
pkgname = linux-raspberrypi4-cacule-stable-headers
+
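.SRCINFO mirrors the PKGBUILD metadata, so the hunk above just tracks the same pkgver, source commit and checksum changes. In the usual AUR workflow it is regenerated rather than edited by hand:

    $ makepkg --printsrcinfo > .SRCINFO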
diff --git a/PKGBUILD b/PKGBUILD
index e6bf4c50ae39..c69139fefaf9 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -7,11 +7,11 @@
buildarch=12
pkgbase=linux-raspberrypi4-cacule-stable
-_commit=4c6a570ea7d7586ff17a0166a222de88d0ea7050
+_commit=e99921bb4319ce35ce2e9841a51c4fbb6fb9cf2c
_srcname=linux-${_commit}
_kernelname=${pkgbase#linux}
_desc="Raspberry Pi 4 with the cacule scheduler"
-pkgver=5.11.15
+pkgver=5.11.17
pkgrel=1
pkgdesc="Raspberry Pi 4 Kernel with the cacule schedeuler, aarch64 and armv7"
arch=('armv7h' 'aarch64')
@@ -29,17 +29,17 @@ source=("https://github.com/raspberrypi/linux/archive/${_commit}.tar.gz"
)
source_armv7h=('config' 'config.txt' 'cacule-32bit-converter.patch')
source_aarch64=('config8' 'config8.txt')
-md5sums=('4dbbc81c46a9f467dcba755575eac72b'
+md5sums=('25cf66285e262398acb1adb5c2e5311c'
'31c02f4518d46deb5f0c2ad1f8b083cd'
'86d4a35722b5410e3b29fc92dae15d4b'
'ce6c81ad1ad1f8b333fd6077d47abdaf'
'441ec084c47cddc53e592fb0cbce4edf'
'f66a7ea3feb708d398ef57e4da4815e9'
- 'b85d9c75a137a4278537386ca274da9d')
-md5sums_armv7h=('0ce3d9c05a72b33a1b98ecbc64870eb1'
+ '8585be816a07e27a86cd739c1d89cc7b')
+md5sums_armv7h=('5697f0e23f8329047cfce076f0e904b4'
'9669d916a5929a2eedbd64477f83d99e'
- '02808e3fb2f6b142e0cd9f1ae50a8d46')
-md5sums_aarch64=('4ac3dea5e9b48c55a48e881dc5151423'
+ '60020b476ade77049c832f55fc0bea1f')
+md5sums_aarch64=('c1fa6fc261864237ecdcd4daae0920be'
'9669d916a5929a2eedbd64477f83d99e')
# setup vars
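With the new _commit and pkgver in place the package builds the usual AUR way; a minimal usage sketch, assuming the standard AUR clone URL for this pkgbase:

    $ git clone https://aur.archlinux.org/linux-raspberrypi4-cacule-stable.git
    $ cd linux-raspberrypi4-cacule-stable
    $ makepkg -si     # fetches the e99921bb... kernel tarball, applies the CacULE patches, builds and installs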
diff --git a/cacule-32bit-converter.patch b/cacule-32bit-converter.patch
index 7d14829ecb37..b5d87f08d6a1 100644
--- a/cacule-32bit-converter.patch
+++ b/cacule-32bit-converter.patch
@@ -15,10 +15,10 @@ index c99fc326ec24..71c27133c53c 100644
#endif
#ifdef CONFIG_CACULE_SCHED
--int cacule_max_lifetime = 30000; // in ms
-+int cacule_max_lifetime = 4000; // in ms
- int cacule_harsh_mode = 0;
- int interactivity_factor = 32768;
+-int __read_mostly cacule_max_lifetime = 22000; // in ms
++int __read_mostly cacule_max_lifetime = 4000; // in ms
+ int __read_mostly interactivity_factor = 32768;
+ unsigned int __read_mostly interactivity_threshold = 20480;
#endif
@@ -602,6 +604,7 @@ calc_interactivity(u64 now, struct cacule_node *se)
{
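cacule-32bit-converter.patch ships only for armv7h (see source_armv7h above) and is updated here so it still applies on top of the new cacule-5.11.patch, keeping the shorter 4000 ms cacule_max_lifetime on 32-bit instead of the new 22000 ms default. The PKGBUILD's prepare() is outside this diff, but the split source arrays imply something along these lines (hypothetical sketch, not the maintainer's actual code):

    # hypothetical prepare(); the real function is not shown in this commit
    prepare() {
      cd "${_srcname}"
      patch -Np1 < ../cacule-5.11.patch                # common CacULE patch
      if [[ $CARCH == armv7h ]]; then
        patch -Np1 < ../cacule-32bit-converter.patch   # 32-bit overrides on top
      fi
    }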
diff --git a/cacule-5.11.patch b/cacule-5.11.patch
index e437547eb20f..99941578e3e9 100644
--- a/cacule-5.11.patch
+++ b/cacule-5.11.patch
@@ -131,7 +131,7 @@ index 6e3a5eeec509..e5da9a62fe4e 100644
u64 nr_migrations;
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 3c31ba88aca5..774de59e8111 100644
+index 3c31ba88aca5..cb819c3d86f3 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
@@ -140,30 +140,18 @@ index 3c31ba88aca5..774de59e8111 100644
+#ifdef CONFIG_CACULE_SCHED
+extern int interactivity_factor;
++extern unsigned int interactivity_threshold;
+extern int cacule_max_lifetime;
-+extern int cacule_harsh_mode;
+#endif
+
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
-@@ -46,6 +52,11 @@ extern unsigned int sysctl_numa_balancing_scan_size;
-
- #ifdef CONFIG_SCHED_DEBUG
- extern __read_mostly unsigned int sysctl_sched_migration_cost;
-+#elif CONFIG_CACULE_RDB
-+extern unsigned int sysctl_sched_migration_cost;
-+#endif
-+
-+#ifdef CONFIG_SCHED_DEBUG
- extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
- int sched_proc_update_handler(struct ctl_table *table, int write,
diff --git a/init/Kconfig b/init/Kconfig
-index b7d3c6a12196..cae5b7447f48 100644
+index a3d27421de8f..d0cfdf6e9bed 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -825,6 +825,27 @@ config UCLAMP_BUCKETS_COUNT
+@@ -824,6 +824,17 @@ config UCLAMP_BUCKETS_COUNT
endmenu
@@ -177,38 +165,20 @@ index b7d3c6a12196..cae5b7447f48 100644
+
+ If unsure, say Y here.
+
-+config CACULE_RDB
-+ bool "RDB (Response Driven Balancer)"
-+ default n
-+ depends on CACULE_SCHED
-+ help
-+ This is an experimental load balancer for CacULE. It is a lightweight
-+ load balancer which is a replacement of CFS load balancer. It migrates
-+ tasks based on their interactivity scores.
-+
-+ If unsure, say N.
+
#
# For architectures that want to enable the support for NUMA-affine scheduler
# balancing logic:
-@@ -942,6 +963,7 @@ config CGROUP_WRITEBACK
-
- menuconfig CGROUP_SCHED
- bool "CPU controller"
-+ depends on !CACULE_RDB
- default n
- help
- This feature lets CPU scheduler recognize task groups and control CPU
-@@ -1205,6 +1227,7 @@ config CHECKPOINT_RESTORE
-
- config SCHED_AUTOGROUP
- bool "Automatic process group scheduling"
-+ depends on !CACULE_RDB
+@@ -1207,6 +1218,7 @@ config SCHED_AUTOGROUP
select CGROUPS
select CGROUP_SCHED
select FAIR_GROUP_SCHED
++ default y
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index f0056507a373..7b643bc0a281 100644
+index f0056507a373..4d8a3b232ae9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3554,7 +3554,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -225,32 +195,25 @@ index f0056507a373..7b643bc0a281 100644
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -3840,6 +3846,13 @@ void wake_up_new_task(struct task_struct *p)
+@@ -3840,6 +3846,10 @@ void wake_up_new_task(struct task_struct *p)
update_rq_clock(rq);
post_init_entity_util_avg(p);
+#ifdef CONFIG_CACULE_SCHED
-+ if (cacule_harsh_mode)
-+ p->se.cacule_node.cacule_start_time = p->start_time;
-+ else
-+ p->se.cacule_node.cacule_start_time = sched_clock();
++ p->se.cacule_node.cacule_start_time = sched_clock();
+#endif
+
activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
-@@ -7727,6 +7740,14 @@ void __init sched_init(void)
+@@ -7727,6 +7737,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif
-+#if defined(CONFIG_CACULE_SCHED) && !defined(CONFIG_CACULE_RDB)
++#ifdef CONFIG_CACULE_SCHED
+ printk(KERN_INFO "CacULE CPU scheduler v5.11 by Hamad Al Marri.");
+#endif
+
-+#ifdef CONFIG_CACULE_RDB
-+ printk(KERN_INFO "CacULE CPU scheduler (RDB) v5.11 by Hamad Al Marri.");
-+#endif
-+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -352,7 +315,7 @@ index 2357921580f9..fb4ef69724c3 100644
nr_switches = p->nvcsw + p->nivcsw;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index bbc78794224a..c99fc326ec24 100644
+index bbc78794224a..7e9ce056efd7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,10 @@
@@ -366,36 +329,19 @@ index bbc78794224a..c99fc326ec24 100644
*/
#include "sched.h"
-@@ -82,7 +86,15 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
- unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
- static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
--const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-+#ifdef CONFIG_CACULE_RDB
-+#ifdef CONFIG_SCHED_DEBUG
-+const_debug unsigned int sysctl_sched_migration_cost = 750000UL;
-+#else
-+unsigned int sysctl_sched_migration_cost = 750000UL;
-+#endif
-+#else
-+const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-+#endif
-
- int sched_thermal_decay_shift;
- static int __init setup_sched_thermal_decay_shift(char *str)
-@@ -113,6 +125,11 @@ int __weak arch_asym_cpu_priority(int cpu)
+@@ -113,6 +117,11 @@ int __weak arch_asym_cpu_priority(int cpu)
*/
#define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
+#endif
+#ifdef CONFIG_CACULE_SCHED
-+int cacule_max_lifetime = 30000; // in ms
-+int cacule_harsh_mode = 0;
-+int interactivity_factor = 32768;
++int __read_mostly cacule_max_lifetime = 22000; // in ms
++int __read_mostly interactivity_factor = 32768;
++unsigned int __read_mostly interactivity_threshold = 20480;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
-@@ -253,6 +270,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
+@@ -253,6 +262,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
const struct sched_class fair_sched_class;
@@ -410,7 +356,7 @@ index bbc78794224a..c99fc326ec24 100644
/**************************************************************
* CFS operations on generic schedulable entities:
*/
-@@ -512,7 +537,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
+@@ -512,7 +529,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
@@ -419,14 +365,13 @@ index bbc78794224a..c99fc326ec24 100644
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
-@@ -568,7 +593,166 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+@@ -568,7 +585,169 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
+#endif /* CONFIG_CACULE_SCHED */
+
+#ifdef CONFIG_CACULE_SCHED
-+
+static unsigned int
+calc_interactivity(u64 now, struct cacule_node *se)
+{
@@ -453,6 +398,14 @@ index bbc78794224a..c99fc326ec24 100644
+ return score_se;
+}
+
++static inline int is_interactive(struct cacule_node *cn)
++{
++ if (cn->vruntime == 0)
++ return 0;
++
++ return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
++}
++
+static inline int
+entity_before_cached(u64 now, unsigned int score_curr, struct cacule_node *se)
+{
@@ -554,10 +507,6 @@ index bbc78794224a..c99fc326ec24 100644
+ cfs_rq->head = NULL;
+ cfs_rq->tail = NULL;
+
-+#ifdef CONFIG_CACULE_RDB
-+ WRITE_ONCE(cfs_rq->IS_head, 0);
-+#endif
-+
+ } else if (se == cfs_rq->head) {
+ // if it is the head
+ cfs_rq->head = cfs_rq->head->next;
@@ -586,7 +535,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Enqueue an entity into the rb-tree:
*/
-@@ -626,16 +810,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+@@ -626,16 +805,29 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
return rb_entry(next, struct sched_entity, run_node);
}
@@ -616,7 +565,7 @@ index bbc78794224a..c99fc326ec24 100644
}
/**************************************************************
-@@ -720,6 +917,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -720,6 +912,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return slice;
}
@@ -624,7 +573,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* We calculate the vruntime slice of a to-be-inserted task.
*
-@@ -729,6 +927,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -729,6 +922,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
@@ -632,12 +581,12 @@ index bbc78794224a..c99fc326ec24 100644
#include "pelt.h"
#ifdef CONFIG_SMP
-@@ -836,13 +1035,49 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
+@@ -836,13 +1030,49 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_CACULE_SCHED
-+static void reset_lifetime(u64 now, struct sched_entity *se)
++static void normalize_lifetime(u64 now, struct sched_entity *se)
+{
+ struct cacule_node *cn;
+ u64 max_life_ns, life_time;
@@ -683,14 +632,13 @@ index bbc78794224a..c99fc326ec24 100644
u64 delta_exec;
if (unlikely(!curr))
-@@ -860,13 +1095,23 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -860,13 +1090,22 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
-+
+#ifdef CONFIG_CACULE_SCHED
+ curr->cacule_node.vruntime += calc_delta_fair(delta_exec, curr);
-+ reset_lifetime(now, curr);
++ normalize_lifetime(now, curr);
+#else
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
@@ -699,91 +647,23 @@ index bbc78794224a..c99fc326ec24 100644
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
-- trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+#ifdef CONFIG_CACULE_SCHED
-+ trace_sched_stat_runtime(curtask, delta_exec, curr->cacule_node.vruntime);
++ trace_sched_stat_runtime(curtask, delta_exec, curr->cacule_node.vruntime);
+#else
-+ trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+ trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+#endif
cgroup_account_cputime(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
-@@ -882,6 +1127,7 @@ static void update_curr_fair(struct rq *rq)
- static inline void
- update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- u64 wait_start, prev_wait_start;
-
- if (!schedstat_enabled())
-@@ -895,11 +1141,13 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- wait_start -= prev_wait_start;
-
- __schedstat_set(se->statistics.wait_start, wait_start);
-+#endif
- }
-
- static inline void
- update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct task_struct *p;
- u64 delta;
-
-@@ -936,11 +1184,13 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
- __schedstat_inc(se->statistics.wait_count);
- __schedstat_add(se->statistics.wait_sum, delta);
- __schedstat_set(se->statistics.wait_start, 0);
-+#endif
- }
-
- static inline void
- update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct task_struct *tsk = NULL;
- u64 sleep_start, block_start;
-
-@@ -1004,6 +1254,7 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
- account_scheduler_latency(tsk, delta >> 10, 0);
- }
- }
-+#endif
- }
-
- /*
-@@ -1012,6 +1263,7 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
- static inline void
- update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- if (!schedstat_enabled())
- return;
-
-@@ -1024,12 +1276,13 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
- if (flags & ENQUEUE_WAKEUP)
- update_stats_enqueue_sleeper(cfs_rq, se);
-+#endif
- }
-
+@@ -1029,7 +1268,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
-
-+#if !defined(CONFIG_CACULE_RDB)
if (!schedstat_enabled())
return;
-@@ -1050,6 +1303,7 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- __schedstat_set(se->statistics.block_start,
- rq_clock(rq_of(cfs_rq)));
- }
-+#endif
- }
-
- /*
-@@ -1061,7 +1315,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -1061,7 +1299,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
/*
* We are starting a new run period:
*/
@@ -792,129 +672,7 @@ index bbc78794224a..c99fc326ec24 100644
}
/**************************************************
-@@ -3076,15 +3330,19 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- static inline void
- enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- cfs_rq->avg.load_avg += se->avg.load_avg;
- cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
-+#endif
- }
-
- static inline void
- dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
-+#endif
- }
- #else
- static inline void
-@@ -3339,6 +3597,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
- void set_task_rq_fair(struct sched_entity *se,
- struct cfs_rq *prev, struct cfs_rq *next)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- u64 p_last_update_time;
- u64 n_last_update_time;
-
-@@ -3378,6 +3637,7 @@ void set_task_rq_fair(struct sched_entity *se,
- #endif
- __update_load_avg_blocked_se(p_last_update_time, se);
- se->avg.last_update_time = n_last_update_time;
-+#endif
- }
-
-
-@@ -3657,6 +3917,9 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
-+#ifdef CONFIG_CACULE_RDB
-+ return 0;
-+#else
- unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
- struct sched_avg *sa = &cfs_rq->avg;
- int decayed = 0;
-@@ -3702,8 +3965,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- #endif
-
- return decayed;
-+#endif
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- /**
- * attach_entity_load_avg - attach this entity to its cfs_rq load avg
- * @cfs_rq: cfs_rq to attach to
-@@ -3781,6 +4046,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
-
- trace_pelt_cfs_tp(cfs_rq);
- }
-+#endif
-
- /*
- * Optional action to be done while updating the load average
-@@ -3792,6 +4058,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
- /* Update task and its cfs_rq load average */
- static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- u64 now = cfs_rq_clock_pelt(cfs_rq);
- int decayed;
-
-@@ -3823,8 +4090,10 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
- if (flags & UPDATE_TG)
- update_tg_load_avg(cfs_rq);
- }
-+#endif
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- #ifndef CONFIG_64BIT
- static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
- {
-@@ -3845,6 +4114,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
- return cfs_rq->avg.last_update_time;
- }
- #endif
-+#endif
-
- /*
- * Synchronize entity load avg of dequeued entity without locking
-@@ -3852,11 +4122,13 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
- */
- static void sync_entity_load_avg(struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
-
- last_update_time = cfs_rq_last_update_time(cfs_rq);
- __update_load_avg_blocked_se(last_update_time, se);
-+#endif
- }
-
- /*
-@@ -3865,6 +4137,7 @@ static void sync_entity_load_avg(struct sched_entity *se)
- */
- static void remove_entity_load_avg(struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- unsigned long flags;
-
-@@ -3882,6 +4155,7 @@ static void remove_entity_load_avg(struct sched_entity *se)
- cfs_rq->removed.load_avg += se->avg.load_avg;
- cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
- raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
-+#endif
- }
-
- static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
-@@ -4115,7 +4389,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+@@ -4115,7 +4353,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -923,7 +681,7 @@ index bbc78794224a..c99fc326ec24 100644
s64 d = se->vruntime - cfs_rq->min_vruntime;
if (d < 0)
-@@ -4126,6 +4400,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4126,6 +4364,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
@@ -931,31 +689,15 @@ index bbc78794224a..c99fc326ec24 100644
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
-@@ -4157,11 +4432,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+@@ -4157,6 +4396,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
}
+#endif /* CONFIG_CACULE_SCHED */
-+#if !defined(CONFIG_CACULE_RDB)
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
-+#endif
- static inline void check_schedstat_required(void)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- #ifdef CONFIG_SCHEDSTATS
- if (schedstat_enabled())
- return;
-@@ -4178,6 +4457,7 @@ static inline void check_schedstat_required(void)
- "kernel.sched_schedstats=1\n");
- }
- #endif
-+#endif
- }
-
- static inline bool cfs_bandwidth_used(void);
-@@ -4215,18 +4495,23 @@ static inline bool cfs_bandwidth_used(void);
+@@ -4215,18 +4455,23 @@ static inline bool cfs_bandwidth_used(void);
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -979,7 +721,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Otherwise, renormalise after, such that we're placed at the current
* moment in time, instead of some random moment in the past. Being
-@@ -4235,6 +4520,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4235,6 +4480,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
@@ -987,7 +729,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* When enqueuing a sched_entity, we must:
-@@ -4249,8 +4535,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4249,8 +4495,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
account_entity_enqueue(cfs_rq, se);
@@ -998,34 +740,23 @@ index bbc78794224a..c99fc326ec24 100644
check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
-@@ -4259,6 +4547,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- __enqueue_entity(cfs_rq, se);
- se->on_rq = 1;
-
-+#if !defined(CONFIG_CACULE_RDB)
- /*
- * When bandwidth control is enabled, cfs might have been removed
- * because of a parent been throttled but cfs->nr_running > 1. Try to
-@@ -4269,8 +4558,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-
- if (cfs_rq->nr_running == 1)
+@@ -4271,6 +4519,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
check_enqueue_throttle(cfs_rq);
-+#endif
}
+#if !defined(CONFIG_CACULE_SCHED)
static void __clear_buddies_last(struct sched_entity *se)
{
for_each_sched_entity(se) {
-@@ -4315,6 +4606,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4315,6 +4564,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (cfs_rq->skip == se)
__clear_buddies_skip(se);
}
-+#endif // !CONFIG_CACULE_SCHED
++#endif /* !CONFIG_CACULE_SCHED */
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -4339,13 +4631,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4339,13 +4589,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_stats_dequeue(cfs_rq, se, flags);
@@ -1042,7 +773,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Normalize after update_curr(); which will also have moved
* min_vruntime if @se is the one holding it back. But before doing
-@@ -4354,12 +4649,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4354,12 +4607,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
@@ -1057,7 +788,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
-@@ -4368,8 +4665,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4368,8 +4623,23 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
@@ -1081,7 +812,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Preempt the current task with a newly woken task if needed:
*/
-@@ -4409,6 +4721,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4409,6 +4679,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
if (delta > ideal_runtime)
resched_curr(rq_of(cfs_rq));
}
@@ -1089,7 +820,7 @@ index bbc78794224a..c99fc326ec24 100644
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
-@@ -4443,6 +4756,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4443,6 +4714,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
@@ -1111,7 +842,7 @@ index bbc78794224a..c99fc326ec24 100644
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-@@ -4503,6 +4831,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4503,6 +4789,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return se;
}
@@ -1119,97 +850,7 @@ index bbc78794224a..c99fc326ec24 100644
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -4762,6 +5091,9 @@ static int tg_throttle_down(struct task_group *tg, void *data)
-
- static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
- {
-+#ifdef CONFIG_CACULE_RDB
-+ return false;
-+#else
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
-@@ -4839,10 +5171,12 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
- cfs_rq->throttled = 1;
- cfs_rq->throttled_clock = rq_clock(rq);
- return true;
-+#endif
- }
-
- void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
-@@ -4924,6 +5258,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
- /* Determine whether we need to wake up potentially idle CPU: */
- if (rq->curr == rq->idle && rq->cfs.nr_running)
- resched_curr(rq);
-+#endif
- }
-
- static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
-@@ -5376,7 +5711,11 @@ static inline bool cfs_bandwidth_used(void)
-
- static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
- static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
-+
-+#if !defined(CONFIG_CACULE_RDB)
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-+#endif
-+
- static inline void sync_throttle(struct task_group *tg, int cpu) {}
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
-
-@@ -5507,7 +5846,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
-+#if !defined(CONFIG_CACULE_RDB)
- int idle_h_nr_running = task_has_idle_policy(p);
-+#endif
- int task_new = !(flags & ENQUEUE_WAKEUP);
-
- /*
-@@ -5526,6 +5867,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- if (p->in_iowait)
- cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
-
-+#ifdef CONFIG_CACULE_RDB
-+ if (!se->on_rq) {
-+ cfs_rq = cfs_rq_of(se);
-+ enqueue_entity(cfs_rq, se, flags);
-+ cfs_rq->h_nr_running++;
-+ }
-+#else
- for_each_sched_entity(se) {
- if (se->on_rq)
- break;
-@@ -5563,6 +5911,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- if (throttled_hierarchy(cfs_rq))
- list_add_leaf_cfs_rq(cfs_rq);
- }
-+#endif
-
- /* At this point se is NULL and we are at root level*/
- add_nr_running(rq, 1);
-@@ -5584,6 +5933,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- if (!task_new)
- update_overutilized_status(rq);
-
-+#if !defined(CONFIG_CACULE_RDB)
- enqueue_throttle:
- if (cfs_bandwidth_used()) {
- /*
-@@ -5599,13 +5949,16 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- break;
- }
- }
-+#endif
-
- assert_list_leaf_cfs_rq(rq);
-
+@@ -5605,7 +5892,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
@@ -1219,20 +860,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* The dequeue_task method is called before nr_running is
-@@ -5617,6 +5970,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int task_sleep = flags & DEQUEUE_SLEEP;
-+
-+#ifdef CONFIG_CACULE_RDB
-+ cfs_rq = cfs_rq_of(se);
-+ dequeue_entity(cfs_rq, se, flags);
-+ cfs_rq->h_nr_running--;
-+#else
- int idle_h_nr_running = task_has_idle_policy(p);
- bool was_sched_idle = sched_idle_rq(rq);
-
-@@ -5637,12 +5996,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5637,12 +5926,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq->load.weight) {
/* Avoid re-evaluating load for this entity: */
se = parent_entity(se);
@@ -1247,26 +875,7 @@ index bbc78794224a..c99fc326ec24 100644
break;
}
flags |= DEQUEUE_SLEEP;
-@@ -5663,15 +6024,18 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- goto dequeue_throttle;
-
- }
-+#endif
-
- /* At this point se is NULL and we are at root level*/
- sub_nr_running(rq, 1);
-
-+#if !defined(CONFIG_CACULE_RDB)
- /* balance early to pull high priority tasks */
- if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
- rq->next_balance = jiffies;
-
- dequeue_throttle:
-+#endif
- util_est_update(&rq->cfs, p, task_sleep);
- hrtick_update(rq);
- }
-@@ -5758,6 +6122,7 @@ static unsigned long capacity_of(int cpu)
+@@ -5758,6 +6049,7 @@ static unsigned long capacity_of(int cpu)
return cpu_rq(cpu)->cpu_capacity;
}
@@ -1274,25 +883,15 @@ index bbc78794224a..c99fc326ec24 100644
static void record_wakee(struct task_struct *p)
{
/*
-@@ -5804,7 +6169,9 @@ static int wake_wide(struct task_struct *p)
+@@ -5804,6 +6096,7 @@ static int wake_wide(struct task_struct *p)
return 0;
return 1;
}
+#endif
-+#if !defined(CONFIG_CACULE_RDB)
/*
* The purpose of wake_affine() is to quickly determine on which CPU we can run
- * soonest. For the purpose of speed we only consider the waking and previous
-@@ -5906,6 +6273,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
- schedstat_inc(p->se.statistics.nr_wakeups_affine);
- return target;
- }
-+#endif
-
- static struct sched_group *
- find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
-@@ -6484,6 +6852,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+@@ -6484,6 +6777,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
return min_t(unsigned long, util, capacity_orig_of(cpu));
}
@@ -1300,54 +899,99 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
* to @dst_cpu.
-@@ -6717,6 +7086,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+@@ -6717,6 +7011,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
return -1;
}
+#endif /* CONFIG_CACULE_SCHED */
++
++#ifdef CONFIG_CACULE_SCHED
++static int
++find_least_IS_cpu(struct task_struct *p)
++{
++ struct cfs_rq *cfs_rq;
++ unsigned int max_IS = 0;
++ unsigned int IS, IS_c, IS_h;
++ struct sched_entity *curr_se;
++ struct cacule_node *cn, *head;
++ int cpu_i;
++ int new_cpu = -1;
++
++ for_each_online_cpu(cpu_i) {
++ if (!cpumask_test_cpu(cpu_i, p->cpus_ptr))
++ continue;
++
++ cn = NULL;
++ cfs_rq = &cpu_rq(cpu_i)->cfs;
++
++ curr_se = cfs_rq->curr;
++ head = cfs_rq->head;
++
++ if (!curr_se && head)
++ cn = head;
++ else if (curr_se && !head)
++ cn = &curr_se->cacule_node;
++ else if (curr_se && head) {
++ IS_c = calc_interactivity(sched_clock(), &curr_se->cacule_node);
++ IS_h = calc_interactivity(sched_clock(), head);
++
++ IS = IS_c > IS_h? IS_c : IS_h;
++ goto compare;
++ }
++
++ if (!cn)
++ return cpu_i;
++
++ IS = calc_interactivity(sched_clock(), cn);
++
++compare:
++ if (IS > max_IS) {
++ max_IS = IS;
++ new_cpu = cpu_i;
++ }
++ }
++
++ return new_cpu;
++}
++#endif
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
-@@ -6733,6 +7103,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- static int
- select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
- {
-+#ifdef CONFIG_CACULE_RDB
-+ return select_idle_sibling(p, prev_cpu, prev_cpu);
-+#else
- int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
- struct sched_domain *tmp, *sd = NULL;
- int cpu = smp_processor_id();
-@@ -6741,6 +7114,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -6741,6 +7086,25 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
-+#if !defined(CONFIG_CACULE_SCHED)
++#ifdef CONFIG_CACULE_SCHED
++ struct sched_entity *se = &p->se;
++ unsigned int autogroup_enabled = 0;
++
++#ifdef CONFIG_SCHED_AUTOGROUP
++ autogroup_enabled = sysctl_sched_autogroup_enabled;
++#endif
++
++ if (autogroup_enabled || !is_interactive(&se->cacule_node))
++ goto cfs_way;
++
++ new_cpu = find_least_IS_cpu(p);
++
++ if (likely(new_cpu != -1))
++ return new_cpu;
++
++ new_cpu = prev_cpu;
++cfs_way:
++#else
if (wake_flags & WF_TTWU) {
record_wakee(p);
-@@ -6753,6 +7127,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -6753,6 +7117,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
}
-+#endif
++#endif /* CONFIG_CACULE_SCHED */
rcu_read_lock();
for_each_domain(cpu, tmp) {
-@@ -6788,9 +7163,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
- rcu_read_unlock();
-
- return new_cpu;
-+#endif
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- static void detach_entity_cfs_rq(struct sched_entity *se);
-+#endif
-
- /*
- * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
-@@ -6799,6 +7177,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
+@@ -6799,6 +7164,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
*/
static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
@@ -1355,25 +999,15 @@ index bbc78794224a..c99fc326ec24 100644
/*
* As blocked tasks retain absolute vruntime the migration needs to
* deal with this by subtracting the old and adding the new
-@@ -6824,7 +7203,9 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+@@ -6824,6 +7190,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
se->vruntime -= min_vruntime;
}
+#endif /* CONFIG_CACULE_SCHED */
-+#if !defined(CONFIG_CACULE_RDB)
if (p->on_rq == TASK_ON_RQ_MIGRATING) {
/*
- * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
-@@ -6844,6 +7225,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
- */
- remove_entity_load_avg(&p->se);
- }
-+#endif
-
- /* Tell new CPU we are migrated */
- p->se.avg.last_update_time = 0;
-@@ -6869,6 +7251,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -6869,6 +7236,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
@@ -1381,7 +1015,7 @@ index bbc78794224a..c99fc326ec24 100644
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
-@@ -6947,6 +7330,7 @@ static void set_skip_buddy(struct sched_entity *se)
+@@ -6947,6 +7315,7 @@ static void set_skip_buddy(struct sched_entity *se)
for_each_sched_entity(se)
cfs_rq_of(se)->skip = se;
}
@@ -1389,7 +1023,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Preempt the current task with a newly woken task if needed:
-@@ -6955,9 +7339,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6955,9 +7324,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
@@ -1402,7 +1036,7 @@ index bbc78794224a..c99fc326ec24 100644
if (unlikely(se == pse))
return;
-@@ -6971,10 +7358,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -6971,10 +7343,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
@@ -1415,7 +1049,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* We can come here with TIF_NEED_RESCHED already set from new task
-@@ -7004,6 +7393,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7004,6 +7378,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
find_matching_se(&se, &pse);
update_curr(cfs_rq_of(se));
BUG_ON(!pse);
@@ -1427,7 +1061,7 @@ index bbc78794224a..c99fc326ec24 100644
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
-@@ -7013,11 +7407,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7013,11 +7392,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
set_next_buddy(pse);
goto preempt;
}
@@ -1442,7 +1076,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7032,6 +7429,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7032,6 +7414,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
@@ -1450,42 +1084,7 @@ index bbc78794224a..c99fc326ec24 100644
}
struct task_struct *
-@@ -7127,11 +7525,23 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
- if (prev)
- put_prev_task(rq, prev);
-
-+#ifdef CONFIG_CACULE_RDB
-+ se = pick_next_entity(cfs_rq, NULL);
-+ set_next_entity(cfs_rq, se);
-+
-+ if (cfs_rq->head) {
-+ unsigned int IS_head = calc_interactivity(sched_clock(), cfs_rq->head);
-+ WRITE_ONCE(cfs_rq->IS_head, IS_head);
-+ } else {
-+ WRITE_ONCE(cfs_rq->IS_head, 0);
-+ }
-+#else
- do {
- se = pick_next_entity(cfs_rq, NULL);
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
-+#endif
-
- p = task_of(se);
-
-@@ -7153,6 +7563,10 @@ done: __maybe_unused;
- return p;
-
- idle:
-+#ifdef CONFIG_CACULE_RDB
-+ WRITE_ONCE(cfs_rq->IS_head, 0);
-+#endif
-+
- if (!rf)
- return NULL;
-
-@@ -7206,7 +7620,10 @@ static void yield_task_fair(struct rq *rq)
+@@ -7206,7 +7589,10 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -1496,7 +1095,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Are we the only task in the tree?
-@@ -7214,7 +7631,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7214,7 +7600,9 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(rq->nr_running == 1))
return;
@@ -1506,7 +1105,7 @@ index bbc78794224a..c99fc326ec24 100644
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
-@@ -7230,7 +7649,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -7230,7 +7618,9 @@ static void yield_task_fair(struct rq *rq)
rq_clock_skip_update(rq);
}
@@ -1516,7 +1115,7 @@ index bbc78794224a..c99fc326ec24 100644
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -7241,8 +7662,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7241,8 +7631,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
return false;
@@ -1527,42 +1126,7 @@ index bbc78794224a..c99fc326ec24 100644
yield_task_fair(rq);
-@@ -7451,6 +7874,34 @@ struct lb_env {
- struct list_head tasks;
- };
-
-+#ifdef CONFIG_CACULE_RDB
-+static int task_hot(struct rq *src_rq)
-+{
-+ s64 delta;
-+ struct task_struct *p;
-+ struct cacule_node *cn = src_rq->cfs.head;
-+
-+ if (!cn)
-+ return 0;
-+
-+ p = task_of(se_of(cn));
-+
-+ if (p->sched_class != &fair_sched_class)
-+ return 0;
-+
-+ if (unlikely(task_has_idle_policy(p)))
-+ return 0;
-+
-+ if (sysctl_sched_migration_cost == -1)
-+ return 1;
-+ if (sysctl_sched_migration_cost == 0)
-+ return 0;
-+
-+ delta = sched_clock() - p->se.exec_start;
-+
-+ return delta < (s64)sysctl_sched_migration_cost;
-+}
-+#else
- /*
- * Is this task likely cache-hot:
- */
-@@ -7470,6 +7921,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7470,6 +7862,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (env->sd->flags & SD_SHARE_CPUCAPACITY)
return 0;
@@ -1570,7 +1134,7 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Buddy candidates are cache hot:
*/
-@@ -7477,6 +7929,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -7477,6 +7870,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
@@ -1578,545 +1142,7 @@ index bbc78794224a..c99fc326ec24 100644
if (sysctl_sched_migration_cost == -1)
return 1;
-@@ -7851,6 +8304,7 @@ static void attach_tasks(struct lb_env *env)
-
- rq_unlock(env->dst_rq, &rf);
- }
-+#endif
-
- #ifdef CONFIG_NO_HZ_COMMON
- static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
-@@ -7896,6 +8350,7 @@ static inline bool others_have_blocked(struct rq *rq) { return false; }
- static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
- #endif
-
-+#if !defined(CONFIG_CACULE_RDB)
- static bool __update_blocked_others(struct rq *rq, bool *done)
- {
- const struct sched_class *curr_class;
-@@ -7921,6 +8376,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
-
- return decayed;
- }
-+#endif
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-
-@@ -8028,6 +8484,7 @@ static unsigned long task_h_load(struct task_struct *p)
- cfs_rq_load_avg(cfs_rq) + 1);
- }
- #else
-+#if !defined(CONFIG_CACULE_RDB)
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq = &rq->cfs;
-@@ -8039,6 +8496,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
-
- return decayed;
- }
-+#endif
-
- static unsigned long task_h_load(struct task_struct *p)
- {
-@@ -8048,6 +8506,7 @@ static unsigned long task_h_load(struct task_struct *p)
-
- static void update_blocked_averages(int cpu)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- bool decayed = false, done = true;
- struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
-@@ -8062,6 +8521,7 @@ static void update_blocked_averages(int cpu)
- if (decayed)
- cpufreq_update_util(rq, 0);
- rq_unlock_irqrestore(rq, &rf);
-+#endif
- }
-
- /********** Helpers for find_busiest_group ************************/
-@@ -9224,6 +9684,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
- * different in groups.
- */
-
-+#if !defined(CONFIG_CACULE_RDB)
- /**
- * find_busiest_group - Returns the busiest group within the sched_domain
- * if there is an imbalance.
-@@ -9489,6 +9950,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
-
- return busiest;
- }
-+#endif
-
- /*
- * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
-@@ -9535,6 +9997,7 @@ voluntary_active_balance(struct lb_env *env)
- return 0;
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- static int need_active_balance(struct lb_env *env)
- {
- struct sched_domain *sd = env->sd;
-@@ -9856,6 +10319,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
- out:
- return ld_moved;
- }
-+#endif
-
- static inline unsigned long
- get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
-@@ -9894,6 +10358,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
- *next_balance = next;
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- /*
- * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
- * running tasks off the busiest CPU onto idle CPUs. It requires at
-@@ -9985,6 +10450,7 @@ static int active_load_balance_cpu_stop(void *data)
- }
-
- static DEFINE_SPINLOCK(balancing);
-+#endif
-
- /*
- * Scale the max load_balance interval with the number of CPUs in the system.
-@@ -9995,6 +10461,7 @@ void update_max_interval(void)
- max_load_balance_interval = HZ*num_online_cpus()/10;
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- /*
- * It checks each scheduling domain to see if it is due to be balanced,
- * and initiates a balancing operation if so.
-@@ -10100,6 +10567,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
- #endif
- }
- }
-+#endif
-
- static inline int on_null_domain(struct rq *rq)
- {
-@@ -10133,6 +10601,7 @@ static inline int find_new_ilb(void)
- return nr_cpu_ids;
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- /*
- * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
- * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
-@@ -10283,6 +10752,7 @@ static void nohz_balancer_kick(struct rq *rq)
- if (flags)
- kick_ilb(flags);
- }
-+#endif
-
- static void set_cpu_sd_state_busy(int cpu)
- {
-@@ -10390,6 +10860,7 @@ void nohz_balance_enter_idle(int cpu)
- WRITE_ONCE(nohz.has_blocked, 1);
- }
-
-+#if !defined(CONFIG_CACULE_RDB)
- /*
- * Internal function that runs load balance for all idle cpus. The load balance
- * can be a simple update of blocked load or a complete load balance with
-@@ -10550,8 +11021,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
- kick_ilb(NOHZ_STATS_KICK);
- raw_spin_lock(&this_rq->lock);
- }
-+#endif
-
- #else /* !CONFIG_NO_HZ_COMMON */
-+#if !defined(CONFIG_CACULE_RDB)
- static inline void nohz_balancer_kick(struct rq *rq) { }
-
- static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
-@@ -10560,8 +11033,108 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
- }
-
- static inline void nohz_newidle_balance(struct rq *this_rq) { }
-+#endif
-+
- #endif /* CONFIG_NO_HZ_COMMON */
-
-+#ifdef CONFIG_CACULE_RDB
-+static int
-+can_migrate_task(struct task_struct *p, int dst_cpu, struct rq *src_rq)
-+{
-+ if (task_running(src_rq, p))
-+ return 0;
-+
-+ if (!cpumask_test_cpu(dst_cpu, p->cpus_ptr))
-+ return 0;
-+
-+ if (p->se.exec_start == 0)
-+ return 0;
-+
-+ return 1;
-+}
-+
-+static void pull_from_unlock(struct rq *this_rq,
-+ struct rq *src_rq,
-+ struct rq_flags *rf,
-+ struct task_struct *p,
-+ int dst_cpu)
-+{
-+ // detach task
-+ deactivate_task(src_rq, p, DEQUEUE_NOCLOCK);
-+ set_task_cpu(p, dst_cpu);
-+
-+ // unlock src rq
-+ rq_unlock(src_rq, rf);
-+ local_irq_restore(rf->flags);
-+
-+ // lock this rq
-+ raw_spin_lock(&this_rq->lock);
-+ update_rq_clock(this_rq);
-+
-+ activate_task(this_rq, p, ENQUEUE_NOCLOCK);
-+ check_preempt_curr(this_rq, p, 0);
-+
-+ // unlock this rq
-+ raw_spin_unlock(&this_rq->lock);
-+}
-+
-+static inline struct rq *
-+find_max_IS_rq(struct cfs_rq *cfs_rq, int dst_cpu)
-+{
-+ struct rq *tmp_rq, *max_rq = NULL;
-+ int cpu;
-+ u32 max_IS = cfs_rq->IS_head;
-+ u32 local_IS;
-+
-+ // find max hrrn
-+ for_each_online_cpu(cpu) {
-+ if (cpu == dst_cpu)
-+ continue;
-+
-+ tmp_rq = cpu_rq(cpu);
-+
-+ if (tmp_rq->cfs.nr_running < 2 || !tmp_rq->cfs.head)
-+ continue;
-+
-+ /* check if cache hot */
-+ if (!cpus_share_cache(cpu, dst_cpu) && task_hot(tmp_rq))
-+ continue;
-+
-+ local_IS = READ_ONCE(tmp_rq->cfs.IS_head);
-+
-+ if (local_IS > max_IS) {
-+ max_IS = local_IS;
-+ max_rq = tmp_rq;
-+ }
-+ }
-+
-+ return max_rq;
-+}
-+
-+static int try_pull_from(struct rq *src_rq, struct rq *this_rq)
-+{
-+ struct rq_flags rf;
-+ int dst_cpu = cpu_of(this_rq);
-+ struct task_struct *p;
-+
-+ rq_lock_irqsave(src_rq, &rf);
-+ update_rq_clock(src_rq);
-+
-+ if (src_rq->cfs.head && src_rq->cfs.nr_running > 1) {
-+ p = task_of(se_of(src_rq->cfs.head));
-+
-+ if (can_migrate_task(p, dst_cpu, src_rq)) {
-+ pull_from_unlock(this_rq, src_rq, &rf, p, dst_cpu);
-+ return 1;
-+ }
-+ }
-+
-+ rq_unlock(src_rq, &rf);
-+ local_irq_restore(rf.flags);
-+
-+ return 0;
-+}
-+
- /*
- * newidle_balance is called by schedule() if this_cpu is about to become
- * idle. Attempts to pull tasks from other CPUs.
-@@ -10572,6 +11145,105 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
- * > 0 - success, new (fair) tasks present
- */
- static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
-+{
-+ int this_cpu = this_rq->cpu;
-+ struct task_struct *p = NULL;
-+ struct rq *src_rq;
-+ int src_cpu;
-+ struct rq_flags src_rf;
-+ int pulled_task = 0;
-+ int cores_round = 1;
-+
-+ update_misfit_status(NULL, this_rq);
-+ /*
-+ * We must set idle_stamp _before_ calling idle_balance(), such that we
-+ * measure the duration of idle_balance() as idle time.
-+ */
-+ this_rq->idle_stamp = rq_clock(this_rq);
-+
-+ /*
-+ * Do not pull tasks towards !active CPUs...
-+ */
-+ if (!cpu_active(this_cpu))
-+ return 0;
-+
-+ /*
-+ * This is OK, because current is on_cpu, which avoids it being picked
-+ * for load-balance and preemption/IRQs are still disabled avoiding
-+ * further scheduler activity on it and we're being very careful to
-+ * re-start the picking loop.
-+ */
-+ rq_unpin_lock(this_rq, rf);
-+ raw_spin_unlock(&this_rq->lock);
-+
-+again:
-+ for_each_online_cpu(src_cpu) {
-+
-+ if (src_cpu == this_cpu)
-+ continue;
-+
-+ if (cores_round && !cpus_share_cache(src_cpu, this_cpu))
-+ continue;
-+
-+ src_rq = cpu_rq(src_cpu);
-+
-+ rq_lock_irqsave(src_rq, &src_rf);
-+ update_rq_clock(src_rq);
-+
-+ if (src_rq->cfs.nr_running < 2 || !(src_rq->cfs.head))
-+ goto next;
-+
-+ p = task_of(se_of(src_rq->cfs.head));
-+
-+ if (can_migrate_task(p, this_cpu, src_rq)) {
-+ pull_from_unlock(this_rq, src_rq, &src_rf, p, this_cpu);
-+
-+ pulled_task = 1;
-+ goto out;
-+ }
-+
-+next:
-+ rq_unlock(src_rq, &src_rf);
-+ local_irq_restore(src_rf.flags);
-+
-+ /*
-+ * Stop searching for tasks to pull if there are
-+ * now runnable tasks on this rq.
-+ */
-+ if (pulled_task || this_rq->nr_running > 0)
-+ goto out;
-+ }
-+
-+ if (cores_round) {
-+ // now search for all cpus
-+ cores_round = 0;
-+ goto again;
-+ }
-+
-+out:
-+ raw_spin_lock(&this_rq->lock);
-+
-+ /*
-+ * While browsing the domains, we released the rq lock, a task could
-+ * have been enqueued in the meantime. Since we're not going idle,
-+ * pretend we pulled a task.
-+ */
-+ if (this_rq->cfs.h_nr_running && !pulled_task)
-+ pulled_task = 1;
-+
-+ /* Is there a task of a high priority class? */
-+ if (this_rq->nr_running != this_rq->cfs.h_nr_running)
-+ pulled_task = -1;
-+
-+ if (pulled_task)
-+ this_rq->idle_stamp = 0;
-+
-+ rq_repin_lock(this_rq, rf);
-+
-+ return pulled_task;
-+}
-+#else
-+static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
- {
- unsigned long next_balance = jiffies + HZ;
- int this_cpu = this_rq->cpu;
-@@ -10722,6 +11394,167 @@ void trigger_load_balance(struct rq *rq)
-
- nohz_balancer_kick(rq);
- }
-+#endif
-+
-+#ifdef CONFIG_CACULE_RDB
-+static int
-+idle_try_pull_any(struct cfs_rq *cfs_rq)
-+{
-+ struct task_struct *p = NULL;
-+ struct rq *this_rq = rq_of(cfs_rq), *src_rq;
-+ int dst_cpu = cpu_of(this_rq);
-+ int src_cpu;
-+ struct rq_flags rf;
-+ int pulled = 0;
-+ int cores_round = 1;
-+
-+again:
-+ for_each_online_cpu(src_cpu) {
-+
-+ if (src_cpu == dst_cpu)
-+ continue;
-+
-+ if (cores_round && !cpus_share_cache(src_cpu, dst_cpu))
-+ continue;
-+
-+ src_rq = cpu_rq(src_cpu);
-+
-+ rq_lock_irqsave(src_rq, &rf);
-+ update_rq_clock(src_rq);
-+
-+ if (src_rq->cfs.nr_running < 2 || !(src_rq->cfs.head))
-+ goto next;
-+
-+ p = task_of(se_of(src_rq->cfs.head));
-+
-+ if (can_migrate_task(p, dst_cpu, src_rq)) {
-+ pull_from_unlock(this_rq, src_rq, &rf, p, dst_cpu);
-+ pulled = 1;
-+ goto out;
-+ }
-+
-+next:
-+ rq_unlock(src_rq, &rf);
-+ local_irq_restore(rf.flags);
-+ }
-+
-+ if (cores_round) {
-+ // now search for all cpus
-+ cores_round = 0;
-+ goto again;
-+ }
-+
-+out:
-+ return pulled;
-+}
-+
-+
-+static int
-+try_pull_higher_IS(struct cfs_rq *cfs_rq)
-+{
-+ struct rq *this_rq = rq_of(cfs_rq), *max_rq;
-+ int dst_cpu = cpu_of(this_rq);
-+
-+ max_rq = find_max_IS_rq(cfs_rq, dst_cpu);
-+
-+ if (!max_rq)
-+ return 0;
-+
-+ if (try_pull_from(max_rq, this_rq))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+static void try_pull_any(struct rq *this_rq)
-+{
-+ struct task_struct *p = NULL;
-+ struct rq *src_rq;
-+ int dst_cpu = cpu_of(this_rq);
-+ int src_cpu;
-+ struct rq_flags src_rf;
-+ int cores_round = 1;
-+
-+again:
-+ for_each_online_cpu(src_cpu) {
-+
-+ if (src_cpu == dst_cpu)
-+ continue;
-+
-+ src_rq = cpu_rq(src_cpu);
-+
-+ if (cores_round) {
-+ if (!cpus_share_cache(src_cpu, dst_cpu))
-+ continue;
-+ } else if (!cpus_share_cache(src_cpu, dst_cpu) && task_hot(src_rq)) {
-+ /* check if cache hot */
-+ continue;
-+ }
-+
-+ if (src_rq->cfs.nr_running < 2 || !(src_rq->cfs.head)
-+ || src_rq->cfs.nr_running <= this_rq->cfs.nr_running)
-+ continue;
-+
-+ rq_lock_irqsave(src_rq, &src_rf);
-+ update_rq_clock(src_rq);
-+
-+ if (src_rq->cfs.nr_running < 2 || !(src_rq->cfs.head)
-+ || src_rq->cfs.nr_running <= this_rq->cfs.nr_running)
-+ goto next;
-+
-+ p = task_of(se_of(src_rq->cfs.head));
-+
-+ if (can_migrate_task(p, dst_cpu, src_rq)) {
-+ pull_from_unlock(this_rq, src_rq, &src_rf, p, dst_cpu);
-+ return;
-+ }
-+
-+next:
-+ rq_unlock(src_rq, &src_rf);
-+ local_irq_restore(src_rf.flags);
-+ }
-+
-+ if (cores_round) {
-+ // now search for all cpus
-+ cores_round = 0;
-+ goto again;
-+ }
-+}
-+
-+static inline void
-+active_balance(struct rq *rq)
-+{
-+ struct cfs_rq *cfs_rq = &rq->cfs;
-+
-+ if (!cfs_rq->head || cfs_rq->nr_running < 2)
-+ try_pull_higher_IS(&rq->cfs);
-+ else
-+ try_pull_any(rq);
-+}
-+
-+void trigger_load_balance(struct rq *rq)
-+{
-+ unsigned long interval = 3UL;
-+
-+ /* Don't need to rebalance while attached to NULL domain */
-+ if (unlikely(on_null_domain(rq)))
-+ return;
-+
-+ if (time_before(jiffies, rq->next_balance))
-+ return;
-+
-+ if (rq->idle_balance) {
-+ idle_try_pull_any(&rq->cfs);
-+ }
-+ else {
-+ active_balance(rq);
-+
-+ /* scale ms to jiffies */
-+ interval = msecs_to_jiffies(interval);
-+ rq->next_balance = jiffies + interval;
-+ }
-+}
-+#endif
-
- static void rq_online_fair(struct rq *rq)
- {
-@@ -10765,11 +11598,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -10765,11 +11159,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_overutilized_status(task_rq(curr));
}
@@ -2147,7 +1173,7 @@ index bbc78794224a..c99fc326ec24 100644
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq;
-@@ -10800,6 +11652,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -10800,6 +11213,7 @@ static void task_fork_fair(struct task_struct *p)
se->vruntime -= cfs_rq->min_vruntime;
rq_unlock(rq, &rf);
}
@@ -2155,60 +1181,26 @@ index bbc78794224a..c99fc326ec24 100644
/*
* Priority of the task has changed. Check to see if we preempt
-@@ -10876,9 +11729,12 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
- }
- }
- #else
-+#if !defined(CONFIG_CACULE_RDB)
- static void propagate_entity_cfs_rq(struct sched_entity *se) { }
- #endif
-+#endif
-
-+#if !defined(CONFIG_CACULE_RDB)
- static void detach_entity_cfs_rq(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-@@ -10889,9 +11745,11 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
- update_tg_load_avg(cfs_rq);
- propagate_entity_cfs_rq(se);
- }
-+#endif
-
- static void attach_entity_cfs_rq(struct sched_entity *se)
- {
-+#if !defined(CONFIG_CACULE_RDB)
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -10907,11 +11765,15 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
- propagate_entity_cfs_rq(se);
-+#endif
- }
-
+@@ -10912,6 +11326,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
-+#if !defined(CONFIG_CACULE_RDB)
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (!vruntime_normalized(p)) {
-@@ -10922,19 +11784,28 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -10922,6 +11338,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+#endif
detach_entity_cfs_rq(se);
-+#endif
}
-
+@@ -10929,12 +11346,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
-+#if !defined(CONFIG_CACULE_RDB)
struct sched_entity *se = &p->se;
+
+#if !defined(CONFIG_CACULE_SCHED)
@@ -2221,11 +1213,10 @@ index bbc78794224a..c99fc326ec24 100644
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
+#endif
-+#endif
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -10990,13 +11861,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+@@ -10990,13 +11412,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
@@ -2248,18 +1239,8 @@ index bbc78794224a..c99fc326ec24 100644
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -11321,7 +12201,9 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
- __init void init_sched_fair_class(void)
- {
- #ifdef CONFIG_SMP
-+#if !defined(CONFIG_CACULE_RDB)
- open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
-+#endif
-
- #ifdef CONFIG_NO_HZ_COMMON
- nohz.next_balance = jiffies;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 282a6bbaacd7..ee0e31c1ce16 100644
+index 282a6bbaacd7..a3b7316dd537 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -516,10 +516,13 @@ struct cfs_rq {
@@ -2272,11 +1253,11 @@ index 282a6bbaacd7..ee0e31c1ce16 100644
#ifndef CONFIG_64BIT
u64 min_vruntime_copy;
#endif
-+#endif // CONFIG_CACULE_SCHED
++#endif /* CONFIG_CACULE_SCHED */
struct rb_root_cached tasks_timeline;
-@@ -528,9 +531,19 @@ struct cfs_rq {
+@@ -528,9 +531,15 @@ struct cfs_rq {
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr;
@@ -2284,10 +1265,6 @@ index 282a6bbaacd7..ee0e31c1ce16 100644
+ struct cacule_node *head;
+ struct cacule_node *tail;
+
-+#ifdef CONFIG_CACULE_RDB
-+ unsigned int IS_head;
-+#endif
-+
+#else
struct sched_entity *next;
struct sched_entity *last;
@@ -2296,24 +1273,11 @@ index 282a6bbaacd7..ee0e31c1ce16 100644
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
-@@ -2094,7 +2107,12 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
- extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-
- extern const_debug unsigned int sysctl_sched_nr_migrate;
-+
-+#ifdef CONFIG_CACULE_RDB
-+extern unsigned int sysctl_sched_migration_cost;
-+#else
- extern const_debug unsigned int sysctl_sched_migration_cost;
-+#endif
-
- #ifdef CONFIG_SCHED_HRTICK
-
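The sched.h hunk below replaces the CFS hint pointers (next, last) on cfs_rq with a head/tail pair of cacule_node pointers when CONFIG_CACULE_SCHED is set, and compiles the min_vruntime bookkeeping out. The sketch that follows shows the same conditional-layout pattern in a self-contained form; struct cacule_node is not defined in this hunk, so the two-pointer shape used here is only an assumption for illustration. Build with -DCONFIG_CACULE_SCHED to select the CacULE variant.

    #include <stdio.h>

    /* Assumed shape of the CacULE list node; the real definition lives in
     * headers patched elsewhere and may differ. */
    struct cacule_node {
        struct cacule_node *next;
        struct cacule_node *prev;
    };

    /* Condensed model of the cfs_rq fields touched by the hunk below. */
    struct cfs_rq_model {
    #ifdef CONFIG_CACULE_SCHED
        struct cacule_node *head;   /* CacULE queue endpoints */
        struct cacule_node *tail;
    #else
        void *next;                 /* stand-ins for the sched_entity hints */
        void *last;
    #endif
        unsigned int nr_running;
    };

    #ifdef CONFIG_CACULE_SCHED
    static const char *layout = "CacULE head/tail";
    #else
    static const char *layout = "CFS next/last";
    #endif

    int main(void)
    {
        printf("cfs_rq_model: %zu bytes, %s layout\n",
               sizeof(struct cfs_rq_model), layout);
        return 0;
    }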
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 62fbd09b5dc1..c6b24b552656 100644
+index 62fbd09b5dc1..a0bf55bbb3a7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
-@@ -1659,6 +1659,38 @@ static struct ctl_table kern_table[] = {
+@@ -1659,6 +1659,29 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
@@ -2326,43 +1290,20 @@ index 62fbd09b5dc1..c6b24b552656 100644
+ .proc_handler = proc_dointvec,
+ },
+ {
-+ .procname = "sched_max_lifetime_ms",
-+ .data = &cacule_max_lifetime,
-+ .maxlen = sizeof(int),
++ .procname = "sched_interactivity_threshold",
++ .data = &interactivity_threshold,
++ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
-+ .procname = "sched_harsh_mode_enabled",
-+ .data = &cacule_harsh_mode,
++ .procname = "sched_max_lifetime_ms",
++ .data = &cacule_max_lifetime,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
-+#if defined(CONFIG_CACULE_RDB) || defined(CONFIG_SCHED_DEBUG)
-+ {
-+ .procname = "sched_migration_cost_ns",
-+ .data = &sysctl_sched_migration_cost,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+#endif
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",
-@@ -1697,13 +1729,6 @@ static struct ctl_table kern_table[] = {
- .extra1 = &min_sched_tunable_scaling,
- .extra2 = &max_sched_tunable_scaling,
- },
-- {
-- .procname = "sched_migration_cost_ns",
-- .data = &sysctl_sched_migration_cost,
-- .maxlen = sizeof(unsigned int),
-- .mode = 0644,
-- .proc_handler = proc_dointvec,
-- },
- {
- .procname = "sched_nr_migrate",
- .data = &sysctl_sched_nr_migrate,
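The sysctl hunk above registers the CacULE knobs in kern_table, so on a kernel built from this patch they surface under /proc/sys/kernel/. The small reader below queries two of the entry names visible above (sched_interactivity_threshold, sched_max_lifetime_ms); it assumes the patched kernel is booted and simply reports knobs that are absent, e.g. when CONFIG_CACULE_SCHED is off.

    #include <stdio.h>
    #include <string.h>

    /* Print a sysctl value via its /proc/sys path, or note that the knob
     * is not present on this kernel. */
    static void show_knob(const char *name)
    {
        char path[256], buf[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
        f = fopen(path, "r");
        if (!f) {
            printf("%-32s <not available>\n", name);
            return;
        }
        if (fgets(buf, sizeof(buf), f)) {
            buf[strcspn(buf, "\n")] = '\0';
            printf("%-32s %s\n", name, buf);
        }
        fclose(f);
    }

    int main(void)
    {
        /* Names taken from the ctl_table entries in the hunk above. */
        show_knob("sched_interactivity_threshold");
        show_knob("sched_max_lifetime_ms");
        return 0;
    }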
diff --git a/config b/config
index 159db03d01fc..4d8b111ed805 100644
--- a/config
+++ b/config
@@ -143,10 +143,9 @@ CONFIG_MEMCG_KMEM=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=n
+CONFIG_FAIR_GROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_CACULE_SCHED=y
-CONFIG_CACULE_RDB=n
# CONFIG_RT_GROUP_SCHED is not set
CONFIG_CGROUP_PIDS=y
# CONFIG_CGROUP_RDMA is not set
@@ -166,7 +165,7 @@ CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_SCHED_AUTOGROUP=n
+CONFIG_SCHED_AUTOGROUP=y
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/config8 b/config8
index 9029982e640d..5823e8ed2596 100644
--- a/config8
+++ b/config8
@@ -142,10 +142,9 @@ CONFIG_MEMCG_KMEM=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=n
+CONFIG_FAIR_GROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_CACULE_SCHED=y
-CONFIG_CACULE_RDB=n
# CONFIG_RT_GROUP_SCHED is not set
CONFIG_CGROUP_PIDS=y
# CONFIG_CGROUP_RDMA is not set
@@ -166,7 +165,7 @@ CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_SCHED_AUTOGROUP=n
+CONFIG_SCHED_AUTOGROUP=y
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
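With these config and config8 changes, CONFIG_FAIR_GROUP_SCHED and CONFIG_SCHED_AUTOGROUP are enabled alongside CONFIG_CACULE_SCHED, and the CONFIG_CACULE_RDB=n line is gone. One way to confirm what the running kernel was actually built with is to scan /proc/config.gz; the sketch below does so with zlib (link with -lz) and assumes CONFIG_IKCONFIG_PROC is enabled so that file exists.

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* Scan /proc/config.gz for the scheduler options touched by this
     * commit and print any matching lines. */
    int main(void)
    {
        const char *wanted[] = {
            "CONFIG_CACULE_SCHED",
            "CONFIG_FAIR_GROUP_SCHED",
            "CONFIG_SCHED_AUTOGROUP",
        };
        char line[512];
        gzFile f = gzopen("/proc/config.gz", "rb");

        if (!f) {
            fprintf(stderr, "cannot open /proc/config.gz (CONFIG_IKCONFIG_PROC off?)\n");
            return 1;
        }
        while (gzgets(f, line, sizeof(line))) {
            for (size_t i = 0; i < sizeof(wanted) / sizeof(wanted[0]); i++) {
                if (strstr(line, wanted[i])) {
                    fputs(line, stdout);
                    break;
                }
            }
        }
        gzclose(f);
        return 0;
    }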