author      Kyle De'Vir                               2019-07-12 21:36:46 +1000
committer   Kyle De'Vir                               2019-07-12 21:36:46 +1000
commit      e1186773b9750814faabfab6ddd2c1f402b6613f  (patch)
tree        88236825654ae1d4738a82299b49005b169fe2f3
parent      b7490442e9f64c10d231a8900c7e947a4ecd9539  (diff)
download    aur-e1186773b9750814faabfab6ddd2c1f402b6613f.tar.gz
4.2
-rw-r--r--  .SRCINFO                                     20
-rw-r--r--  01-Undead-PDS-0.99o-rebase-by-TkG.patch   16737
-rw-r--r--  PKGBUILD                                     31
-rw-r--r--  config                                      654
4 files changed, 8911 insertions, 8531 deletions
@@ -1,5 +1,5 @@ pkgbase = linux-pds - pkgver = 5.1.16.arch1 + pkgver = 5.2.arch2 pkgrel = 1 url = https://git.archlinux.org/linux.git/log/?h=v arch = x86_64 @@ -10,8 +10,12 @@ pkgbase = linux-pds makedepends = bc makedepends = libelf makedepends = git + makedepends = python-sphinx + makedepends = python-sphinx_rtd_theme + makedepends = graphviz + makedepends = imagemagick options = !strip - source = git+https://git.archlinux.org/linux?signed#tag=v5.1.16-arch1 + source = git+https://git.archlinux.org/linux?signed#tag=v5.2-arch2 source = git+https://github.com/graysky2/kernel_gcc_patch source = config source = 60-linux.hook @@ -24,11 +28,11 @@ pkgbase = linux-pds validpgpkeys = 8218F88849AAC522E94CF470A5E9288C4FA415FA sha512sums = SKIP sha512sums = SKIP - sha512sums = 5d78839a3df30a667d1e64458add6a8c01557a21a4eb8b25ff084b95bc5efbbfb1e02f5dd2b5485dc6829647d7024450ab4886a7b11e2f5c334caeddd05810af + sha512sums = 951ad205d211ef965e6136e0b8447aeeb522229a74b45ce497f5ff31f2059db23b36fb8035f72798d15be45440396f901a98857f961ab0acf9bfbe25c955bb32 sha512sums = 7ad5be75ee422dda3b80edd2eb614d8a9181e2c8228cd68b3881e2fb95953bf2dea6cbe7900ce1013c9de89b2802574b7b24869fc5d7a95d3cc3112c4d27063a sha512sums = 2718b58dbbb15063bacb2bde6489e5b3c59afac4c0e0435b97fe720d42c711b6bcba926f67a8687878bd51373c9cf3adb1915a11666d79ccb220bf36e0788ab7 sha512sums = 2dc6b0ba8f7dbf19d2446c5c5f1823587de89f4e28e9595937dd51a87755099656f2acec50e3e2546ea633ad1bfd1c722e0c2b91eef1d609103d8abdc0a7cbaf - sha512sums = cdfa59b9f369a5795c93ced526e7f480851ef439f3379e6c1a32b9cf29232cd4671fe4b0ddb50c5d996e23db71582844e233fee96bb551827eaf70b0be1d18dc + sha512sums = 3ba2ea015485795930fe17231f0ba7755522ea675f149b4d42b056827196b4f98aea3cd027c3bd9a5934ff4b541aec30ff62c179dd38de908c0ce884af8560c9 sha512sums = 3ff796cbc213ae5f43a55f1ba92406bba04703db3459040beacacd9baceb3138021e908f440bd101cc76cb725e418ebdc8ab776327801690da30a1477bc84753 pkgname = linux-pds @@ -44,11 +48,11 @@ pkgname = linux-pds pkgname = linux-pds-headers pkgdesc = Header files and scripts for building modules for Linux-pds kernel ~ featuring Alfred Chen's PDS CPU scheduler, rebased by TkG - provides = linux-pds-headers=5.1.16.arch1 - provides = linux-headers=5.1.16.arch1 + provides = linux-pds-headers=5.2.arch2 + provides = linux-headers=5.2.arch2 pkgname = linux-pds-docs pkgdesc = Kernel hackers manual - HTML documentation that comes with the Linux-pds kernel ~ featuring Alfred Chen's PDS CPU scheduler, rebased by TkG - provides = linux-pds-docs=5.1.16.arch1 - provides = linux-docs=5.1.16.arch1 + provides = linux-pds-docs=5.2.arch2 + provides = linux-docs=5.2.arch2 diff --git a/01-Undead-PDS-0.99o-rebase-by-TkG.patch b/01-Undead-PDS-0.99o-rebase-by-TkG.patch index d66a7b12dd40..c7248693acf5 100644 --- a/01-Undead-PDS-0.99o-rebase-by-TkG.patch +++ b/01-Undead-PDS-0.99o-rebase-by-TkG.patch @@ -1,8397 +1,8340 @@ -From 9be9808d7744da988ce921476581ae0feab4e304 Mon Sep 17 00:00:00 2001 -From: Tk-Glitch <ti3nou@gmail.com> -Date: Mon, 6 May 2019 15:49:36 +0200 -Subject: PDS 099o, 5.1 rebase - - -diff --git a/Documentation/scheduler/sched-PDS-mq.txt b/Documentation/scheduler/sched-PDS-mq.txt -new file mode 100644 -index 000000000000..709e86f6487e ---- /dev/null -+++ b/Documentation/scheduler/sched-PDS-mq.txt -@@ -0,0 +1,56 @@ -+ Priority and Deadline based Skiplist multiple queue Scheduler -+ ------------------------------------------------------------- -+ -+CONTENT -+======== -+ -+ 0. Development -+ 1. Overview -+ 1.1 Design goal -+ 1.2 Design summary -+ 2. 
Design Detail -+ 2.1 Skip list implementation -+ 2.2 Task preempt -+ 2.3 Task policy, priority and deadline -+ 2.4 Task selection -+ 2.5 Run queue balance -+ 2.6 Task migration -+ -+ -+0. Development -+============== -+ -+Priority and Deadline based Skiplist multiple queue scheduler, referred to as -+PDS from here on, is developed upon the enhancement patchset VRQ(Variable Run -+Queue) for BFS(Brain Fuck Scheduler by Con Kolivas). PDS inherits the existing -+design from VRQ and inspired by the introduction of skiplist data structure -+to the scheduler by Con Kolivas. However, PDS is different from MuQSS(Multiple -+Queue Skiplist Scheduler, the successor after BFS) in many ways. -+ -+1. Overview -+=========== -+ -+1.1 Design goal -+--------------- -+ -+PDS is designed to make the cpu process scheduler code to be simple, but while -+efficiency and scalable. Be Simple, the scheduler code will be easy to be read -+and the behavious of scheduler will be easy to predict. Be efficiency, the -+scheduler shall be well balance the thoughput performance and task interactivity -+at the same time for different properties the tasks behave. Be scalable, the -+performance of the scheduler should be in good shape with the glowing of -+workload or with the growing of the cpu numbers. -+ -+1.2 Design summary -+------------------ -+ -+PDS is described as a multiple run queues cpu scheduler. Each cpu has its own -+run queue. A heavry customized skiplist is used as the backend data structure -+of the cpu run queue. Tasks in run queue is sorted by priority then virtual -+deadline(simplfy to just deadline from here on). In PDS, balance action among -+run queues are kept as less as possible to reduce the migration cost. Cpumask -+data structure is widely used in cpu affinity checking and cpu preemption/ -+selection to make PDS scalable with increasing cpu number. -+ -+ -+To be continued... -diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt -index aa058aa7bf28..8bea8e9ca77d 100644 ---- a/Documentation/sysctl/kernel.txt -+++ b/Documentation/sysctl/kernel.txt -@@ -77,6 +77,7 @@ show up in /proc/sys/kernel: - - randomize_va_space - - real-root-dev ==> Documentation/admin-guide/initrd.rst - - reboot-cmd [ SPARC only ] -+- rr_interval - - rtsig-max - - rtsig-nr - - sched_energy_aware -@@ -100,6 +101,7 @@ show up in /proc/sys/kernel: - - unknown_nmi_panic - - watchdog - - watchdog_thresh -+- yield_type - - version - - ============================================================== -@@ -881,6 +883,20 @@ rebooting. ??? - - ============================================================== - -+rr_interval: (PDS CPU scheduler only) -+ -+This is the smallest duration that any cpu process scheduling unit -+will run for. Increasing this value can increase throughput of cpu -+bound tasks substantially but at the expense of increased latencies -+overall. Conversely decreasing it will decrease average and maximum -+latencies but at the expense of throughput. This value is in -+milliseconds and the default value chosen depends on the number of -+cpus available at scheduler initialisation with a minimum of 6. -+ -+Valid values are from 1-1000. -+ -+============================================================== -+ - rtsig-max & rtsig-nr: - - The file rtsig-max can be used to tune the maximum number -@@ -1143,3 +1159,13 @@ The softlockup threshold is (2 * watchdog_thresh). Setting this - tunable to zero will disable lockup detection altogether. 
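As a concrete illustration of the PDS tunables documented above, here is a minimal userspace sketch (an annotation, not part of the patch) that sets rr_interval and yield_type through /proc/sys/kernel. Only the entry names and value ranges come from the documentation hunks here; the chosen values are examples, and writing these files requires root.

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");	/* needs root */

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* 1-1000 ms; larger favours throughput, smaller favours latency */
	write_sysctl("/proc/sys/kernel/rr_interval", "6");
	/* 0 = no yield, 1 = yield only to better prio/deadline (default),
	 * 2 = expire timeslice and recalculate deadline (entry below) */
	write_sysctl("/proc/sys/kernel/yield_type", "1");
	return 0;
}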
- - ============================================================== -+ -+yield_type: (MuQSS/VRQ CPU scheduler only) -+ -+This determines what type of yield calls to sched_yield will perform. -+ -+ 0 - No yield. -+ 1 - Yield only to better priority/deadline tasks. (default) -+ 2 - Expire timeslice and recalculate deadline. -+ -+============================================================== -diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c -index 9fcccb4490b9..7f2b6c226eed 100644 ---- a/arch/powerpc/platforms/cell/spufs/sched.c -+++ b/arch/powerpc/platforms/cell/spufs/sched.c -@@ -64,11 +64,6 @@ static struct task_struct *spusched_task; - static struct timer_list spusched_timer; - static struct timer_list spuloadavg_timer; - --/* -- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). -- */ --#define NORMAL_PRIO 120 -- - /* - * Frequency of the spu scheduler tick. By default we do one SPU scheduler - * tick for every 10 CPU scheduler ticks. -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 62fc3fda1a05..764fc6eef19f 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -1017,6 +1017,22 @@ config NR_CPUS - config SCHED_SMT - def_bool y if SMP - -+config SMT_NICE -+ bool "SMT (Hyperthreading) aware nice priority and policy support" -+ depends on SCHED_PDS && SCHED_SMT -+ default y -+ ---help--- -+ Enabling Hyperthreading on Intel CPUs decreases the effectiveness -+ of the use of 'nice' levels and different scheduling policies -+ (e.g. realtime) due to sharing of CPU power between hyperthreads. -+ SMT nice support makes each logical CPU aware of what is running on -+ its hyperthread siblings, maintaining appropriate distribution of -+ CPU according to nice levels and scheduling policies at the expense -+ of slightly increased overhead. -+ -+ If unsure say Y here. -+ -+ - config SCHED_MC - def_bool y - prompt "Multi-core scheduler support" -diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c -index 4268f87e99fc..47fa3a161770 100644 ---- a/drivers/cpufreq/cpufreq_conservative.c -+++ b/drivers/cpufreq/cpufreq_conservative.c -@@ -31,8 +31,8 @@ struct cs_dbs_tuners { - }; - - /* Conservative governor macros */ --#define DEF_FREQUENCY_UP_THRESHOLD (80) --#define DEF_FREQUENCY_DOWN_THRESHOLD (20) -+#define DEF_FREQUENCY_UP_THRESHOLD (63) -+#define DEF_FREQUENCY_DOWN_THRESHOLD (26) - #define DEF_FREQUENCY_STEP (5) - #define DEF_SAMPLING_DOWN_FACTOR (1) - #define MAX_SAMPLING_DOWN_FACTOR (10) -diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c -index 6b423eebfd5d..e8c8aff4cba4 100644 ---- a/drivers/cpufreq/cpufreq_ondemand.c -+++ b/drivers/cpufreq/cpufreq_ondemand.c -@@ -21,7 +21,7 @@ - #include "cpufreq_ondemand.h" - - /* On-demand governor macros */ --#define DEF_FREQUENCY_UP_THRESHOLD (80) -+#define DEF_FREQUENCY_UP_THRESHOLD (63) - #define DEF_SAMPLING_DOWN_FACTOR (1) - #define MAX_SAMPLING_DOWN_FACTOR (100000) - #define MICRO_FREQUENCY_UP_THRESHOLD (95) -@@ -130,7 +130,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) - } - - /* -- * Every sampling_rate, we check, if current idle time is less than 20% -+ * Every sampling_rate, we check, if current idle time is less than 37% - * (default), then we try to increase frequency. Else, we adjust the frequency - * proportional to load. 
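 *
 * Worked numbers (annotation, not part of the patch): with the up
 * threshold lowered from 80 to 63 in the hunks above, the governor
 * starts raising the frequency once load exceeds 63%, i.e. once idle
 * time drops below 100 - 63 = 37%, so frequency scaling kicks in
 * noticeably earlier than with the mainline 80%/20% defaults.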
- */ -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 6a803a0b75df..c75b7be0f94b 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -463,7 +463,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, - seq_puts(m, "0 0 0\n"); - else - seq_printf(m, "%llu %llu %lu\n", -- (unsigned long long)task->se.sum_exec_runtime, -+ (unsigned long long)tsk_seruntime(task), - (unsigned long long)task->sched_info.run_delay, - task->sched_info.pcount); - -diff --git a/include/linux/init_task.h b/include/linux/init_task.h -index 6049baa5b8bc..87355efcc13d 100644 ---- a/include/linux/init_task.h -+++ b/include/linux/init_task.h -@@ -47,7 +47,11 @@ extern struct cred init_cred; - #define INIT_CPU_TIMERS(s) - #endif - -+#ifdef CONFIG_SCHED_PDS -+#define INIT_TASK_COMM "PDS" -+#else - #define INIT_TASK_COMM "swapper" -+#endif /* !CONFIG_SCHED_PDS */ - - /* Attach to the init_task data structure for proper alignment */ - #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK -diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h -index fa928242567d..fa456190e315 100644 ---- a/include/linux/jiffies.h -+++ b/include/linux/jiffies.h -@@ -171,7 +171,7 @@ static inline u64 get_jiffies_64(void) - * Have the 32 bit jiffies value wrap 5 minutes after boot - * so jiffies wrap bugs show up earlier. - */ --#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) -+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) - - /* - * Change timeval to jiffies, trying to avoid the -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 1549584a1538..ec5a3f7af424 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -30,6 +30,7 @@ - #include <linux/mm_types_task.h> - #include <linux/task_io_accounting.h> - #include <linux/rseq.h> -+#include <linux/skip_list.h> - - /* task_struct member predeclarations (sorted alphabetically): */ - struct audit_context; -@@ -605,9 +606,13 @@ struct task_struct { - unsigned int flags; - unsigned int ptrace; - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) - struct llist_node wake_entry; -+#endif -+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_PDS) - int on_cpu; -+#endif -+#ifdef CONFIG_SMP - #ifdef CONFIG_THREAD_INFO_IN_TASK - /* Current CPU: */ - unsigned int cpu; -@@ -616,6 +621,7 @@ struct task_struct { - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; - -+#ifndef CONFIG_SCHED_PDS - /* - * recent_used_cpu is initially set as the last CPU used by a task - * that wakes affine another task. Waker/wakee relationships can -@@ -624,6 +630,7 @@ struct task_struct { - * used CPU that may be idle. 
- */ - int recent_used_cpu; -+#endif /* CONFIG_SCHED_PDS */ - int wake_cpu; - #endif - int on_rq; -@@ -633,13 +640,27 @@ struct task_struct { - int normal_prio; - unsigned int rt_priority; - -+#ifdef CONFIG_SCHED_PDS -+ int time_slice; -+ u64 deadline; -+ /* skip list level */ -+ int sl_level; -+ /* skip list node */ -+ struct skiplist_node sl_node; -+ /* 8bits prio and 56bits deadline for quick processing */ -+ u64 priodl; -+ u64 last_ran; -+ /* sched_clock time spent running */ -+ u64 sched_time; -+#else /* CONFIG_SCHED_PDS */ - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; -+ struct sched_dl_entity dl; -+#endif - #ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; - #endif -- struct sched_dl_entity dl; - - #ifdef CONFIG_PREEMPT_NOTIFIERS - /* List of struct preempt_notifier: */ -@@ -1217,6 +1238,29 @@ struct task_struct { - */ - }; - -+#ifdef CONFIG_SCHED_PDS -+void cpu_scaling(int cpu); -+void cpu_nonscaling(int cpu); -+#define tsk_seruntime(t) ((t)->sched_time) -+/* replace the uncertian rt_timeout with 0UL */ -+#define tsk_rttimeout(t) (0UL) -+ -+#define task_running_idle(p) ((p)->prio == IDLE_PRIO) -+#else /* CFS */ -+extern int runqueue_is_locked(int cpu); -+static inline void cpu_scaling(int cpu) -+{ -+} -+ -+static inline void cpu_nonscaling(int cpu) -+{ -+} -+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) -+#define tsk_rttimeout(t) ((t)->rt.timeout) -+ -+#define iso_task(p) (false) -+#endif /* CONFIG_SCHED_PDS */ -+ - static inline struct pid *task_pid(struct task_struct *task) - { - return task->thread_pid; -diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h -index 0cb034331cbb..eb2d51ef8afa 100644 ---- a/include/linux/sched/deadline.h -+++ b/include/linux/sched/deadline.h -@@ -1,5 +1,22 @@ - /* SPDX-License-Identifier: GPL-2.0 */ - -+#ifdef CONFIG_SCHED_PDS -+ -+#define __tsk_deadline(p) ((p)->deadline) -+ -+static inline int dl_prio(int prio) -+{ -+ return 1; -+} -+ -+static inline int dl_task(struct task_struct *p) -+{ -+ return 1; -+} -+#else -+ -+#define __tsk_deadline(p) ((p)->dl.deadline) -+ - /* - * SCHED_DEADLINE tasks has negative priorities, reflecting - * the fact that any of them has higher prio than RT and -@@ -19,6 +36,7 @@ static inline int dl_task(struct task_struct *p) - { - return dl_prio(p->prio); - } -+#endif /* CONFIG_SCHED_PDS */ - - static inline bool dl_time_before(u64 a, u64 b) - { -diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h -index b36f4cf38111..46bbab702a3b 100644 ---- a/include/linux/sched/nohz.h -+++ b/include/linux/sched/nohz.h -@@ -6,7 +6,7 @@ - * This is the interface between the scheduler and nohz/dynticks: - */ - --#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_PDS) - extern void cpu_load_update_nohz_start(void); - extern void cpu_load_update_nohz_stop(void); - #else -diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h -index 7d64feafc408..fba04bb91492 100644 ---- a/include/linux/sched/prio.h -+++ b/include/linux/sched/prio.h -@@ -20,7 +20,18 @@ - */ - - #define MAX_USER_RT_PRIO 100 -+ -+#ifdef CONFIG_SCHED_PDS -+#define ISO_PRIO (MAX_USER_RT_PRIO) -+ -+#define MAX_RT_PRIO ((MAX_USER_RT_PRIO) + 1) -+ -+#define NORMAL_PRIO (MAX_RT_PRIO) -+#define IDLE_PRIO ((MAX_RT_PRIO) + 1) -+#define PRIO_LIMIT ((IDLE_PRIO) + 1) -+#else /* !CONFIG_SCHED_PDS */ - #define MAX_RT_PRIO MAX_USER_RT_PRIO -+#endif /* CONFIG_SCHED_PDS */ - - 
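/*
 * Worked layout (annotation derived from the PDS definitions above,
 * with MAX_USER_RT_PRIO = 100; not part of the patch):
 *
 *   prio   0..99   SCHED_FIFO / SCHED_RR
 *   prio     100   ISO_PRIO  (SCHED_ISO)
 *   prio     101   NORMAL_PRIO == MAX_RT_PRIO (SCHED_NORMAL; nice acts
 *                  through the virtual deadline, not through prio --
 *                  see sched_prio2deadline[] later in the patch)
 *   prio     102   IDLE_PRIO (SCHED_IDLE)
 *   prio     103   PRIO_LIMIT
 */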
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) - #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) -diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h -index e5af028c08b4..a96012e6f15e 100644 ---- a/include/linux/sched/rt.h -+++ b/include/linux/sched/rt.h -@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) - - if (policy == SCHED_FIFO || policy == SCHED_RR) - return true; -+#ifndef CONFIG_SCHED_PDS - if (policy == SCHED_DEADLINE) - return true; -+#endif - return false; - } - -diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h -index 2e97a2227045..8f58bb311c00 100644 ---- a/include/linux/sched/task.h -+++ b/include/linux/sched/task.h -@@ -82,7 +82,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); - extern void free_task(struct task_struct *tsk); - - /* sched_exec is called by processes performing an exec */ --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) - extern void sched_exec(void); - #else - #define sched_exec() {} -diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h -new file mode 100644 -index 000000000000..713fedd8034f ---- /dev/null -+++ b/include/linux/skip_list.h -@@ -0,0 +1,177 @@ -+/* -+ Copyright (C) 2016 Alfred Chen. -+ -+ Code based on Con Kolivas's skip list implementation for BFS, and -+ which is based on example originally by William Pugh. -+ -+Skip Lists are a probabilistic alternative to balanced trees, as -+described in the June 1990 issue of CACM and were invented by -+William Pugh in 1987. -+ -+A couple of comments about this implementation: -+ -+This file only provides a infrastructure of skip list. -+ -+skiplist_node is embedded into container data structure, to get rid the -+dependency of kmalloc/kfree operation in scheduler code. -+ -+A customized search function should be defined using DEFINE_SKIPLIST_INSERT -+macro and be used for skip list insert operation. -+ -+Random Level is also not defined in this file, instead, it should be customized -+implemented and set to node->level then pass to the customized skiplist_insert -+function. -+ -+Levels start at zero and go up to (NUM_SKIPLIST_LEVEL -1) -+ -+NUM_SKIPLIST_LEVEL in this implementation is 8 instead of origin 16, -+considering that there will be 256 entries to enable the top level when using -+random level p=0.5, and that number is more than enough for a run queue usage -+in a scheduler usage. And it also help to reduce the memory usage of the -+embedded skip list node in task_struct to about 50%. -+ -+The insertion routine has been implemented so as to use the -+dirty hack described in the CACM paper: if a random level is -+generated that is more than the current maximum level, the -+current maximum level plus one is used instead. -+ -+BFS Notes: In this implementation of skiplists, there are bidirectional -+next/prev pointers and the insert function returns a pointer to the actual -+node the value is stored. The key here is chosen by the scheduler so as to -+sort tasks according to the priority list requirements and is no longer used -+by the scheduler after insertion. The scheduler lookup, however, occurs in -+O(1) time because it is always the first item in the level 0 linked list. -+Since the task struct stores a copy of the node pointer upon skiplist_insert, -+it can also remove it much faster than the original implementation with the -+aid of prev<->next pointer manipulation and no searching. 
-+*/ -+#ifndef _LINUX_SKIP_LIST_H -+#define _LINUX_SKIP_LIST_H -+ -+#include <linux/kernel.h> -+ -+#define NUM_SKIPLIST_LEVEL (8) -+ -+struct skiplist_node { -+ int level; /* Levels in this node */ -+ struct skiplist_node *next[NUM_SKIPLIST_LEVEL]; -+ struct skiplist_node *prev[NUM_SKIPLIST_LEVEL]; -+}; -+ -+#define SKIPLIST_NODE_INIT(name) { 0,\ -+ {&name, &name, &name, &name,\ -+ &name, &name, &name, &name},\ -+ {&name, &name, &name, &name,\ -+ &name, &name, &name, &name},\ -+ } -+ -+static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node) -+{ -+ /* only level 0 ->next matters in skiplist_empty()*/ -+ WRITE_ONCE(node->next[0], node); -+} -+ -+/** -+ * FULL_INIT_SKIPLIST_NODE -- fully init a skiplist_node, expecially for header -+ * @node: the skip list node to be inited. -+ */ -+static inline void FULL_INIT_SKIPLIST_NODE(struct skiplist_node *node) -+{ -+ int i; -+ -+ node->level = 0; -+ for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) { -+ WRITE_ONCE(node->next[i], node); -+ node->prev[i] = node; -+ } -+} -+ -+/** -+ * skiplist_empty - test whether a skip list is empty -+ * @head: the skip list to test. -+ */ -+static inline int skiplist_empty(const struct skiplist_node *head) -+{ -+ return READ_ONCE(head->next[0]) == head; -+} -+ -+/** -+ * skiplist_entry - get the struct for this entry -+ * @ptr: the &struct skiplist_node pointer. -+ * @type: the type of the struct this is embedded in. -+ * @member: the name of the skiplist_node within the struct. -+ */ -+#define skiplist_entry(ptr, type, member) \ -+ container_of(ptr, type, member) -+ -+/** -+ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert -+ * function, which takes two parameters, first one is the header node of the -+ * skip list, second one is the skip list node to be inserted -+ * @func_name: the customized skip list insert function name -+ * @search_func: the search function to be used, which takes two parameters, -+ * 1st one is the itrator of skiplist_node in the list, the 2nd is the skip list -+ * node to be inserted, the function should return true if search should be -+ * continued, otherwise return false. -+ * Returns 1 if @node is inserted as the first item of skip list at level zero, -+ * otherwise 0 -+ */ -+#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\ -+static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\ -+{\ -+ struct skiplist_node *update[NUM_SKIPLIST_LEVEL];\ -+ struct skiplist_node *p, *q;\ -+ int k = head->level;\ -+\ -+ p = head;\ -+ do {\ -+ while (q = p->next[k], q != head && search_func(q, node))\ -+ p = q;\ -+ update[k] = p;\ -+ } while (--k >= 0);\ -+\ -+ k = node->level;\ -+ if (unlikely(k > head->level)) {\ -+ node->level = k = ++head->level;\ -+ update[k] = head;\ -+ }\ -+\ -+ do {\ -+ p = update[k];\ -+ q = p->next[k];\ -+ node->next[k] = q;\ -+ p->next[k] = node;\ -+ node->prev[k] = p;\ -+ q->prev[k] = node;\ -+ } while (--k >= 0);\ -+\ -+ return (p == head);\ -+} -+ -+/** -+ * skiplist_del_init -- delete skip list node from a skip list and reset it's -+ * init state -+ * @head: the header node of the skip list to be deleted from. -+ * @node: the skip list node to be deleted, the caller need to ensure @node is -+ * in skip list which @head represent. 
-+ * Returns 1 if @node is the first item of skip level at level zero, otherwise 0 -+ */ -+static inline int -+skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node) -+{ -+ int l, m = node->level; -+ -+ for (l = 0; l <= m; l++) { -+ node->prev[l]->next[l] = node->next[l]; -+ node->next[l]->prev[l] = node->prev[l]; -+ } -+ if (m == head->level && m > 0) { -+ while (head->next[m] == head && m > 0) -+ m--; -+ head->level = m; -+ } -+ INIT_SKIPLIST_NODE(node); -+ -+ return (node->prev[0] == head); -+} -+#endif /* _LINUX_SKIP_LIST_H */ -diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h -index 22627f80063e..ea555021c0fb 100644 ---- a/include/uapi/linux/sched.h -+++ b/include/uapi/linux/sched.h -@@ -37,7 +37,10 @@ - #define SCHED_FIFO 1 - #define SCHED_RR 2 - #define SCHED_BATCH 3 --/* SCHED_ISO: reserved but not implemented yet */ -+/* SCHED_ISO: Implemented in BFS/MuQSSPDS only */ -+#ifdef CONFIG_SCHED_PDS -+#define SCHED_ISO 4 -+#endif - #define SCHED_IDLE 5 - #define SCHED_DEADLINE 6 - -diff --git a/init/Kconfig b/init/Kconfig -index 4592bf7997c0..6357a0eea78b 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -64,6 +64,21 @@ config THREAD_INFO_IN_TASK - - menu "General setup" - -+config SCHED_PDS -+ bool "PDS-mq cpu scheduler" -+ help -+ The Priority and Deadline based Skip list multiple queue CPU -+ Scheduler for excellent interactivity and responsiveness on the -+ desktop and solid scalability on normal hardware and commodity -+ servers. -+ -+ Currently incompatible with the Group CPU scheduler, and RCU TORTURE -+ TEST so these options are disabled. -+ -+ Say Y here. -+ default y -+ -+ - config BROKEN - bool - -@@ -702,6 +717,7 @@ config NUMA_BALANCING - depends on ARCH_SUPPORTS_NUMA_BALANCING - depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY - depends on SMP && NUMA && MIGRATION -+ depends on !SCHED_PDS - help - This option adds support for automatic NUMA aware memory/task placement. - The mechanism is quite primitive and is based on migrating memory when -@@ -811,7 +827,7 @@ menuconfig CGROUP_SCHED - bandwidth allocation to such task groups. It uses cgroups to group - tasks. - --if CGROUP_SCHED -+if CGROUP_SCHED && !SCHED_PDS - config FAIR_GROUP_SCHED - bool "Group scheduling for SCHED_OTHER" - depends on CGROUP_SCHED -@@ -918,6 +934,7 @@ config CGROUP_DEVICE - - config CGROUP_CPUACCT - bool "Simple CPU accounting controller" -+ depends on !SCHED_PDS - help - Provides a simple controller for monitoring the - total CPU consumed by the tasks in a cgroup. 
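The skip_list.h API added above is built around embedded nodes and a caller-defined insert function. A kernel-style usage sketch follows; the demo_* names are illustrative assumptions, not part of the patch.

#include <linux/skip_list.h>

struct demo_item {
	u64 key;                    /* sort key, smallest first        */
	struct skiplist_node node;  /* embedded: no kmalloc per insert */
};

/* Keep searching while the iterator's key is <= the new node's key;
 * mirrors pds_skiplist_task_search() later in this patch. */
static inline bool demo_search(struct skiplist_node *it,
			       struct skiplist_node *node)
{
	return skiplist_entry(it, struct demo_item, node)->key <=
		skiplist_entry(node, struct demo_item, node)->key;
}

DEFINE_SKIPLIST_INSERT_FUNC(demo_insert, demo_search);

static struct skiplist_node demo_head = SKIPLIST_NODE_INIT(demo_head);

static void demo_add(struct demo_item *item, int random_level)
{
	INIT_SKIPLIST_NODE(&item->node);     /* mark as not queued        */
	item->node.level = random_level;     /* caller supplies the level */
	if (demo_insert(&demo_head, &item->node)) {
		/* returned 1: item is now first at level 0, so a
		 * "pick next" lookup is O(1) via demo_head.next[0] */
	}
}

static void demo_del(struct demo_item *item)
{
	skiplist_del_init(&demo_head, &item->node);
}

This is exactly the pattern PDS instantiates later in the patch: pds_skiplist_insert is defined from pds_skiplist_task_search, tasks embed sl_node, and the key is the packed priodl value.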
-@@ -1036,6 +1053,7 @@ config CHECKPOINT_RESTORE - - config SCHED_AUTOGROUP - bool "Automatic process group scheduling" -+ depends on !SCHED_PDS - select CGROUPS - select CGROUP_SCHED - select FAIR_GROUP_SCHED -diff --git a/init/init_task.c b/init/init_task.c -index c70ef656d0f4..051fb66f53b7 100644 ---- a/init/init_task.c -+++ b/init/init_task.c -@@ -60,6 +60,125 @@ struct task_struct init_task - __init_task_data - #endif - = { -+#ifdef CONFIG_SCHED_PDS -+#ifdef CONFIG_THREAD_INFO_IN_TASK -+ .thread_info = INIT_THREAD_INFO(init_task), -+ .stack_refcount = ATOMIC_INIT(1), -+#endif -+ .state = 0, -+ .stack = init_stack, -+ .usage = ATOMIC_INIT(2), -+ .flags = PF_KTHREAD, -+ .prio = NORMAL_PRIO, -+ .static_prio = MAX_PRIO - 20, -+ .normal_prio = NORMAL_PRIO, -+ .deadline = 0, /* PDS only */ -+ .policy = SCHED_NORMAL, -+ .cpus_allowed = CPU_MASK_ALL, -+ .nr_cpus_allowed= NR_CPUS, -+ .mm = NULL, -+ .active_mm = &init_mm, -+ .restart_block = { -+ .fn = do_no_restart_syscall, -+ }, -+ .sl_level = 0, /* PDS only */ -+ .sl_node = SKIPLIST_NODE_INIT(init_task.sl_node), /* PDS only */ -+ .time_slice = HZ, /* PDS only */ -+ .tasks = LIST_HEAD_INIT(init_task.tasks), -+#ifdef CONFIG_SMP -+ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO), -+#endif -+#ifdef CONFIG_CGROUP_SCHED -+ .sched_task_group = &root_task_group, -+#endif -+ .ptraced = LIST_HEAD_INIT(init_task.ptraced), -+ .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry), -+ .real_parent = &init_task, -+ .parent = &init_task, -+ .children = LIST_HEAD_INIT(init_task.children), -+ .sibling = LIST_HEAD_INIT(init_task.sibling), -+ .group_leader = &init_task, -+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), -+ RCU_POINTER_INITIALIZER(cred, &init_cred), -+ .comm = INIT_TASK_COMM, -+ .thread = INIT_THREAD, -+ .fs = &init_fs, -+ .files = &init_files, -+ .signal = &init_signals, -+ .sighand = &init_sighand, -+ .nsproxy = &init_nsproxy, -+ .pending = { -+ .list = LIST_HEAD_INIT(init_task.pending.list), -+ .signal = {{0}} -+ }, -+ .blocked = {{0}}, -+ .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock), -+ .journal_info = NULL, -+ INIT_CPU_TIMERS(init_task) -+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), -+ .timer_slack_ns = 50000, /* 50 usec default slack */ -+ .thread_pid = &init_struct_pid, -+ .thread_group = LIST_HEAD_INIT(init_task.thread_group), -+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), -+#ifdef CONFIG_AUDITSYSCALL -+ .loginuid = INVALID_UID, -+ .sessionid = AUDIT_SID_UNSET, -+#endif -+#ifdef CONFIG_PERF_EVENTS -+ .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex), -+ .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list), -+#endif -+#ifdef CONFIG_PREEMPT_RCU -+ .rcu_read_lock_nesting = 0, -+ .rcu_read_unlock_special.s = 0, -+ .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry), -+ .rcu_blocked_node = NULL, -+#endif -+#ifdef CONFIG_TASKS_RCU -+ .rcu_tasks_holdout = false, -+ .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list), -+ .rcu_tasks_idle_cpu = -1, -+#endif -+#ifdef CONFIG_CPUSETS -+ .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq), -+#endif -+#ifdef CONFIG_RT_MUTEXES -+ .pi_waiters = RB_ROOT_CACHED, -+ .pi_top_task = NULL, -+#endif -+ INIT_PREV_CPUTIME(init_task) -+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -+ .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount), -+ .vtime.starttime = 0, -+ .vtime.state = VTIME_SYS, -+#endif -+#ifdef CONFIG_NUMA_BALANCING -+ .numa_preferred_nid = -1, -+ .numa_group = NULL, -+ .numa_faults = 
NULL, -+#endif -+#ifdef CONFIG_KASAN -+ .kasan_depth = 1, -+#endif -+#ifdef CONFIG_TRACE_IRQFLAGS -+ .softirqs_enabled = 1, -+#endif -+#ifdef CONFIG_LOCKDEP -+ .lockdep_recursion = 0, -+#endif -+#ifdef CONFIG_FUNCTION_GRAPH_TRACER -+ .ret_stack = NULL, -+#endif -+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT) -+ .trace_recursion = 0, -+#endif -+#ifdef CONFIG_LIVEPATCH -+ .patch_state = KLP_UNDEFINED, -+#endif -+#ifdef CONFIG_SECURITY -+ .security = NULL, -+#endif -+#else /* CONFIG_SCHED_PDS */ - #ifdef CONFIG_THREAD_INFO_IN_TASK - .thread_info = INIT_THREAD_INFO(init_task), - .stack_refcount = REFCOUNT_INIT(1), -@@ -180,6 +299,7 @@ struct task_struct init_task - #ifdef CONFIG_SECURITY - .security = NULL, - #endif -+#endif /* CONFIG_SCHED_PDS */ - }; - EXPORT_SYMBOL(init_task); - -diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 4834c4214e9c..a1f36086b861 100644 ---- a/kernel/cgroup/cpuset.c -+++ b/kernel/cgroup/cpuset.c -@@ -673,7 +673,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) - return ret; - } - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) - /* - * Helper routine for generate_sched_domains(). - * Do cpusets a, b have overlapping effective cpus_allowed masks? -@@ -989,7 +989,7 @@ static void rebuild_sched_domains_locked(void) - out: - put_online_cpus(); - } --#else /* !CONFIG_SMP */ -+#else /* !CONFIG_SMP || CONFIG_SCHED_PDS */ - static void rebuild_sched_domains_locked(void) - { - } -diff --git a/kernel/delayacct.c b/kernel/delayacct.c -index 2a12b988c717..dba268ca115f 100644 ---- a/kernel/delayacct.c -+++ b/kernel/delayacct.c -@@ -115,7 +115,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) - */ - t1 = tsk->sched_info.pcount; - t2 = tsk->sched_info.run_delay; -- t3 = tsk->se.sum_exec_runtime; -+ t3 = tsk_seruntime(tsk); - - d->cpu_count += t1; - -diff --git a/kernel/exit.c b/kernel/exit.c -index 2166c2d92ddc..c4eef2f20036 100644 ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -130,7 +130,7 @@ static void __exit_signal(struct task_struct *tsk) - sig->curr_target = next_thread(tsk); - } - -- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, -+ add_device_randomness((const void*) &tsk_seruntime(tsk), - sizeof(unsigned long long)); - - /* -@@ -151,7 +151,7 @@ static void __exit_signal(struct task_struct *tsk) - sig->inblock += task_io_get_inblock(tsk); - sig->oublock += task_io_get_oublock(tsk); - task_io_accounting_add(&sig->ioac, &tsk->ioac); -- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; -+ sig->sum_sched_runtime += tsk_seruntime(tsk); - sig->nr_threads--; - __unhash_process(tsk, group_dead); - write_sequnlock(&sig->stats_lock); -diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c -index 9c89ae8b337a..cac70077b88c 100644 ---- a/kernel/livepatch/transition.c -+++ b/kernel/livepatch/transition.c -@@ -316,7 +316,11 @@ static bool klp_try_switch_task(struct task_struct *task) - */ - rq = task_rq_lock(task, &flags); - -+#ifdef CONFIG_SCHED_PDS -+ if (task_running(task) && task != current) { -+#else - if (task_running(rq, task) && task != current) { -+#endif - snprintf(err_buf, STACK_ERR_BUF_SIZE, - "%s: %s:%d is running\n", __func__, task->comm, - task->pid); -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 978d63a8261c..4aeb8aec5e88 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -228,7 +228,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - * Only use with 
rt_mutex_waiter_{less,equal}() - */ - #define task_to_waiter(p) \ -- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } -+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) } - - static inline int - rt_mutex_waiter_less(struct rt_mutex_waiter *left, -@@ -680,7 +680,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * the values of the node being removed. - */ - waiter->prio = task->prio; -- waiter->deadline = task->dl.deadline; -+ waiter->deadline = __tsk_deadline(task); - - rt_mutex_enqueue(lock, waiter); - -@@ -954,7 +954,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - waiter->task = task; - waiter->lock = lock; - waiter->prio = task->prio; -- waiter->deadline = task->dl.deadline; -+ waiter->deadline = __tsk_deadline(task); - - /* Get the top priority waiter on the lock */ - if (rt_mutex_has_waiters(lock)) -diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile -index 21fb5a5662b5..8ebe4e33fb5f 100644 ---- a/kernel/sched/Makefile -+++ b/kernel/sched/Makefile -@@ -16,15 +16,21 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) - CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer - endif - --obj-y += core.o loadavg.o clock.o cputime.o --obj-y += idle.o fair.o rt.o deadline.o --obj-y += wait.o wait_bit.o swait.o completion.o -- --obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o -+ifdef CONFIG_SCHED_PDS -+obj-y += pds.o -+else -+obj-y += core.o -+obj-y += fair.o rt.o deadline.o -+obj-$(CONFIG_SMP) += cpudeadline.o topology.o stop_task.o - obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o --obj-$(CONFIG_SCHEDSTATS) += stats.o - obj-$(CONFIG_SCHED_DEBUG) += debug.o - obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o -+endif -+obj-y += loadavg.o clock.o cputime.o -+obj-y += idle.o -+obj-y += wait.o wait_bit.o swait.o completion.o -+obj-$(CONFIG_SMP) += cpupri.o pelt.o -+obj-$(CONFIG_SCHEDSTATS) += stats.o - obj-$(CONFIG_CPU_FREQ) += cpufreq.o - obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o - obj-$(CONFIG_MEMBARRIER) += membarrier.o -diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c -index 3638d2377e3c..1a317fca651b 100644 ---- a/kernel/sched/cpufreq_schedutil.c -+++ b/kernel/sched/cpufreq_schedutil.c -@@ -175,6 +175,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, - return cpufreq_driver_resolve_freq(policy, freq); - } - -+#ifndef CONFIG_SCHED_PDS - /* - * This function computes an effective utilization for the given CPU, to be - * used for frequency selection given the linear relation: f = u * f_max. -@@ -282,6 +283,13 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) - - return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL); - } -+#else /* CONFIG_SCHED_PDS */ -+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) -+{ -+ sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu); -+ return sg_cpu->max; -+} -+#endif - - /** - * sugov_iowait_reset() - Reset the IO boost status of a CPU. 
-@@ -424,7 +432,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } - */ - static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) - { -+#ifndef CONFIG_SCHED_PDS - if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) -+#endif - sg_policy->need_freq_update = true; - } - -@@ -665,6 +675,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) - } - - ret = sched_setattr_nocheck(thread, &attr); -+ - if (ret) { - kthread_stop(thread); - pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); -@@ -897,6 +908,7 @@ static int __init sugov_register(void) - fs_initcall(sugov_register); - - #ifdef CONFIG_ENERGY_MODEL -+#ifndef CONFIG_SCHED_PDS - extern bool sched_energy_update; - extern struct mutex sched_energy_mutex; - -@@ -927,4 +939,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - } - - } -+#else /* CONFIG_SCHED_PDS */ -+void sched_cpufreq_governor_change(struct cpufreq_policy *policy, -+ struct cpufreq_governor *old_gov) -+{ -+} -+#endif - #endif -diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c -index ba4a143bdcf3..76a9cbb51f55 100644 ---- a/kernel/sched/cputime.c -+++ b/kernel/sched/cputime.c -@@ -121,7 +121,12 @@ void account_user_time(struct task_struct *p, u64 cputime) - p->utime += cputime; - account_group_user_time(p, cputime); - -+#ifdef CONFIG_SCHED_PDS -+ index = (task_nice(p) > 0 || task_running_idle(p)) ? CPUTIME_NICE : -+ CPUTIME_USER; -+#else - index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; -+#endif - - /* Add user time to cpustat. */ - task_group_account_field(p, index, cputime); -@@ -145,7 +150,11 @@ void account_guest_time(struct task_struct *p, u64 cputime) - p->gtime += cputime; - - /* Add guest time to cpustat. */ -+#ifdef CONFIG_SCHED_PDS -+ if (task_nice(p) > 0 || task_running_idle(p)) { -+#else - if (task_nice(p) > 0) { -+#endif - cpustat[CPUTIME_NICE] += cputime; - cpustat[CPUTIME_GUEST_NICE] += cputime; - } else { -@@ -268,7 +277,7 @@ static inline u64 account_other_time(u64 max) - #ifdef CONFIG_64BIT - static inline u64 read_sum_exec_runtime(struct task_struct *t) - { -- return t->se.sum_exec_runtime; -+ return tsk_seruntime(t); - } - #else - static u64 read_sum_exec_runtime(struct task_struct *t) -@@ -278,7 +287,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) - struct rq *rq; - - rq = task_rq_lock(t, &rf); -- ns = t->se.sum_exec_runtime; -+ ns = tsk_seruntime(t); - task_rq_unlock(rq, t, &rf); - - return ns; -@@ -662,7 +671,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, - void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) - { - struct task_cputime cputime = { -- .sum_exec_runtime = p->se.sum_exec_runtime, -+ .sum_exec_runtime = tsk_seruntime(p), - }; - - task_cputime(p, &cputime.utime, &cputime.stime); -diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c -index f5516bae0c1b..fe1d4aeb6d69 100644 ---- a/kernel/sched/idle.c -+++ b/kernel/sched/idle.c -@@ -353,6 +353,7 @@ void cpu_startup_entry(enum cpuhp_state state) - do_idle(); - } - -+#ifndef CONFIG_SCHED_PDS - /* - * idle-task scheduling class. 
- */ -@@ -465,3 +466,4 @@ const struct sched_class idle_sched_class = { - .switched_to = switched_to_idle, - .update_curr = update_curr_idle, - }; -+#endif -diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c -new file mode 100644 -index 000000000000..80dbb9866e01 ---- /dev/null -+++ b/kernel/sched/pds.c -@@ -0,0 +1,6554 @@ -+/* -+ * kernel/sched/pds.c, was kernel/sched.c -+ * -+ * PDS-mq Core kernel scheduler code and related syscalls -+ * -+ * Copyright (C) 1991-2002 Linus Torvalds -+ * -+ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes -+ * a whole lot of those previous things. -+ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel -+ * scheduler by Alfred Chen. -+ */ -+#include "pds_sched.h" -+ -+#include <linux/sched/rt.h> -+ -+#include <linux/context_tracking.h> -+#include <linux/compat.h> -+#include <linux/blkdev.h> -+#include <linux/delayacct.h> -+#include <linux/freezer.h> -+#include <linux/init_task.h> -+#include <linux/kprobes.h> -+#include <linux/mmu_context.h> -+#include <linux/nmi.h> -+#include <linux/profile.h> -+#include <linux/rcupdate_wait.h> -+#include <linux/security.h> -+#include <linux/syscalls.h> -+#include <linux/wait_bit.h> -+ -+#include <linux/kcov.h> -+ -+#include <asm/switch_to.h> -+ -+#include "../workqueue_internal.h" -+#include "../smpboot.h" -+ -+#include "pelt.h" -+ -+#define CREATE_TRACE_POINTS -+#include <trace/events/sched.h> -+ -+ -+#define rt_prio(prio) ((prio) < MAX_RT_PRIO) -+#define rt_task(p) rt_prio((p)->prio) -+#define rt_policy(policy) ((policy) == SCHED_FIFO || \ -+ (policy) == SCHED_RR || \ -+ (policy) == SCHED_ISO) -+#define task_has_rt_policy(p) (rt_policy((p)->policy)) -+ -+#define idle_policy(policy) ((policy) == SCHED_IDLE) -+#define idleprio_task(p) unlikely(idle_policy((p)->policy)) -+ -+#define STOP_PRIO (MAX_RT_PRIO - 1) -+ -+/* -+ * Some helpers for converting to/from various scales. Use shifts to get -+ * approximate multiples of ten for less overhead. -+ */ -+#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) -+#define JIFFY_NS (1000000000 / HZ) -+#define HALF_JIFFY_NS (1000000000 / HZ / 2) -+#define HALF_JIFFY_US (1000000 / HZ / 2) -+#define MS_TO_NS(TIME) ((TIME) << 20) -+#define MS_TO_US(TIME) ((TIME) << 10) -+#define NS_TO_MS(TIME) ((TIME) >> 20) -+#define NS_TO_US(TIME) ((TIME) >> 10) -+#define US_TO_NS(TIME) ((TIME) << 10) -+ -+#define RESCHED_US (100) /* Reschedule if less than this many μs left */ -+ -+enum { -+ BASE_CPU_AFFINITY_CHK_LEVEL = 1, -+#ifdef CONFIG_SCHED_SMT -+ SMT_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER, -+#endif -+#ifdef CONFIG_SCHED_MC -+ MC_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER, -+#endif -+ NR_CPU_AFFINITY_CHK_LEVEL -+}; -+ -+static inline void print_scheduler_version(void) -+{ -+ printk(KERN_INFO "pds: PDS-mq CPU Scheduler 0.99o by Alfred Chen.\n"); -+} -+ -+/* -+ * This is the time all tasks within the same priority round robin. -+ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus. -+ * Tunable via /proc interface. 
-+ */ -+#define SCHED_DEFAULT_RR (4) -+int rr_interval __read_mostly = SCHED_DEFAULT_RR; -+ -+static int __init rr_interval_set(char *str) -+{ -+ u32 rr; -+ -+ pr_info("rr_interval: "); -+ if (kstrtouint(str, 0, &rr)) { -+ pr_cont("using default of %u, unable to parse %s\n", -+ rr_interval, str); -+ return 1; -+ } -+ -+ rr_interval = rr; -+ pr_cont("%d\n", rr_interval); -+ -+ return 1; -+} -+__setup("rr_interval=", rr_interval_set); -+ -+ -+static const u64 sched_prio2deadline[NICE_WIDTH] = { -+/* -20 */ 6291456, 6920601, 7612661, 8373927, 9211319, -+/* -15 */ 10132450, 11145695, 12260264, 13486290, 14834919, -+/* -10 */ 16318410, 17950251, 19745276, 21719803, 23891783, -+/* -5 */ 26280961, 28909057, 31799962, 34979958, 38477953, -+/* 0 */ 42325748, 46558322, 51214154, 56335569, 61969125, -+/* 5 */ 68166037, 74982640, 82480904, 90728994, 99801893, -+/* 10 */ 109782082, 120760290, 132836319, 146119950, 160731945, -+/* 15 */ 176805139, 194485652, 213934217, 235327638, 258860401 -+}; -+ -+/** -+ * sched_yield_type - Choose what sort of yield sched_yield will perform. -+ * 0: No yield. -+ * 1: Yield only to better priority/deadline tasks. (default) -+ * 2: Expire timeslice and recalculate deadline. -+ */ -+int sched_yield_type __read_mostly = 1; -+ -+/* -+ * The quota handed out to tasks of all priority levels when refilling their -+ * time_slice. -+ */ -+static inline int timeslice(void) -+{ -+ return MS_TO_US(rr_interval); -+} -+ -+#ifdef CONFIG_SMP -+enum { -+SCHED_RQ_EMPTY = 0, -+SCHED_RQ_IDLE, -+SCHED_RQ_NORMAL_0, -+SCHED_RQ_NORMAL_1, -+SCHED_RQ_NORMAL_2, -+SCHED_RQ_NORMAL_3, -+SCHED_RQ_NORMAL_4, -+SCHED_RQ_NORMAL_5, -+SCHED_RQ_NORMAL_6, -+SCHED_RQ_NORMAL_7, -+SCHED_RQ_ISO, -+SCHED_RQ_RT, -+NR_SCHED_RQ_QUEUED_LEVEL -+}; -+ -+static cpumask_t sched_rq_queued_masks[NR_SCHED_RQ_QUEUED_LEVEL] -+____cacheline_aligned_in_smp; -+ -+static DECLARE_BITMAP(sched_rq_queued_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL) -+____cacheline_aligned_in_smp; -+ -+static cpumask_t sched_rq_pending_masks[NR_SCHED_RQ_QUEUED_LEVEL] -+____cacheline_aligned_in_smp; -+ -+static DECLARE_BITMAP(sched_rq_pending_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL) -+____cacheline_aligned_in_smp; -+ -+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_CHK_LEVEL], sched_cpu_affinity_chk_masks); -+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_start_mask); -+DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_chk_end_masks); -+ -+#ifdef CONFIG_SCHED_SMT -+DEFINE_PER_CPU(int, sched_sibling_cpu); -+DEFINE_STATIC_KEY_FALSE(sched_smt_present); -+EXPORT_SYMBOL_GPL(sched_smt_present); -+ -+static cpumask_t sched_cpu_sg_idle_mask ____cacheline_aligned_in_smp; -+ -+#ifdef CONFIG_SMT_NICE -+/* -+ * Preemptible sibling group mask -+ * Which all sibling cpus are running at PRIO_LIMIT or IDLE_PRIO -+ */ -+static cpumask_t sched_cpu_psg_mask ____cacheline_aligned_in_smp; -+/* -+ * SMT supressed mask -+ * When a cpu is running task with NORMAL/ISO/RT policy, its sibling cpu -+ * will be supressed to run IDLE priority task. -+ */ -+static cpumask_t sched_smt_supressed_mask ____cacheline_aligned_in_smp; -+#endif /* CONFIG_SMT_NICE */ -+#endif -+ -+static int sched_rq_prio[NR_CPUS] ____cacheline_aligned; -+ -+/* -+ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of -+ * the domain), this allows us to quickly tell if two cpus are in the same cache -+ * domain, see cpus_share_cache(). 
-+ */ -+DEFINE_PER_CPU(int, sd_llc_id); -+ -+int __weak arch_sd_sibling_asym_packing(void) -+{ -+ return 0*SD_ASYM_PACKING; -+} -+#else -+struct rq *uprq; -+#endif /* CONFIG_SMP */ -+ -+static DEFINE_MUTEX(sched_hotcpu_mutex); -+ -+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -+ -+#ifndef prepare_arch_switch -+# define prepare_arch_switch(next) do { } while (0) -+#endif -+#ifndef finish_arch_post_lock_switch -+# define finish_arch_post_lock_switch() do { } while (0) -+#endif -+ -+/* -+ * Context: p->pi_lock -+ */ -+static inline struct rq -+*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock) -+{ -+ struct rq *rq; -+ for (;;) { -+ rq = task_rq(p); -+ if (p->on_cpu || task_on_rq_queued(p)) { -+ raw_spin_lock(&rq->lock); -+ if (likely((p->on_cpu || task_on_rq_queued(p)) -+ && rq == task_rq(p))) { -+ *plock = &rq->lock; -+ return rq; -+ } -+ raw_spin_unlock(&rq->lock); -+ } else if (task_on_rq_migrating(p)) { -+ do { -+ cpu_relax(); -+ } while (unlikely(task_on_rq_migrating(p))); -+ } else { -+ *plock = NULL; -+ return rq; -+ } -+ } -+} -+ -+static inline void -+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock) -+{ -+ if (NULL != lock) -+ raw_spin_unlock(lock); -+} -+ -+static inline struct rq -+*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, -+ unsigned long *flags) -+{ -+ struct rq *rq; -+ for (;;) { -+ rq = task_rq(p); -+ if (p->on_cpu || task_on_rq_queued(p)) { -+ raw_spin_lock_irqsave(&rq->lock, *flags); -+ if (likely((p->on_cpu || task_on_rq_queued(p)) -+ && rq == task_rq(p))) { -+ *plock = &rq->lock; -+ return rq; -+ } -+ raw_spin_unlock_irqrestore(&rq->lock, *flags); -+ } else if (task_on_rq_migrating(p)) { -+ do { -+ cpu_relax(); -+ } while (unlikely(task_on_rq_migrating(p))); -+ } else { -+ raw_spin_lock_irqsave(&p->pi_lock, *flags); -+ if (likely(!p->on_cpu && !p->on_rq && -+ rq == task_rq(p))) { -+ *plock = &p->pi_lock; -+ return rq; -+ } -+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags); -+ } -+ } -+} -+ -+static inline void -+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, -+ unsigned long *flags) -+{ -+ raw_spin_unlock_irqrestore(lock, *flags); -+} -+ -+/* -+ * __task_rq_lock - lock the rq @p resides on. -+ */ -+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) -+ __acquires(rq->lock) -+{ -+ struct rq *rq; -+ -+ lockdep_assert_held(&p->pi_lock); -+ -+ for (;;) { -+ rq = task_rq(p); -+ raw_spin_lock(&rq->lock); -+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) -+ return rq; -+ raw_spin_unlock(&rq->lock); -+ -+ while (unlikely(task_on_rq_migrating(p))) -+ cpu_relax(); -+ } -+} -+ -+/* -+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. -+ */ -+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) -+ __acquires(p->pi_lock) -+ __acquires(rq->lock) -+{ -+ struct rq *rq; -+ -+ for (;;) { -+ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); -+ rq = task_rq(p); -+ raw_spin_lock(&rq->lock); -+ /* -+ * move_queued_task() task_rq_lock() -+ * -+ * ACQUIRE (rq->lock) -+ * [S] ->on_rq = MIGRATING [L] rq = task_rq() -+ * WMB (__set_task_cpu()) ACQUIRE (rq->lock); -+ * [S] ->cpu = new_cpu [L] task_rq() -+ * [L] ->on_rq -+ * RELEASE (rq->lock) -+ * -+ * If we observe the old CPU in task_rq_lock(), the acquire of -+ * the old rq->lock will fully serialize against the stores. 
-+ * -+ * If we observe the new CPU in task_rq_lock(), the address -+ * dependency headed by '[L] rq = task_rq()' and the acquire -+ * will pair with the WMB to ensure we then also see migrating. -+ */ -+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { -+ return rq; -+ } -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); -+ -+ while (unlikely(task_on_rq_migrating(p))) -+ cpu_relax(); -+ } -+} -+ -+/* -+ * RQ-clock updating methods: -+ */ -+ -+static void update_rq_clock_task(struct rq *rq, s64 delta) -+{ -+/* -+ * In theory, the compile should just see 0 here, and optimize out the call -+ * to sched_rt_avg_update. But I don't trust it... -+ */ -+ s64 __maybe_unused steal = 0, irq_delta = 0; -+ -+#ifdef CONFIG_IRQ_TIME_ACCOUNTING -+ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; -+ -+ /* -+ * Since irq_time is only updated on {soft,}irq_exit, we might run into -+ * this case when a previous update_rq_clock() happened inside a -+ * {soft,}irq region. -+ * -+ * When this happens, we stop ->clock_task and only update the -+ * prev_irq_time stamp to account for the part that fit, so that a next -+ * update will consume the rest. This ensures ->clock_task is -+ * monotonic. -+ * -+ * It does however cause some slight miss-attribution of {soft,}irq -+ * time, a more accurate solution would be to update the irq_time using -+ * the current rq->clock timestamp, except that would require using -+ * atomic ops. -+ */ -+ if (irq_delta > delta) -+ irq_delta = delta; -+ -+ rq->prev_irq_time += irq_delta; -+ delta -= irq_delta; -+#endif -+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING -+ if (static_key_false((¶virt_steal_rq_enabled))) { -+ steal = paravirt_steal_clock(cpu_of(rq)); -+ steal -= rq->prev_steal_time_rq; -+ -+ if (unlikely(steal > delta)) -+ steal = delta; -+ -+ rq->prev_steal_time_rq += steal; -+ -+ delta -= steal; -+ } -+#endif -+ -+ rq->clock_task += delta; -+ -+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ -+ if ((irq_delta + steal)) -+ update_irq_load_avg(rq, irq_delta + steal); -+#endif -+} -+ -+static inline void update_rq_clock(struct rq *rq) -+{ -+ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; -+ -+ if (unlikely(delta <= 0)) -+ return; -+ rq->clock += delta; -+ update_rq_clock_task(rq, delta); -+} -+ -+static inline void update_task_priodl(struct task_struct *p) -+{ -+ p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8); -+} -+ -+/* -+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline -+ * is the key to everything. It distributes CPU fairly amongst tasks of the -+ * same nice value, it proportions CPU according to nice level, it means the -+ * task that last woke up the longest ago has the earliest deadline, thus -+ * ensuring that interactive tasks get low latency on wake up. The CPU -+ * proportion works out to the square of the virtual deadline difference, so -+ * this equation will give nice 19 3% CPU compared to nice 0. -+ */ -+static inline u64 task_deadline_diff(const struct task_struct *p) -+{ -+ return sched_prio2deadline[TASK_USER_PRIO(p)]; -+} -+ -+static inline u64 static_deadline_diff(int static_prio) -+{ -+ return sched_prio2deadline[USER_PRIO(static_prio)]; -+} -+ -+/* -+ * The time_slice is only refilled when it is empty and that is when we set a -+ * new deadline for non-rt tasks. 
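 *
 * Worked numbers (annotation derived from sched_prio2deadline[] above,
 * not part of the patch): adjacent table entries grow by a factor of
 * ~1.1 (6920601 / 6291456 ~= 1.1), so the nice 19 offset (258860401)
 * is about 6.1x the nice 0 offset (42325748); with CPU share going as
 * the square of the deadline ratio, nice 19 ends up with roughly
 * 1 / 6.1^2 ~= 2.7% of the CPU relative to nice 0 -- the "3%" quoted
 * in the deadline comment above.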
-+ */ -+static inline void time_slice_expired(struct task_struct *p, struct rq *rq) -+{ -+ p->time_slice = timeslice(); -+ if (p->prio >= NORMAL_PRIO) -+ p->deadline = rq->clock + task_deadline_diff(p); -+ -+ update_task_priodl(p); -+} -+ -+static inline struct task_struct *rq_first_queued_task(struct rq *rq) -+{ -+ struct skiplist_node *node = rq->sl_header.next[0]; -+ -+ if (node == &rq->sl_header) -+ return rq->idle; -+ -+ return skiplist_entry(node, struct task_struct, sl_node); -+} -+ -+static inline struct task_struct *rq_second_queued_task(struct rq *rq) -+{ -+ struct skiplist_node *node = rq->sl_header.next[0]->next[0]; -+ -+ if (node == &rq->sl_header) -+ return rq->idle; -+ -+ return skiplist_entry(node, struct task_struct, sl_node); -+} -+ -+static inline int is_second_in_rq(struct task_struct *p, struct rq *rq) -+{ -+ return (p->sl_node.prev[0]->prev[0] == &rq->sl_header); -+} -+ -+static const int task_dl_hash_tbl[] = { -+/* 0 4 8 12 */ -+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, -+/* 16 20 24 28 */ -+ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 7 -+}; -+ -+static inline int -+task_deadline_level(const struct task_struct *p, const struct rq *rq) -+{ -+ u64 delta = (rq->clock + sched_prio2deadline[39] - p->deadline) >> 23; -+ -+ delta = min((size_t)delta, ARRAY_SIZE(task_dl_hash_tbl) - 1); -+ return task_dl_hash_tbl[delta]; -+} -+ -+/* -+ * cmpxchg based fetch_or, macro so it works for different integer types -+ */ -+#define fetch_or(ptr, mask) \ -+ ({ \ -+ typeof(ptr) _ptr = (ptr); \ -+ typeof(mask) _mask = (mask); \ -+ typeof(*_ptr) _old, _val = *_ptr; \ -+ \ -+ for (;;) { \ -+ _old = cmpxchg(_ptr, _val, _val | _mask); \ -+ if (_old == _val) \ -+ break; \ -+ _val = _old; \ -+ } \ -+ _old; \ -+}) -+ -+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) -+/* -+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, -+ * this avoids any races wrt polling state changes and thereby avoids -+ * spurious IPIs. -+ */ -+static bool set_nr_and_not_polling(struct task_struct *p) -+{ -+ struct thread_info *ti = task_thread_info(p); -+ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); -+} -+ -+/* -+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. -+ * -+ * If this returns true, then the idle task promises to call -+ * sched_ttwu_pending() and reschedule soon. 
-+ */ -+static bool set_nr_if_polling(struct task_struct *p) -+{ -+ struct thread_info *ti = task_thread_info(p); -+ typeof(ti->flags) old, val = READ_ONCE(ti->flags); -+ -+ for (;;) { -+ if (!(val & _TIF_POLLING_NRFLAG)) -+ return false; -+ if (val & _TIF_NEED_RESCHED) -+ return true; -+ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); -+ if (old == val) -+ break; -+ val = old; -+ } -+ return true; -+} -+ -+#else -+static bool set_nr_and_not_polling(struct task_struct *p) -+{ -+ set_tsk_need_resched(p); -+ return true; -+} -+ -+#ifdef CONFIG_SMP -+static bool set_nr_if_polling(struct task_struct *p) -+{ -+ return false; -+} -+#endif -+#endif -+ -+#ifdef CONFIG_SMP -+#ifdef CONFIG_SMT_NICE -+static void resched_cpu_if_curr_is(int cpu, int priority) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ -+ rcu_read_lock(); -+ -+ if (rcu_dereference(rq->curr)->prio != priority) -+ goto out; -+ -+ if (set_nr_if_polling(rq->idle)) { -+ trace_sched_wake_idle_without_ipi(cpu); -+ } else { -+ if (!do_raw_spin_trylock(&rq->lock)) -+ goto out; -+ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_); -+ -+ if (priority == rq->curr->prio) -+ smp_send_reschedule(cpu); -+ /* Else CPU is not idle, do nothing here */ -+ -+ spin_release(&rq->lock.dep_map, 1, _RET_IP_); -+ do_raw_spin_unlock(&rq->lock); -+ } -+ -+out: -+ rcu_read_unlock(); -+} -+#endif /* CONFIG_SMT_NICE */ -+ -+static inline bool -+__update_cpumasks_bitmap(int cpu, unsigned long *plevel, unsigned long level, -+ cpumask_t cpumasks[], unsigned long bitmap[]) -+{ -+ if (*plevel == level) -+ return false; -+ -+ cpumask_clear_cpu(cpu, cpumasks + *plevel); -+ if (cpumask_empty(cpumasks + *plevel)) -+ clear_bit(*plevel, bitmap); -+ cpumask_set_cpu(cpu, cpumasks + level); -+ set_bit(level, bitmap); -+ -+ *plevel = level; -+ -+ return true; -+} -+ -+static inline int -+task_running_policy_level(const struct task_struct *p, const struct rq *rq) -+{ -+ int prio = p->prio; -+ -+ if (NORMAL_PRIO == prio) -+ return SCHED_RQ_NORMAL_0 + task_deadline_level(p, rq); -+ -+ if (ISO_PRIO == prio) -+ return SCHED_RQ_ISO; -+ if (prio < MAX_RT_PRIO) -+ return SCHED_RQ_RT; -+ return PRIO_LIMIT - prio; -+} -+ -+static inline void update_sched_rq_queued_masks_normal(struct rq *rq) -+{ -+ struct task_struct *p = rq_first_queued_task(rq); -+ -+ if (p->prio != NORMAL_PRIO) -+ return; -+ -+ __update_cpumasks_bitmap(cpu_of(rq), &rq->queued_level, -+ task_running_policy_level(p, rq), -+ &sched_rq_queued_masks[0], -+ &sched_rq_queued_masks_bitmap[0]); -+} -+ -+#ifdef CONFIG_SMT_NICE -+static inline void update_sched_cpu_psg_mask(const int cpu) -+{ -+ cpumask_t tmp; -+ -+ cpumask_or(&tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY], -+ &sched_rq_queued_masks[SCHED_RQ_IDLE]); -+ cpumask_and(&tmp, &tmp, cpu_smt_mask(cpu)); -+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu))) -+ cpumask_or(&sched_cpu_psg_mask, &sched_cpu_psg_mask, -+ cpu_smt_mask(cpu)); -+ else -+ cpumask_andnot(&sched_cpu_psg_mask, &sched_cpu_psg_mask, -+ cpu_smt_mask(cpu)); -+} -+#endif -+ -+static inline void update_sched_rq_queued_masks(struct rq *rq) -+{ -+ int cpu = cpu_of(rq); -+ struct task_struct *p = rq_first_queued_task(rq); -+ unsigned long level; -+#ifdef CONFIG_SCHED_SMT -+ unsigned long last_level = rq->queued_level; -+#endif -+ -+ level = task_running_policy_level(p, rq); -+ sched_rq_prio[cpu] = p->prio; -+ -+ if (!__update_cpumasks_bitmap(cpu, &rq->queued_level, level, -+ &sched_rq_queued_masks[0], -+ &sched_rq_queued_masks_bitmap[0])) -+ return; -+ -+#ifdef CONFIG_SCHED_SMT -+ if (cpu == 
per_cpu(sched_sibling_cpu, cpu)) -+ return; -+ -+ if (SCHED_RQ_EMPTY == last_level) { -+ cpumask_andnot(&sched_cpu_sg_idle_mask, &sched_cpu_sg_idle_mask, -+ cpu_smt_mask(cpu)); -+ } else if (SCHED_RQ_EMPTY == level) { -+ cpumask_t tmp; -+ -+ cpumask_and(&tmp, cpu_smt_mask(cpu), -+ &sched_rq_queued_masks[SCHED_RQ_EMPTY]); -+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu))) -+ cpumask_or(&sched_cpu_sg_idle_mask, cpu_smt_mask(cpu), -+ &sched_cpu_sg_idle_mask); -+ } -+ -+#ifdef CONFIG_SMT_NICE -+ if (level <= SCHED_RQ_IDLE && last_level > SCHED_RQ_IDLE) { -+ cpumask_clear_cpu(per_cpu(sched_sibling_cpu, cpu), -+ &sched_smt_supressed_mask); -+ update_sched_cpu_psg_mask(cpu); -+ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), PRIO_LIMIT); -+ } else if (last_level <= SCHED_RQ_IDLE && level > SCHED_RQ_IDLE) { -+ cpumask_set_cpu(per_cpu(sched_sibling_cpu, cpu), -+ &sched_smt_supressed_mask); -+ update_sched_cpu_psg_mask(cpu); -+ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), IDLE_PRIO); -+ } -+#endif /* CONFIG_SMT_NICE */ -+#endif -+} -+ -+static inline void update_sched_rq_pending_masks(struct rq *rq) -+{ -+ unsigned long level; -+ struct task_struct *p = rq_second_queued_task(rq); -+ -+ level = task_running_policy_level(p, rq); -+ -+ __update_cpumasks_bitmap(cpu_of(rq), &rq->pending_level, level, -+ &sched_rq_pending_masks[0], -+ &sched_rq_pending_masks_bitmap[0]); -+} -+ -+#else /* CONFIG_SMP */ -+static inline void update_sched_rq_queued_masks(struct rq *rq) {} -+static inline void update_sched_rq_queued_masks_normal(struct rq *rq) {} -+static inline void update_sched_rq_pending_masks(struct rq *rq) {} -+#endif -+ -+#ifdef CONFIG_NO_HZ_FULL -+/* -+ * Tick may be needed by tasks in the runqueue depending on their policy and -+ * requirements. If tick is needed, lets send the target an IPI to kick it out -+ * of nohz mode if necessary. -+ */ -+static inline void sched_update_tick_dependency(struct rq *rq) -+{ -+ int cpu; -+ -+ if (!tick_nohz_full_enabled()) -+ return; -+ -+ cpu = cpu_of(rq); -+ -+ if (!tick_nohz_full_cpu(cpu)) -+ return; -+ -+ if (rq->nr_running < 2) -+ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); -+ else -+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); -+} -+#else /* !CONFIG_NO_HZ_FULL */ -+static inline void sched_update_tick_dependency(struct rq *rq) { } -+#endif -+ -+/* -+ * Removing from the runqueue. Deleting a task from the skip list is done -+ * via the stored node reference in the task struct and does not require a full -+ * look up. Thus it occurs in O(k) time where k is the "level" of the list the -+ * task was stored at - usually < 4, max 16. -+ * -+ * Context: rq->lock -+ */ -+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags) -+{ -+ lockdep_assert_held(&rq->lock); -+ -+ WARN_ONCE(task_rq(p) != rq, "pds: dequeue task reside on cpu%d from cpu%d\n", -+ task_cpu(p), cpu_of(rq)); -+ if (skiplist_del_init(&rq->sl_header, &p->sl_node)) { -+ update_sched_rq_queued_masks(rq); -+ update_sched_rq_pending_masks(rq); -+ } else if (is_second_in_rq(p, rq)) -+ update_sched_rq_pending_masks(rq); -+ rq->nr_running--; -+ -+ sched_update_tick_dependency(rq); -+ psi_dequeue(p, flags & DEQUEUE_SLEEP); -+ -+ sched_info_dequeued(rq, p); -+} -+ -+/* -+ * To determine if it's safe for a task of SCHED_IDLE to actually run as -+ * an idle task, we ensure none of the following conditions are met. 
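
__update_cpumasks_bitmap() above keeps, per priority level, a cpumask of the CPUs currently at that level, plus one summary bit per non-empty level so searchers can skip empty levels with find_first_bit(). A minimal sketch of the same bookkeeping, with 64-bit words standing in for cpumask_t (the struct and names are invented here):

#include <stdbool.h>

#define NR_LEVELS 8

struct level_masks {
    unsigned long cpus[NR_LEVELS];  /* one 64-CPU mask per level */
    unsigned long bitmap;           /* bit L set <=> cpus[L] != 0 */
};

static bool move_cpu_level(struct level_masks *m, int cpu,
                           unsigned int *plevel, unsigned int level)
{
    if (*plevel == level)
        return false;                       /* nothing changed */

    m->cpus[*plevel] &= ~(1UL << cpu);      /* leave the old level */
    if (m->cpus[*plevel] == 0)
        m->bitmap &= ~(1UL << *plevel);     /* old level went empty */

    m->cpus[level] |= 1UL << cpu;           /* join the new level */
    m->bitmap |= 1UL << level;

    *plevel = level;
    return true;
}
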
-+ */ -+static inline bool idleprio_suitable(struct task_struct *p) -+{ -+ return (!freezing(p) && !signal_pending(p) && -+ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); -+} -+ -+/* -+ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip -+ * list node which is used in PDS run queue. -+ * -+ * In the current implementation, based on testing, the first 8 bits in the -+ * microseconds of niffies are suitable for random level population. -+ * find_first_bit() is used to satisfy p = 0.5 between each level, and there -+ * should be a hardware-supported instruction (known as ctz/clz) to speed -+ * up this function. -+ * The skiplist level for a task is populated when the task is created and doesn't -+ * change in the task's lifetime. When the task is being inserted into the run queue, this -+ * skiplist level is set in the task's sl_node->level; the skiplist insert function -+ * may change it based on the current level of the skip list. -+ */ -+static inline int pds_skiplist_random_level(const struct task_struct *p) -+{ -+ long unsigned int randseed; -+ -+ /* -+ * 1. Some architectures don't have better than microsecond resolution, -+ * so mask out ~microseconds as a factor of the random seed for skiplist -+ * insertion. -+ * 2. Use the address of the task structure as another factor of the -+ * random seed, for the task burst forking scenario. -+ */ -+ randseed = (task_rq(p)->clock ^ (long unsigned int)p) >> 10; -+ -+ return find_first_bit(&randseed, NUM_SKIPLIST_LEVEL - 1); -+} -+ -+/** -+ * pds_skiplist_task_search -- search function used in PDS run queue skip list -+ * node insert operation. -+ * @it: iterator pointer to the node in the skip list -+ * @node: pointer to the skiplist_node to be inserted -+ * -+ * Returns true if the key of @it is less than or equal to the key of @node, otherwise -+ * false. -+ */ -+static inline bool -+pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node) -+{ -+ return (skiplist_entry(it, struct task_struct, sl_node)->priodl <= -+ skiplist_entry(node, struct task_struct, sl_node)->priodl); -+} -+ -+/* -+ * Define the skip list insert function for PDS -+ */ -+DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search); -+ -+/* -+ * Adding a task to the runqueue. -+ * -+ * Context: rq->lock -+ */ -+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) -+{ -+ lockdep_assert_held(&rq->lock); -+ -+ WARN_ONCE(task_rq(p) != rq, "pds: enqueue task residing on cpu%d to cpu%d\n", -+ task_cpu(p), cpu_of(rq)); -+ -+ p->sl_node.level = p->sl_level; -+ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node)) { -+ update_sched_rq_queued_masks(rq); -+ update_sched_rq_pending_masks(rq); -+ } else if (is_second_in_rq(p, rq)) -+ update_sched_rq_pending_masks(rq); -+ rq->nr_running++; -+ -+ sched_update_tick_dependency(rq); -+ -+ sched_info_queued(rq, p); -+ psi_enqueue(p, flags); -+ -+ /* -+ * If in_iowait is set, the code below may not trigger any cpufreq -+ * utilization updates, so do it here explicitly with the IOWAIT flag -+ * passed.
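
pds_skiplist_random_level() above draws a geometric distribution: each bit of the seed is an independent coin flip, so the lowest set bit lands at index k with probability 2^-(k+1), which is exactly the level distribution a skip list wants. A hypothetical userspace rendering, with rand() and the GCC builtin __builtin_ctzl() standing in for niffies and find_first_bit():

#include <stdlib.h>

#define NUM_LEVELS 8    /* stand-in for NUM_SKIPLIST_LEVEL */

static int random_skiplist_level(void)
{
    unsigned long seed = (unsigned long)rand();
    int level;

    if (seed == 0)                   /* no set bit: use the deepest level */
        return NUM_LEVELS - 1;
    /* __builtin_ctzl == index of the first set bit, the ctz/clz the
     * comment above says the hardware should provide */
    level = __builtin_ctzl(seed);
    return level < NUM_LEVELS - 1 ? level : NUM_LEVELS - 1;
}
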
-+ */ -+ if (p->in_iowait) -+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT); -+} -+ -+static inline void requeue_task(struct task_struct *p, struct rq *rq) -+{ -+ bool b_first, b_second; -+ -+ lockdep_assert_held(&rq->lock); -+ -+ WARN_ONCE(task_rq(p) != rq, "pds: cpu[%d] requeue task reside on cpu%d\n", -+ cpu_of(rq), task_cpu(p)); -+ -+ b_first = skiplist_del_init(&rq->sl_header, &p->sl_node); -+ b_second = is_second_in_rq(p, rq); -+ -+ p->sl_node.level = p->sl_level; -+ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) { -+ update_sched_rq_queued_masks(rq); -+ update_sched_rq_pending_masks(rq); -+ } else if (is_second_in_rq(p, rq) || b_second) -+ update_sched_rq_pending_masks(rq); -+} -+ -+/* -+ * resched_curr - mark rq's current task 'to be rescheduled now'. -+ * -+ * On UP this means the setting of the need_resched flag, on SMP it -+ * might also involve a cross-CPU call to trigger the scheduler on -+ * the target CPU. -+ */ -+void resched_curr(struct rq *rq) -+{ -+ struct task_struct *curr = rq->curr; -+ int cpu; -+ -+ lockdep_assert_held(&rq->lock); -+ -+ if (test_tsk_need_resched(curr)) -+ return; -+ -+ cpu = cpu_of(rq); -+ if (cpu == smp_processor_id()) { -+ set_tsk_need_resched(curr); -+ set_preempt_need_resched(); -+ return; -+ } -+ -+ if (set_nr_and_not_polling(curr)) -+ smp_send_reschedule(cpu); -+ else -+ trace_sched_wake_idle_without_ipi(cpu); -+} -+ -+static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) -+{ -+ struct task_struct *curr = rq->curr; -+ -+ if (curr->prio == PRIO_LIMIT) -+ resched_curr(rq); -+ -+ if (task_running_idle(p)) -+ return; -+ -+ if (p->priodl < curr->priodl) -+ resched_curr(rq); -+} -+ -+#ifdef CONFIG_SCHED_HRTICK -+/* -+ * Use HR-timers to deliver accurate preemption points. -+ */ -+ -+static void hrtick_clear(struct rq *rq) -+{ -+ if (hrtimer_active(&rq->hrtick_timer)) -+ hrtimer_cancel(&rq->hrtick_timer); -+} -+ -+/* -+ * High-resolution timer tick. -+ * Runs from hardirq context with interrupts disabled. -+ */ -+static enum hrtimer_restart hrtick(struct hrtimer *timer) -+{ -+ struct rq *rq = container_of(timer, struct rq, hrtick_timer); -+ struct task_struct *p; -+ -+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); -+ -+ raw_spin_lock(&rq->lock); -+ p = rq->curr; -+ p->time_slice = 0; -+ resched_curr(rq); -+ raw_spin_unlock(&rq->lock); -+ -+ return HRTIMER_NORESTART; -+} -+ -+/* -+ * Use hrtick when: -+ * - enabled by features -+ * - hrtimer is actually high res -+ */ -+static inline int hrtick_enabled(struct rq *rq) -+{ -+ /** -+ * PDS doesn't support sched_feat yet -+ if (!sched_feat(HRTICK)) -+ return 0; -+ */ -+ if (!cpu_active(cpu_of(rq))) -+ return 0; -+ return hrtimer_is_hres_active(&rq->hrtick_timer); -+} -+ -+#ifdef CONFIG_SMP -+ -+static void __hrtick_restart(struct rq *rq) -+{ -+ struct hrtimer *timer = &rq->hrtick_timer; -+ -+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); -+} -+ -+/* -+ * called from hardirq (IPI) context -+ */ -+static void __hrtick_start(void *arg) -+{ -+ struct rq *rq = arg; -+ -+ raw_spin_lock(&rq->lock); -+ __hrtick_restart(rq); -+ rq->hrtick_csd_pending = 0; -+ raw_spin_unlock(&rq->lock); -+} -+ -+/* -+ * Called to set the hrtick timer state. -+ * -+ * called with rq->lock held and irqs disabled -+ */ -+void hrtick_start(struct rq *rq, u64 delay) -+{ -+ struct hrtimer *timer = &rq->hrtick_timer; -+ ktime_t time; -+ s64 delta; -+ -+ /* -+ * Don't schedule slices shorter than 10000ns, that just -+ * doesn't make sense and can cause timer DoS. 
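
check_preempt_curr() above needs only the single compare `p->priodl < curr->priodl` because priodl is one key packing both ordering criteria. A sketch of one plausible packing, assuming (as update_task_priodl(), defined elsewhere in this patch, implies) that the priority sits in the high bits with the deadline below it; the exact 8/56 split here is invented for illustration:

#include <stdint.h>

/* Priority in the top bits, deadline below: one unsigned compare then
 * orders tasks first by priority, then by earliest deadline. */
static inline uint64_t make_priodl(uint8_t prio, uint64_t deadline)
{
    return ((uint64_t)prio << 56) | (deadline & ((1ULL << 56) - 1));
}

/* mirrors `p->priodl < curr->priodl` in check_preempt_curr() above */
static inline int preempts(uint64_t p_priodl, uint64_t curr_priodl)
{
    return p_priodl < curr_priodl;
}
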
-+ */ -+ delta = max_t(s64, delay, 10000LL); -+ time = ktime_add_ns(timer->base->get_time(), delta); -+ -+ hrtimer_set_expires(timer, time); -+ -+ if (rq == this_rq()) { -+ __hrtick_restart(rq); -+ } else if (!rq->hrtick_csd_pending) { -+ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); -+ rq->hrtick_csd_pending = 1; -+ } -+} -+ -+#else -+/* -+ * Called to set the hrtick timer state. -+ * -+ * called with rq->lock held and irqs disabled -+ */ -+void hrtick_start(struct rq *rq, u64 delay) -+{ -+ /* -+ * Don't schedule slices shorter than 10000ns, that just -+ * doesn't make sense. Rely on vruntime for fairness. -+ */ -+ delay = max_t(u64, delay, 10000LL); -+ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), -+ HRTIMER_MODE_REL_PINNED); -+} -+#endif /* CONFIG_SMP */ -+ -+static void hrtick_rq_init(struct rq *rq) -+{ -+#ifdef CONFIG_SMP -+ rq->hrtick_csd_pending = 0; -+ -+ rq->hrtick_csd.flags = 0; -+ rq->hrtick_csd.func = __hrtick_start; -+ rq->hrtick_csd.info = rq; -+#endif -+ -+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ rq->hrtick_timer.function = hrtick; -+} -+ -+static inline int rq_dither(struct rq *rq) -+{ -+ if ((rq->clock - rq->last_tick > HALF_JIFFY_NS) || hrtick_enabled(rq)) -+ return 0; -+ -+ return HALF_JIFFY_NS; -+} -+ -+#else /* CONFIG_SCHED_HRTICK */ -+static inline int hrtick_enabled(struct rq *rq) -+{ -+ return 0; -+} -+ -+static inline void hrtick_clear(struct rq *rq) -+{ -+} -+ -+static inline void hrtick_rq_init(struct rq *rq) -+{ -+} -+ -+static inline int rq_dither(struct rq *rq) -+{ -+ return (rq->clock - rq->last_tick > HALF_JIFFY_NS)? 0:HALF_JIFFY_NS; -+} -+#endif /* CONFIG_SCHED_HRTICK */ -+ -+static inline int normal_prio(struct task_struct *p) -+{ -+ static const int policy_to_prio[] = { -+ NORMAL_PRIO, /* SCHED_NORMAL */ -+ 0, /* SCHED_FIFO */ -+ 0, /* SCHED_RR */ -+ IDLE_PRIO, /* SCHED_BATCH */ -+ ISO_PRIO, /* SCHED_ISO */ -+ IDLE_PRIO /* SCHED_IDLE */ -+ }; -+ -+ if (task_has_rt_policy(p)) -+ return MAX_RT_PRIO - 1 - p->rt_priority; -+ return policy_to_prio[p->policy]; -+} -+ -+/* -+ * Calculate the current priority, i.e. the priority -+ * taken into account by the scheduler. This value might -+ * be boosted by RT tasks as it will be RT if the task got -+ * RT-boosted. If not then it returns p->normal_prio. -+ */ -+static int effective_prio(struct task_struct *p) -+{ -+ p->normal_prio = normal_prio(p); -+ /* -+ * If we are RT tasks or we were boosted to RT priority, -+ * keep the priority unchanged. Otherwise, update priority -+ * to the normal priority: -+ */ -+ if (!rt_prio(p->prio)) -+ return p->normal_prio; -+ return p->prio; -+} -+ -+/* -+ * activate_task - move a task to the runqueue. -+ * -+ * Context: rq->lock -+ */ -+static void activate_task(struct task_struct *p, struct rq *rq) -+{ -+ if (task_contributes_to_load(p)) -+ rq->nr_uninterruptible--; -+ enqueue_task(p, rq, ENQUEUE_WAKEUP); -+ p->on_rq = 1; -+ cpufreq_update_this_cpu(rq, 0); -+} -+ -+/* -+ * deactivate_task - remove a task from the runqueue. -+ * -+ * Context: rq->lock -+ */ -+static inline void deactivate_task(struct task_struct *p, struct rq *rq) -+{ -+ if (task_contributes_to_load(p)) -+ rq->nr_uninterruptible++; -+ dequeue_task(p, rq, DEQUEUE_SLEEP); -+ p->on_rq = 0; -+ cpufreq_update_this_cpu(rq, 0); -+} -+ -+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) -+{ -+#ifdef CONFIG_SMP -+ /* -+ * After ->cpu is set up to a new value, task_access_lock(p, ...) can be -+ * successfully executed on another CPU. 
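
The 10000ns floor applied above is a plain clamp; as a standalone function:

#include <stdint.h>

/* Requests shorter than 10us (including zero or negative leftovers) are
 * rounded up, so a tiny remaining slice cannot rearm the slice timer at
 * interrupt-storm rates. */
static int64_t hrtick_clamp_ns(int64_t requested_ns)
{
    const int64_t min_slice_ns = 10000;   /* 10000ns, as in the patch */
    return requested_ns > min_slice_ns ? requested_ns : min_slice_ns;
}
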
We must ensure that updates of -+ * per-task data have been completed by this moment. -+ */ -+ smp_wmb(); -+ -+#ifdef CONFIG_THREAD_INFO_IN_TASK -+ WRITE_ONCE(p->cpu, cpu); -+#else -+ WRITE_ONCE(task_thread_info(p)->cpu, cpu); -+#endif -+#endif -+} -+ -+#ifdef CONFIG_SMP -+void set_task_cpu(struct task_struct *p, unsigned int new_cpu) -+{ -+#ifdef CONFIG_SCHED_DEBUG -+ /* -+ * We should never call set_task_cpu() on a blocked task, -+ * ttwu() will sort out the placement. -+ */ -+ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && -+ !p->on_rq); -+#ifdef CONFIG_LOCKDEP -+ /* -+ * The caller should hold either p->pi_lock or rq->lock, when changing -+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. -+ * -+ * sched_move_task() holds both and thus holding either pins the cgroup, -+ * see task_group(). -+ */ -+ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || -+ lockdep_is_held(&task_rq(p)->lock))); -+#endif -+ /* -+ * Clearly, migrating tasks to offline CPUs is a fairly daft thing. -+ */ -+ WARN_ON_ONCE(!cpu_online(new_cpu)); -+#endif -+ if (task_cpu(p) == new_cpu) -+ return; -+ trace_sched_migrate_task(p, new_cpu); -+ rseq_migrate(p); -+ perf_event_task_migrate(p); -+ -+ __set_task_cpu(p, new_cpu); -+} -+ -+static inline bool is_per_cpu_kthread(struct task_struct *p) -+{ -+ return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed)); -+} -+ -+/* -+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see -+ * __set_cpus_allowed_ptr() and select_fallback_rq(). -+ */ -+static inline bool is_cpu_allowed(struct task_struct *p, int cpu) -+{ -+ if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ return false; -+ -+ if (is_per_cpu_kthread(p)) -+ return cpu_online(cpu); -+ -+ return cpu_active(cpu); -+} -+ -+/* -+ * This is how migration works: -+ * -+ * 1) we invoke migration_cpu_stop() on the target CPU using -+ * stop_one_cpu(). -+ * 2) stopper starts to run (implicitly forcing the migrated thread -+ * off the CPU) -+ * 3) it checks whether the migrated task is still in the wrong runqueue. -+ * 4) if it's in the wrong runqueue then the migration thread removes -+ * it and puts it into the right queue. -+ * 5) stopper completes and stop_one_cpu() returns and the migration -+ * is done. -+ */ -+ -+/* -+ * detach_task() -- detach the task for migration to @target_cpu -+ */ -+static void detach_task(struct rq *rq, struct task_struct *p, int target_cpu) -+{ -+ lockdep_assert_held(&rq->lock); -+ -+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); -+ if (task_contributes_to_load(p)) -+ rq->nr_uninterruptible++; -+ dequeue_task(p, rq, 0); -+ -+ set_task_cpu(p, target_cpu); -+} -+ -+/* -+ * attach_task() -- attach the task detached by detach_task() to its new rq. -+ */ -+static void attach_task(struct rq *rq, struct task_struct *p) -+{ -+ lockdep_assert_held(&rq->lock); -+ -+ BUG_ON(task_rq(p) != rq); -+ -+ if (task_contributes_to_load(p)) -+ rq->nr_uninterruptible--; -+ enqueue_task(p, rq, 0); -+ p->on_rq = TASK_ON_RQ_QUEUED; -+ cpufreq_update_this_cpu(rq, 0); -+} -+ -+/* -+ * move_queued_task - move a queued task to a new rq. -+ * -+ * Returns (locked) new rq. Old rq's lock is released.
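
The smp_wmb() in __set_task_cpu() above enforces a publish order: finish every per-task update, then expose the new CPU number, so any reader that sees the new CPU also sees those updates. The same idea in C11 release/acquire terms (struct and names invented for illustration):

#include <stdatomic.h>

struct demo_task {
    int per_task_data;
    _Atomic int cpu;
};

static void publish_cpu(struct demo_task *t, int new_data, int new_cpu)
{
    t->per_task_data = new_data;                 /* updates first ...  */
    atomic_store_explicit(&t->cpu, new_cpu,
                          memory_order_release); /* ... then publish  */
}

static int observe_cpu(struct demo_task *t)
{
    /* acquire pairs with the release store above */
    return atomic_load_explicit(&t->cpu, memory_order_acquire);
}
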
-+ */ -+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int -+ new_cpu) -+{ -+ detach_task(rq, p, new_cpu); -+ raw_spin_unlock(&rq->lock); -+ -+ rq = cpu_rq(new_cpu); -+ -+ raw_spin_lock(&rq->lock); -+ update_rq_clock(rq); -+ -+ attach_task(rq, p); -+ -+ check_preempt_curr(rq, p); -+ -+ return rq; -+} -+ -+struct migration_arg { -+ struct task_struct *task; -+ int dest_cpu; -+}; -+ -+/* -+ * Move (not current) task off this CPU, onto the destination CPU. We're doing -+ * this because either it can't run here any more (set_cpus_allowed() -+ * away from this CPU, or CPU going down), or because we're -+ * attempting to rebalance this task on exec (sched_exec). -+ * -+ * So we race with normal scheduler movements, but that's OK, as long -+ * as the task is no longer on this CPU. -+ */ -+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int -+ dest_cpu) -+{ -+ /* Affinity changed (again). */ -+ if (!is_cpu_allowed(p, dest_cpu)) -+ return rq; -+ -+ update_rq_clock(rq); -+ return move_queued_task(rq, p, dest_cpu); -+} -+ -+/* -+ * migration_cpu_stop - this will be executed by a highprio stopper thread -+ * and performs thread migration by bumping thread off CPU then -+ * 'pushing' onto another runqueue. -+ */ -+static int migration_cpu_stop(void *data) -+{ -+ struct migration_arg *arg = data; -+ struct task_struct *p = arg->task; -+ struct rq *rq = this_rq(); -+ -+ /* -+ * The original target CPU might have gone down and we might -+ * be on another CPU but it doesn't matter. -+ */ -+ local_irq_disable(); -+ -+ raw_spin_lock(&p->pi_lock); -+ raw_spin_lock(&rq->lock); -+ /* -+ * If task_rq(p) != rq, it cannot be migrated here, because we're -+ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because -+ * we're holding p->pi_lock. -+ */ -+ if (task_rq(p) == rq) -+ if (task_on_rq_queued(p)) -+ rq = __migrate_task(rq, p, arg->dest_cpu); -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock(&p->pi_lock); -+ -+ local_irq_enable(); -+ return 0; -+} -+ -+static inline void -+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) -+{ -+ cpumask_copy(&p->cpus_allowed, new_mask); -+ p->nr_cpus_allowed = cpumask_weight(new_mask); -+} -+ -+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -+{ -+ set_cpus_allowed_common(p, new_mask); -+} -+#endif -+ -+/* Enter with rq lock held. We know p is on the local CPU */ -+static inline void __set_tsk_resched(struct task_struct *p) -+{ -+ set_tsk_need_resched(p); -+ set_preempt_need_resched(); -+} -+ -+/** -+ * task_curr - is this task currently executing on a CPU? -+ * @p: the task in question. -+ * -+ * Return: 1 if the task is currently executing. 0 otherwise. -+ */ -+inline int task_curr(const struct task_struct *p) -+{ -+ return cpu_curr(task_cpu(p)) == p; -+} -+ -+#ifdef CONFIG_SMP -+/* -+ * wait_task_inactive - wait for a thread to unschedule. -+ * -+ * If @match_state is nonzero, it's the @p->state value just checked and -+ * not expected to change. If it changes, i.e. @p might have woken up, -+ * then return zero. When we succeed in waiting for @p to be off its CPU, -+ * we return a positive number (its total switch count). If a second call -+ * a short while later returns the same number, the caller can be sure that -+ * @p has remained unscheduled the whole time. -+ * -+ * The caller must ensure that the task *will* unschedule sometime soon, -+ * else this function might spin for a *long* time. 
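
move_queued_task() above never holds both runqueue locks at once: detach under the source lock, drop it, take the destination lock, attach. That sidesteps lock-ordering deadlocks at the cost of the task briefly being on neither queue (which is why the real code marks it TASK_ON_RQ_MIGRATING first). A hypothetical miniature with pthread mutexes, the detach/attach steps elided as comments:

#include <pthread.h>

struct queue { pthread_mutex_t lock; /* ... task list ... */ };

/* caller holds src->lock on entry; returns dst locked, like the rq */
static struct queue *migrate(struct queue *src, struct queue *dst,
                             void *task)
{
    /* detach_task(src, task);  -- still under src->lock */
    pthread_mutex_unlock(&src->lock);

    pthread_mutex_lock(&dst->lock);
    /* attach_task(dst, task); check_preempt(dst, task); */
    return dst;
}
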
This function can't -+ * be called with interrupts off, or it may introduce deadlock with -+ * smp_call_function() if an IPI is sent by the same process we are -+ * waiting to become inactive. -+ */ -+unsigned long wait_task_inactive(struct task_struct *p, long match_state) -+{ -+ unsigned long flags; -+ bool running, on_rq; -+ unsigned long ncsw; -+ struct rq *rq; -+ raw_spinlock_t *lock; -+ -+ for (;;) { -+ rq = task_rq(p); -+ -+ /* -+ * If the task is actively running on another CPU -+ * still, just relax and busy-wait without holding -+ * any locks. -+ * -+ * NOTE! Since we don't hold any locks, it's not -+ * even sure that "rq" stays as the right runqueue! -+ * But we don't care, since this will return false -+ * if the runqueue has changed and p is actually now -+ * running somewhere else! -+ */ -+ while (task_running(p) && p == rq->curr) { -+ if (match_state && unlikely(p->state != match_state)) -+ return 0; -+ cpu_relax(); -+ } -+ -+ /* -+ * Ok, time to look more closely! We need the rq -+ * lock now, to be *sure*. If we're wrong, we'll -+ * just go back and repeat. -+ */ -+ task_access_lock_irqsave(p, &lock, &flags); -+ trace_sched_wait_task(p); -+ running = task_running(p); -+ on_rq = p->on_rq; -+ ncsw = 0; -+ if (!match_state || p->state == match_state) -+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ -+ task_access_unlock_irqrestore(p, lock, &flags); -+ -+ /* -+ * If it changed from the expected state, bail out now. -+ */ -+ if (unlikely(!ncsw)) -+ break; -+ -+ /* -+ * Was it really running after all now that we -+ * checked with the proper locks actually held? -+ * -+ * Oops. Go back and try again.. -+ */ -+ if (unlikely(running)) { -+ cpu_relax(); -+ continue; -+ } -+ -+ /* -+ * It's not enough that it's not actively running, -+ * it must be off the runqueue _entirely_, and not -+ * preempted! -+ * -+ * So if it was still runnable (but just not actively -+ * running right now), it's preempted, and we should -+ * yield - it could be a while. -+ */ -+ if (unlikely(on_rq)) { -+ ktime_t to = NSEC_PER_SEC / HZ; -+ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ schedule_hrtimeout(&to, HRTIMER_MODE_REL); -+ continue; -+ } -+ -+ /* -+ * Ahh, all good. It wasn't running, and it wasn't -+ * runnable, which means that it will never become -+ * running in the future either. We're all done! -+ */ -+ break; -+ } -+ -+ return ncsw; -+} -+ -+/*** -+ * kick_process - kick a running thread to enter/exit the kernel -+ * @p: the to-be-kicked thread -+ * -+ * Cause a process which is running on another CPU to enter -+ * kernel-mode, without any delay. (to get signals handled.) -+ * -+ * NOTE: this function doesn't have to take the runqueue lock, -+ * because all it wants to ensure is that the remote task enters -+ * the kernel. If the IPI races and the task has been migrated -+ * to another CPU then no harm is done and the purpose has been -+ * achieved as well. -+ */ -+void kick_process(struct task_struct *p) -+{ -+ int cpu; -+ -+ preempt_disable(); -+ cpu = task_cpu(p); -+ if ((cpu != smp_processor_id()) && task_curr(p)) -+ smp_send_reschedule(cpu); -+ preempt_enable(); -+} -+EXPORT_SYMBOL_GPL(kick_process); -+ -+/* -+ * ->cpus_allowed is protected by both rq->lock and p->pi_lock -+ * -+ * A few notes on cpu_active vs cpu_online: -+ * -+ * - cpu_active must be a subset of cpu_online -+ * -+ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, -+ * see __set_cpus_allowed_ptr(). 
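
The `p->nvcsw | LONG_MIN` line above deserves unpacking: a genuine switch count may be 0, but 0 is also wait_task_inactive()'s "state changed, bail out" result, so the sign bit is ORed in to make every valid count nonzero. Equality of two tagged values still implies the count did not change in between:

#include <limits.h>

static unsigned long tag_switch_count(unsigned long nvcsw)
{
    return nvcsw | LONG_MIN;   /* sets the MSB, so the result is never 0 */
}

/* usage sketch: if tag_switch_count(a) == tag_switch_count(b) across two
 * calls, the task never scheduled in the interim. */
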
At this point the newly online -+ * CPU isn't yet part of the sched domains, and balancing will not -+ * see it. -+ * -+ * - on cpu-down we clear cpu_active() to mask the sched domains and -+ * avoid the load balancer to place new tasks on the to be removed -+ * CPU. Existing tasks will remain running there and will be taken -+ * off. -+ * -+ * This means that fallback selection must not select !active CPUs. -+ * And can assume that any active CPU must be online. Conversely -+ * select_task_rq() below may allow selection of !active CPUs in order -+ * to satisfy the above rules. -+ */ -+static int select_fallback_rq(int cpu, struct task_struct *p) -+{ -+ int nid = cpu_to_node(cpu); -+ const struct cpumask *nodemask = NULL; -+ enum { cpuset, possible, fail } state = cpuset; -+ int dest_cpu; -+ -+ /* -+ * If the node that the CPU is on has been offlined, cpu_to_node() -+ * will return -1. There is no CPU on the node, and we should -+ * select the CPU on the other node. -+ */ -+ if (nid != -1) { -+ nodemask = cpumask_of_node(nid); -+ -+ /* Look for allowed, online CPU in same node. */ -+ for_each_cpu(dest_cpu, nodemask) { -+ if (!cpu_active(dest_cpu)) -+ continue; -+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) -+ return dest_cpu; -+ } -+ } -+ -+ for (;;) { -+ /* Any allowed, online CPU? */ -+ for_each_cpu(dest_cpu, &p->cpus_allowed) { -+ if (!is_cpu_allowed(p, dest_cpu)) -+ continue; -+ goto out; -+ } -+ -+ /* No more Mr. Nice Guy. */ -+ switch (state) { -+ case cpuset: -+ if (IS_ENABLED(CONFIG_CPUSETS)) { -+ cpuset_cpus_allowed_fallback(p); -+ state = possible; -+ break; -+ } -+ /* Fall-through */ -+ case possible: -+ do_set_cpus_allowed(p, cpu_possible_mask); -+ state = fail; -+ break; -+ -+ case fail: -+ BUG(); -+ break; -+ } -+ } -+ -+out: -+ if (state != cpuset) { -+ /* -+ * Don't tell them about moving exiting tasks or -+ * kernel threads (both mm NULL), since they never -+ * leave kernel. 
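
select_fallback_rq() above is a small escalation state machine: each failed sweep relaxes the constraint set (cpuset -> possible -> fail), and reaching `fail` after having been allowed every possible CPU is a BUG() by construction. Reduced to its skeleton (the usable() callback and names are invented here):

#define NR_CPUS_DEMO 64   /* stand-in CPU count */

enum fallback_state { FB_CPUSET, FB_POSSIBLE, FB_FAIL };

static int pick_fallback(int (*usable)(int cpu, enum fallback_state state))
{
    enum fallback_state state = FB_CPUSET;
    int cpu;

    for (;;) {
        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
            if (usable(cpu, state))
                return cpu;

        switch (state) {
        case FB_CPUSET:               /* widen: cpuset fallback mask */
            state = FB_POSSIBLE;
            break;
        case FB_POSSIBLE:             /* widen: all possible CPUs */
            state = FB_FAIL;
            break;
        case FB_FAIL:                 /* nothing left; the kernel BUG()s */
            return -1;
        }
    }
}
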
-+ */ -+ if (p->mm && printk_ratelimit()) { -+ printk_deferred("process %d (%s) no longer affine to cpu%d\n", -+ task_pid_nr(p), p->comm, cpu); -+ } -+ } -+ -+ return dest_cpu; -+} -+ -+static inline int best_mask_cpu(int cpu, cpumask_t *cpumask) -+{ -+ cpumask_t *mask; -+ -+ if (cpumask_test_cpu(cpu, cpumask)) -+ return cpu; -+ -+ mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); -+ while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids) -+ mask++; -+ -+ return cpu; -+} -+ -+/* -+ * task_preemptible_rq - return the rq which the given task can preempt on -+ * @p: task wants to preempt CPU -+ * @only_preempt_low_policy: indicate only preempt rq running low policy than @p -+ */ -+static inline int -+task_preemptible_rq_idle(struct task_struct *p, cpumask_t *chk_mask) -+{ -+ cpumask_t tmp; -+ -+#ifdef CONFIG_SCHED_SMT -+ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask)) -+ return best_mask_cpu(task_cpu(p), &tmp); -+#endif -+ -+#ifdef CONFIG_SMT_NICE -+ /* Only ttwu on cpu which is not smt supressed */ -+ if (cpumask_andnot(&tmp, chk_mask, &sched_smt_supressed_mask)) { -+ cpumask_t t; -+ if (cpumask_and(&t, &tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY])) -+ return best_mask_cpu(task_cpu(p), &t); -+ return best_mask_cpu(task_cpu(p), &tmp); -+ } -+#endif -+ -+ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[SCHED_RQ_EMPTY])) -+ return best_mask_cpu(task_cpu(p), &tmp); -+ return best_mask_cpu(task_cpu(p), chk_mask); -+} -+ -+static inline int -+task_preemptible_rq(struct task_struct *p, cpumask_t *chk_mask, -+ int preempt_level) -+{ -+ cpumask_t tmp; -+ int level; -+ -+#ifdef CONFIG_SCHED_SMT -+#ifdef CONFIG_SMT_NICE -+ if (cpumask_and(&tmp, chk_mask, &sched_cpu_psg_mask)) -+ return best_mask_cpu(task_cpu(p), &tmp); -+#else -+ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask)) -+ return best_mask_cpu(task_cpu(p), &tmp); -+#endif -+#endif -+ -+ level = find_first_bit(sched_rq_queued_masks_bitmap, -+ NR_SCHED_RQ_QUEUED_LEVEL); -+ -+ while (level < preempt_level) { -+ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[level])) -+ return best_mask_cpu(task_cpu(p), &tmp); -+ -+ level = find_next_bit(sched_rq_queued_masks_bitmap, -+ NR_SCHED_RQ_QUEUED_LEVEL, -+ level + 1); -+ } -+ -+ if (unlikely(SCHED_RQ_RT == level && -+ level == preempt_level && -+ cpumask_and(&tmp, chk_mask, -+ &sched_rq_queued_masks[SCHED_RQ_RT]))) { -+ unsigned int cpu; -+ -+ for_each_cpu (cpu, &tmp) -+ if (p->prio < sched_rq_prio[cpu]) -+ return cpu; -+ } -+ -+ return best_mask_cpu(task_cpu(p), chk_mask); -+} -+ -+/* -+ * wake flags -+ */ -+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ -+#define WF_FORK 0x02 /* child wakeup after fork */ -+#define WF_MIGRATED 0x04 /* internal use, task got migrated */ -+ -+static inline int select_task_rq(struct task_struct *p) -+{ -+ cpumask_t chk_mask; -+ -+ if (unlikely(!cpumask_and(&chk_mask, &p->cpus_allowed, cpu_online_mask))) -+ return select_fallback_rq(task_cpu(p), p); -+ -+ /* Check IDLE tasks suitable to run normal priority */ -+ if (idleprio_task(p)) { -+ if (idleprio_suitable(p)) { -+ p->prio = p->normal_prio; -+ update_task_priodl(p); -+ return task_preemptible_rq_idle(p, &chk_mask); -+ } -+ p->prio = NORMAL_PRIO; -+ update_task_priodl(p); -+ } -+ -+ return task_preemptible_rq(p, &chk_mask, -+ task_running_policy_level(p, this_rq())); -+} -+#else /* CONFIG_SMP */ -+static inline int select_task_rq(struct task_struct *p) -+{ -+ return 0; -+} -+#endif /* CONFIG_SMP */ -+ -+static void -+ttwu_stat(struct task_struct *p, int cpu, int 
wake_flags) -+{ -+ struct rq *rq; -+ -+ if (!schedstat_enabled()) -+ return; -+ -+ rq= this_rq(); -+ -+#ifdef CONFIG_SMP -+ if (cpu == rq->cpu) -+ __schedstat_inc(rq->ttwu_local); -+ else { -+ /** PDS ToDo: -+ * How to do ttwu_wake_remote -+ */ -+ } -+#endif /* CONFIG_SMP */ -+ -+ __schedstat_inc(rq->ttwu_count); -+} -+ -+static inline void ttwu_activate(struct task_struct *p, struct rq *rq) -+{ -+ activate_task(p, rq); -+ -+ /* -+ * if a worker is waking up, notify workqueue. Note that on PDS, we -+ * don't really know what CPU it will be, so we fake it for -+ * wq_worker_waking_up :/ -+ */ -+ if (p->flags & PF_WQ_WORKER) -+ wq_worker_waking_up(p, cpu_of(rq)); -+} -+ -+/* -+ * Mark the task runnable and perform wakeup-preemption. -+ */ -+static inline void -+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) -+{ -+ p->state = TASK_RUNNING; -+ trace_sched_wakeup(p); -+} -+ -+static inline void -+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) -+{ -+#ifdef CONFIG_SMP -+ if (p->sched_contributes_to_load) -+ rq->nr_uninterruptible--; -+#endif -+ -+ ttwu_activate(p, rq); -+ ttwu_do_wakeup(rq, p, 0); -+} -+ -+static int ttwu_remote(struct task_struct *p, int wake_flags) -+{ -+ struct rq *rq; -+ raw_spinlock_t *lock; -+ int ret = 0; -+ -+ rq = __task_access_lock(p, &lock); -+ if (task_on_rq_queued(p)) { -+ ttwu_do_wakeup(rq, p, wake_flags); -+ ret = 1; -+ } -+ __task_access_unlock(p, lock); -+ -+ return ret; -+} -+ -+/* -+ * Notes on Program-Order guarantees on SMP systems. -+ * -+ * MIGRATION -+ * -+ * The basic program-order guarantee on SMP systems is that when a task [t] -+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent -+ * execution on its new CPU [c1]. -+ * -+ * For migration (of runnable tasks) this is provided by the following means: -+ * -+ * A) UNLOCK of the rq(c0)->lock scheduling out task t -+ * B) migration for t is required to synchronize *both* rq(c0)->lock and -+ * rq(c1)->lock (if not at the same time, then in that order). -+ * C) LOCK of the rq(c1)->lock scheduling in task -+ * -+ * Transitivity guarantees that B happens after A and C after B. -+ * Note: we only require RCpc transitivity. -+ * Note: the CPU doing B need not be c0 or c1 -+ * -+ * Example: -+ * -+ * CPU0 CPU1 CPU2 -+ * -+ * LOCK rq(0)->lock -+ * sched-out X -+ * sched-in Y -+ * UNLOCK rq(0)->lock -+ * -+ * LOCK rq(0)->lock // orders against CPU0 -+ * dequeue X -+ * UNLOCK rq(0)->lock -+ * -+ * LOCK rq(1)->lock -+ * enqueue X -+ * UNLOCK rq(1)->lock -+ * -+ * LOCK rq(1)->lock // orders against CPU2 -+ * sched-out Z -+ * sched-in X -+ * UNLOCK rq(1)->lock -+ * -+ * -+ * BLOCKING -- aka. SLEEP + WAKEUP -+ * -+ * For blocking we (obviously) need to provide the same guarantee as for -+ * migration. However the means are completely different as there is no lock -+ * chain to provide order. 
Instead we do: -+ * -+ * 1) smp_store_release(X->on_cpu, 0) -+ * 2) smp_cond_load_acquire(!X->on_cpu) -+ * -+ * Example: -+ * -+ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) -+ * -+ * LOCK rq(0)->lock LOCK X->pi_lock -+ * dequeue X -+ * sched-out X -+ * smp_store_release(X->on_cpu, 0); -+ * -+ * smp_cond_load_acquire(&X->on_cpu, !VAL); -+ * X->state = WAKING -+ * set_task_cpu(X,2) -+ * -+ * LOCK rq(2)->lock -+ * enqueue X -+ * X->state = RUNNING -+ * UNLOCK rq(2)->lock -+ * -+ * LOCK rq(2)->lock // orders against CPU1 -+ * sched-out Z -+ * sched-in X -+ * UNLOCK rq(2)->lock -+ * -+ * UNLOCK X->pi_lock -+ * UNLOCK rq(0)->lock -+ * -+ * -+ * However; for wakeups there is a second guarantee we must provide, namely we -+ * must observe the state that lead to our wakeup. That is, not only must our -+ * task observe its own prior state, it must also observe the stores prior to -+ * its wakeup. -+ * -+ * This means that any means of doing remote wakeups must order the CPU doing -+ * the wakeup against the CPU the task is going to end up running on. This, -+ * however, is already required for the regular Program-Order guarantee above, -+ * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire). -+ * -+ */ -+ -+/*** -+ * try_to_wake_up - wake up a thread -+ * @p: the thread to be awakened -+ * @state: the mask of task states that can be woken -+ * @wake_flags: wake modifier flags (WF_*) -+ * -+ * Put it on the run-queue if it's not already there. The "current" -+ * thread is always on the run-queue (except when the actual -+ * re-schedule is in progress), and as such you're allowed to do -+ * the simpler "current->state = TASK_RUNNING" to mark yourself -+ * runnable without the overhead of this. -+ * -+ * Return: %true if @p was woken up, %false if it was already running. -+ * or @state didn't match @p's state. -+ */ -+static int try_to_wake_up(struct task_struct *p, unsigned int state, -+ int wake_flags) -+{ -+ unsigned long flags; -+ struct rq *rq; -+ int cpu, success = 0; -+ -+ /* -+ * If we are going to wake up a thread waiting for CONDITION we -+ * need to ensure that CONDITION=1 done by the caller can not be -+ * reordered with p->state check below. This pairs with mb() in -+ * set_current_state() the waiting thread does. -+ */ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ smp_mb__after_spinlock(); -+ if (!(p->state & state)) -+ goto out; -+ -+ trace_sched_waking(p); -+ -+ /* We're going to change ->state: */ -+ success = 1; -+ cpu = task_cpu(p); -+ -+ /* -+ * Ensure we load p->on_rq _after_ p->state, otherwise it would -+ * be possible to, falsely, observe p->on_rq == 0 and get stuck -+ * in smp_cond_load_acquire() below. -+ * -+ * sched_ttwu_pending() try_to_wake_up() -+ * STORE p->on_rq = 1 LOAD p->state -+ * UNLOCK rq->lock -+ * -+ * __schedule() (switch to task 'p') -+ * LOCK rq->lock smp_rmb(); -+ * smp_mb__after_spinlock(); -+ * UNLOCK rq->lock -+ * -+ * [task p] -+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq -+ * -+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in -+ * __schedule(). See the comment for smp_mb__after_spinlock(). -+ */ -+ smp_rmb(); -+ if (p->on_rq && ttwu_remote(p, wake_flags)) -+ goto stat; -+ -+#ifdef CONFIG_SMP -+ /* -+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be -+ * possible to, falsely, observe p->on_cpu == 0. -+ * -+ * One must be running (->on_cpu == 1) in order to remove oneself -+ * from the runqueue. 
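
The two-step handoff described above, reduced to C11 atomics: the scheduling-out CPU publishes `on_cpu = 0` with release semantics, the waker spins with acquire loads, and everything written before the release is guaranteed visible after the acquire, no lock chain involved (names invented for illustration):

#include <stdatomic.h>

static _Atomic int on_cpu = 1;

static void sched_out_side(void)     /* CPU putting the task to sleep */
{
    /* ... save task state ... */
    atomic_store_explicit(&on_cpu, 0, memory_order_release);
}

static void waker_side(void)         /* try_to_wake_up() path */
{
    while (atomic_load_explicit(&on_cpu, memory_order_acquire))
        ;   /* smp_cond_load_acquire(&X->on_cpu, !VAL) in the kernel */
    /* safe to migrate/enqueue: the task's prior state is now visible */
}
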
-+ * -+ * __schedule() (switch to task 'p') try_to_wake_up() -+ * STORE p->on_cpu = 1 LOAD p->on_rq -+ * UNLOCK rq->lock -+ * -+ * __schedule() (put 'p' to sleep) -+ * LOCK rq->lock smp_rmb(); -+ * smp_mb__after_spinlock(); -+ * STORE p->on_rq = 0 LOAD p->on_cpu -+ * -+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in -+ * __schedule(). See the comment for smp_mb__after_spinlock(). -+ */ -+ smp_rmb(); -+ -+ /* -+ * If the owning (remote) CPU is still in the middle of schedule() with -+ * this task as prev, wait until its done referencing the task. -+ * -+ * Pairs with the smp_store_release() in finish_task(). -+ * -+ * This ensures that tasks getting woken will be fully ordered against -+ * their previous state and preserve Program Order. -+ */ -+ smp_cond_load_acquire(&p->on_cpu, !VAL); -+ -+ p->sched_contributes_to_load = !!task_contributes_to_load(p); -+ p->state = TASK_WAKING; -+ -+ if (p->in_iowait) { -+ delayacct_blkio_end(p); -+ atomic_dec(&task_rq(p)->nr_iowait); -+ } -+ -+ if (SCHED_ISO == p->policy && ISO_PRIO != p->prio) { -+ p->prio = ISO_PRIO; -+ p->deadline = 0UL; -+ update_task_priodl(p); -+ } -+ -+ cpu = select_task_rq(p); -+ -+ if (cpu != task_cpu(p)) { -+ wake_flags |= WF_MIGRATED; -+ psi_ttwu_dequeue(p); -+ set_task_cpu(p, cpu); -+ } -+#else /* CONFIG_SMP */ -+ if (p->in_iowait) { -+ delayacct_blkio_end(p); -+ atomic_dec(&task_rq(p)->nr_iowait); -+ } -+#endif -+ -+ rq = cpu_rq(cpu); -+ raw_spin_lock(&rq->lock); -+ -+ update_rq_clock(rq); -+ ttwu_do_activate(rq, p, wake_flags); -+ check_preempt_curr(rq, p); -+ -+ raw_spin_unlock(&rq->lock); -+ -+stat: -+ ttwu_stat(p, cpu, wake_flags); -+out: -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+ -+ return success; -+} -+ -+/** -+ * try_to_wake_up_local - try to wake up a local task with rq lock held -+ * @p: the thread to be awakened -+ * -+ * Put @p on the run-queue if it's not already there. The caller must -+ * ensure that local rq is locked and, @p is not the current task. -+ */ -+static void try_to_wake_up_local(struct task_struct *p) -+{ -+ struct rq *rq = task_rq(p); -+ -+ if (WARN_ON_ONCE(rq != this_rq()) || -+ WARN_ON_ONCE(p == current)) -+ return; -+ -+ lockdep_assert_held(&rq->lock); -+ -+ if (!raw_spin_trylock(&p->pi_lock)) { -+ /* -+ * This is OK, because current is on_cpu, which avoids it being -+ * picked for load-balance and preemption/IRQs are still -+ * disabled avoiding further scheduler activity on it and we've -+ * not yet picked a replacement task. -+ */ -+ raw_spin_unlock(&rq->lock); -+ raw_spin_lock(&p->pi_lock); -+ raw_spin_lock(&rq->lock); -+ } -+ -+ if (!(p->state & TASK_NORMAL)) -+ goto out; -+ -+ trace_sched_waking(p); -+ -+ if (!task_on_rq_queued(p)) { -+ if (p->in_iowait) { -+ delayacct_blkio_end(p); -+ atomic_dec(&task_rq(p)->nr_iowait); -+ } -+ -+ ttwu_activate(p, rq); -+ } -+ -+ ttwu_do_wakeup(rq, p, 0); -+ ttwu_stat(p, smp_processor_id(), 0); -+ -+out: -+ raw_spin_unlock(&p->pi_lock); -+} -+ -+/** -+ * wake_up_process - Wake up a specific process -+ * @p: The process to be woken up. -+ * -+ * Attempt to wake up the nominated process and move it to the set of runnable -+ * processes. -+ * -+ * Return: 1 if the process was woken up, 0 if it was already running. -+ * -+ * This function executes a full memory barrier before accessing the task state. 
-+ */ -+int wake_up_process(struct task_struct *p) -+{ -+ return try_to_wake_up(p, TASK_NORMAL, 0); -+} -+EXPORT_SYMBOL(wake_up_process); -+ -+int wake_up_state(struct task_struct *p, unsigned int state) -+{ -+ return try_to_wake_up(p, state, 0); -+} -+ -+/* -+ * Perform scheduler related setup for a newly forked process p. -+ * p is forked by current. -+ */ -+int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p) -+{ -+ unsigned long flags; -+ int cpu = get_cpu(); -+ struct rq *rq = this_rq(); -+ -+#ifdef CONFIG_PREEMPT_NOTIFIERS -+ INIT_HLIST_HEAD(&p->preempt_notifiers); -+#endif -+ /* Should be reset in fork.c but done here for ease of PDS patching */ -+ p->on_cpu = -+ p->on_rq = -+ p->utime = -+ p->stime = -+ p->sched_time = 0; -+ -+ p->sl_level = pds_skiplist_random_level(p); -+ INIT_SKIPLIST_NODE(&p->sl_node); -+ -+#ifdef CONFIG_COMPACTION -+ p->capture_control = NULL; -+#endif -+ -+ /* -+ * We mark the process as NEW here. This guarantees that -+ * nobody will actually run it, and a signal or other external -+ * event cannot wake it up and insert it on the runqueue either. -+ */ -+ p->state = TASK_NEW; -+ -+ /* -+ * Make sure we do not leak PI boosting priority to the child. -+ */ -+ p->prio = current->normal_prio; -+ -+ /* -+ * Revert to default priority/policy on fork if requested. -+ */ -+ if (unlikely(p->sched_reset_on_fork)) { -+ if (task_has_rt_policy(p)) { -+ p->policy = SCHED_NORMAL; -+ p->static_prio = NICE_TO_PRIO(0); -+ p->rt_priority = 0; -+ } else if (PRIO_TO_NICE(p->static_prio) < 0) -+ p->static_prio = NICE_TO_PRIO(0); -+ -+ p->prio = p->normal_prio = normal_prio(p); -+ -+ /* -+ * We don't need the reset flag anymore after the fork. It has -+ * fulfilled its duty: -+ */ -+ p->sched_reset_on_fork = 0; -+ } -+ -+ /* -+ * Share the timeslice between parent and child, thus the -+ * total amount of pending timeslices in the system doesn't change, -+ * resulting in more scheduling fairness. -+ */ -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ rq->curr->time_slice /= 2; -+ p->time_slice = rq->curr->time_slice; -+#ifdef CONFIG_SCHED_HRTICK -+ hrtick_start(rq, US_TO_NS(rq->curr->time_slice)); -+#endif -+ -+ if (p->time_slice < RESCHED_US) { -+ update_rq_clock(rq); -+ time_slice_expired(p, rq); -+ resched_curr(rq); -+ } else -+ update_task_priodl(p); -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+ -+ /* -+ * The child is not yet in the pid-hash so no cgroup attach races, -+ * and the cgroup is pinned to this child due to cgroup_fork() -+ * is ran before sched_fork(). -+ * -+ * Silence PROVE_RCU. -+ */ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ /* -+ * We're setting the CPU for the first time, we don't migrate, -+ * so use __set_task_cpu(). 
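
The slice split in sched_fork() above, in isolation: the parent donates half of its remaining slice to the child, so fork cannot mint scheduling time. A trivial rendering (struct name invented; this patch keeps time_slice in microseconds):

struct demo_task { int time_slice; };

static void fork_split_slice(struct demo_task *parent,
                             struct demo_task *child)
{
    parent->time_slice /= 2;
    child->time_slice = parent->time_slice;
}
/* e.g. 6000us left -> parent 3000us, child 3000us: the total is
 * unchanged (integer division may drop one microsecond: 6001 -> 3000+3000) */
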
-+ */ -+ __set_task_cpu(p, cpu); -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+ -+#ifdef CONFIG_SCHED_INFO -+ if (unlikely(sched_info_on())) -+ memset(&p->sched_info, 0, sizeof(p->sched_info)); -+#endif -+ init_task_preempt_count(p); -+ -+ put_cpu(); -+ return 0; -+} -+ -+#ifdef CONFIG_SCHEDSTATS -+ -+DEFINE_STATIC_KEY_FALSE(sched_schedstats); -+static bool __initdata __sched_schedstats = false; -+ -+static void set_schedstats(bool enabled) -+{ -+ if (enabled) -+ static_branch_enable(&sched_schedstats); -+ else -+ static_branch_disable(&sched_schedstats); -+} -+ -+void force_schedstat_enabled(void) -+{ -+ if (!schedstat_enabled()) { -+ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); -+ static_branch_enable(&sched_schedstats); -+ } -+} -+ -+static int __init setup_schedstats(char *str) -+{ -+ int ret = 0; -+ if (!str) -+ goto out; -+ -+ /* -+ * This code is called before jump labels have been set up, so we can't -+ * change the static branch directly just yet. Instead set a temporary -+ * variable so init_schedstats() can do it later. -+ */ -+ if (!strcmp(str, "enable")) { -+ __sched_schedstats = true; -+ ret = 1; -+ } else if (!strcmp(str, "disable")) { -+ __sched_schedstats = false; -+ ret = 1; -+ } -+out: -+ if (!ret) -+ pr_warn("Unable to parse schedstats=\n"); -+ -+ return ret; -+} -+__setup("schedstats=", setup_schedstats); -+ -+static void __init init_schedstats(void) -+{ -+ set_schedstats(__sched_schedstats); -+} -+ -+#ifdef CONFIG_PROC_SYSCTL -+int sysctl_schedstats(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, loff_t *ppos) -+{ -+ struct ctl_table t; -+ int err; -+ int state = static_branch_likely(&sched_schedstats); -+ -+ if (write && !capable(CAP_SYS_ADMIN)) -+ return -EPERM; -+ -+ t = *table; -+ t.data = &state; -+ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); -+ if (err < 0) -+ return err; -+ if (write) -+ set_schedstats(state); -+ return err; -+} -+#endif /* CONFIG_PROC_SYSCTL */ -+#else /* !CONFIG_SCHEDSTATS */ -+static inline void init_schedstats(void) {} -+#endif /* CONFIG_SCHEDSTATS */ -+ -+/* -+ * wake_up_new_task - wake up a newly created task for the first time. -+ * -+ * This function will do some initial scheduler statistics housekeeping -+ * that must be done for every newly created context, then puts the task -+ * on the runqueue and wakes it. -+ */ -+void wake_up_new_task(struct task_struct *p) -+{ -+ unsigned long flags; -+ struct rq *rq; -+ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ -+ p->state = TASK_RUNNING; -+ -+ rq = cpu_rq(select_task_rq(p)); -+#ifdef CONFIG_SMP -+ /* -+ * Fork balancing, do it here and not earlier because: -+ * - cpus_allowed can change in the fork path -+ * - any previously selected CPU might disappear through hotplug -+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -+ * as we're not fully set-up yet. 
-+ */ -+ __set_task_cpu(p, cpu_of(rq)); -+#endif -+ -+ raw_spin_lock(&rq->lock); -+ -+ update_rq_clock(rq); -+ activate_task(p, rq); -+ trace_sched_wakeup_new(p); -+ check_preempt_curr(rq, p); -+ -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+} -+ -+#ifdef CONFIG_PREEMPT_NOTIFIERS -+ -+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); -+ -+void preempt_notifier_inc(void) -+{ -+ static_branch_inc(&preempt_notifier_key); -+} -+EXPORT_SYMBOL_GPL(preempt_notifier_inc); -+ -+void preempt_notifier_dec(void) -+{ -+ static_branch_dec(&preempt_notifier_key); -+} -+EXPORT_SYMBOL_GPL(preempt_notifier_dec); -+ -+/** -+ * preempt_notifier_register - tell me when current is being preempted & rescheduled -+ * @notifier: notifier struct to register -+ */ -+void preempt_notifier_register(struct preempt_notifier *notifier) -+{ -+ if (!static_branch_unlikely(&preempt_notifier_key)) -+ WARN(1, "registering preempt_notifier while notifiers disabled\n"); -+ -+ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); -+} -+EXPORT_SYMBOL_GPL(preempt_notifier_register); -+ -+/** -+ * preempt_notifier_unregister - no longer interested in preemption notifications -+ * @notifier: notifier struct to unregister -+ * -+ * This is *not* safe to call from within a preemption notifier. -+ */ -+void preempt_notifier_unregister(struct preempt_notifier *notifier) -+{ -+ hlist_del(¬ifier->link); -+} -+EXPORT_SYMBOL_GPL(preempt_notifier_unregister); -+ -+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) -+{ -+ struct preempt_notifier *notifier; -+ -+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) -+ notifier->ops->sched_in(notifier, raw_smp_processor_id()); -+} -+ -+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) -+{ -+ if (static_branch_unlikely(&preempt_notifier_key)) -+ __fire_sched_in_preempt_notifiers(curr); -+} -+ -+static void -+__fire_sched_out_preempt_notifiers(struct task_struct *curr, -+ struct task_struct *next) -+{ -+ struct preempt_notifier *notifier; -+ -+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) -+ notifier->ops->sched_out(notifier, next); -+} -+ -+static __always_inline void -+fire_sched_out_preempt_notifiers(struct task_struct *curr, -+ struct task_struct *next) -+{ -+ if (static_branch_unlikely(&preempt_notifier_key)) -+ __fire_sched_out_preempt_notifiers(curr, next); -+} -+ -+#else /* !CONFIG_PREEMPT_NOTIFIERS */ -+ -+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) -+{ -+} -+ -+static inline void -+fire_sched_out_preempt_notifiers(struct task_struct *curr, -+ struct task_struct *next) -+{ -+} -+ -+#endif /* CONFIG_PREEMPT_NOTIFIERS */ -+ -+static inline void prepare_task(struct task_struct *next) -+{ -+ /* -+ * Claim the task as running, we do this before switching to it -+ * such that any running task will have this set. -+ */ -+ next->on_cpu = 1; -+} -+ -+static inline void finish_task(struct task_struct *prev) -+{ -+#ifdef CONFIG_SMP -+ /* -+ * After ->on_cpu is cleared, the task can be moved to a different CPU. -+ * We must ensure this doesn't happen until the switch is completely -+ * finished. -+ * -+ * In particular, the load of prev->state in finish_task_switch() must -+ * happen before this. -+ * -+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
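
For context, a hedged usage sketch of the preempt-notifier API shown above (KVM is the canonical in-tree user; the callback bodies here are placeholders):

#include <linux/preempt.h>
#include <linux/sched.h>

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
    /* current was just scheduled back in on `cpu` */
}

static void my_sched_out(struct preempt_notifier *pn,
                         struct task_struct *next)
{
    /* current is being preempted in favour of `next` */
}

static struct preempt_notifier_ops my_ops = {
    .sched_in  = my_sched_in,
    .sched_out = my_sched_out,
};

static struct preempt_notifier my_pn;

static void hook_current(void)
{
    preempt_notifier_inc();              /* enable the static branch */
    preempt_notifier_init(&my_pn, &my_ops);
    preempt_notifier_register(&my_pn);   /* attaches to current */
}
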
-+ */ -+ smp_store_release(&prev->on_cpu, 0); -+#else -+ prev->on_cpu = 0; -+#endif -+} -+ -+static inline void -+prepare_lock_switch(struct rq *rq, struct task_struct *next) -+{ -+ /* -+ * Since the runqueue lock will be released by the next -+ * task (which is an invalid locking op but in the case -+ * of the scheduler it's an obvious special-case), so we -+ * do an early lockdep release here: -+ */ -+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_); -+#ifdef CONFIG_DEBUG_SPINLOCK -+ /* this is a valid case when another task releases the spinlock */ -+ rq->lock.owner = next; -+#endif -+} -+ -+static inline void finish_lock_switch(struct rq *rq) -+{ -+ /* -+ * If we are tracking spinlock dependencies then we have to -+ * fix up the runqueue lock - which gets 'carried over' from -+ * prev into current: -+ */ -+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); -+ raw_spin_unlock_irq(&rq->lock); -+} -+ -+/** -+ * prepare_task_switch - prepare to switch tasks -+ * @rq: the runqueue preparing to switch -+ * @next: the task we are going to switch to. -+ * -+ * This is called with the rq lock held and interrupts off. It must -+ * be paired with a subsequent finish_task_switch after the context -+ * switch. -+ * -+ * prepare_task_switch sets up locking and calls architecture specific -+ * hooks. -+ */ -+static inline void -+prepare_task_switch(struct rq *rq, struct task_struct *prev, -+ struct task_struct *next) -+{ -+ kcov_prepare_switch(prev); -+ sched_info_switch(rq, prev, next); -+ perf_event_task_sched_out(prev, next); -+ rseq_preempt(prev); -+ fire_sched_out_preempt_notifiers(prev, next); -+ prepare_task(next); -+ prepare_arch_switch(next); -+} -+ -+/** -+ * finish_task_switch - clean up after a task-switch -+ * @rq: runqueue associated with task-switch -+ * @prev: the thread we just switched away from. -+ * -+ * finish_task_switch must be called after the context switch, paired -+ * with a prepare_task_switch call before the context switch. -+ * finish_task_switch will reconcile locking set up by prepare_task_switch, -+ * and do any other architecture-specific cleanup actions. -+ * -+ * Note that we may have delayed dropping an mm in context_switch(). If -+ * so, we finish that here outside of the runqueue lock. (Doing it -+ * with the lock held can cause deadlocks; see schedule() for -+ * details.) -+ * -+ * The context switch have flipped the stack from under us and restored the -+ * local variables which were saved when this task called schedule() in the -+ * past. prev == current is still correct but we need to recalculate this_rq -+ * because prev may have moved to another CPU. -+ */ -+static struct rq *finish_task_switch(struct task_struct *prev) -+ __releases(rq->lock) -+{ -+ struct rq *rq = this_rq(); -+ struct mm_struct *mm = rq->prev_mm; -+ long prev_state; -+ -+ /* -+ * The previous task will have left us with a preempt_count of 2 -+ * because it left us after: -+ * -+ * schedule() -+ * preempt_disable(); // 1 -+ * __schedule() -+ * raw_spin_lock_irq(&rq->lock) // 2 -+ * -+ * Also, see FORK_PREEMPT_COUNT. -+ */ -+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, -+ "corrupted preempt_count: %s/%d/0x%x\n", -+ current->comm, current->pid, preempt_count())) -+ preempt_count_set(FORK_PREEMPT_COUNT); -+ -+ rq->prev_mm = NULL; -+ -+ /* -+ * A task struct has one reference for the use as "current". -+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls -+ * schedule one last time. 
The schedule call will never return, and -+ * the scheduled task must drop that reference. -+ * -+ * We must observe prev->state before clearing prev->on_cpu (in -+ * finish_task), otherwise a concurrent wakeup can get prev -+ * running on another CPU and we could rave with its RUNNING -> DEAD -+ * transition, resulting in a double drop. -+ */ -+ prev_state = prev->state; -+ vtime_task_switch(prev); -+ perf_event_task_sched_in(prev, current); -+ finish_task(prev); -+ finish_lock_switch(rq); -+ finish_arch_post_lock_switch(); -+ kcov_finish_switch(current); -+ -+ fire_sched_in_preempt_notifiers(current); -+ /* -+ * When switching through a kernel thread, the loop in -+ * membarrier_{private,global}_expedited() may have observed that -+ * kernel thread and not issued an IPI. It is therefore possible to -+ * schedule between user->kernel->user threads without passing though -+ * switch_mm(). Membarrier requires a barrier after storing to -+ * rq->curr, before returning to userspace, so provide them here: -+ * -+ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly -+ * provided by mmdrop(), -+ * - a sync_core for SYNC_CORE. -+ */ -+ if (mm) { -+ membarrier_mm_sync_core_before_usermode(mm); -+ mmdrop(mm); -+ } -+ if (unlikely(prev_state == TASK_DEAD)) { -+ /* -+ * Remove function-return probe instances associated with this -+ * task and put them back on the free list. -+ */ -+ kprobe_flush_task(prev); -+ -+ /* Task is done with its stack. */ -+ put_task_stack(prev); -+ -+ put_task_struct(prev); -+ } -+ -+ tick_nohz_task_switch(); -+ return rq; -+} -+ -+/** -+ * schedule_tail - first thing a freshly forked thread must call. -+ * @prev: the thread we just switched away from. -+ */ -+asmlinkage __visible void schedule_tail(struct task_struct *prev) -+ __releases(rq->lock) -+{ -+ struct rq *rq; -+ -+ /* -+ * New tasks start with FORK_PREEMPT_COUNT, see there and -+ * finish_task_switch() for details. -+ * -+ * finish_task_switch() will drop rq->lock() and lower preempt_count -+ * and the preempt_enable() will end up enabling preemption (on -+ * PREEMPT_COUNT kernels). -+ */ -+ -+ rq = finish_task_switch(prev); -+ preempt_enable(); -+ -+ if (current->set_child_tid) -+ put_user(task_pid_vnr(current), current->set_child_tid); -+ -+ calculate_sigpending(); -+} -+ -+/* -+ * context_switch - switch to the new MM and the new thread's register state. -+ */ -+static __always_inline struct rq * -+context_switch(struct rq *rq, struct task_struct *prev, -+ struct task_struct *next) -+{ -+ struct mm_struct *mm, *oldmm; -+ -+ prepare_task_switch(rq, prev, next); -+ -+ mm = next->mm; -+ oldmm = prev->active_mm; -+ /* -+ * For paravirt, this is coupled with an exit in switch_to to -+ * combine the page table reload and the switch backend into -+ * one hypercall. -+ */ -+ arch_start_context_switch(prev); -+ -+ /* -+ * If mm is non-NULL, we pass through switch_mm(). If mm is -+ * NULL, we will pass through mmdrop() in finish_task_switch(). -+ * Both of these contain the full memory barrier required by -+ * membarrier after storing to rq->curr, before returning to -+ * user-space. -+ */ -+ if (!mm) { -+ next->active_mm = oldmm; -+ mmgrab(oldmm); -+ enter_lazy_tlb(oldmm, next); -+ } else -+ switch_mm_irqs_off(oldmm, mm, next); -+ -+ if (!prev->mm) { -+ prev->active_mm = NULL; -+ rq->prev_mm = oldmm; -+ } -+ -+ prepare_lock_switch(rq, next); -+ -+ /* Here we just switch the register state and the stack. 
*/ -+ switch_to(prev, next, prev); -+ barrier(); -+ -+ return finish_task_switch(prev); -+} -+ -+/* -+ * nr_running, nr_uninterruptible and nr_context_switches: -+ * -+ * externally visible scheduler statistics: current number of runnable -+ * threads, total number of context switches performed since bootup. -+ */ -+unsigned long nr_running(void) -+{ -+ unsigned long i, sum = 0; -+ -+ for_each_online_cpu(i) -+ sum += cpu_rq(i)->nr_running; -+ -+ return sum; -+} -+ -+/* -+ * Check if only the current task is running on the CPU. -+ * -+ * Caution: this function does not check that the caller has disabled -+ * preemption, thus the result might have a time-of-check-to-time-of-use -+ * race. The caller is responsible to use it correctly, for example: -+ * -+ * - from a non-preemptible section (of course) -+ * -+ * - from a thread that is bound to a single CPU -+ * -+ * - in a loop with very short iterations (e.g. a polling loop) -+ */ -+bool single_task_running(void) -+{ -+ return raw_rq()->nr_running == 1; -+} -+EXPORT_SYMBOL(single_task_running); -+ -+unsigned long long nr_context_switches(void) -+{ -+ int i; -+ unsigned long long sum = 0; -+ -+ for_each_possible_cpu(i) -+ sum += cpu_rq(i)->nr_switches; -+ -+ return sum; -+} -+ -+/* -+ * Consumers of these two interfaces, like for example the cpuidle menu -+ * governor, are using nonsensical data. Preferring shallow idle state selection -+ * for a CPU that has IO-wait which might not even end up running the task when -+ * it does become runnable. -+ */ -+ -+unsigned long nr_iowait_cpu(int cpu) -+{ -+ return atomic_read(&cpu_rq(cpu)->nr_iowait); -+} -+ -+/* -+ * IO-wait accounting, and how its mostly bollocks (on SMP). -+ * -+ * The idea behind IO-wait account is to account the idle time that we could -+ * have spend running if it were not for IO. That is, if we were to improve the -+ * storage performance, we'd have a proportional reduction in IO-wait time. -+ * -+ * This all works nicely on UP, where, when a task blocks on IO, we account -+ * idle time as IO-wait, because if the storage were faster, it could've been -+ * running and we'd not be idle. -+ * -+ * This has been extended to SMP, by doing the same for each CPU. This however -+ * is broken. -+ * -+ * Imagine for instance the case where two tasks block on one CPU, only the one -+ * CPU will have IO-wait accounted, while the other has regular idle. Even -+ * though, if the storage were faster, both could've ran at the same time, -+ * utilising both CPUs. -+ * -+ * This means, that when looking globally, the current IO-wait accounting on -+ * SMP is a lower bound, by reason of under accounting. -+ * -+ * Worse, since the numbers are provided per CPU, they are sometimes -+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly -+ * associated with any one particular CPU, it can wake to another CPU than it -+ * blocked on. This means the per CPU IO-wait number is meaningless. -+ * -+ * Task CPU affinities can make all that even more 'interesting'. 
-+ */
-+
-+unsigned long nr_iowait(void)
-+{
-+	unsigned long i, sum = 0;
-+
-+	for_each_possible_cpu(i)
-+		sum += nr_iowait_cpu(i);
-+
-+	return sum;
-+}
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+static inline void pds_update_curr(struct rq *rq, struct task_struct *p)
-+{
-+	s64 ns = rq->clock_task - p->last_ran;
-+
-+	p->sched_time += ns;
-+	account_group_exec_runtime(p, ns);
-+
-+	/* time_slice accounting is done in usecs to avoid overflow on 32bit */
-+	p->time_slice -= NS_TO_US(ns);
-+	p->last_ran = rq->clock_task;
-+}
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * Return separately the current's pending runtime that has not been
-+ * accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+	unsigned long flags;
-+	struct rq *rq;
-+	raw_spinlock_t *lock;
-+	u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+	/*
-+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
-+	 * So we have an optimization chance when the task's delta_exec is 0.
-+	 * Reading ->on_cpu is racy, but this is ok.
-+	 *
-+	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+	 * If we race with it entering CPU, unaccounted time is 0. This is
-+	 * indistinguishable from the read occurring a few cycles earlier.
-+	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+	 * been accounted, so we're correct here as well.
-+	 */
-+	if (!p->on_cpu || !task_on_rq_queued(p))
-+		return tsk_seruntime(p);
-+#endif
-+
-+	rq = task_access_lock_irqsave(p, &lock, &flags);
-+	/*
-+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-+	 * project cycles that may never be accounted to this
-+	 * thread, breaking clock_gettime().
-+	 */
-+	if (p == rq->curr && task_on_rq_queued(p)) {
-+		update_rq_clock(rq);
-+		pds_update_curr(rq, p);
-+	}
-+	ns = tsk_seruntime(p);
-+	task_access_unlock_irqrestore(p, lock, &flags);
-+
-+	return ns;
-+}
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static inline void pds_scheduler_task_tick(struct rq *rq)
-+{
-+	struct task_struct *p = rq->curr;
-+
-+	if (is_idle_task(p))
-+		return;
-+
-+	pds_update_curr(rq, p);
-+
-+	cpufreq_update_util(rq, 0);
-+
-+	/*
-+	 * Tasks that were scheduled in the first half of a tick are not
-+	 * allowed to run into the 2nd half of the next tick if they will
-+	 * run out of time slice in the interim. Otherwise, if they have
-+	 * less than RESCHED_US μs of time slice left they will be rescheduled.
-+	 */
-+	if (p->time_slice - rq->dither >= RESCHED_US)
-+		return;
-+
-+	/**
-+	 * p->time_slice < RESCHED_US.
We will modify task_struct under
-+	 * rq lock as p is rq->curr
-+	 */
-+	__set_tsk_resched(p);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+#ifdef CONFIG_SCHED_SMT
-+static int active_load_balance_cpu_stop(void *data)
-+{
-+	struct rq *rq = this_rq();
-+	struct task_struct *p = data;
-+	int cpu;
-+	unsigned long flags;
-+
-+	local_irq_save(flags);
-+
-+	raw_spin_lock(&p->pi_lock);
-+	raw_spin_lock(&rq->lock);
-+
-+	rq->active_balance = 0;
-+	/*
-+	 * _something_ may have changed the task, double check again
-+	 */
-+	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    (cpu = cpumask_any_and(&p->cpus_allowed, &sched_cpu_sg_idle_mask)) < nr_cpu_ids)
-+		rq = __migrate_task(rq, p, cpu);
-+
-+	raw_spin_unlock(&rq->lock);
-+	raw_spin_unlock(&p->pi_lock);
-+
-+	local_irq_restore(flags);
-+
-+	return 0;
-+}
-+
-+/* pds_sg_balance_trigger - trigger sibling group balance for @cpu */
-+static void pds_sg_balance_trigger(const int cpu)
-+{
-+	struct rq *rq = cpu_rq(cpu);
-+	unsigned long flags;
-+	struct task_struct *curr;
-+
-+	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
-+		return;
-+	curr = rq->curr;
-+	if (!is_idle_task(curr) &&
-+	    cpumask_intersects(&curr->cpus_allowed, &sched_cpu_sg_idle_mask)) {
-+		int active_balance = 0;
-+
-+		if (likely(!rq->active_balance)) {
-+			rq->active_balance = 1;
-+			active_balance = 1;
-+		}
-+
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+
-+		if (likely(active_balance))
-+			stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
-+					    curr, &rq->active_balance_work);
-+	} else
-+		raw_spin_unlock_irqrestore(&rq->lock, flags);
-+}
-+
-+/*
-+ * pds_sg_balance_check - sibling group balance check for run queue @rq
-+ */
-+static inline void pds_sg_balance_check(const struct rq *rq)
-+{
-+	cpumask_t chk;
-+	int i;
-+
-+	/* Only online cpu will do sg balance checking */
-+	if (unlikely(!rq->online))
-+		return;
-+
-+	/* Only cpu in sibling idle group will do the checking */
-+	if (!cpumask_test_cpu(cpu_of(rq), &sched_cpu_sg_idle_mask))
-+		return;
-+
-+	/* Find potential cpus which can migrate the currently running task */
-+	if (!cpumask_andnot(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY],
-+			    &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
-+		return;
-+
-+	for_each_cpu(i, &chk) {
-+		/* skip the cpu which has idle sibling cpu */
-+		if (cpumask_test_cpu(per_cpu(sched_sibling_cpu, i),
-+				     &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
-+			continue;
-+		pds_sg_balance_trigger(i);
-+	}
-+}
-+#endif /* CONFIG_SCHED_SMT */
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
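/*
 * Illustrative userspace sketch: the SMT sibling relationships that
 * pds_sg_balance_check() exploits are visible through the standard
 * sysfs CPU topology files (path per the Linux CPU topology ABI).
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 SMT siblings: %s", buf);	/* e.g. "0,4" or "0-1" */
	fclose(f);
	return 0;
}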
-+ */ -+void scheduler_tick(void) -+{ -+ int cpu __maybe_unused = smp_processor_id(); -+ struct rq *rq = cpu_rq(cpu); -+ -+ sched_clock_tick(); -+ -+ raw_spin_lock(&rq->lock); -+ update_rq_clock(rq); -+ -+ pds_scheduler_task_tick(rq); -+ update_sched_rq_queued_masks_normal(rq); -+ calc_global_load_tick(rq); -+ psi_task_tick(rq); -+ -+ rq->last_tick = rq->clock; -+ raw_spin_unlock(&rq->lock); -+ -+ perf_event_task_tick(); -+} -+ -+#ifdef CONFIG_NO_HZ_FULL -+struct tick_work { -+ int cpu; -+ struct delayed_work work; -+}; -+ -+static struct tick_work __percpu *tick_work_cpu; -+ -+static void sched_tick_remote(struct work_struct *work) -+{ -+ struct delayed_work *dwork = to_delayed_work(work); -+ struct tick_work *twork = container_of(dwork, struct tick_work, work); -+ int cpu = twork->cpu; -+ struct rq *rq = cpu_rq(cpu); -+ struct task_struct *curr; -+ unsigned long flags; -+ u64 delta; -+ -+ /* -+ * Handle the tick only if it appears the remote CPU is running in full -+ * dynticks mode. The check is racy by nature, but missing a tick or -+ * having one too much is no big deal because the scheduler tick updates -+ * statistics and checks timeslices in a time-independent way, regardless -+ * of when exactly it is running. -+ */ -+ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) -+ goto out_requeue; -+ -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ curr = rq->curr; -+ -+ if (is_idle_task(curr)) -+ goto out_unlock; -+ -+ update_rq_clock(rq); -+ delta = rq_clock_task(rq) - curr->last_ran; -+ -+ /* -+ * Make sure the next tick runs within a reasonable -+ * amount of time. -+ */ -+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); -+ pds_scheduler_task_tick(rq); -+ update_sched_rq_queued_masks_normal(rq); -+ -+out_unlock: -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+ -+out_requeue: -+ /* -+ * Run the remote tick once per second (1Hz). This arbitrary -+ * frequency is large enough to avoid overload but short enough -+ * to keep scheduler internal stats reasonably up to date. -+ */ -+ queue_delayed_work(system_unbound_wq, dwork, HZ); -+} -+ -+static void sched_tick_start(int cpu) -+{ -+ struct tick_work *twork; -+ -+ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) -+ return; -+ -+ WARN_ON_ONCE(!tick_work_cpu); -+ -+ twork = per_cpu_ptr(tick_work_cpu, cpu); -+ twork->cpu = cpu; -+ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); -+ queue_delayed_work(system_unbound_wq, &twork->work, HZ); -+} -+ -+#ifdef CONFIG_HOTPLUG_CPU -+static void sched_tick_stop(int cpu) -+{ -+ struct tick_work *twork; -+ -+ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) -+ return; -+ -+ WARN_ON_ONCE(!tick_work_cpu); -+ -+ twork = per_cpu_ptr(tick_work_cpu, cpu); -+ cancel_delayed_work_sync(&twork->work); -+} -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+int __init sched_tick_offload_init(void) -+{ -+ tick_work_cpu = alloc_percpu(struct tick_work); -+ BUG_ON(!tick_work_cpu); -+ -+ return 0; -+} -+ -+#else /* !CONFIG_NO_HZ_FULL */ -+static inline void sched_tick_start(int cpu) { } -+static inline void sched_tick_stop(int cpu) { } -+#endif -+ -+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ -+ defined(CONFIG_PREEMPT_TRACER)) -+/* -+ * If the value passed in is equal to the current preempt count -+ * then we just disabled preemption. Start timing the latency. 
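/*
 * Illustrative userspace sketch: the per-task runtime accumulated by
 * pds_update_curr() and reported by task_sched_runtime() is what
 * clock_gettime(CLOCK_THREAD_CPUTIME_ID) returns for a thread.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	volatile unsigned long i;

	for (i = 0; i < 50000000UL; i++)
		;	/* burn some CPU so there is runtime to account */

	if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts))
		return 1;
	printf("accounted runtime: %ld.%09lds\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}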
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+	if (preempt_count() == val) {
-+		unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+		current->preempt_disable_ip = ip;
-+#endif
-+		trace_preempt_off(CALLER_ADDR0, ip);
-+	}
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+		return;
-+#endif
-+	__preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Spinlock count overflowing soon?
-+	 */
-+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+				PREEMPT_MASK - 10);
-+#endif
-+	preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in equals the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+	if (preempt_count() == val)
-+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+	/*
-+	 * Underflow?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+		return;
-+	/*
-+	 * Is the spinlock portion underflowing?
-+	 */
-+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+			!(preempt_count() & PREEMPT_MASK)))
-+		return;
-+#endif
-+
-+	preempt_latency_stop(val);
-+	__preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
-+
-+/*
-+ * Timeslices below RESCHED_US are considered as good as expired as there's no
-+ * point rescheduling when there's so little time left. SCHED_BATCH tasks
-+ * have been flagged as not latency sensitive and likely to be fully CPU
-+ * bound so every time they're rescheduled they have their time_slice
-+ * refilled, but get a new later deadline to have little effect on
-+ * SCHED_NORMAL tasks.
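/*
 * Illustrative userspace sketch: opting a task into SCHED_BATCH, or
 * into PDS's SCHED_ISO. The value 4 for SCHED_ISO is an assumption
 * taken from the BFS lineage (glibc headers do not define it); on a
 * kernel without SCHED_ISO the first call fails and SCHED_BATCH is
 * used instead.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4	/* assumed BFS/PDS policy value, not in glibc */
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_ISO, &sp) &&
	    sched_setscheduler(0, SCHED_BATCH, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy now %d\n", sched_getscheduler(0));
	return 0;
}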
-+ -+ */ -+static inline void check_deadline(struct task_struct *p, struct rq *rq) -+{ -+ if (rq->idle == p) -+ return; -+ -+ pds_update_curr(rq, p); -+ -+ if (p->time_slice < RESCHED_US) { -+ time_slice_expired(p, rq); -+ if (SCHED_ISO == p->policy && ISO_PRIO == p->prio) { -+ p->prio = NORMAL_PRIO; -+ p->deadline = rq->clock + task_deadline_diff(p); -+ update_task_priodl(p); -+ } -+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) -+ requeue_task(p, rq); -+ } -+} -+ -+#ifdef CONFIG_SMP -+ -+#define SCHED_RQ_NR_MIGRATION (32UL) -+/* -+ * Migrate pending tasks in @rq to @dest_cpu -+ * Will try to migrate mininal of half of @rq nr_running tasks and -+ * SCHED_RQ_NR_MIGRATION to @dest_cpu -+ */ -+static inline int -+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, int filter_prio) -+{ -+ struct task_struct *p; -+ int dest_cpu = cpu_of(dest_rq); -+ int nr_migrated = 0; -+ int nr_tries = min((rq->nr_running + 1) / 2, SCHED_RQ_NR_MIGRATION); -+ struct skiplist_node *node = rq->sl_header.next[0]; -+ -+ while (nr_tries && node != &rq->sl_header) { -+ p = skiplist_entry(node, struct task_struct, sl_node); -+ node = node->next[0]; -+ -+ if (task_running(p)) -+ continue; -+ if (p->prio >= filter_prio) -+ break; -+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) { -+ detach_task(rq, p, dest_cpu); -+ attach_task(dest_rq, p); -+ nr_migrated++; -+ } -+ nr_tries--; -+ /* make a jump */ -+ if (node == &rq->sl_header) -+ break; -+ node = node->next[0]; -+ } -+ -+ return nr_migrated; -+} -+ -+static inline int -+take_queued_task_cpumask(struct rq *rq, cpumask_t *chk_mask, int filter_prio) -+{ -+ int src_cpu; -+ -+ for_each_cpu(src_cpu, chk_mask) { -+ int nr_migrated; -+ struct rq *src_rq = cpu_rq(src_cpu); -+ -+ if (!do_raw_spin_trylock(&src_rq->lock)) { -+ if (PRIO_LIMIT == filter_prio) -+ continue; -+ return 0; -+ } -+ spin_acquire(&src_rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_); -+ -+ update_rq_clock(src_rq); -+ nr_migrated = migrate_pending_tasks(src_rq, rq, filter_prio); -+ -+ spin_release(&src_rq->lock.dep_map, 1, _RET_IP_); -+ do_raw_spin_unlock(&src_rq->lock); -+ -+ if (nr_migrated || PRIO_LIMIT != filter_prio) -+ return nr_migrated; -+ } -+ return 0; -+} -+ -+static inline int take_other_rq_task(struct rq *rq, int cpu, int filter_prio) -+{ -+ struct cpumask *affinity_mask, *end; -+ struct cpumask chk; -+ -+ if (PRIO_LIMIT == filter_prio) { -+ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]); -+#ifdef CONFIG_SMT_NICE -+ { -+ /* also try to take IDLE priority tasks from smt supressed cpu */ -+ struct cpumask t; -+ if (cpumask_and(&t, &sched_smt_supressed_mask, -+ &sched_rq_queued_masks[SCHED_RQ_IDLE])) -+ cpumask_or(&chk, &chk, &t); -+ } -+#endif -+ } else if (NORMAL_PRIO == filter_prio) { -+ cpumask_or(&chk, &sched_rq_pending_masks[SCHED_RQ_RT], -+ &sched_rq_pending_masks[SCHED_RQ_ISO]); -+ } else if (IDLE_PRIO == filter_prio) { -+ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]); -+ cpumask_andnot(&chk, &chk, &sched_rq_pending_masks[SCHED_RQ_IDLE]); -+ } else -+ cpumask_copy(&chk, &sched_rq_pending_masks[SCHED_RQ_RT]); -+ -+ if (cpumask_empty(&chk)) -+ return 0; -+ -+ affinity_mask = per_cpu(sched_cpu_llc_start_mask, cpu); -+ end = per_cpu(sched_cpu_affinity_chk_end_masks, cpu); -+ do { -+ struct cpumask tmp; -+ -+ if (cpumask_and(&tmp, &chk, affinity_mask) && -+ take_queued_task_cpumask(rq, &tmp, filter_prio)) -+ return 1; -+ } while (++affinity_mask < end); -+ -+ return 0; -+} -+#endif -+ -+static inline struct task_struct * 
-+choose_next_task(struct rq *rq, int cpu, struct task_struct *prev) -+{ -+ struct task_struct *next = rq_first_queued_task(rq); -+ -+#ifdef CONFIG_SMT_NICE -+ if (cpumask_test_cpu(cpu, &sched_smt_supressed_mask)) { -+ if (next->prio >= IDLE_PRIO) { -+ if (rq->online && -+ take_other_rq_task(rq, cpu, IDLE_PRIO)) -+ return rq_first_queued_task(rq); -+ return rq->idle; -+ } -+ } -+#endif -+ -+#ifdef CONFIG_SMP -+ if (likely(rq->online)) -+ if (take_other_rq_task(rq, cpu, next->prio)) { -+ resched_curr(rq); -+ return rq_first_queued_task(rq); -+ } -+#endif -+ return next; -+} -+ -+static inline unsigned long get_preempt_disable_ip(struct task_struct *p) -+{ -+#ifdef CONFIG_DEBUG_PREEMPT -+ return p->preempt_disable_ip; -+#else -+ return 0; -+#endif -+} -+ -+/* -+ * Print scheduling while atomic bug: -+ */ -+static noinline void __schedule_bug(struct task_struct *prev) -+{ -+ /* Save this before calling printk(), since that will clobber it */ -+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); -+ -+ if (oops_in_progress) -+ return; -+ -+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", -+ prev->comm, prev->pid, preempt_count()); -+ -+ debug_show_held_locks(prev); -+ print_modules(); -+ if (irqs_disabled()) -+ print_irqtrace_events(prev); -+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) -+ && in_atomic_preempt_off()) { -+ pr_err("Preemption disabled at:"); -+ print_ip_sym(preempt_disable_ip); -+ pr_cont("\n"); -+ } -+ if (panic_on_warn) -+ panic("scheduling while atomic\n"); -+ -+ dump_stack(); -+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); -+} -+ -+/* -+ * Various schedule()-time debugging checks and statistics: -+ */ -+static inline void schedule_debug(struct task_struct *prev) -+{ -+#ifdef CONFIG_SCHED_STACK_END_CHECK -+ if (task_stack_end_corrupted(prev)) -+ panic("corrupted stack end detected inside scheduler\n"); -+#endif -+ -+ if (unlikely(in_atomic_preempt_off())) { -+ __schedule_bug(prev); -+ preempt_count_set(PREEMPT_DISABLED); -+ } -+ rcu_sleep_check(); -+ -+ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); -+ -+ schedstat_inc(this_rq()->sched_count); -+} -+ -+static inline void set_rq_task(struct rq *rq, struct task_struct *p) -+{ -+ p->last_ran = rq->clock_task; -+ -+#ifdef CONFIG_HIGH_RES_TIMERS -+ if (p != rq->idle) -+ hrtick_start(rq, US_TO_NS(p->time_slice)); -+#endif -+ /* update rq->dither */ -+ rq->dither = rq_dither(rq); -+} -+ -+/* -+ * schedule() is the main scheduler function. -+ * -+ * The main means of driving the scheduler and thus entering this function are: -+ * -+ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. -+ * -+ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return -+ * paths. For example, see arch/x86/entry_64.S. -+ * -+ * To drive preemption between tasks, the scheduler sets the flag in timer -+ * interrupt handler scheduler_tick(). -+ * -+ * 3. Wakeups don't really cause entry into schedule(). They add a -+ * task to the run-queue and that's it. -+ * -+ * Now, if the new task added to the run-queue preempts the current -+ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets -+ * called on the nearest possible occasion: -+ * -+ * - If the kernel is preemptible (CONFIG_PREEMPT=y): -+ * -+ * - in syscall or exception context, at the next outmost -+ * preempt_enable(). (this might be as soon as the wake_up()'s -+ * spin_unlock()!) 
-+ * -+ * - in IRQ context, return from interrupt-handler to -+ * preemptible context -+ * -+ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) -+ * then at the next: -+ * -+ * - cond_resched() call -+ * - explicit schedule() call -+ * - return from syscall or exception to user-space -+ * - return from interrupt-handler to user-space -+ * -+ * WARNING: must be called with preemption disabled! -+ */ -+static void __sched notrace __schedule(bool preempt) -+{ -+ struct task_struct *prev, *next; -+ unsigned long *switch_count; -+ struct rq *rq; -+ int cpu; -+ -+ cpu = smp_processor_id(); -+ rq = cpu_rq(cpu); -+ prev = rq->curr; -+ -+ schedule_debug(prev); -+ -+ /* by passing sched_feat(HRTICK) checking which PDS doesn't support */ -+ hrtick_clear(rq); -+ -+ local_irq_disable(); -+ rcu_note_context_switch(preempt); -+ -+ /* -+ * Make sure that signal_pending_state()->signal_pending() below -+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) -+ * done by the caller to avoid the race with signal_wake_up(). -+ * -+ * The membarrier system call requires a full memory barrier -+ * after coming from user-space, before storing to rq->curr. -+ */ -+ raw_spin_lock(&rq->lock); -+ smp_mb__after_spinlock(); -+ -+ update_rq_clock(rq); -+ -+ switch_count = &prev->nivcsw; -+ if (!preempt && prev->state) { -+ if (signal_pending_state(prev->state, prev)) { -+ prev->state = TASK_RUNNING; -+ } else { -+ deactivate_task(prev, rq); -+ -+ if (prev->in_iowait) { -+ atomic_inc(&rq->nr_iowait); -+ delayacct_blkio_start(); -+ } -+ -+ /* -+ * If a worker is going to sleep, notify and -+ * ask workqueue whether it wants to wake up a -+ * task to maintain concurrency. If so, wake -+ * up the task. -+ */ -+ if (prev->flags & PF_WQ_WORKER) { -+ struct task_struct *to_wakeup; -+ -+ to_wakeup = wq_worker_sleeping(prev); -+ if (to_wakeup) -+ try_to_wake_up_local(to_wakeup); -+ } -+ } -+ switch_count = &prev->nvcsw; -+ } -+ -+ clear_tsk_need_resched(prev); -+ clear_preempt_need_resched(); -+ -+ check_deadline(prev, rq); -+ -+ next = choose_next_task(rq, cpu, prev); -+ -+ set_rq_task(rq, next); -+ -+ if (prev != next) { -+ if (next->prio == PRIO_LIMIT) -+ schedstat_inc(rq->sched_goidle); -+ -+ rq->curr = next; -+ /* -+ * The membarrier system call requires each architecture -+ * to have a full memory barrier after updating -+ * rq->curr, before returning to user-space. -+ * -+ * Here are the schemes providing that barrier on the -+ * various architectures: -+ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. -+ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
-+ * - finish_lock_switch() for weakly-ordered -+ * architectures where spin_unlock is a full barrier, -+ * - switch_to() for arm64 (weakly-ordered, spin_unlock -+ * is a RELEASE barrier), -+ */ -+ ++*switch_count; -+ rq->nr_switches++; -+ -+ trace_sched_switch(preempt, prev, next); -+ -+ /* Also unlocks the rq: */ -+ rq = context_switch(rq, prev, next); -+#ifdef CONFIG_SCHED_SMT -+ pds_sg_balance_check(rq); -+#endif -+ } else -+ raw_spin_unlock_irq(&rq->lock); -+} -+ -+void __noreturn do_task_dead(void) -+{ -+ /* Causes final put_task_struct in finish_task_switch(): */ -+ set_special_state(TASK_DEAD); -+ -+ /* Tell freezer to ignore us: */ -+ current->flags |= PF_NOFREEZE; -+ __schedule(false); -+ -+ BUG(); -+ -+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ -+ for (;;) -+ cpu_relax(); -+} -+ -+static inline void sched_submit_work(struct task_struct *tsk) -+{ -+ if (!tsk->state || tsk_is_pi_blocked(tsk) || -+ signal_pending_state(tsk->state, tsk)) -+ return; -+ -+ /* -+ * If we are going to sleep and we have plugged IO queued, -+ * make sure to submit it to avoid deadlocks. -+ */ -+ if (blk_needs_flush_plug(tsk)) -+ blk_schedule_flush_plug(tsk); -+} -+ -+asmlinkage __visible void __sched schedule(void) -+{ -+ struct task_struct *tsk = current; -+ -+ sched_submit_work(tsk); -+ do { -+ preempt_disable(); -+ __schedule(false); -+ sched_preempt_enable_no_resched(); -+ } while (need_resched()); -+} -+EXPORT_SYMBOL(schedule); -+ -+/* -+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted -+ * state (have scheduled out non-voluntarily) by making sure that all -+ * tasks have either left the run queue or have gone into user space. -+ * As idle tasks do not do either, they must not ever be preempted -+ * (schedule out non-voluntarily). -+ * -+ * schedule_idle() is similar to schedule_preempt_disable() except that it -+ * never enables preemption because it does not call sched_submit_work(). -+ */ -+void __sched schedule_idle(void) -+{ -+ /* -+ * As this skips calling sched_submit_work(), which the idle task does -+ * regardless because that function is a nop when the task is in a -+ * TASK_RUNNING state, make sure this isn't used someplace that the -+ * current task can be in any other state. Note, idle is always in the -+ * TASK_RUNNING state. -+ */ -+ WARN_ON_ONCE(current->state); -+ do { -+ __schedule(false); -+ } while (need_resched()); -+} -+ -+#ifdef CONFIG_CONTEXT_TRACKING -+asmlinkage __visible void __sched schedule_user(void) -+{ -+ /* -+ * If we come here after a random call to set_need_resched(), -+ * or we have been woken up remotely but the IPI has not yet arrived, -+ * we haven't yet exited the RCU idle mode. Do it here manually until -+ * we find a better solution. -+ * -+ * NB: There are buggy callers of this function. Ideally we -+ * should warn if prev_state != CONTEXT_USER, but that will trigger -+ * too frequently to make sense yet. -+ */ -+ enum ctx_state prev_state = exception_enter(); -+ schedule(); -+ exception_exit(prev_state); -+} -+#endif -+ -+/** -+ * schedule_preempt_disabled - called with preemption disabled -+ * -+ * Returns with preemption disabled. 
Note: preempt_count must be 1 -+ */ -+void __sched schedule_preempt_disabled(void) -+{ -+ sched_preempt_enable_no_resched(); -+ schedule(); -+ preempt_disable(); -+} -+ -+static void __sched notrace preempt_schedule_common(void) -+{ -+ do { -+ /* -+ * Because the function tracer can trace preempt_count_sub() -+ * and it also uses preempt_enable/disable_notrace(), if -+ * NEED_RESCHED is set, the preempt_enable_notrace() called -+ * by the function tracer will call this function again and -+ * cause infinite recursion. -+ * -+ * Preemption must be disabled here before the function -+ * tracer can trace. Break up preempt_disable() into two -+ * calls. One to disable preemption without fear of being -+ * traced. The other to still record the preemption latency, -+ * which can also be traced by the function tracer. -+ */ -+ preempt_disable_notrace(); -+ preempt_latency_start(1); -+ __schedule(true); -+ preempt_latency_stop(1); -+ preempt_enable_no_resched_notrace(); -+ -+ /* -+ * Check again in case we missed a preemption opportunity -+ * between schedule and now. -+ */ -+ } while (need_resched()); -+} -+ -+#ifdef CONFIG_PREEMPT -+/* -+ * this is the entry point to schedule() from in-kernel preemption -+ * off of preempt_enable. Kernel preemptions off return from interrupt -+ * occur there and call schedule directly. -+ */ -+asmlinkage __visible void __sched notrace preempt_schedule(void) -+{ -+ /* -+ * If there is a non-zero preempt_count or interrupts are disabled, -+ * we do not want to preempt the current task. Just return.. -+ */ -+ if (likely(!preemptible())) -+ return; -+ -+ preempt_schedule_common(); -+} -+NOKPROBE_SYMBOL(preempt_schedule); -+EXPORT_SYMBOL(preempt_schedule); -+ -+/** -+ * preempt_schedule_notrace - preempt_schedule called by tracing -+ * -+ * The tracing infrastructure uses preempt_enable_notrace to prevent -+ * recursion and tracing preempt enabling caused by the tracing -+ * infrastructure itself. But as tracing can happen in areas coming -+ * from userspace or just about to enter userspace, a preempt enable -+ * can occur before user_exit() is called. This will cause the scheduler -+ * to be called when the system is still in usermode. -+ * -+ * To prevent this, the preempt_enable_notrace will use this function -+ * instead of preempt_schedule() to exit user context if needed before -+ * calling the scheduler. -+ */ -+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) -+{ -+ enum ctx_state prev_ctx; -+ -+ if (likely(!preemptible())) -+ return; -+ -+ do { -+ /* -+ * Because the function tracer can trace preempt_count_sub() -+ * and it also uses preempt_enable/disable_notrace(), if -+ * NEED_RESCHED is set, the preempt_enable_notrace() called -+ * by the function tracer will call this function again and -+ * cause infinite recursion. -+ * -+ * Preemption must be disabled here before the function -+ * tracer can trace. Break up preempt_disable() into two -+ * calls. One to disable preemption without fear of being -+ * traced. The other to still record the preemption latency, -+ * which can also be traced by the function tracer. -+ */ -+ preempt_disable_notrace(); -+ preempt_latency_start(1); -+ /* -+ * Needs preempt disabled in case user_exit() is traced -+ * and the tracer calls preempt_enable_notrace() causing -+ * an infinite recursion. 
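/*
 * Illustrative userspace sketch: the nvcsw/nivcsw counters bumped via
 * *switch_count in __schedule() surface through getrusage() (and
 * /proc/<pid>/status) as voluntary vs. involuntary context switches.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru))
		return 1;
	printf("voluntary: %ld, involuntary (preempted): %ld\n",
	       ru.ru_nvcsw, ru.ru_nivcsw);
	return 0;
}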
-+ */ -+ prev_ctx = exception_enter(); -+ __schedule(true); -+ exception_exit(prev_ctx); -+ -+ preempt_latency_stop(1); -+ preempt_enable_no_resched_notrace(); -+ } while (need_resched()); -+} -+EXPORT_SYMBOL_GPL(preempt_schedule_notrace); -+ -+#endif /* CONFIG_PREEMPT */ -+ -+/* -+ * this is the entry point to schedule() from kernel preemption -+ * off of irq context. -+ * Note, that this is called and return with irqs disabled. This will -+ * protect us against recursive calling from irq. -+ */ -+asmlinkage __visible void __sched preempt_schedule_irq(void) -+{ -+ enum ctx_state prev_state; -+ -+ /* Catch callers which need to be fixed */ -+ BUG_ON(preempt_count() || !irqs_disabled()); -+ -+ prev_state = exception_enter(); -+ -+ do { -+ preempt_disable(); -+ local_irq_enable(); -+ __schedule(true); -+ local_irq_disable(); -+ sched_preempt_enable_no_resched(); -+ } while (need_resched()); -+ -+ exception_exit(prev_state); -+} -+ -+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, -+ void *key) -+{ -+ return try_to_wake_up(curr->private, mode, wake_flags); -+} -+EXPORT_SYMBOL(default_wake_function); -+ -+static inline void -+check_task_changed(struct rq *rq, struct task_struct *p) -+{ -+ /* -+ * Trigger changes when task priority/deadline modified. -+ */ -+ if (task_on_rq_queued(p)) { -+ struct task_struct *first; -+ -+ requeue_task(p, rq); -+ -+ /* Resched if first queued task not running and not IDLE */ -+ if ((first = rq_first_queued_task(rq)) != rq->curr && -+ !task_running_idle(first)) -+ resched_curr(rq); -+ } -+} -+ -+#ifdef CONFIG_RT_MUTEXES -+ -+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) -+{ -+ if (pi_task) -+ prio = min(prio, pi_task->prio); -+ -+ return prio; -+} -+ -+static inline int rt_effective_prio(struct task_struct *p, int prio) -+{ -+ struct task_struct *pi_task = rt_mutex_get_top_task(p); -+ -+ return __rt_effective_prio(pi_task, prio); -+} -+ -+/* -+ * rt_mutex_setprio - set the current priority of a task -+ * @p: task to boost -+ * @pi_task: donor task -+ * -+ * This function changes the 'effective' priority of a task. It does -+ * not touch ->normal_prio like __setscheduler(). -+ * -+ * Used by the rt_mutex code to implement priority inheritance -+ * logic. Call site only calls if the priority of the task changed. -+ */ -+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) -+{ -+ int prio; -+ struct rq *rq; -+ raw_spinlock_t *lock; -+ -+ /* XXX used to be waiter->prio, not waiter->task->prio */ -+ prio = __rt_effective_prio(pi_task, p->normal_prio); -+ -+ /* -+ * If nothing changed; bail early. -+ */ -+ if (p->pi_top_task == pi_task && prio == p->prio) -+ return; -+ -+ rq = __task_access_lock(p, &lock); -+ /* -+ * Set under pi_lock && rq->lock, such that the value can be used under -+ * either lock. -+ * -+ * Note that there is loads of tricky to make this pointer cache work -+ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to -+ * ensure a task is de-boosted (pi_task is set to NULL) before the -+ * task is allowed to run again (and can exit). This ensures the pointer -+ * points to a blocked task -- which guaratees the task is present. -+ */ -+ p->pi_top_task = pi_task; -+ -+ /* -+ * For FIFO/RR we only need to set prio, if that matches we're done. -+ */ -+ if (prio == p->prio) -+ goto out_unlock; -+ -+ /* -+ * Idle task boosting is a nono in general. 
There is one -+ * exception, when PREEMPT_RT and NOHZ is active: -+ * -+ * The idle task calls get_next_timer_interrupt() and holds -+ * the timer wheel base->lock on the CPU and another CPU wants -+ * to access the timer (probably to cancel it). We can safely -+ * ignore the boosting request, as the idle CPU runs this code -+ * with interrupts disabled and will complete the lock -+ * protected section without being interrupted. So there is no -+ * real need to boost. -+ */ -+ if (unlikely(p == rq->idle)) { -+ WARN_ON(p != rq->curr); -+ WARN_ON(p->pi_blocked_on); -+ goto out_unlock; -+ } -+ -+ trace_sched_pi_setprio(p, pi_task); -+ p->prio = prio; -+ update_task_priodl(p); -+ -+ check_task_changed(rq, p); -+ -+out_unlock: -+ __task_access_unlock(p, lock); -+} -+#else -+static inline int rt_effective_prio(struct task_struct *p, int prio) -+{ -+ return prio; -+} -+#endif -+ -+void set_user_nice(struct task_struct *p, long nice) -+{ -+ int new_static; -+ unsigned long flags; -+ struct rq *rq; -+ raw_spinlock_t *lock; -+ -+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) -+ return; -+ new_static = NICE_TO_PRIO(nice); -+ /* -+ * We have to be careful, if called from sys_setpriority(), -+ * the task might be in the middle of scheduling on another CPU. -+ */ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ rq = __task_access_lock(p, &lock); -+ -+ /* rq lock may not held!! */ -+ update_rq_clock(rq); -+ -+ p->static_prio = new_static; -+ /* -+ * The RT priorities are set via sched_setscheduler(), but we still -+ * allow the 'normal' nice value to be set - but as expected -+ * it wont have any effect on scheduling until the task is -+ * not SCHED_NORMAL/SCHED_BATCH: -+ */ -+ if (task_has_rt_policy(p)) -+ goto out_unlock; -+ -+ p->deadline -= task_deadline_diff(p); -+ p->deadline += static_deadline_diff(new_static); -+ p->prio = effective_prio(p); -+ update_task_priodl(p); -+ -+ check_task_changed(rq, p); -+out_unlock: -+ __task_access_unlock(p, lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+} -+EXPORT_SYMBOL(set_user_nice); -+ -+/* -+ * can_nice - check if a task can reduce its nice value -+ * @p: task -+ * @nice: nice value -+ */ -+int can_nice(const struct task_struct *p, const int nice) -+{ -+ /* Convert nice value [19,-20] to rlimit style value [1,40] */ -+ int nice_rlim = nice_to_rlimit(nice); -+ -+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || -+ capable(CAP_SYS_NICE)); -+} -+ -+#ifdef __ARCH_WANT_SYS_NICE -+ -+/* -+ * sys_nice - change the priority of the current process. -+ * @increment: priority increment -+ * -+ * sys_setpriority is a more generic, but much slower function that -+ * does similar things. -+ */ -+SYSCALL_DEFINE1(nice, int, increment) -+{ -+ long nice, retval; -+ -+ /* -+ * Setpriority might change our priority at the same moment. -+ * We don't have to worry. Conceptually one call occurs first -+ * and we have a single winner. -+ */ -+ -+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); -+ nice = task_nice(current) + increment; -+ -+ nice = clamp_val(nice, MIN_NICE, MAX_NICE); -+ if (increment < 0 && !can_nice(current, nice)) -+ return -EPERM; -+ -+ retval = security_task_setnice(current, nice); -+ if (retval) -+ return retval; -+ -+ set_user_nice(current, nice); -+ return 0; -+} -+ -+#endif -+ -+/** -+ * task_prio - return the priority value of a given task. -+ * @p: the task in question. -+ * -+ * Return: The priority value as seen by users in /proc. -+ * RT tasks are offset by -100. 
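/*
 * Illustrative userspace sketch of the policy set_user_nice() and
 * can_nice() enforce: lowering priority (raising nice) is always
 * allowed, raising it back is gated by RLIMIT_NICE (rlimit value =
 * 20 - nice, cf. nice_to_rlimit()) or CAP_SYS_NICE.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	if (setpriority(PRIO_PROCESS, 0, 10))	/* lowering: always allowed */
		perror("setpriority(10)");
	if (setpriority(PRIO_PROCESS, 0, 0))	/* raising: may fail */
		perror("setpriority(0)");	/* EACCES without privilege */
	printf("final nice: %d\n", getpriority(PRIO_PROCESS, 0));
	return 0;
}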
Normal tasks are centered around 1, value goes -+ * from 0(SCHED_ISO) up to 82 (nice +19 SCHED_IDLE). -+ */ -+int task_prio(const struct task_struct *p) -+{ -+ int level, prio = p->prio - MAX_RT_PRIO; -+ static const int level_to_nice_prio[] = {39, 33, 26, 20, 14, 7, 0, 0}; -+ -+ /* rt tasks */ -+ if (prio <= 0) -+ goto out; -+ -+ preempt_disable(); -+ level = task_deadline_level(p, this_rq()); -+ preempt_enable(); -+ prio += level_to_nice_prio[level]; -+ if (idleprio_task(p)) -+ prio += NICE_WIDTH; -+out: -+ return prio; -+} -+ -+/** -+ * idle_cpu - is a given CPU idle currently? -+ * @cpu: the processor in question. -+ * -+ * Return: 1 if the CPU is currently idle. 0 otherwise. -+ */ -+int idle_cpu(int cpu) -+{ -+ return cpu_curr(cpu) == cpu_rq(cpu)->idle; -+} -+ -+/** -+ * idle_task - return the idle task for a given CPU. -+ * @cpu: the processor in question. -+ * -+ * Return: The idle task for the cpu @cpu. -+ */ -+struct task_struct *idle_task(int cpu) -+{ -+ return cpu_rq(cpu)->idle; -+} -+ -+/** -+ * find_process_by_pid - find a process with a matching PID value. -+ * @pid: the pid in question. -+ * -+ * The task of @pid, if found. %NULL otherwise. -+ */ -+static inline struct task_struct *find_process_by_pid(pid_t pid) -+{ -+ return pid ? find_task_by_vpid(pid) : current; -+} -+ -+#ifdef CONFIG_SMP -+void sched_set_stop_task(int cpu, struct task_struct *stop) -+{ -+ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; -+ struct sched_param start_param = { .sched_priority = 0 }; -+ struct task_struct *old_stop = cpu_rq(cpu)->stop; -+ -+ if (stop) { -+ /* -+ * Make it appear like a SCHED_FIFO task, its something -+ * userspace knows about and won't get confused about. -+ * -+ * Also, it will make PI more or less work without too -+ * much confusion -- but then, stop work should not -+ * rely on PI working anyway. -+ */ -+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); -+ } -+ -+ cpu_rq(cpu)->stop = stop; -+ -+ if (old_stop) { -+ /* -+ * Reset it back to a normal scheduling policy so that -+ * it can die in pieces. -+ */ -+ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); -+ } -+} -+ -+/* -+ * Change a given task's CPU affinity. Migrate the thread to a -+ * proper CPU and schedule it away if the CPU it's executing on -+ * is removed from the allowed bitmask. -+ * -+ * NOTE: the caller must have a valid reference to the task, the -+ * task must not exit() & deallocate itself prematurely. The -+ * call is not atomic; no spinlocks may be held. -+ */ -+static int __set_cpus_allowed_ptr(struct task_struct *p, -+ const struct cpumask *new_mask, bool check) -+{ -+ const struct cpumask *cpu_valid_mask = cpu_active_mask; -+ int dest_cpu; -+ unsigned long flags; -+ struct rq *rq; -+ raw_spinlock_t *lock; -+ int ret = 0; -+ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ rq = __task_access_lock(p, &lock); -+ -+ if (p->flags & PF_KTHREAD) { -+ /* -+ * Kernel threads are allowed on online && !active CPUs -+ */ -+ cpu_valid_mask = cpu_online_mask; -+ } -+ -+ /* -+ * Must re-check here, to close a race against __kthread_bind(), -+ * sched_setaffinity() is not guaranteed to observe the flag. 
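/*
 * Illustrative userspace sketch: sched_setaffinity(2) is the syscall
 * entry that ends up in __set_cpus_allowed_ptr() above -- here pinning
 * the calling thread to CPU 0.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("now running on CPU %d\n", sched_getcpu());
	return 0;
}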
-+ */ -+ if (check && (p->flags & PF_NO_SETAFFINITY)) { -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ if (cpumask_equal(&p->cpus_allowed, new_mask)) -+ goto out; -+ -+ if (!cpumask_intersects(new_mask, cpu_valid_mask)) { -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ do_set_cpus_allowed(p, new_mask); -+ -+ if (p->flags & PF_KTHREAD) { -+ /* -+ * For kernel threads that do indeed end up on online && -+ * !active we want to ensure they are strict per-CPU threads. -+ */ -+ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && -+ !cpumask_intersects(new_mask, cpu_active_mask) && -+ p->nr_cpus_allowed != 1); -+ } -+ -+ /* Can the task run on the task's current CPU? If so, we're done */ -+ if (cpumask_test_cpu(task_cpu(p), new_mask)) -+ goto out; -+ -+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); -+ if (task_running(p) || p->state == TASK_WAKING) { -+ struct migration_arg arg = { p, dest_cpu }; -+ -+ /* Need help from migration thread: drop lock and wait. */ -+ __task_access_unlock(p, lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); -+ tlb_migrate_finish(p->mm); -+ return 0; -+ } -+ if (task_on_rq_queued(p)) { -+ /* -+ * OK, since we're going to drop the lock immediately -+ * afterwards anyway. -+ */ -+ update_rq_clock(rq); -+ rq = move_queued_task(rq, p, dest_cpu); -+ lock = &rq->lock; -+ } -+ -+out: -+ __task_access_unlock(p, lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, flags); -+ -+ return ret; -+} -+ -+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) -+{ -+ return __set_cpus_allowed_ptr(p, new_mask, false); -+} -+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); -+ -+#else -+static inline int -+__set_cpus_allowed_ptr(struct task_struct *p, -+ const struct cpumask *new_mask, bool check) -+{ -+ return set_cpus_allowed_ptr(p, new_mask); -+} -+#endif -+ -+static u64 task_init_deadline(const struct task_struct *p) -+{ -+ return task_rq(p)->clock + task_deadline_diff(p); -+} -+ -+u64 (* task_init_deadline_func_tbl[])(const struct task_struct *p) = { -+ task_init_deadline, /* SCHED_NORMAL */ -+ NULL, /* SCHED_FIFO */ -+ NULL, /* SCHED_RR */ -+ task_init_deadline, /* SCHED_BATCH */ -+ NULL, /* SCHED_ISO */ -+ task_init_deadline /* SCHED_IDLE */ -+}; -+ -+/* -+ * sched_setparam() passes in -1 for its policy, to let the functions -+ * it calls know not to change it. -+ */ -+#define SETPARAM_POLICY -1 -+ -+static void __setscheduler_params(struct task_struct *p, -+ const struct sched_attr *attr) -+{ -+ int old_policy = p->policy; -+ int policy = attr->sched_policy; -+ -+ if (policy == SETPARAM_POLICY) -+ policy = p->policy; -+ -+ p->policy = policy; -+ -+ /* -+ * allow normal nice value to be set, but will not have any -+ * effect on scheduling until the task not SCHED_NORMAL/ -+ * SCHED_BATCH -+ */ -+ p->static_prio = NICE_TO_PRIO(attr->sched_nice); -+ -+ /* -+ * __sched_setscheduler() ensures attr->sched_priority == 0 when -+ * !rt_policy. Always setting this ensures that things like -+ * getparam()/getattr() don't report silly values for !rt tasks. -+ */ -+ p->rt_priority = attr->sched_priority; -+ p->normal_prio = normal_prio(p); -+ -+ if (old_policy != policy) -+ p->deadline = (task_init_deadline_func_tbl[p->policy])? -+ task_init_deadline_func_tbl[p->policy](p):0ULL; -+} -+ -+/* Actually do priority change: must hold rq lock. 
*/ -+static void __setscheduler(struct rq *rq, struct task_struct *p, -+ const struct sched_attr *attr, bool keep_boost) -+{ -+ __setscheduler_params(p, attr); -+ -+ /* -+ * Keep a potential priority boosting if called from -+ * sched_setscheduler(). -+ */ -+ p->prio = normal_prio(p); -+ if (keep_boost) -+ p->prio = rt_effective_prio(p, p->prio); -+ update_task_priodl(p); -+} -+ -+/* -+ * check the target process has a UID that matches the current process's -+ */ -+static bool check_same_owner(struct task_struct *p) -+{ -+ const struct cred *cred = current_cred(), *pcred; -+ bool match; -+ -+ rcu_read_lock(); -+ pcred = __task_cred(p); -+ match = (uid_eq(cred->euid, pcred->euid) || -+ uid_eq(cred->euid, pcred->uid)); -+ rcu_read_unlock(); -+ return match; -+} -+ -+static int -+__sched_setscheduler(struct task_struct *p, -+ const struct sched_attr *attr, bool user, bool pi) -+{ -+ const struct sched_attr dl_squash_attr = { -+ .size = sizeof(struct sched_attr), -+ .sched_policy = SCHED_FIFO, -+ .sched_nice = 0, -+ .sched_priority = 99, -+ }; -+ int newprio = MAX_RT_PRIO - 1 - attr->sched_priority; -+ int retval, oldpolicy = -1; -+ int policy = attr->sched_policy; -+ unsigned long flags; -+ struct rq *rq; -+ int reset_on_fork; -+ raw_spinlock_t *lock; -+ -+ /* The pi code expects interrupts enabled */ -+ BUG_ON(pi && in_interrupt()); -+ -+ /* -+ * PDS supports SCHED_DEADLINE by squash it as prio 0 SCHED_FIFO -+ */ -+ if (unlikely(SCHED_DEADLINE == policy)) { -+ attr = &dl_squash_attr; -+ policy = attr->sched_policy; -+ newprio = MAX_RT_PRIO - 1 - attr->sched_priority; -+ } -+recheck: -+ /* Double check policy once rq lock held */ -+ if (policy < 0) { -+ reset_on_fork = p->sched_reset_on_fork; -+ policy = oldpolicy = p->policy; -+ } else { -+ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK); -+ -+ if (policy > SCHED_IDLE) -+ return -EINVAL; -+ } -+ -+ if (attr->sched_flags & ~(SCHED_FLAG_ALL)) -+ return -EINVAL; -+ -+ /* -+ * Valid priorities for SCHED_FIFO and SCHED_RR are -+ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and -+ * SCHED_BATCH and SCHED_IDLE is 0. -+ */ -+ if (attr->sched_priority < 0 || -+ (p->mm && attr->sched_priority > MAX_USER_RT_PRIO - 1) || -+ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1)) -+ return -EINVAL; -+ if ((SCHED_RR == policy || SCHED_FIFO == policy) != -+ (attr->sched_priority != 0)) -+ return -EINVAL; -+ -+ /* -+ * Allow unprivileged RT tasks to decrease priority: -+ */ -+ if (user && !capable(CAP_SYS_NICE)) { -+ if (SCHED_FIFO == policy || SCHED_RR == policy) { -+ unsigned long rlim_rtprio = -+ task_rlimit(p, RLIMIT_RTPRIO); -+ -+ /* Can't set/change the rt policy */ -+ if (policy != p->policy && !rlim_rtprio) -+ return -EPERM; -+ -+ /* Can't increase priority */ -+ if (attr->sched_priority > p->rt_priority && -+ attr->sched_priority > rlim_rtprio) -+ return -EPERM; -+ } -+ -+ /* Can't change other user's priorities */ -+ if (!check_same_owner(p)) -+ return -EPERM; -+ -+ /* Normal users shall not reset the sched_reset_on_fork flag */ -+ if (p->sched_reset_on_fork && !reset_on_fork) -+ return -EPERM; -+ } -+ -+ if (user) { -+ retval = security_task_setscheduler(p); -+ if (retval) -+ return retval; -+ } -+ -+ /* -+ * make sure no PI-waiters arrive (or leave) while we are -+ * changing the priority of the task: -+ */ -+ raw_spin_lock_irqsave(&p->pi_lock, flags); -+ -+ /* -+ * To be able to change p->policy safely, task_access_lock() -+ * must be called. 
-+	 * If task_access_lock() is used here:
-+	 * For the task p which is not running, reading rq->stop is
-+	 * racy but acceptable as ->stop doesn't change much.
-+	 * An enhancement can be made to read rq->stop safely.
-+	 */
-+	rq = __task_access_lock(p, &lock);
-+
-+	/*
-+	 * Changing the policy of the stop threads is a very bad idea
-+	 */
-+	if (p == rq->stop) {
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		return -EINVAL;
-+	}
-+
-+	/*
-+	 * If not changing anything there's no need to proceed further:
-+	 */
-+	if (unlikely(policy == p->policy)) {
-+		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
-+			goto change;
-+		if (!rt_policy(policy) &&
-+		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
-+			goto change;
-+
-+		p->sched_reset_on_fork = reset_on_fork;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		return 0;
-+	}
-+change:
-+
-+	/* Re-check policy now with rq lock held */
-+	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+		policy = oldpolicy = -1;
-+		__task_access_unlock(p, lock);
-+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+		goto recheck;
-+	}
-+
-+	p->sched_reset_on_fork = reset_on_fork;
-+
-+	if (pi) {
-+		/*
-+		 * Take priority boosted tasks into account. If the new
-+		 * effective priority is unchanged, we just store the new
-+		 * normal parameters and do not touch the scheduler class and
-+		 * the runqueue. This will be done when the task deboosts
-+		 * itself.
-+		 */
-+		if (rt_effective_prio(p, newprio) == p->prio) {
-+			__setscheduler_params(p, attr);
-+			__task_access_unlock(p, lock);
-+			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+			return 0;
-+		}
-+	}
-+
-+	__setscheduler(rq, p, attr, pi);
-+
-+	check_task_changed(rq, p);
-+
-+	/* Avoid rq from going away on us: */
-+	preempt_disable();
-+	__task_access_unlock(p, lock);
-+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+	if (pi)
-+		rt_mutex_adjust_pi(p);
-+
-+	preempt_enable();
-+
-+	return 0;
-+}
-+
-+static int _sched_setscheduler(struct task_struct *p, int policy,
-+			       const struct sched_param *param, bool check)
-+{
-+	struct sched_attr attr = {
-+		.sched_policy   = policy,
-+		.sched_priority = param->sched_priority,
-+		.sched_nice     = PRIO_TO_NICE(p->static_prio),
-+	};
-+
-+	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
-+	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
-+		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
-+		policy &= ~SCHED_RESET_ON_FORK;
-+		attr.sched_policy = policy;
-+	}
-+
-+	return __sched_setscheduler(p, &attr, check, true);
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may already be dead.
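/*
 * Illustrative userspace sketch for sched_setscheduler(): switch the
 * calling thread to SCHED_FIFO priority 10 (needs CAP_SYS_NICE or a
 * suitable RLIMIT_RTPRIO, as checked above). Under this patch a
 * SCHED_DEADLINE request would be squashed to priority-99 SCHED_FIFO.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy %d, prio %d\n", sched_getscheduler(0), sp.sched_priority);
	return 0;
}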
-+ */ -+int sched_setscheduler(struct task_struct *p, int policy, -+ const struct sched_param *param) -+{ -+ return _sched_setscheduler(p, policy, param, true); -+} -+ -+EXPORT_SYMBOL_GPL(sched_setscheduler); -+ -+int sched_setattr(struct task_struct *p, const struct sched_attr *attr) -+{ -+ return __sched_setscheduler(p, attr, true, true); -+} -+EXPORT_SYMBOL_GPL(sched_setattr); -+ -+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) -+{ -+ return __sched_setscheduler(p, attr, false, true); -+} -+ -+/** -+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. -+ * @p: the task in question. -+ * @policy: new policy. -+ * @param: structure containing the new RT priority. -+ * -+ * Just like sched_setscheduler, only don't bother checking if the -+ * current context has permission. For example, this is needed in -+ * stop_machine(): we create temporary high priority worker threads, -+ * but our caller might not have that capability. -+ * -+ * Return: 0 on success. An error code otherwise. -+ */ -+int sched_setscheduler_nocheck(struct task_struct *p, int policy, -+ const struct sched_param *param) -+{ -+ return _sched_setscheduler(p, policy, param, false); -+} -+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); -+ -+static int -+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) -+{ -+ struct sched_param lparam; -+ struct task_struct *p; -+ int retval; -+ -+ if (!param || pid < 0) -+ return -EINVAL; -+ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) -+ return -EFAULT; -+ -+ rcu_read_lock(); -+ retval = -ESRCH; -+ p = find_process_by_pid(pid); -+ if (p != NULL) -+ retval = sched_setscheduler(p, policy, &lparam); -+ rcu_read_unlock(); -+ -+ return retval; -+} -+ -+/* -+ * Mimics kernel/events/core.c perf_copy_attr(). -+ */ -+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) -+{ -+ u32 size; -+ int ret; -+ -+ if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0)) -+ return -EFAULT; -+ -+ /* Zero the full structure, so that a short copy will be nice: */ -+ memset(attr, 0, sizeof(*attr)); -+ -+ ret = get_user(size, &uattr->size); -+ if (ret) -+ return ret; -+ -+ /* Bail out on silly large: */ -+ if (size > PAGE_SIZE) -+ goto err_size; -+ -+ /* ABI compatibility quirk: */ -+ if (!size) -+ size = SCHED_ATTR_SIZE_VER0; -+ -+ if (size < SCHED_ATTR_SIZE_VER0) -+ goto err_size; -+ -+ /* -+ * If we're handed a bigger struct than we know of, -+ * ensure all the unknown bits are 0 - i.e. new -+ * user-space does not rely on any kernel feature -+ * extensions we dont know about yet. -+ */ -+ if (size > sizeof(*attr)) { -+ unsigned char __user *addr; -+ unsigned char __user *end; -+ unsigned char val; -+ -+ addr = (void __user *)uattr + sizeof(*attr); -+ end = (void __user *)uattr + size; -+ -+ for (; addr < end; addr++) { -+ ret = get_user(val, addr); -+ if (ret) -+ return ret; -+ if (val) -+ goto err_size; -+ } -+ size = sizeof(*attr); -+ } -+ -+ ret = copy_from_user(attr, uattr, size); -+ if (ret) -+ return -EFAULT; -+ -+ /* -+ * XXX: Do we want to be lenient like existing syscalls; or do we want -+ * to be strict and return an error on out-of-bounds values? 
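/*
 * Illustrative userspace sketch of the ABI sched_copy_attr() parses.
 * glibc provides no sched_setattr() wrapper, so the raw syscall is
 * used; the struct layout follows the sched_setattr(2) man page.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sched_attr {
	uint32_t size;		/* SCHED_ATTR_SIZE_VER0 compatible */
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime, sched_deadline, sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= 0,	/* SCHED_NORMAL */
		.sched_nice	= 5,
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	return 0;
}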
-+ */
-+	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+	/* sched/core.c uses zero here but we already know ret is zero */
-+	return 0;
-+
-+err_size:
-+	put_user(sizeof(*attr), &uattr->size);
-+	return -E2BIG;
-+}
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+	if (policy < 0)
-+		return -EINVAL;
-+
-+	return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+			       unsigned int, flags)
-+{
-+	struct sched_attr attr;
-+	struct task_struct *p;
-+	int retval;
-+
-+	if (!uattr || pid < 0 || flags)
-+		return -EINVAL;
-+
-+	retval = sched_copy_attr(uattr, &attr);
-+	if (retval)
-+		return retval;
-+
-+	if ((int)attr.sched_policy < 0)
-+		return -EINVAL;
-+
-+	rcu_read_lock();
-+	retval = -ESRCH;
-+	p = find_process_by_pid(pid);
-+	if (p != NULL)
-+		retval = sched_setattr(p, &attr);
-+	rcu_read_unlock();
-+
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (pid < 0)
-+		goto out_nounlock;
-+
-+	retval = -ESRCH;
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	if (p) {
-+		retval = security_task_getscheduler(p);
-+		if (!retval)
-+			retval = p->policy;
-+	}
-+	rcu_read_unlock();
-+
-+out_nounlock:
-+	return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+	struct sched_param lp = { .sched_priority = 0 };
-+	struct task_struct *p;
-+	int retval = -EINVAL;
-+
-+	if (!param || pid < 0)
-+		goto out_nounlock;
-+
-+	rcu_read_lock();
-+	p = find_process_by_pid(pid);
-+	retval = -ESRCH;
-+	if (!p)
-+		goto out_unlock;
-+
-+	retval = security_task_getscheduler(p);
-+	if (retval)
-+		goto out_unlock;
-+
-+	if (task_has_rt_policy(p))
-+		lp.sched_priority = p->rt_priority;
-+	rcu_read_unlock();
-+
-+	/*
-+	 * This one might sleep, we cannot do it with a spinlock held ...
-+	 */
-+	retval = copy_to_user(param, &lp, sizeof(*param)) ?
-EFAULT : 0; -+ -+out_nounlock: -+ return retval; -+ -+out_unlock: -+ rcu_read_unlock(); -+ return retval; -+} -+ -+static int sched_read_attr(struct sched_attr __user *uattr, -+ struct sched_attr *attr, -+ unsigned int usize) -+{ -+ int ret; -+ -+ if (!access_ok(uattr, usize)) -+ return -EFAULT; -+ -+ /* -+ * If we're handed a smaller struct than we know of, -+ * ensure all the unknown bits are 0 - i.e. old -+ * user-space does not get uncomplete information. -+ */ -+ if (usize < sizeof(*attr)) { -+ unsigned char *addr; -+ unsigned char *end; -+ -+ addr = (void *)attr + usize; -+ end = (void *)attr + sizeof(*attr); -+ -+ for (; addr < end; addr++) { -+ if (*addr) -+ return -EFBIG; -+ } -+ -+ attr->size = usize; -+ } -+ -+ ret = copy_to_user(uattr, attr, attr->size); -+ if (ret) -+ return -EFAULT; -+ -+ /* sched/core.c uses zero here but we already know ret is zero */ -+ return ret; -+} -+ -+/** -+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr -+ * @pid: the pid in question. -+ * @uattr: structure containing the extended parameters. -+ * @size: sizeof(attr) for fwd/bwd comp. -+ * @flags: for future extension. -+ */ -+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, -+ unsigned int, size, unsigned int, flags) -+{ -+ struct sched_attr attr = { -+ .size = sizeof(struct sched_attr), -+ }; -+ struct task_struct *p; -+ int retval; -+ -+ if (!uattr || pid < 0 || size > PAGE_SIZE || -+ size < SCHED_ATTR_SIZE_VER0 || flags) -+ return -EINVAL; -+ -+ rcu_read_lock(); -+ p = find_process_by_pid(pid); -+ retval = -ESRCH; -+ if (!p) -+ goto out_unlock; -+ -+ retval = security_task_getscheduler(p); -+ if (retval) -+ goto out_unlock; -+ -+ attr.sched_policy = p->policy; -+ if (rt_task(p)) -+ attr.sched_priority = p->rt_priority; -+ else -+ attr.sched_nice = task_nice(p); -+ -+ rcu_read_unlock(); -+ -+ retval = sched_read_attr(uattr, &attr, size); -+ return retval; -+ -+out_unlock: -+ rcu_read_unlock(); -+ return retval; -+} -+ -+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) -+{ -+ cpumask_var_t cpus_allowed, new_mask; -+ struct task_struct *p; -+ int retval; -+ -+ get_online_cpus(); -+ rcu_read_lock(); -+ -+ p = find_process_by_pid(pid); -+ if (!p) { -+ rcu_read_unlock(); -+ put_online_cpus(); -+ return -ESRCH; -+ } -+ -+ /* Prevent p going away */ -+ get_task_struct(p); -+ rcu_read_unlock(); -+ -+ if (p->flags & PF_NO_SETAFFINITY) { -+ retval = -EINVAL; -+ goto out_put_task; -+ } -+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { -+ retval = -ENOMEM; -+ goto out_put_task; -+ } -+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { -+ retval = -ENOMEM; -+ goto out_free_cpus_allowed; -+ } -+ retval = -EPERM; -+ if (!check_same_owner(p)) { -+ rcu_read_lock(); -+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { -+ rcu_read_unlock(); -+ goto out_unlock; -+ } -+ rcu_read_unlock(); -+ } -+ -+ retval = security_task_setscheduler(p); -+ if (retval) -+ goto out_unlock; -+ -+ cpuset_cpus_allowed(p, cpus_allowed); -+ cpumask_and(new_mask, in_mask, cpus_allowed); -+again: -+ retval = __set_cpus_allowed_ptr(p, new_mask, true); -+ -+ if (!retval) { -+ cpuset_cpus_allowed(p, cpus_allowed); -+ if (!cpumask_subset(new_mask, cpus_allowed)) { -+ /* -+ * We must have raced with a concurrent cpuset -+ * update. 
Just reset the cpus_allowed to the -+ * cpuset's cpus_allowed -+ */ -+ cpumask_copy(new_mask, cpus_allowed); -+ goto again; -+ } -+ } -+out_unlock: -+ free_cpumask_var(new_mask); -+out_free_cpus_allowed: -+ free_cpumask_var(cpus_allowed); -+out_put_task: -+ put_task_struct(p); -+ put_online_cpus(); -+ return retval; -+} -+ -+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, -+ struct cpumask *new_mask) -+{ -+ if (len < cpumask_size()) -+ cpumask_clear(new_mask); -+ else if (len > cpumask_size()) -+ len = cpumask_size(); -+ -+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; -+} -+ -+/** -+ * sys_sched_setaffinity - set the CPU affinity of a process -+ * @pid: pid of the process -+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr -+ * @user_mask_ptr: user-space pointer to the new CPU mask -+ * -+ * Return: 0 on success. An error code otherwise. -+ */ -+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, -+ unsigned long __user *, user_mask_ptr) -+{ -+ cpumask_var_t new_mask; -+ int retval; -+ -+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) -+ return -ENOMEM; -+ -+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); -+ if (retval == 0) -+ retval = sched_setaffinity(pid, new_mask); -+ free_cpumask_var(new_mask); -+ return retval; -+} -+ -+long sched_getaffinity(pid_t pid, cpumask_t *mask) -+{ -+ struct task_struct *p; -+ raw_spinlock_t *lock; -+ unsigned long flags; -+ int retval; -+ -+ rcu_read_lock(); -+ -+ retval = -ESRCH; -+ p = find_process_by_pid(pid); -+ if (!p) -+ goto out_unlock; -+ -+ retval = security_task_getscheduler(p); -+ if (retval) -+ goto out_unlock; -+ -+ task_access_lock_irqsave(p, &lock, &flags); -+ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); -+ task_access_unlock_irqrestore(p, lock, &flags); -+ -+out_unlock: -+ rcu_read_unlock(); -+ -+ return retval; -+} -+ -+/** -+ * sys_sched_getaffinity - get the CPU affinity of a process -+ * @pid: pid of the process -+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr -+ * @user_mask_ptr: user-space pointer to hold the current CPU mask -+ * -+ * Return: size of CPU mask copied to user_mask_ptr on success. An -+ * error code otherwise. -+ */ -+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, -+ unsigned long __user *, user_mask_ptr) -+{ -+ int ret; -+ cpumask_var_t mask; -+ -+ if ((len * BITS_PER_BYTE) < nr_cpu_ids) -+ return -EINVAL; -+ if (len & (sizeof(unsigned long)-1)) -+ return -EINVAL; -+ -+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) -+ return -ENOMEM; -+ -+ ret = sched_getaffinity(pid, mask); -+ if (ret == 0) { -+ unsigned int retlen = min_t(size_t, len, cpumask_size()); -+ -+ if (copy_to_user(user_mask_ptr, mask, retlen)) -+ ret = -EFAULT; -+ else -+ ret = retlen; -+ } -+ free_cpumask_var(mask); -+ -+ return ret; -+} -+ -+/** -+ * sys_sched_yield - yield the current processor to other threads. -+ * -+ * This function yields the current CPU to other tasks. It does this by -+ * scheduling away the current task. If it still has the earliest deadline -+ * it will be scheduled again as the next task. -+ * -+ * Return: 0. 
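/*
 * Illustrative userspace sketch for the getaffinity side: note the
 * glibc wrapper returns 0 on success, while the raw syscall (as in
 * sys_sched_getaffinity() above) returns the copied mask size.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("allowed CPUs: %d\n", CPU_COUNT(&set));
	return 0;
}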
-+ */ -+static void do_sched_yield(void) -+{ -+ struct rq *rq; -+ struct rq_flags rf; -+ -+ if (!sched_yield_type) -+ return; -+ -+ rq = this_rq_lock_irq(&rf); -+ -+ if (sched_yield_type > 1) { -+ time_slice_expired(current, rq); -+ requeue_task(current, rq); -+ } -+ schedstat_inc(rq->yld_count); -+ -+ /* -+ * Since we are going to call schedule() anyway, there's -+ * no need to preempt or enable interrupts: -+ */ -+ preempt_disable(); -+ raw_spin_unlock(&rq->lock); -+ sched_preempt_enable_no_resched(); -+ -+ schedule(); -+} -+ -+SYSCALL_DEFINE0(sched_yield) -+{ -+ do_sched_yield(); -+ return 0; -+} -+ -+#ifndef CONFIG_PREEMPT -+int __sched _cond_resched(void) -+{ -+ if (should_resched(0)) { -+ preempt_schedule_common(); -+ return 1; -+ } -+ rcu_all_qs(); -+ return 0; -+} -+EXPORT_SYMBOL(_cond_resched); -+#endif -+ -+/* -+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, -+ * call schedule, and on return reacquire the lock. -+ * -+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level -+ * operations here to prevent schedule() from being called twice (once via -+ * spin_unlock(), once by hand). -+ */ -+int __cond_resched_lock(spinlock_t *lock) -+{ -+ int resched = should_resched(PREEMPT_LOCK_OFFSET); -+ int ret = 0; -+ -+ lockdep_assert_held(lock); -+ -+ if (spin_needbreak(lock) || resched) { -+ spin_unlock(lock); -+ if (resched) -+ preempt_schedule_common(); -+ else -+ cpu_relax(); -+ ret = 1; -+ spin_lock(lock); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(__cond_resched_lock); -+ -+/** -+ * yield - yield the current processor to other threads. -+ * -+ * Do not ever use this function, there's a 99% chance you're doing it wrong. -+ * -+ * The scheduler is at all times free to pick the calling task as the most -+ * eligible task to run, if removing the yield() call from your code breaks -+ * it, its already broken. -+ * -+ * Typical broken usage is: -+ * -+ * while (!event) -+ * yield(); -+ * -+ * where one assumes that yield() will let 'the other' process run that will -+ * make event true. If the current task is a SCHED_FIFO task that will never -+ * happen. Never use yield() as a progress guarantee!! -+ * -+ * If you want to use yield() to wait for something, use wait_event(). -+ * If you want to use yield() to be 'nice' for others, use cond_resched(). -+ * If you still want to use yield(), do not! -+ */ -+void __sched yield(void) -+{ -+ set_current_state(TASK_RUNNING); -+ do_sched_yield(); -+} -+EXPORT_SYMBOL(yield); -+ -+/** -+ * yield_to - yield the current processor to another thread in -+ * your thread group, or accelerate that thread toward the -+ * processor it's on. -+ * @p: target task -+ * @preempt: whether task preemption is allowed or not -+ * -+ * It's the caller's job to ensure that the target task struct -+ * can't go away on us before we can do any checks. -+ * -+ * In PDS, yield_to is not supported. -+ * -+ * Return: -+ * true (>0) if we indeed boosted the target task. -+ * false (0) if we failed to boost the target. -+ * -ESRCH if there's no task to yield to. -+ */ -+int __sched yield_to(struct task_struct *p, bool preempt) -+{ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(yield_to); -+ -+int io_schedule_prepare(void) -+{ -+ int old_iowait = current->in_iowait; -+ -+ current->in_iowait = 1; -+ blk_schedule_flush_plug(current); -+ -+ return old_iowait; -+} -+ -+void io_schedule_finish(int token) -+{ -+ current->in_iowait = token; -+} -+ -+/* -+ * This task is about to go to sleep on IO. 
Increment rq->nr_iowait so -+ * that process accounting knows that this is a task in IO wait state. -+ * -+ * But don't do that if it is a deliberate, throttling IO wait (this task -+ * has set its backing_dev_info: the queue against which it should throttle) -+ */ -+ -+long __sched io_schedule_timeout(long timeout) -+{ -+ int token; -+ long ret; -+ -+ token = io_schedule_prepare(); -+ ret = schedule_timeout(timeout); -+ io_schedule_finish(token); -+ -+ return ret; -+} -+EXPORT_SYMBOL(io_schedule_timeout); -+ -+void io_schedule(void) -+{ -+ int token; -+ -+ token = io_schedule_prepare(); -+ schedule(); -+ io_schedule_finish(token); -+} -+EXPORT_SYMBOL(io_schedule); -+ -+/** -+ * sys_sched_get_priority_max - return maximum RT priority. -+ * @policy: scheduling class. -+ * -+ * Return: On success, this syscall returns the maximum -+ * rt_priority that can be used by a given scheduling class. -+ * On failure, a negative error code is returned. -+ */ -+SYSCALL_DEFINE1(sched_get_priority_max, int, policy) -+{ -+ int ret = -EINVAL; -+ -+ switch (policy) { -+ case SCHED_FIFO: -+ case SCHED_RR: -+ ret = MAX_USER_RT_PRIO-1; -+ break; -+ case SCHED_NORMAL: -+ case SCHED_BATCH: -+ case SCHED_ISO: -+ case SCHED_IDLE: -+ ret = 0; -+ break; -+ } -+ return ret; -+} -+ -+/** -+ * sys_sched_get_priority_min - return minimum RT priority. -+ * @policy: scheduling class. -+ * -+ * Return: On success, this syscall returns the minimum -+ * rt_priority that can be used by a given scheduling class. -+ * On failure, a negative error code is returned. -+ */ -+SYSCALL_DEFINE1(sched_get_priority_min, int, policy) -+{ -+ int ret = -EINVAL; -+ -+ switch (policy) { -+ case SCHED_FIFO: -+ case SCHED_RR: -+ ret = 1; -+ break; -+ case SCHED_NORMAL: -+ case SCHED_BATCH: -+ case SCHED_ISO: -+ case SCHED_IDLE: -+ ret = 0; -+ break; -+ } -+ return ret; -+} -+ -+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) -+{ -+ struct task_struct *p; -+ int retval; -+ -+ if (pid < 0) -+ return -EINVAL; -+ -+ retval = -ESRCH; -+ rcu_read_lock(); -+ p = find_process_by_pid(pid); -+ if (!p) -+ goto out_unlock; -+ -+ retval = security_task_getscheduler(p); -+ if (retval) -+ goto out_unlock; -+ rcu_read_unlock(); -+ -+ *t = ns_to_timespec64(MS_TO_NS(rr_interval)); -+ return 0; -+ -+out_unlock: -+ rcu_read_unlock(); -+ return retval; -+} -+ -+/** -+ * sys_sched_rr_get_interval - return the default timeslice of a process. -+ * @pid: pid of the process. -+ * @interval: userspace pointer to the timeslice value. -+ * -+ * -+ * Return: On success, 0 and the timeslice is in @interval. Otherwise, -+ * an error code. 
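Note that the PDS versions of the two priority-range syscalls above also accept SCHED_ISO, reporting 0 for it like the other non-realtime policies. A small user-space query, as a sketch:

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		printf("SCHED_FIFO : %d..%d\n",
		       sched_get_priority_min(SCHED_FIFO),
		       sched_get_priority_max(SCHED_FIFO));
		printf("SCHED_OTHER: %d..%d\n",
		       sched_get_priority_min(SCHED_OTHER),
		       sched_get_priority_max(SCHED_OTHER));
		return 0;
	}

With MAX_USER_RT_PRIO at its usual value of 100 this prints 1..99 for SCHED_FIFO and 0..0 for SCHED_OTHER, the same as mainline.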
-+ */ -+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, -+ struct __kernel_timespec __user *, interval) -+{ -+ struct timespec64 t; -+ int retval = sched_rr_get_interval(pid, &t); -+ -+ if (retval == 0) -+ retval = put_timespec64(&t, interval); -+ -+ return retval; -+} -+ -+#ifdef CONFIG_COMPAT_32BIT_TIME -+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, -+ struct old_timespec32 __user *, interval) -+{ -+ struct timespec64 t; -+ int retval = sched_rr_get_interval(pid, &t); -+ -+ if (retval == 0) -+ retval = put_old_timespec32(&t, interval); -+ return retval; -+} -+#endif -+ -+void sched_show_task(struct task_struct *p) -+{ -+ unsigned long free = 0; -+ int ppid; -+ -+ if (!try_get_task_stack(p)) -+ return; -+ -+ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); -+ -+ if (p->state == TASK_RUNNING) -+ printk(KERN_CONT " running task "); -+#ifdef CONFIG_DEBUG_STACK_USAGE -+ free = stack_not_used(p); -+#endif -+ ppid = 0; -+ rcu_read_lock(); -+ if (pid_alive(p)) -+ ppid = task_pid_nr(rcu_dereference(p->real_parent)); -+ rcu_read_unlock(); -+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, -+ task_pid_nr(p), ppid, -+ (unsigned long)task_thread_info(p)->flags); -+ -+ print_worker_info(KERN_INFO, p); -+ show_stack(p, NULL); -+ put_task_stack(p); -+} -+EXPORT_SYMBOL_GPL(sched_show_task); -+ -+static inline bool -+state_filter_match(unsigned long state_filter, struct task_struct *p) -+{ -+ /* no filter, everything matches */ -+ if (!state_filter) -+ return true; -+ -+ /* filter, but doesn't match */ -+ if (!(p->state & state_filter)) -+ return false; -+ -+ /* -+ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows -+ * TASK_KILLABLE). -+ */ -+ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) -+ return false; -+ -+ return true; -+} -+ -+ -+void show_state_filter(unsigned long state_filter) -+{ -+ struct task_struct *g, *p; -+ -+#if BITS_PER_LONG == 32 -+ printk(KERN_INFO -+ " task PC stack pid father\n"); -+#else -+ printk(KERN_INFO -+ " task PC stack pid father\n"); -+#endif -+ rcu_read_lock(); -+ for_each_process_thread(g, p) { -+ /* -+ * reset the NMI-timeout, listing all files on a slow -+ * console might take a lot of time: -+ * Also, reset softlockup watchdogs on all CPUs, because -+ * another CPU might be blocked waiting for us to process -+ * an IPI. -+ */ -+ touch_nmi_watchdog(); -+ touch_all_softlockup_watchdogs(); -+ if (state_filter_match(state_filter, p)) -+ sched_show_task(p); -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ /* PDS TODO: should support this -+ if (!state_filter) -+ sysrq_sched_debug_show(); -+ */ -+#endif -+ rcu_read_unlock(); -+ /* -+ * Only show locks if all tasks are dumped: -+ */ -+ if (!state_filter) -+ debug_show_all_locks(); -+} -+ -+void dump_cpu_task(int cpu) -+{ -+ pr_info("Task dump for CPU %d:\n", cpu); -+ sched_show_task(cpu_curr(cpu)); -+} -+ -+/** -+ * init_idle - set up an idle thread for a given CPU -+ * @idle: task in question -+ * @cpu: cpu the idle task belongs to -+ * -+ * NOTE: this function does not set the idle thread's NEED_RESCHED -+ * flag, to make booting more robust. 
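Under PDS, sched_rr_get_interval() above reports the global rr_interval for every task instead of a per-task CFS slice. A user-space sketch reading it for the calling process:

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		if (sched_rr_get_interval(0, &ts) == 0)	/* pid 0 == self */
			printf("timeslice: %ld.%09ld s\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}

With the default rr_interval this should print a handful of milliseconds.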
-+ */ -+void init_idle(struct task_struct *idle, int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&idle->pi_lock, flags); -+ raw_spin_lock(&rq->lock); -+ update_rq_clock(rq); -+ -+ idle->last_ran = rq->clock_task; -+ idle->state = TASK_RUNNING; -+ idle->flags |= PF_IDLE; -+ /* Setting prio to illegal value shouldn't matter when never queued */ -+ idle->prio = PRIO_LIMIT; -+ idle->deadline = rq_clock(rq) + task_deadline_diff(idle); -+ update_task_priodl(idle); -+ -+ kasan_unpoison_task_stack(idle); -+ -+#ifdef CONFIG_SMP -+ /* -+ * It's possible that init_idle() gets called multiple times on a task, -+ * in that case do_set_cpus_allowed() will not do the right thing. -+ * -+ * And since this is boot we can forgo the serialisation. -+ */ -+ set_cpus_allowed_common(idle, cpumask_of(cpu)); -+#endif -+ -+ /* Silence PROVE_RCU */ -+ rcu_read_lock(); -+ __set_task_cpu(idle, cpu); -+ rcu_read_unlock(); -+ -+ rq->curr = rq->idle = idle; -+ idle->on_cpu = 1; -+ -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock_irqrestore(&idle->pi_lock, flags); -+ -+ /* Set the preempt count _outside_ the spinlocks! */ -+ init_idle_preempt_count(idle, cpu); -+ -+ ftrace_graph_init_idle_task(idle, cpu); -+ vtime_init_idle(idle, cpu); -+#ifdef CONFIG_SMP -+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); -+#endif -+} -+ -+void resched_cpu(int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ if (cpu_online(cpu) || cpu == smp_processor_id()) -+ resched_curr(cpu_rq(cpu)); -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+} -+ -+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) -+{ -+ struct wake_q_node *node = &task->wake_q; -+ -+ /* -+ * Atomically grab the task, if ->wake_q is !nil already it means -+ * its already queued (either by us or someone else) and will get the -+ * wakeup due to that. -+ * -+ * In order to ensure that a pending wakeup will observe our pending -+ * state, even in the failed case, an explicit smp_mb() must be used. -+ */ -+ smp_mb__before_atomic(); -+ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) -+ return false; -+ -+ /* -+ * The head is context local, there can be no concurrency. -+ */ -+ *head->lastp = node; -+ head->lastp = &node->next; -+ return true; -+} -+ -+/** -+ * wake_q_add() - queue a wakeup for 'later' waking. -+ * @head: the wake_q_head to add @task to -+ * @task: the task to queue for 'later' wakeup -+ * -+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the -+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come -+ * instantly. -+ * -+ * This function must be used as-if it were wake_up_process(); IOW the task -+ * must be ready to be woken at this location. -+ */ -+void wake_q_add(struct wake_q_head *head, struct task_struct *task) -+{ -+ if (__wake_q_add(head, task)) -+ get_task_struct(task); -+} -+ -+/** -+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking. -+ * @head: the wake_q_head to add @task to -+ * @task: the task to queue for 'later' wakeup -+ * -+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the -+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come -+ * instantly. -+ * -+ * This function must be used as-if it were wake_up_process(); IOW the task -+ * must be ready to be woken at this location. -+ * -+ * This function is essentially a task-safe equivalent to wake_q_add(). 
Callers -+ * that already hold reference to @task can call the 'safe' version and trust -+ * wake_q to do the right thing depending whether or not the @task is already -+ * queued for wakeup. -+ */ -+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) -+{ -+ if (!__wake_q_add(head, task)) -+ put_task_struct(task); -+} -+ -+void wake_up_q(struct wake_q_head *head) -+{ -+ struct wake_q_node *node = head->first; -+ -+ while (node != WAKE_Q_TAIL) { -+ struct task_struct *task; -+ -+ task = container_of(node, struct task_struct, wake_q); -+ BUG_ON(!task); -+ /* task can safely be re-inserted now: */ -+ node = node->next; -+ task->wake_q.next = NULL; -+ -+ /* -+ * wake_up_process() executes a full barrier, which pairs with -+ * the queueing in wake_q_add() so as not to miss wakeups. -+ */ -+ wake_up_process(task); -+ put_task_struct(task); -+ } -+} -+ -+#ifdef CONFIG_SMP -+ -+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur, -+ const struct cpumask __maybe_unused *trial) -+{ -+ return 1; -+} -+ -+int task_can_attach(struct task_struct *p, -+ const struct cpumask *cs_cpus_allowed) -+{ -+ int ret = 0; -+ -+ /* -+ * Kthreads which disallow setaffinity shouldn't be moved -+ * to a new cpuset; we don't want to change their CPU -+ * affinity and isolating such threads by their set of -+ * allowed nodes is unnecessary. Thus, cpusets are not -+ * applicable for such threads. This prevents checking for -+ * success of set_cpus_allowed_ptr() on all attached tasks -+ * before cpus_allowed may be changed. -+ */ -+ if (p->flags & PF_NO_SETAFFINITY) -+ ret = -EINVAL; -+ -+ return ret; -+} -+ -+static bool sched_smp_initialized __read_mostly; -+ -+#ifdef CONFIG_NO_HZ_COMMON -+void nohz_balance_enter_idle(int cpu) -+{ -+} -+ -+void select_nohz_load_balancer(int stop_tick) -+{ -+} -+ -+void set_cpu_sd_state_idle(void) {} -+ -+/* -+ * In the semi idle case, use the nearest busy CPU for migrating timers -+ * from an idle CPU. This is good for power-savings. -+ * -+ * We don't do similar optimization for completely idle system, as -+ * selecting an idle CPU will add more delays to the timers than intended -+ * (as that CPU's timer base may not be uptodate wrt jiffies etc). -+ */ -+int get_nohz_timer_target(void) -+{ -+ int i, cpu = smp_processor_id(); -+ struct cpumask *mask; -+ -+ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER)) -+ return cpu; -+ -+ for (mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); -+ mask < per_cpu(sched_cpu_affinity_chk_end_masks, cpu); mask++) -+ for_each_cpu(i, mask) -+ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) -+ return i; -+ -+ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER)) -+ cpu = housekeeping_any_cpu(HK_FLAG_TIMER); -+ -+ return cpu; -+} -+ -+/* -+ * When add_timer_on() enqueues a timer into the timer wheel of an -+ * idle CPU then this timer might expire before the next timer event -+ * which is scheduled to wake up that CPU. In case of a completely -+ * idle system the next event might even be infinite time into the -+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and -+ * leaves the inner idle loop so the newly added timer is taken into -+ * account when the CPU goes back to idle and evaluates the timer -+ * wheel for the next timer event. 
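The wake_q helpers above let a path collect wakeups while holding a spinlock and issue them only after dropping it. A kernel-context sketch of the intended pattern follows; my_lock, waiters and struct waiter are hypothetical names, not from the patch:

	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/sched/wake_q.h>
	#include <linux/spinlock.h>

	struct waiter {
		struct list_head node;
		struct task_struct *task;
	};

	static void wake_all_waiters(spinlock_t *my_lock, struct list_head *waiters)
	{
		DEFINE_WAKE_Q(wake_q);
		struct waiter *w;

		spin_lock(my_lock);
		list_for_each_entry(w, waiters, node)
			wake_q_add(&wake_q, w->task);	/* takes a task reference */
		spin_unlock(my_lock);

		wake_up_q(&wake_q);	/* wakes each task, drops the references */
	}

wake_q_add_safe() is the variant for a caller that already holds its own reference and wants wake_q to consume it.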
-+ */ -+void wake_up_idle_cpu(int cpu) -+{ -+ if (cpu == smp_processor_id()) -+ return; -+ -+ set_tsk_need_resched(cpu_rq(cpu)->idle); -+ smp_send_reschedule(cpu); -+} -+ -+void wake_up_nohz_cpu(int cpu) -+{ -+ wake_up_idle_cpu(cpu); -+} -+#endif /* CONFIG_NO_HZ_COMMON */ -+ -+#ifdef CONFIG_HOTPLUG_CPU -+/* -+ * Ensures that the idle task is using init_mm right before its CPU goes -+ * offline. -+ */ -+void idle_task_exit(void) -+{ -+ struct mm_struct *mm = current->active_mm; -+ -+ BUG_ON(cpu_online(smp_processor_id())); -+ -+ if (mm != &init_mm) { -+ switch_mm(mm, &init_mm, current); -+ current->active_mm = &init_mm; -+ finish_arch_post_lock_switch(); -+ } -+ mmdrop(mm); -+} -+ -+/* -+ * Migrate all tasks from the rq, sleeping tasks will be migrated by -+ * try_to_wake_up()->select_task_rq(). -+ * -+ * Called with rq->lock held even though we'er in stop_machine() and -+ * there's no concurrency possible, we hold the required locks anyway -+ * because of lock validation efforts. -+ */ -+static void migrate_tasks(struct rq *dead_rq) -+{ -+ struct rq *rq = dead_rq; -+ struct task_struct *p, *stop = rq->stop; -+ struct skiplist_node *node; -+ int count = 0; -+ -+ /* -+ * Fudge the rq selection such that the below task selection loop -+ * doesn't get stuck on the currently eligible stop task. -+ * -+ * We're currently inside stop_machine() and the rq is either stuck -+ * in the stop_machine_cpu_stop() loop, or we're executing this code, -+ * either way we should never end up calling schedule() until we're -+ * done here. -+ */ -+ rq->stop = NULL; -+ -+ node = &rq->sl_header; -+ while ((node = node->next[0]) != &rq->sl_header) { -+ int dest_cpu; -+ -+ p = skiplist_entry(node, struct task_struct, sl_node); -+ -+ /* skip the running task */ -+ if (task_running(p)) -+ continue; -+ -+ /* -+ * Rules for changing task_struct::cpus_allowed are holding -+ * both pi_lock and rq->lock, such that holding either -+ * stabilizes the mask. -+ * -+ * Drop rq->lock is not quite as disastrous as it usually is -+ * because !cpu_active at this point, which means load-balance -+ * will not interfere. Also, stop-machine. -+ */ -+ raw_spin_unlock(&rq->lock); -+ raw_spin_lock(&p->pi_lock); -+ raw_spin_lock(&rq->lock); -+ -+ /* -+ * Since we're inside stop-machine, _nothing_ should have -+ * changed the task, WARN if weird stuff happened, because in -+ * that case the above rq->lock drop is a fail too. -+ */ -+ if (WARN_ON(task_rq(p) != rq || !task_on_rq_queued(p))) { -+ raw_spin_unlock(&p->pi_lock); -+ continue; -+ } -+ -+ count++; -+ /* Find suitable destination for @next, with force if needed. 
*/ -+ dest_cpu = select_fallback_rq(dead_rq->cpu, p); -+ -+ rq = __migrate_task(rq, p, dest_cpu); -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock(&p->pi_lock); -+ -+ rq = dead_rq; -+ raw_spin_lock(&rq->lock); -+ /* Check queued task all over from the header again */ -+ node = &rq->sl_header; -+ } -+ -+ rq->stop = stop; -+} -+ -+static void set_rq_offline(struct rq *rq) -+{ -+ if (rq->online) -+ rq->online = false; -+} -+#endif /* CONFIG_HOTPLUG_CPU */ -+ -+static void set_rq_online(struct rq *rq) -+{ -+ if (!rq->online) -+ rq->online = true; -+} -+ -+#ifdef CONFIG_SCHED_DEBUG -+ -+static __read_mostly int sched_debug_enabled; -+ -+static int __init sched_debug_setup(char *str) -+{ -+ sched_debug_enabled = 1; -+ -+ return 0; -+} -+early_param("sched_debug", sched_debug_setup); -+ -+static inline bool sched_debug(void) -+{ -+ return sched_debug_enabled; -+} -+#else /* !CONFIG_SCHED_DEBUG */ -+static inline bool sched_debug(void) -+{ -+ return false; -+} -+#endif /* CONFIG_SCHED_DEBUG */ -+ -+#ifdef CONFIG_SMP -+void scheduler_ipi(void) -+{ -+ /* -+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting -+ * TIF_NEED_RESCHED remotely (for the first time) will also send -+ * this IPI. -+ */ -+ preempt_fold_need_resched(); -+ -+ if (!idle_cpu(smp_processor_id()) || need_resched()) -+ return; -+ -+ irq_enter(); -+ irq_exit(); -+} -+ -+void wake_up_if_idle(int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ unsigned long flags; -+ -+ rcu_read_lock(); -+ -+ if (!is_idle_task(rcu_dereference(rq->curr))) -+ goto out; -+ -+ if (set_nr_if_polling(rq->idle)) { -+ trace_sched_wake_idle_without_ipi(cpu); -+ } else { -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ if (is_idle_task(rq->curr)) -+ smp_send_reschedule(cpu); -+ /* Else CPU is not idle, do nothing here */ -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+ } -+ -+out: -+ rcu_read_unlock(); -+} -+ -+bool cpus_share_cache(int this_cpu, int that_cpu) -+{ -+ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); -+} -+#endif /* CONFIG_SMP */ -+ -+/* -+ * Topology list, bottom-up. -+ */ -+static struct sched_domain_topology_level default_topology[] = { -+#ifdef CONFIG_SCHED_SMT -+ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, -+#endif -+#ifdef CONFIG_SCHED_MC -+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, -+#endif -+ { cpu_cpu_mask, SD_INIT_NAME(DIE) }, -+ { NULL, }, -+}; -+ -+static struct sched_domain_topology_level *sched_domain_topology = -+ default_topology; -+ -+#define for_each_sd_topology(tl) \ -+ for (tl = sched_domain_topology; tl->mask; tl++) -+ -+void set_sched_topology(struct sched_domain_topology_level *tl) -+{ -+ if (WARN_ON_ONCE(sched_smp_initialized)) -+ return; -+ -+ sched_domain_topology = tl; -+} -+ -+/* -+ * Initializers for schedule domains -+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains() -+ */ -+ -+int sched_domain_level_max; -+ -+/* -+ * Partition sched domains as specified by the 'ndoms_new' -+ * cpumasks in the array doms_new[] of cpumasks. This compares -+ * doms_new[] to the current sched domain partitioning, doms_cur[]. -+ * It destroys each deleted domain and builds each new domain. -+ * -+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. -+ * The masks don't intersect (don't overlap.) We should setup one -+ * sched domain for each mask. CPUs not in any of the cpumasks will -+ * not be load balanced. If the same cpumask appears both in the -+ * current 'doms_cur' domains and in the new 'doms_new', we can leave -+ * it as it is. 
-+ * -+ * The passed in 'doms_new' should be allocated using -+ * alloc_sched_domains. This routine takes ownership of it and will -+ * free_sched_domains it when done with it. If the caller failed the -+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, -+ * and partition_sched_domains() will fallback to the single partition -+ * 'fallback_doms', it also forces the domains to be rebuilt. -+ * -+ * If doms_new == NULL it will be replaced with cpu_online_mask. -+ * ndoms_new == 0 is a special case for destroying existing domains, -+ * and it will not create the default domain. -+ * -+ * Call with hotplug lock held -+ */ -+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], -+ struct sched_domain_attr *dattr_new) -+{ -+ /** -+ * PDS doesn't depend on sched domains, but just keep this api -+ */ -+} -+ -+/* -+ * used to mark begin/end of suspend/resume: -+ */ -+static int num_cpus_frozen; -+ -+/* -+ * Update cpusets according to cpu_active mask. If cpusets are -+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper -+ * around partition_sched_domains(). -+ * -+ * If we come here as part of a suspend/resume, don't touch cpusets because we -+ * want to restore it back to its original state upon resume anyway. -+ */ -+static void cpuset_cpu_active(void) -+{ -+ if (cpuhp_tasks_frozen) { -+ /* -+ * num_cpus_frozen tracks how many CPUs are involved in suspend -+ * resume sequence. As long as this is not the last online -+ * operation in the resume sequence, just build a single sched -+ * domain, ignoring cpusets. -+ */ -+ partition_sched_domains(1, NULL, NULL); -+ if (--num_cpus_frozen) -+ return; -+ /* -+ * This is the last CPU online operation. So fall through and -+ * restore the original sched domains by considering the -+ * cpuset configurations. -+ */ -+ cpuset_force_rebuild(); -+ } -+ -+ cpuset_update_active_cpus(); -+} -+ -+static int cpuset_cpu_inactive(unsigned int cpu) -+{ -+ if (!cpuhp_tasks_frozen) { -+ cpuset_update_active_cpus(); -+ } else { -+ num_cpus_frozen++; -+ partition_sched_domains(1, NULL, NULL); -+ } -+ return 0; -+} -+ -+int sched_cpu_activate(unsigned int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ unsigned long flags; -+ -+#ifdef CONFIG_SCHED_SMT -+ /* -+ * When going up, increment the number of cores with SMT present. -+ */ -+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) -+ static_branch_inc_cpuslocked(&sched_smt_present); -+#endif -+ set_cpu_active(cpu, true); -+ -+ if (sched_smp_initialized) -+ cpuset_cpu_active(); -+ -+ /* -+ * Put the rq online, if not already. This happens: -+ * -+ * 1) In the early boot process, because we build the real domains -+ * after all cpus have been brought up. -+ * -+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the -+ * domains. -+ */ -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ set_rq_online(rq); -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+ -+ return 0; -+} -+ -+int sched_cpu_deactivate(unsigned int cpu) -+{ -+ int ret; -+ -+ set_cpu_active(cpu, false); -+ /* -+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU -+ * users of this state to go away such that all new such users will -+ * observe it. -+ * -+ * Do sync before park smpboot threads to take care the rcu boost case. -+ */ -+ synchronize_rcu(); -+ -+#ifdef CONFIG_SCHED_SMT -+ /* -+ * When going down, decrement the number of cores with SMT present. 
-+ */ -+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) -+ static_branch_dec_cpuslocked(&sched_smt_present); -+#endif -+ -+ if (!sched_smp_initialized) -+ return 0; -+ -+ ret = cpuset_cpu_inactive(cpu); -+ if (ret) { -+ set_cpu_active(cpu, true); -+ return ret; -+ } -+ return 0; -+} -+ -+static void sched_rq_cpu_starting(unsigned int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ -+ rq->calc_load_update = calc_load_update; -+} -+ -+int sched_cpu_starting(unsigned int cpu) -+{ -+ sched_rq_cpu_starting(cpu); -+ sched_tick_start(cpu); -+ return 0; -+} -+ -+#ifdef CONFIG_HOTPLUG_CPU -+int sched_cpu_dying(unsigned int cpu) -+{ -+ struct rq *rq = cpu_rq(cpu); -+ unsigned long flags; -+ -+ sched_tick_stop(cpu); -+ raw_spin_lock_irqsave(&rq->lock, flags); -+ set_rq_offline(rq); -+ migrate_tasks(rq); -+ raw_spin_unlock_irqrestore(&rq->lock, flags); -+ -+ hrtick_clear(rq); -+ return 0; -+} -+#endif -+ -+#ifdef CONFIG_SMP -+static void sched_init_topology_cpumask_early(void) -+{ -+ int cpu, level; -+ cpumask_t *tmp; -+ -+ for_each_possible_cpu(cpu) { -+ for (level = 0; level < NR_CPU_AFFINITY_CHK_LEVEL; level++) { -+ tmp = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[level]); -+ cpumask_copy(tmp, cpu_possible_mask); -+ cpumask_clear_cpu(cpu, tmp); -+ } -+ per_cpu(sched_cpu_llc_start_mask, cpu) = -+ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); -+ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) = -+ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[1]); -+ } -+} -+ -+static void sched_init_topology_cpumask(void) -+{ -+ int cpu; -+ cpumask_t *chk; -+ -+ for_each_online_cpu(cpu) { -+ chk = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); -+ -+#ifdef CONFIG_SCHED_SMT -+ cpumask_setall(chk); -+ cpumask_clear_cpu(cpu, chk); -+ if (cpumask_and(chk, chk, topology_sibling_cpumask(cpu))) { -+ per_cpu(sched_sibling_cpu, cpu) = cpumask_first(chk); -+ printk(KERN_INFO "pds: cpu #%d affinity check mask - smt 0x%08lx", -+ cpu, (chk++)->bits[0]); -+ } -+#endif -+#ifdef CONFIG_SCHED_MC -+ cpumask_setall(chk); -+ cpumask_clear_cpu(cpu, chk); -+ if (cpumask_and(chk, chk, cpu_coregroup_mask(cpu))) { -+ per_cpu(sched_cpu_llc_start_mask, cpu) = chk; -+ printk(KERN_INFO "pds: cpu #%d affinity check mask - coregroup 0x%08lx", -+ cpu, (chk++)->bits[0]); -+ } -+ cpumask_complement(chk, cpu_coregroup_mask(cpu)); -+ -+ /** -+ * Set up sd_llc_id per CPU -+ */ -+ per_cpu(sd_llc_id, cpu) = -+ cpumask_first(cpu_coregroup_mask(cpu)); -+#else -+ per_cpu(sd_llc_id, cpu) = -+ cpumask_first(topology_core_cpumask(cpu)); -+ -+ per_cpu(sched_cpu_llc_start_mask, cpu) = chk; -+ -+ cpumask_setall(chk); -+ cpumask_clear_cpu(cpu, chk); -+#endif /* NOT CONFIG_SCHED_MC */ -+ if (cpumask_and(chk, chk, topology_core_cpumask(cpu))) -+ printk(KERN_INFO "pds: cpu #%d affinity check mask - core 0x%08lx", -+ cpu, (chk++)->bits[0]); -+ cpumask_complement(chk, topology_core_cpumask(cpu)); -+ -+ if (cpumask_and(chk, chk, cpu_online_mask)) -+ printk(KERN_INFO "pds: cpu #%d affinity check mask - others 0x%08lx", -+ cpu, (chk++)->bits[0]); -+ -+ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) = chk; -+ } -+} -+#endif -+ -+void __init sched_init_smp(void) -+{ -+ /* Move init over to a non-isolated CPU */ -+ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) -+ BUG(); -+ -+ cpumask_copy(&sched_rq_queued_masks[SCHED_RQ_EMPTY], cpu_online_mask); -+ -+ sched_init_topology_cpumask(); -+ -+ sched_smp_initialized = true; -+} -+#else -+void __init sched_init_smp(void) -+{ -+} -+#endif /* CONFIG_SMP */ -+ -+int in_sched_functions(unsigned long addr) 
-+{ -+ return in_lock_functions(addr) || -+ (addr >= (unsigned long)__sched_text_start -+ && addr < (unsigned long)__sched_text_end); -+} -+ -+#ifdef CONFIG_CGROUP_SCHED -+/* task group related information */ -+struct task_group { -+ struct cgroup_subsys_state css; -+ -+ struct rcu_head rcu; -+ struct list_head list; -+ -+ struct task_group *parent; -+ struct list_head siblings; -+ struct list_head children; -+}; -+ -+/* -+ * Default task group. -+ * Every task in system belongs to this group at bootup. -+ */ -+struct task_group root_task_group; -+LIST_HEAD(task_groups); -+ -+/* Cacheline aligned slab cache for task_group */ -+static struct kmem_cache *task_group_cache __read_mostly; -+#endif /* CONFIG_CGROUP_SCHED */ -+ -+void __init sched_init(void) -+{ -+ int i; -+ struct rq *rq; -+ -+ print_scheduler_version(); -+ -+ wait_bit_init(); -+ -+#ifdef CONFIG_SMP -+ for (i = 0; i < NR_SCHED_RQ_QUEUED_LEVEL; i++) -+ cpumask_clear(&sched_rq_queued_masks[i]); -+ cpumask_setall(&sched_rq_queued_masks[SCHED_RQ_EMPTY]); -+ set_bit(SCHED_RQ_EMPTY, sched_rq_queued_masks_bitmap); -+ -+ cpumask_setall(&sched_rq_pending_masks[SCHED_RQ_EMPTY]); -+ set_bit(SCHED_RQ_EMPTY, sched_rq_pending_masks_bitmap); -+#else -+ uprq = &per_cpu(runqueues, 0); -+#endif -+ -+#ifdef CONFIG_CGROUP_SCHED -+ task_group_cache = KMEM_CACHE(task_group, 0); -+ -+ list_add(&root_task_group.list, &task_groups); -+ INIT_LIST_HEAD(&root_task_group.children); -+ INIT_LIST_HEAD(&root_task_group.siblings); -+#endif /* CONFIG_CGROUP_SCHED */ -+ for_each_possible_cpu(i) { -+ rq = cpu_rq(i); -+ FULL_INIT_SKIPLIST_NODE(&rq->sl_header); -+ raw_spin_lock_init(&rq->lock); -+ rq->dither = 0; -+ rq->nr_running = rq->nr_uninterruptible = 0; -+ rq->calc_load_active = 0; -+ rq->calc_load_update = jiffies + LOAD_FREQ; -+#ifdef CONFIG_SMP -+ rq->online = false; -+ rq->cpu = i; -+ -+ rq->queued_level = SCHED_RQ_EMPTY; -+ rq->pending_level = SCHED_RQ_EMPTY; -+#ifdef CONFIG_SCHED_SMT -+ per_cpu(sched_sibling_cpu, i) = i; -+ rq->active_balance = 0; -+#endif -+#endif -+ rq->nr_switches = 0; -+ atomic_set(&rq->nr_iowait, 0); -+ hrtick_rq_init(rq); -+ } -+#ifdef CONFIG_SMP -+ /* Set rq->online for cpu 0 */ -+ cpu_rq(0)->online = true; -+#endif -+ -+ /* -+ * The boot idle thread does lazy MMU switching as well: -+ */ -+ mmgrab(&init_mm); -+ enter_lazy_tlb(&init_mm, current); -+ -+ /* -+ * Make us the idle thread. Technically, schedule() should not be -+ * called from this thread, however somewhere below it might be, -+ * but because we are the idle thread, we just pick up running again -+ * when this runqueue becomes "idle". -+ */ -+ init_idle(current, smp_processor_id()); -+ -+ calc_load_update = jiffies + LOAD_FREQ; -+ -+#ifdef CONFIG_SMP -+ idle_thread_set_boot_cpu(); -+ -+ sched_init_topology_cpumask_early(); -+#endif /* SMP */ -+ -+ init_schedstats(); -+ -+ psi_init(); -+} -+ -+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP -+static inline int preempt_count_equals(int preempt_offset) -+{ -+ int nested = preempt_count() + rcu_preempt_depth(); -+ -+ return (nested == preempt_offset); -+} -+ -+void __might_sleep(const char *file, int line, int preempt_offset) -+{ -+ /* -+ * Blocking primitives will set (and therefore destroy) current->state, -+ * since we will exit with TASK_RUNNING make sure we enter with it, -+ * otherwise we will destroy state. 
-+ */ -+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, -+ "do not call blocking ops when !TASK_RUNNING; " -+ "state=%lx set at [<%p>] %pS\n", -+ current->state, -+ (void *)current->task_state_change, -+ (void *)current->task_state_change); -+ -+ ___might_sleep(file, line, preempt_offset); -+} -+EXPORT_SYMBOL(__might_sleep); -+ -+void ___might_sleep(const char *file, int line, int preempt_offset) -+{ -+ /* Ratelimiting timestamp: */ -+ static unsigned long prev_jiffy; -+ -+ unsigned long preempt_disable_ip; -+ -+ /* WARN_ON_ONCE() by default, no rate limit required: */ -+ rcu_sleep_check(); -+ -+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && -+ !is_idle_task(current)) || -+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || -+ oops_in_progress) -+ return; -+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) -+ return; -+ prev_jiffy = jiffies; -+ -+ /* Save this before calling printk(), since that will clobber it: */ -+ preempt_disable_ip = get_preempt_disable_ip(current); -+ -+ printk(KERN_ERR -+ "BUG: sleeping function called from invalid context at %s:%d\n", -+ file, line); -+ printk(KERN_ERR -+ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", -+ in_atomic(), irqs_disabled(), -+ current->pid, current->comm); -+ -+ if (task_stack_end_corrupted(current)) -+ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); -+ -+ debug_show_held_locks(current); -+ if (irqs_disabled()) -+ print_irqtrace_events(current); -+#ifdef CONFIG_DEBUG_PREEMPT -+ if (!preempt_count_equals(preempt_offset)) { -+ pr_err("Preemption disabled at:"); -+ print_ip_sym(preempt_disable_ip); -+ pr_cont("\n"); -+ } -+#endif -+ dump_stack(); -+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); -+} -+EXPORT_SYMBOL(___might_sleep); -+ -+void __cant_sleep(const char *file, int line, int preempt_offset) -+{ -+ static unsigned long prev_jiffy; -+ -+ if (irqs_disabled()) -+ return; -+ -+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) -+ return; -+ -+ if (preempt_count() > preempt_offset) -+ return; -+ -+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) -+ return; -+ prev_jiffy = jiffies; -+ -+ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); -+ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", -+ in_atomic(), irqs_disabled(), -+ current->pid, current->comm); -+ -+ debug_show_held_locks(current); -+ dump_stack(); -+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); -+} -+EXPORT_SYMBOL_GPL(__cant_sleep); -+#endif -+ -+#ifdef CONFIG_MAGIC_SYSRQ -+void normalize_rt_tasks(void) -+{ -+ struct task_struct *g, *p; -+ struct sched_attr attr = { -+ .sched_policy = SCHED_NORMAL, -+ }; -+ -+ read_lock(&tasklist_lock); -+ for_each_process_thread(g, p) { -+ /* -+ * Only normalize user tasks: -+ */ -+ if (p->flags & PF_KTHREAD) -+ continue; -+ -+ if (!rt_task(p)) { -+ /* -+ * Renice negative nice level userspace -+ * tasks back to 0: -+ */ -+ if (task_nice(p) < 0) -+ set_user_nice(p, 0); -+ continue; -+ } -+ -+ __sched_setscheduler(p, &attr, false, false); -+ } -+ read_unlock(&tasklist_lock); -+} -+#endif /* CONFIG_MAGIC_SYSRQ */ -+ -+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) -+/* -+ * These functions are only useful for the IA64 MCA handling, or kdb. -+ * -+ * They can only be called when the whole system has been -+ * stopped - every CPU needs to be quiescent, and no scheduling -+ * activity can take place. 
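normalize_rt_tasks() above is what the magic-SysRq 'n' key reaches (in mainline the binding lives in drivers/tty/sysrq.c); it demotes every user-space real-time task back to SCHED_NORMAL. As a sketch, it can be triggered from user space like so:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sysrq-trigger", "w");	/* needs root */

		if (!f)
			return 1;
		fputc('n', f);	/* SysRq 'n': renormalize RT tasks */
		fclose(f);
		return 0;
	}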
Using them for anything else would -+ * be a serious bug, and as a result, they aren't even visible -+ * under any other configuration. -+ */ -+ -+/** -+ * curr_task - return the current task for a given CPU. -+ * @cpu: the processor in question. -+ * -+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! -+ * -+ * Return: The current task for @cpu. -+ */ -+struct task_struct *curr_task(int cpu) -+{ -+ return cpu_curr(cpu); -+} -+ -+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ -+ -+#ifdef CONFIG_IA64 -+/** -+ * set_curr_task - set the current task for a given CPU. -+ * @cpu: the processor in question. -+ * @p: the task pointer to set. -+ * -+ * Description: This function must only be used when non-maskable interrupts -+ * are serviced on a separate stack. It allows the architecture to switch the -+ * notion of the current task on a CPU in a non-blocking manner. This function -+ * must be called with all CPU's synchronised, and interrupts disabled, the -+ * and caller must save the original value of the current task (see -+ * curr_task() above) and restore that value before reenabling interrupts and -+ * re-starting the system. -+ * -+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! -+ */ -+void ia64_set_curr_task(int cpu, struct task_struct *p) -+{ -+ cpu_curr(cpu) = p; -+} -+ -+#endif -+ -+#ifdef CONFIG_SCHED_DEBUG -+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, -+ struct seq_file *m) -+{} -+ -+void proc_sched_set_task(struct task_struct *p) -+{} -+#endif -+ -+#ifdef CONFIG_CGROUP_SCHED -+static void sched_free_group(struct task_group *tg) -+{ -+ kmem_cache_free(task_group_cache, tg); -+} -+ -+/* allocate runqueue etc for a new task group */ -+struct task_group *sched_create_group(struct task_group *parent) -+{ -+ struct task_group *tg; -+ -+ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); -+ if (!tg) -+ return ERR_PTR(-ENOMEM); -+ -+ return tg; -+} -+ -+void sched_online_group(struct task_group *tg, struct task_group *parent) -+{ -+} -+ -+/* rcu callback to free various structures associated with a task group */ -+static void sched_free_group_rcu(struct rcu_head *rhp) -+{ -+ /* Now it should be safe to free those cfs_rqs */ -+ sched_free_group(container_of(rhp, struct task_group, rcu)); -+} -+ -+void sched_destroy_group(struct task_group *tg) -+{ -+ /* Wait for possible concurrent references to cfs_rqs complete */ -+ call_rcu(&tg->rcu, sched_free_group_rcu); -+} -+ -+void sched_offline_group(struct task_group *tg) -+{ -+} -+ -+static inline struct task_group *css_tg(struct cgroup_subsys_state *css) -+{ -+ return css ? 
container_of(css, struct task_group, css) : NULL; -+} -+ -+static struct cgroup_subsys_state * -+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) -+{ -+ struct task_group *parent = css_tg(parent_css); -+ struct task_group *tg; -+ -+ if (!parent) { -+ /* This is early initialization for the top cgroup */ -+ return &root_task_group.css; -+ } -+ -+ tg = sched_create_group(parent); -+ if (IS_ERR(tg)) -+ return ERR_PTR(-ENOMEM); -+ return &tg->css; -+} -+ -+/* Expose task group only after completing cgroup initialization */ -+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) -+{ -+ struct task_group *tg = css_tg(css); -+ struct task_group *parent = css_tg(css->parent); -+ -+ if (parent) -+ sched_online_group(tg, parent); -+ return 0; -+} -+ -+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) -+{ -+ struct task_group *tg = css_tg(css); -+ -+ sched_offline_group(tg); -+} -+ -+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) -+{ -+ struct task_group *tg = css_tg(css); -+ -+ /* -+ * Relies on the RCU grace period between css_released() and this. -+ */ -+ sched_free_group(tg); -+} -+ -+static void cpu_cgroup_fork(struct task_struct *task) -+{ -+} -+ -+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) -+{ -+ return 0; -+} -+ -+static void cpu_cgroup_attach(struct cgroup_taskset *tset) -+{ -+} -+ -+static struct cftype cpu_legacy_files[] = { -+ { } /* Terminate */ -+}; -+ -+static struct cftype cpu_files[] = { -+ { } /* terminate */ -+}; -+ -+static int cpu_extra_stat_show(struct seq_file *sf, -+ struct cgroup_subsys_state *css) -+{ -+ return 0; -+} -+ -+struct cgroup_subsys cpu_cgrp_subsys = { -+ .css_alloc = cpu_cgroup_css_alloc, -+ .css_online = cpu_cgroup_css_online, -+ .css_released = cpu_cgroup_css_released, -+ .css_free = cpu_cgroup_css_free, -+ .css_extra_stat_show = cpu_extra_stat_show, -+ .fork = cpu_cgroup_fork, -+ .can_attach = cpu_cgroup_can_attach, -+ .attach = cpu_cgroup_attach, -+ .legacy_cftypes = cpu_files, -+ .legacy_cftypes = cpu_legacy_files, -+ .dfl_cftypes = cpu_files, -+ .early_init = true, -+ .threaded = true, -+}; -+#endif /* CONFIG_CGROUP_SCHED */ -+ -+#undef CREATE_TRACE_POINTS -diff --git a/kernel/sched/pds_sched.h b/kernel/sched/pds_sched.h -new file mode 100644 -index 000000000000..87cebcba69f9 ---- /dev/null -+++ b/kernel/sched/pds_sched.h -@@ -0,0 +1,431 @@ -+#ifndef PDS_SCHED_H -+#define PDS_SCHED_H -+ -+#include <linux/sched.h> -+ -+#include <linux/sched/clock.h> -+#include <linux/sched/cpufreq.h> -+#include <linux/sched/cputime.h> -+#include <linux/sched/debug.h> -+#include <linux/sched/init.h> -+#include <linux/sched/isolation.h> -+#include <linux/sched/loadavg.h> -+#include <linux/sched/mm.h> -+#include <linux/sched/nohz.h> -+#include <linux/sched/signal.h> -+#include <linux/sched/stat.h> -+#include <linux/sched/sysctl.h> -+#include <linux/sched/task.h> -+#include <linux/sched/topology.h> -+#include <linux/sched/wake_q.h> -+ -+#include <uapi/linux/sched/types.h> -+ -+#include <linux/cpufreq.h> -+#include <linux/cpuidle.h> -+#include <linux/cpuset.h> -+#include <linux/ctype.h> -+#include <linux/kthread.h> -+#include <linux/livepatch.h> -+#include <linux/membarrier.h> -+#include <linux/proc_fs.h> -+#include <linux/psi.h> -+#include <linux/slab.h> -+#include <linux/stop_machine.h> -+#include <linux/suspend.h> -+#include <linux/swait.h> -+#include <linux/syscalls.h> -+#include <linux/tsacct_kern.h> -+ -+#include <asm/tlb.h> -+ -+#ifdef CONFIG_PARAVIRT -+# include <asm/paravirt.h> -+#endif -+ 
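One wrinkle in the cpu_cgrp_subsys definition above: .legacy_cftypes is initialized twice, first to cpu_files and then to cpu_legacy_files. C allows duplicate designated initializers and the last one wins, so cpu_legacy_files takes effect and the first assignment is silently dropped (GCC warns about this with -Woverride-init). A tiny standalone illustration of the rule:

	#include <stdio.h>

	struct demo { int a; };

	/* duplicate designated initializer: the later one overrides */
	static struct demo d = { .a = 1, .a = 2 };

	int main(void)
	{
		printf("%d\n", d.a);	/* prints 2 */
		return 0;
	}

Both cftype arrays here are empty terminators, so the duplicate is harmless in practice, though it looks unintentional.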
-+#include "cpupri.h" -+ -+/* task_struct::on_rq states: */ -+#define TASK_ON_RQ_QUEUED 1 -+#define TASK_ON_RQ_MIGRATING 2 -+ -+static inline int task_on_rq_queued(struct task_struct *p) -+{ -+ return p->on_rq == TASK_ON_RQ_QUEUED; -+} -+ -+static inline int task_on_rq_migrating(struct task_struct *p) -+{ -+ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; -+} -+ -+/* -+ * This is the main, per-CPU runqueue data structure. -+ * This data should only be modified by the local cpu. -+ */ -+struct rq { -+ /* runqueue lock: */ -+ raw_spinlock_t lock; -+ -+ struct task_struct *curr, *idle, *stop; -+ struct mm_struct *prev_mm; -+ -+ struct skiplist_node sl_header; -+ -+ /* switch count */ -+ u64 nr_switches; -+ -+ atomic_t nr_iowait; -+ -+#ifdef CONFIG_SMP -+ int cpu; /* cpu of this runqueue */ -+ bool online; -+ -+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ -+ struct sched_avg avg_irq; -+#endif -+ -+ unsigned long queued_level; -+ unsigned long pending_level; -+ -+#ifdef CONFIG_SCHED_SMT -+ int active_balance; -+ struct cpu_stop_work active_balance_work; -+#endif -+#endif /* CONFIG_SMP */ -+#ifdef CONFIG_IRQ_TIME_ACCOUNTING -+ u64 prev_irq_time; -+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ -+#ifdef CONFIG_PARAVIRT -+ u64 prev_steal_time; -+#endif /* CONFIG_PARAVIRT */ -+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING -+ u64 prev_steal_time_rq; -+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ -+ -+ /* calc_load related fields */ -+ unsigned long calc_load_update; -+ long calc_load_active; -+ -+ u64 clock, last_tick; -+ u64 clock_task; -+ int dither; -+ -+ unsigned long nr_running; -+ unsigned long nr_uninterruptible; -+ -+#ifdef CONFIG_SCHED_HRTICK -+#ifdef CONFIG_SMP -+ int hrtick_csd_pending; -+ call_single_data_t hrtick_csd; -+#endif -+ struct hrtimer hrtick_timer; -+#endif -+ -+#ifdef CONFIG_SCHEDSTATS -+ -+ /* latency stats */ -+ struct sched_info rq_sched_info; -+ unsigned long long rq_cpu_time; -+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ -+ -+ /* sys_sched_yield() stats */ -+ unsigned int yld_count; -+ -+ /* schedule() stats */ -+ unsigned int sched_switch; -+ unsigned int sched_count; -+ unsigned int sched_goidle; -+ -+ /* try_to_wake_up() stats */ -+ unsigned int ttwu_count; -+ unsigned int ttwu_local; -+#endif /* CONFIG_SCHEDSTATS */ -+#ifdef CONFIG_CPU_IDLE -+ /* Must be inspected within a rcu lock section */ -+ struct cpuidle_state *idle_state; -+#endif -+}; -+ -+extern unsigned long calc_load_update; -+extern atomic_long_t calc_load_tasks; -+ -+extern void calc_global_load_tick(struct rq *this_rq); -+extern long calc_load_fold_active(struct rq *this_rq, long adjust); -+ -+#ifndef CONFIG_SMP -+extern struct rq *uprq; -+#define cpu_rq(cpu) (uprq) -+#define this_rq() (uprq) -+#define raw_rq() (uprq) -+#define task_rq(p) (uprq) -+#define cpu_curr(cpu) ((uprq)->curr) -+#else /* CONFIG_SMP */ -+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); -+#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) -+#define this_rq() this_cpu_ptr(&runqueues) -+#define raw_rq() raw_cpu_ptr(&runqueues) -+#define task_rq(p) cpu_rq(task_cpu(p)) -+#define cpu_curr(cpu) (cpu_rq(cpu)->curr) -+ -+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) -+void register_sched_domain_sysctl(void); -+void unregister_sched_domain_sysctl(void); -+#else -+static inline void register_sched_domain_sysctl(void) -+{ -+} -+static inline void unregister_sched_domain_sysctl(void) -+{ -+} -+#endif -+ -+#endif /* CONFIG_SMP */ -+ -+#ifndef arch_scale_freq_capacity -+static __always_inline -+unsigned long arch_scale_freq_capacity(int cpu) -+{ -+ return SCHED_CAPACITY_SCALE; -+} -+#endif -+ -+static inline u64 __rq_clock_broken(struct rq *rq) -+{ -+ return READ_ONCE(rq->clock); -+} -+ -+static inline u64 rq_clock(struct rq *rq) -+{ -+ /* -+ * Relax lockdep_assert_held() checking as in VRQ, call to -+ * sched_info_xxxx() may not held rq->lock -+ * lockdep_assert_held(&rq->lock); -+ */ -+ return rq->clock; -+} -+ -+static inline u64 rq_clock_task(struct rq *rq) -+{ -+ /* -+ * Relax lockdep_assert_held() checking as in VRQ, call to -+ * sched_info_xxxx() may not held rq->lock -+ * lockdep_assert_held(&rq->lock); -+ */ -+ return rq->clock_task; -+} -+ -+/* -+ * {de,en}queue flags: -+ * -+ * DEQUEUE_SLEEP - task is no longer runnable -+ * ENQUEUE_WAKEUP - task just became runnable -+ * -+ */ -+ -+#define DEQUEUE_SLEEP 0x01 -+ -+#define ENQUEUE_WAKEUP 0x01 -+ -+ -+/* -+ * Below are scheduler API which using in other kernel code -+ * It use the dummy rq_flags -+ * ToDo : PDS need to support these APIs for compatibility with mainline -+ * scheduler code. 
-+ */ -+struct rq_flags { -+ unsigned long flags; -+}; -+ -+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) -+ __acquires(rq->lock); -+ -+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) -+ __acquires(p->pi_lock) -+ __acquires(rq->lock); -+ -+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) -+ __releases(rq->lock) -+{ -+ raw_spin_unlock(&rq->lock); -+} -+ -+static inline void -+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) -+ __releases(rq->lock) -+ __releases(p->pi_lock) -+{ -+ raw_spin_unlock(&rq->lock); -+ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); -+} -+ -+static inline void -+rq_unlock_irq(struct rq *rq, struct rq_flags *rf) -+ __releases(rq->lock) -+{ -+ raw_spin_unlock_irq(&rq->lock); -+} -+ -+static inline struct rq * -+this_rq_lock_irq(struct rq_flags *rf) -+ __acquires(rq->lock) -+{ -+ struct rq *rq; -+ -+ local_irq_disable(); -+ rq = this_rq(); -+ raw_spin_lock(&rq->lock); -+ -+ return rq; -+} -+ -+static inline bool task_running(struct task_struct *p) -+{ -+ return p->on_cpu; -+} -+ -+extern struct static_key_false sched_schedstats; -+ -+static inline void sched_ttwu_pending(void) { } -+ -+#ifdef CONFIG_CPU_IDLE -+static inline void idle_set_state(struct rq *rq, -+ struct cpuidle_state *idle_state) -+{ -+ rq->idle_state = idle_state; -+} -+ -+static inline struct cpuidle_state *idle_get_state(struct rq *rq) -+{ -+ WARN_ON(!rcu_read_lock_held()); -+ return rq->idle_state; -+} -+#else -+static inline void idle_set_state(struct rq *rq, -+ struct cpuidle_state *idle_state) -+{ -+} -+ -+static inline struct cpuidle_state *idle_get_state(struct rq *rq) -+{ -+ return NULL; -+} -+#endif -+ -+static inline int cpu_of(const struct rq *rq) -+{ -+#ifdef CONFIG_SMP -+ return rq->cpu; -+#else -+ return 0; -+#endif -+} -+ -+#include "stats.h" -+ -+#ifdef CONFIG_IRQ_TIME_ACCOUNTING -+struct irqtime { -+ u64 total; -+ u64 tick_delta; -+ u64 irq_start_time; -+ struct u64_stats_sync sync; -+}; -+ -+DECLARE_PER_CPU(struct irqtime, cpu_irqtime); -+ -+/* -+ * Returns the irqtime minus the softirq time computed by ksoftirqd. -+ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime -+ * and never move forward. -+ */ -+static inline u64 irq_time_read(int cpu) -+{ -+ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); -+ unsigned int seq; -+ u64 total; -+ -+ do { -+ seq = __u64_stats_fetch_begin(&irqtime->sync); -+ total = irqtime->total; -+ } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); -+ -+ return total; -+} -+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ -+ -+#ifdef CONFIG_CPU_FREQ -+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); -+ -+/** -+ * cpufreq_update_util - Take a note about CPU utilization changes. -+ * @rq: Runqueue to carry out the update for. -+ * @flags: Update reason flags. -+ * -+ * This function is called by the scheduler on the CPU whose utilization is -+ * being updated. -+ * -+ * It can only be called from RCU-sched read-side critical sections. -+ * -+ * The way cpufreq is currently arranged requires it to evaluate the CPU -+ * performance state (frequency/voltage) on a regular basis to prevent it from -+ * being stuck in a completely inadequate performance level for too long. -+ * That is not guaranteed to happen if the updates are only triggered from CFS -+ * and DL, though, because they may not be coming in if only RT tasks are -+ * active all the time (or there are RT tasks only). 
-+ * -+ * As a workaround for that issue, this function is called periodically by the -+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling, -+ * but that really is a band-aid. Going forward it should be replaced with -+ * solutions targeted more specifically at RT tasks. -+ */ -+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) -+{ -+ struct update_util_data *data; -+ -+ data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)); -+ if (data) -+ data->func(data, rq_clock(rq), flags); -+} -+ -+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) -+{ -+ if (cpu_of(rq) == smp_processor_id()) -+ cpufreq_update_util(rq, flags); -+} -+#else -+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} -+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {} -+#endif /* CONFIG_CPU_FREQ */ -+ -+#ifdef CONFIG_NO_HZ_FULL -+extern int __init sched_tick_offload_init(void); -+#else -+static inline int sched_tick_offload_init(void) { return 0; } -+#endif -+ -+#ifdef arch_scale_freq_capacity -+#ifndef arch_scale_freq_invariant -+#define arch_scale_freq_invariant() (true) -+#endif -+#else /* arch_scale_freq_capacity */ -+#define arch_scale_freq_invariant() (false) -+#endif -+ -+extern void schedule_idle(void); -+ -+/* -+ * !! For sched_setattr_nocheck() (kernel) only !! -+ * -+ * This is actually gross. :( -+ * -+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE -+ * tasks, but still be able to sleep. We need this on platforms that cannot -+ * atomically change clock frequency. Remove once fast switching will be -+ * available on such platforms. -+ * -+ * SUGOV stands for SchedUtil GOVernor. -+ */ -+#define SCHED_FLAG_SUGOV 0x10000000 -+ -+#endif /* PDS_SCHED_H */ -diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c -index befce29bd882..48ef3e62e7d4 100644 ---- a/kernel/sched/pelt.c -+++ b/kernel/sched/pelt.c -@@ -234,6 +234,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna - WRITE_ONCE(sa->util_avg, sa->util_sum / divider); - } - -+#ifndef CONFIG_SCHED_PDS - /* - * sched_entity: - * -@@ -345,6 +346,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) - - return 0; - } -+#endif - - #ifdef CONFIG_HAVE_SCHED_AVG_IRQ - /* -diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h -index 7489d5f56960..6dc3c79da1ec 100644 ---- a/kernel/sched/pelt.h -+++ b/kernel/sched/pelt.h -@@ -1,11 +1,13 @@ - #ifdef CONFIG_SMP - #include "sched-pelt.h" - -+#ifndef CONFIG_SCHED_PDS - int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); - int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); - int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq); - int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); - int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); -+#endif - - #ifdef CONFIG_HAVE_SCHED_AVG_IRQ - int update_irq_load_avg(struct rq *rq, u64 running); -@@ -17,6 +19,7 @@ update_irq_load_avg(struct rq *rq, u64 running) - } - #endif - -+#ifndef CONFIG_SCHED_PDS - /* - * When a task is dequeued, its estimated utilization should not be update if - * its util_avg has not been updated at least once. 
-@@ -137,9 +140,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) - return rq_clock_pelt(rq_of(cfs_rq)); - } - #endif -+#endif /* CONFIG_SCHED_PDS */ - - #else - -+#ifndef CONFIG_SCHED_PDS - static inline int - update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) - { -@@ -157,6 +162,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) - { - return 0; - } -+#endif - - static inline int - update_irq_load_avg(struct rq *rq, u64 running) -diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index efa686eeff26..758881a25f15 100644 ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -2,6 +2,10 @@ - /* - * Scheduler internal types and methods: - */ -+#ifdef CONFIG_SCHED_PDS -+#include "pds_sched.h" -+#else -+ - #include <linux/sched.h> - - #include <linux/sched/autogroup.h> -@@ -2341,3 +2345,4 @@ static inline bool sched_energy_enabled(void) - static inline bool sched_energy_enabled(void) { return false; } - - #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ -+#endif /* !CONFIG_SCHED_PDS */ -diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c -index 750fb3c67eed..45bd43942575 100644 ---- a/kernel/sched/stats.c -+++ b/kernel/sched/stats.c -@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v) - } else { - struct rq *rq; - #ifdef CONFIG_SMP -+#ifndef CONFIG_SCHED_PDS - struct sched_domain *sd; - int dcount = 0; -+#endif - #endif - cpu = (unsigned long)(v - 2); - rq = cpu_rq(cpu); -@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v) - seq_printf(seq, "\n"); - - #ifdef CONFIG_SMP -+#ifndef CONFIG_SCHED_PDS - /* domain-specific stats */ - rcu_read_lock(); - for_each_domain(cpu, sd) { -@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v) - sd->ttwu_move_balance); - } - rcu_read_unlock(); -+#endif - #endif - } - return 0; -diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index c9ec050bcf46..9e642c1d75e6 100644 ---- a/kernel/sysctl.c -+++ b/kernel/sysctl.c -@@ -131,8 +131,12 @@ static int __maybe_unused four = 4; - static unsigned long zero_ul; - static unsigned long one_ul = 1; - static unsigned long long_max = LONG_MAX; --static int one_hundred = 100; --static int one_thousand = 1000; -+static int __read_mostly one_hundred = 100; -+static int __read_mostly one_thousand = 1000; -+#ifdef CONFIG_SCHED_PDS -+extern int rr_interval; -+extern int sched_yield_type; -+#endif - #ifdef CONFIG_PRINTK - static int ten_thousand = 10000; - #endif -@@ -305,7 +309,7 @@ static struct ctl_table sysctl_base_table[] = { - { } - }; - --#ifdef CONFIG_SCHED_DEBUG -+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_PDS) - static int min_sched_granularity_ns = 100000; /* 100 usecs */ - static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ - static int min_wakeup_granularity_ns; /* 0 usecs */ -@@ -322,6 +326,7 @@ static int max_extfrag_threshold = 1000; - #endif - - static struct ctl_table kern_table[] = { -+#ifndef CONFIG_SCHED_PDS - { - .procname = "sched_child_runs_first", - .data = &sysctl_sched_child_runs_first, -@@ -487,6 +492,7 @@ static struct ctl_table kern_table[] = { - .extra2 = &one, - }, - #endif -+#endif /* !CONFIG_SCHED_PDS */ - #ifdef CONFIG_PROVE_LOCKING - { - .procname = "prove_locking", -@@ -1059,6 +1065,26 @@ static struct ctl_table kern_table[] = { - .proc_handler = proc_dointvec, - }, - #endif -+#ifdef CONFIG_SCHED_PDS -+ { -+ .procname = "rr_interval", -+ .data = &rr_interval, -+ .maxlen = sizeof (int), -+ .mode = 0644, -+ .proc_handler = 
&proc_dointvec_minmax, -+ .extra1 = &one, -+ .extra2 = &one_thousand, -+ }, -+ { -+ .procname = "yield_type", -+ .data = &sched_yield_type, -+ .maxlen = sizeof (int), -+ .mode = 0644, -+ .proc_handler = &proc_dointvec_minmax, -+ .extra1 = &zero, -+ .extra2 = &two, -+ }, -+#endif - #if defined(CONFIG_S390) && defined(CONFIG_SMP) - { - .procname = "spin_retry", -diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index 0a426f4e3125..2692b89a70d5 100644 ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -791,6 +791,7 @@ check_timers_list(struct list_head *timers, - return 0; - } - -+#ifndef CONFIG_SCHED_PDS - static inline void check_dl_overrun(struct task_struct *tsk) - { - if (tsk->dl.dl_overrun) { -@@ -798,6 +799,7 @@ static inline void check_dl_overrun(struct task_struct *tsk) - __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); - } - } -+#endif - - /* - * Check for any per-thread CPU timers that have fired and move them off -@@ -812,8 +814,10 @@ static void check_thread_timers(struct task_struct *tsk, - u64 expires; - unsigned long soft; - -+#ifndef CONFIG_SCHED_PDS - if (dl_task(tsk)) - check_dl_overrun(tsk); -+#endif - - /* - * If cputime_expires is zero, then there are no active -@@ -829,7 +833,7 @@ static void check_thread_timers(struct task_struct *tsk, - tsk_expires->virt_exp = expires; - - tsk_expires->sched_exp = check_timers_list(++timers, firing, -- tsk->se.sum_exec_runtime); -+ tsk_seruntime(tsk)); - - /* - * Check for the special case thread timers. -@@ -839,7 +843,7 @@ static void check_thread_timers(struct task_struct *tsk, - unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); - - if (hard != RLIM_INFINITY && -- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { -+ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { - /* - * At the hard limit, we just die. - * No need to calculate anything else now. -@@ -851,7 +855,7 @@ static void check_thread_timers(struct task_struct *tsk, - __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); - return; - } -- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { -+ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { - /* - * At the soft limit, send a SIGXCPU every second. - */ -@@ -1091,7 +1095,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk) - struct task_cputime task_sample; - - task_cputime(tsk, &task_sample.utime, &task_sample.stime); -- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; -+ task_sample.sum_exec_runtime = tsk_seruntime(tsk); - if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) - return 1; - } -@@ -1121,8 +1125,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk) - return 1; - } - -+#ifndef CONFIG_SCHED_PDS - if (dl_task(tsk) && tsk->dl.dl_overrun) - return 1; -+#endif - - return 0; - } -diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c -index 9d402e7fc949..89e56560cba2 100644 ---- a/kernel/trace/trace_selftest.c -+++ b/kernel/trace/trace_selftest.c -@@ -1045,10 +1045,15 @@ static int trace_wakeup_test_thread(void *data) - { - /* Make this a -deadline thread */ - static const struct sched_attr attr = { -+#ifdef CONFIG_SCHED_PDS -+ /* No deadline on BFS, use RR */ -+ .sched_policy = SCHED_RR, -+#else - .sched_policy = SCHED_DEADLINE, - .sched_runtime = 100000ULL, - .sched_deadline = 10000000ULL, - .sched_period = 10000000ULL -+#endif - }; - struct wakeup_test_data *x = data; - +From 1f5dc25333082122907dee4306627bd6da82b4bc Mon Sep 17 00:00:00 2001
+From: Tk-Glitch <ti3nou@gmail.com>
+Date: Mon, 8 Jul 2019 03:48:37 +0200
+Subject: PDS 099o, 5.2 rebase
+
+
+diff --git a/Documentation/scheduler/sched-PDS-mq.txt b/Documentation/scheduler/sched-PDS-mq.txt
+new file mode 100644
+index 000000000000..709e86f6487e
+--- /dev/null
++++ b/Documentation/scheduler/sched-PDS-mq.txt
+@@ -0,0 +1,56 @@
++ Priority and Deadline based Skiplist multiple queue Scheduler
++ -------------------------------------------------------------
++
++CONTENT
++========
++
++ 0. Development
++ 1. Overview
++ 1.1 Design goal
++ 1.2 Design summary
++ 2. Design Detail
++ 2.1 Skip list implementation
++ 2.2 Task preempt
++ 2.3 Task policy, priority and deadline
++ 2.4 Task selection
++ 2.5 Run queue balance
++ 2.6 Task migration
++
++
++0. Development
++==============
++
++The Priority and Deadline based Skiplist multiple queue scheduler, referred
++to as PDS from here on, was developed upon the VRQ (Variable Run Queue)
++enhancement patchset for BFS (Brain Fuck Scheduler by Con Kolivas). PDS
++inherits the existing design from VRQ and was inspired by Con Kolivas'
++introduction of the skip list data structure to the scheduler. However, PDS
++differs from MuQSS (Multiple Queue Skiplist Scheduler, the successor to BFS)
++in many ways.
++
++1. Overview
++===========
++
++1.1 Design goal
++---------------
++
++PDS is designed to keep the cpu process scheduler code simple while remaining
++efficient and scalable. Simple: the scheduler code is easy to read and the
++behaviour of the scheduler is easy to predict. Efficient: the scheduler
++balances throughput and task interactivity at the same time for tasks with
++different behaviour. Scalable: the performance of the scheduler should stay
++in good shape as the workload or the number of cpus grows.
++
++1.2 Design summary
++------------------
++
++PDS is a multiple run queue cpu scheduler. Each cpu has its own run queue. A
++heavily customized skip list is used as the backend data structure of the
++cpu run queue. Tasks in a run queue are sorted by priority and then by
++virtual deadline (simply "deadline" from here on). In PDS, balancing actions
++among run queues are kept to a minimum to reduce migration cost. The cpumask
++data structure is used widely in cpu affinity checking and cpu preemption/
++selection to keep PDS scalable as the number of cpus grows.
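++
++For example, among queued tasks an rt task always sorts ahead of
++SCHED_NORMAL tasks regardless of deadline, while two SCHED_NORMAL tasks of
++the same priority are ordered by whichever deadline is earlier; task
++selection then simply takes the head of the skip list.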
++
++
++To be continued...
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index f0c86fbb3b48..0e4ff65132c9 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -77,6 +77,7 @@ show up in /proc/sys/kernel:
+ - randomize_va_space
+ - real-root-dev ==> Documentation/admin-guide/initrd.rst
+ - reboot-cmd [ SPARC only ]
++- rr_interval
+ - rtsig-max
+ - rtsig-nr
+ - sched_energy_aware
+@@ -100,6 +101,7 @@ show up in /proc/sys/kernel:
+ - unknown_nmi_panic
+ - watchdog
+ - watchdog_thresh
++- yield_type
+ - version
+
+ ==============================================================
+@@ -881,6 +883,20 @@ rebooting. ???
+
+ ==============================================================
+
++rr_interval: (PDS CPU scheduler only)
++
++This is the smallest duration that any cpu process scheduling unit
++will run for. Increasing this value can increase throughput of cpu-bound
++tasks substantially, but at the expense of increased latencies
++overall. Conversely, decreasing it will decrease average and maximum
++latencies but at the expense of throughput. This value is in
++milliseconds and the default value chosen depends on the number of
++cpus available at scheduler initialisation, with a minimum of 6.
++
++Valid values are from 1 to 1000.
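++
++For example, writing 2 to /proc/sys/kernel/rr_interval trades throughput for
++lower worst-case latency, while a value such as 100 favours cpu-bound
++throughput at the cost of latency.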
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+
+ The file rtsig-max can be used to tune the maximum number
+@@ -1143,3 +1159,13 @@ The softlockup threshold is (2 * watchdog_thresh). Setting this
+ tunable to zero will disable lockup detection altogether.
+
+ ==============================================================
++
++yield_type: (PDS CPU scheduler only)
++
++This determines what type of yield a call to sched_yield() will perform.
++
++ 0 - No yield.
++ 1 - Yield only to better priority/deadline tasks. (default)
++ 2 - Expire timeslice and recalculate deadline.
++
++==============================================================
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+index e56b553de27b..ef94224efa5b 100644
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -51,11 +51,6 @@ static struct task_struct *spusched_task;
+ static struct timer_list spusched_timer;
+ static struct timer_list spuloadavg_timer;
+
+-/*
+- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+- */
+-#define NORMAL_PRIO 120
+-
+ /*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2bbbd4d1ba31..2ed85f4b5086 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1007,6 +1007,22 @@ config NR_CPUS
+ config SCHED_SMT
+ def_bool y if SMP
+
++config SMT_NICE
++ bool "SMT (Hyperthreading) aware nice priority and policy support"
++ depends on SCHED_PDS && SCHED_SMT
++ default y
++ ---help---
++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
++ of the use of 'nice' levels and different scheduling policies
++ (e.g. realtime) due to sharing of CPU power between hyperthreads.
++ SMT nice support makes each logical CPU aware of what is running on
++ its hyperthread siblings, maintaining appropriate distribution of
++ CPU according to nice levels and scheduling policies at the expense
++ of slightly increased overhead.
++
++	  If unsure, say Y here.
++
++
+ config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
+index b66e81c06a57..a294f8f5fd75 100644
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -28,8 +28,8 @@ struct cs_dbs_tuners {
+ };
+
+ /* Conservative governor macros */
+-#define DEF_FREQUENCY_UP_THRESHOLD (80)
+-#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
++#define DEF_FREQUENCY_UP_THRESHOLD (63)
++#define DEF_FREQUENCY_DOWN_THRESHOLD (26)
+ #define DEF_FREQUENCY_STEP (5)
+ #define DEF_SAMPLING_DOWN_FACTOR (1)
+ #define MAX_SAMPLING_DOWN_FACTOR (10)
+diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
+index dced033875bf..d2cd03766b09 100644
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -18,7 +18,7 @@
+ #include "cpufreq_ondemand.h"
+
+ /* On-demand governor macros */
+-#define DEF_FREQUENCY_UP_THRESHOLD (80)
++#define DEF_FREQUENCY_UP_THRESHOLD (63)
+ #define DEF_SAMPLING_DOWN_FACTOR (1)
+ #define MAX_SAMPLING_DOWN_FACTOR (100000)
+ #define MICRO_FREQUENCY_UP_THRESHOLD (95)
+@@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
+ }
+
+ /*
+- * Every sampling_rate, we check, if current idle time is less than 20%
++ * Every sampling_rate, we check if the current idle time is less than 37%
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
+ */
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 255f6754c70d..3d375c10ddc6 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -459,7 +459,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ seq_puts(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 6049baa5b8bc..87355efcc13d 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -47,7 +47,11 @@ extern struct cred init_cred;
+ #define INIT_CPU_TIMERS(s)
+ #endif
+
++#ifdef CONFIG_SCHED_PDS
++#define INIT_TASK_COMM "PDS"
++#else
+ #define INIT_TASK_COMM "swapper"
++#endif /* CONFIG_SCHED_PDS */
+
+ /* Attach to the init_task data structure for proper alignment */
+ #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index 1b6d31da7cbc..dea181bdb1dd 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -171,7 +171,7 @@ static inline u64 get_jiffies_64(void)
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
+
+ /*
+ * Change timeval to jiffies, trying to avoid the
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 11837410690f..9f1d95666df2 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -29,6 +29,7 @@
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
+ #include <linux/rseq.h>
++#include <linux/skip_list.h>
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -604,9 +605,13 @@ struct task_struct {
+ unsigned int flags;
+ unsigned int ptrace;
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS)
+ struct llist_node wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_PDS)
+ int on_cpu;
++#endif
++#ifdef CONFIG_SMP
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* Current CPU: */
+ unsigned int cpu;
+@@ -615,6 +620,7 @@ struct task_struct {
+ unsigned long wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
+
++#ifndef CONFIG_SCHED_PDS
+ /*
+ * recent_used_cpu is initially set as the last CPU used by a task
+ * that wakes affine another task. Waker/wakee relationships can
+@@ -623,6 +629,7 @@ struct task_struct {
+ * used CPU that may be idle.
+ */
+ int recent_used_cpu;
++#endif /* !CONFIG_SCHED_PDS */
+ int wake_cpu;
+ #endif
+ int on_rq;
+@@ -632,13 +639,27 @@ struct task_struct {
+ int normal_prio;
+ unsigned int rt_priority;
+
++#ifdef CONFIG_SCHED_PDS
++ int time_slice;
++ u64 deadline;
++ /* skip list level */
++ int sl_level;
++ /* skip list node */
++ struct skiplist_node sl_node;
++ /* 8bits prio and 56bits deadline for quick processing */
++ u64 priodl;
++ u64 last_ran;
++ /* sched_clock time spent running */
++ u64 sched_time;
++#else /* CONFIG_SCHED_PDS */
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++ struct sched_dl_entity dl;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+ #endif
+- struct sched_dl_entity dl;
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* List of struct preempt_notifier: */
+@@ -1217,6 +1238,29 @@ struct task_struct {
+ */
+ };
+
++#ifdef CONFIG_SCHED_PDS
++void cpu_scaling(int cpu);
++void cpu_nonscaling(int cpu);
++#define tsk_seruntime(t) ((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t) (0UL)
++
++#define task_running_idle(p) ((p)->prio == IDLE_PRIO)
++#else /* CFS */
++extern int runqueue_is_locked(int cpu);
++static inline void cpu_scaling(int cpu)
++{
++}
++
++static inline void cpu_nonscaling(int cpu)
++{
++}
++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t) ((t)->rt.timeout)
++
++#define iso_task(p) (false)
++#endif /* CONFIG_SCHED_PDS */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 0cb034331cbb..eb2d51ef8afa 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,22 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_PDS
++
++#define __tsk_deadline(p) ((p)->deadline)
++
++static inline int dl_prio(int prio)
++{
++ return 1;
++}
++
++static inline int dl_task(struct task_struct *p)
++{
++ return 1;
++}
++#else
++
++#define __tsk_deadline(p) ((p)->dl.deadline)
++
+ /*
+ * SCHED_DEADLINE tasks has negative priorities, reflecting
+ * the fact that any of them has higher prio than RT and
+@@ -19,6 +36,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_PDS */
+
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
+index b36f4cf38111..46bbab702a3b 100644
+--- a/include/linux/sched/nohz.h
++++ b/include/linux/sched/nohz.h
+@@ -6,7 +6,7 @@
+ * This is the interface between the scheduler and nohz/dynticks:
+ */
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_PDS)
+ extern void cpu_load_update_nohz_start(void);
+ extern void cpu_load_update_nohz_stop(void);
+ #else
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index 7d64feafc408..fba04bb91492 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -20,7 +20,18 @@
+ */
+
+ #define MAX_USER_RT_PRIO 100
++
++#ifdef CONFIG_SCHED_PDS
++#define ISO_PRIO (MAX_USER_RT_PRIO)
++
++#define MAX_RT_PRIO ((MAX_USER_RT_PRIO) + 1)
++
++#define NORMAL_PRIO (MAX_RT_PRIO)
++#define IDLE_PRIO ((MAX_RT_PRIO) + 1)
++#define PRIO_LIMIT ((IDLE_PRIO) + 1)
++#else /* !CONFIG_SCHED_PDS */
+ #define MAX_RT_PRIO MAX_USER_RT_PRIO
++#endif /* CONFIG_SCHED_PDS */
+
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..a96012e6f15e 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return true;
++#ifndef CONFIG_SCHED_PDS
+ if (policy == SCHED_DEADLINE)
+ return true;
++#endif
+ return false;
+ }
+
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index f1227f2c38a4..429689b92569 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -83,7 +83,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+ extern void free_task(struct task_struct *tsk);
+
+ /* sched_exec is called by processes performing an exec */
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS)
+ extern void sched_exec(void);
+ #else
+ #define sched_exec() {}
+diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
+new file mode 100644
+index 000000000000..713fedd8034f
+--- /dev/null
++++ b/include/linux/skip_list.h
+@@ -0,0 +1,177 @@
++/*
++ Copyright (C) 2016 Alfred Chen.
++
++ Code based on Con Kolivas's skip list implementation for BFS, which is
++ in turn based on an example originally by William Pugh.
++
++Skip Lists are a probabilistic alternative to balanced trees, as
++described in the June 1990 issue of CACM and were invented by
++William Pugh in 1987.
++
++A couple of comments about this implementation:
++
++This file only provides the infrastructure of a skip list.
++
++skiplist_node is embedded into the container data structure, to remove the
++dependency on kmalloc/kfree operations in scheduler code.
++
++A customized insert function should be defined using the
++DEFINE_SKIPLIST_INSERT_FUNC macro, together with a search function, and be
++used for the skip list insert operation.
++
++The random level is also not defined in this file; instead, it should be
++implemented by the caller, set in node->level, and then passed to the
++customized skiplist_insert function.
++
++Levels start at zero and go up to (NUM_SKIPLIST_LEVEL - 1).
++
++NUM_SKIPLIST_LEVEL in this implementation is 8 instead of the original 16,
++considering that 256 entries are needed to populate the top level with
++random level p=0.5, and that number is more than enough for run queue usage
++in a scheduler. It also helps to reduce the memory usage of the embedded
++skip list node in task_struct by about 50%.
++
++The insertion routine has been implemented so as to use the
++dirty hack described in the CACM paper: if a random level is
++generated that is more than the current maximum level, the
++current maximum level plus one is used instead.
++
++BFS Notes: In this implementation of skiplists, there are bidirectional
++next/prev pointers and the insert function returns a pointer to the actual
++node in which the value is stored. The key here is chosen by the scheduler
++so as to sort tasks according to the priority list requirements and is no
++longer used
++by the scheduler after insertion. The scheduler lookup, however, occurs in
++O(1) time because it is always the first item in the level 0 linked list.
++Since the task struct stores a copy of the node pointer upon skiplist_insert,
++it can also remove it much faster than the original implementation with the
++aid of prev<->next pointer manipulation and no searching.
++*/
++#ifndef _LINUX_SKIP_LIST_H
++#define _LINUX_SKIP_LIST_H
++
++#include <linux/kernel.h>
++
++#define NUM_SKIPLIST_LEVEL (8)
++
++struct skiplist_node {
++ int level; /* Levels in this node */
++ struct skiplist_node *next[NUM_SKIPLIST_LEVEL];
++ struct skiplist_node *prev[NUM_SKIPLIST_LEVEL];
++};
++
++#define SKIPLIST_NODE_INIT(name) { 0,\
++ {&name, &name, &name, &name,\
++ &name, &name, &name, &name},\
++ {&name, &name, &name, &name,\
++ &name, &name, &name, &name},\
++ }
++
++static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node)
++{
++	/* only level 0 ->next matters in skiplist_empty() */
++ WRITE_ONCE(node->next[0], node);
++}
++
++/**
++ * FULL_INIT_SKIPLIST_NODE -- fully initialize a skiplist_node, especially
++ * for a header
++ * @node: the skip list node to be initialized.
++ */
++static inline void FULL_INIT_SKIPLIST_NODE(struct skiplist_node *node)
++{
++ int i;
++
++ node->level = 0;
++ for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) {
++ WRITE_ONCE(node->next[i], node);
++ node->prev[i] = node;
++ }
++}
++
++/**
++ * skiplist_empty - test whether a skip list is empty
++ * @head: the skip list to test.
++ */
++static inline int skiplist_empty(const struct skiplist_node *head)
++{
++ return READ_ONCE(head->next[0]) == head;
++}
++
++/**
++ * skiplist_entry - get the struct for this entry
++ * @ptr: the &struct skiplist_node pointer.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the skiplist_node within the struct.
++ */
++#define skiplist_entry(ptr, type, member) \
++ container_of(ptr, type, member)
++
++/**
++ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert
++ * function, which takes two parameters, first one is the header node of the
++ * skip list, second one is the skip list node to be inserted
++ * @func_name: the customized skip list insert function name
++ * @search_func: the search function to be used, which takes two parameters,
++ * the 1st one is the iterator over the skiplist_nodes in the list, the 2nd
++ * is the skip list node to be inserted; the function should return true if
++ * the search should be continued, otherwise false.
++ * Returns 1 if @node is inserted as the first item of the skip list at level zero,
++ * otherwise 0
++ */
++#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\
++static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\
++{\
++ struct skiplist_node *update[NUM_SKIPLIST_LEVEL];\
++ struct skiplist_node *p, *q;\
++ int k = head->level;\
++\
++ p = head;\
++ do {\
++ while (q = p->next[k], q != head && search_func(q, node))\
++ p = q;\
++ update[k] = p;\
++ } while (--k >= 0);\
++\
++ k = node->level;\
++ if (unlikely(k > head->level)) {\
++ node->level = k = ++head->level;\
++ update[k] = head;\
++ }\
++\
++ do {\
++ p = update[k];\
++ q = p->next[k];\
++ node->next[k] = q;\
++ p->next[k] = node;\
++ node->prev[k] = p;\
++ q->prev[k] = node;\
++ } while (--k >= 0);\
++\
++ return (p == head);\
++}
++
++/**
++ * skiplist_del_init -- delete skip list node from a skip list and reset its
++ * init state
++ * @head: the header node of the skip list to be deleted from.
++ * @node: the skip list node to be deleted; the caller needs to ensure @node
++ * is in the skip list which @head represents.
++ * Returns 1 if @node was the first item of the skip list at level zero,
++ * otherwise 0
++ */
++static inline int
++skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node)
++{
++ int l, m = node->level;
++
++ for (l = 0; l <= m; l++) {
++ node->prev[l]->next[l] = node->next[l];
++ node->next[l]->prev[l] = node->prev[l];
++ }
++ if (m == head->level && m > 0) {
++ while (head->next[m] == head && m > 0)
++ m--;
++ head->level = m;
++ }
++ INIT_SKIPLIST_NODE(node);
++
++ return (node->prev[0] == head);
++}
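++
++/*
++ * Illustrative usage sketch (an editor's example with assumed names, not
++ * part of the original header): order container nodes by a u64 key.
++ *
++ *	struct item {
++ *		u64 key;
++ *		struct skiplist_node sl_node;
++ *	};
++ *
++ *	static inline bool
++ *	item_search_before(struct skiplist_node *it, struct skiplist_node *node)
++ *	{
++ *		return (skiplist_entry(it, struct item, sl_node)->key <=
++ *			skiplist_entry(node, struct item, sl_node)->key);
++ *	}
++ *
++ *	DEFINE_SKIPLIST_INSERT_FUNC(item_skiplist_insert, item_search_before);
++ *
++ * To insert, set item->sl_node.level to a randomly generated level and call
++ * item_skiplist_insert(&head, &item->sl_node); a return value of 1 means the
++ * node became the first entry at level zero. Remove with
++ * skiplist_del_init(&head, &item->sl_node).
++ */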
++#endif /* _LINUX_SKIP_LIST_H */
+diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
+index ed4ee170bee2..17ccfadf3455 100644
+--- a/include/uapi/linux/sched.h
++++ b/include/uapi/linux/sched.h
+@@ -38,7 +38,10 @@
+ #define SCHED_FIFO 1
+ #define SCHED_RR 2
+ #define SCHED_BATCH 3
+-/* SCHED_ISO: reserved but not implemented yet */
++/* SCHED_ISO: Implemented in BFS/MuQSS/PDS only */
++#ifdef CONFIG_SCHED_PDS
++#define SCHED_ISO 4
++#endif
+ #define SCHED_IDLE 5
+ #define SCHED_DEADLINE 6
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 0e2344389501..6463b6b6fab8 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -65,6 +65,21 @@ config THREAD_INFO_IN_TASK
+
+ menu "General setup"
+
++config SCHED_PDS
++ bool "PDS-mq cpu scheduler"
++ help
++ The Priority and Deadline based Skip list multiple queue CPU
++ Scheduler for excellent interactivity and responsiveness on the
++ desktop and solid scalability on normal hardware and commodity
++ servers.
++
++	  Currently incompatible with the Group CPU scheduler and RCU TORTURE
++	  TEST, so these options are disabled.
++
++ Say Y here.
++ default y
++
++
+ config BROKEN
+ bool
+
+@@ -711,6 +726,7 @@ config NUMA_BALANCING
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION
++ depends on !SCHED_PDS
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory when
+@@ -820,7 +836,7 @@ menuconfig CGROUP_SCHED
+ bandwidth allocation to such task groups. It uses cgroups to group
+ tasks.
+
+-if CGROUP_SCHED
++if CGROUP_SCHED && !SCHED_PDS
+ config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+@@ -927,6 +943,7 @@ config CGROUP_DEVICE
+
+ config CGROUP_CPUACCT
+ bool "Simple CPU accounting controller"
++ depends on !SCHED_PDS
+ help
+ Provides a simple controller for monitoring the
+ total CPU consumed by the tasks in a cgroup.
+@@ -1045,6 +1062,7 @@ config CHECKPOINT_RESTORE
+
+ config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
++ depends on !SCHED_PDS
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index c70ef656d0f4..051fb66f53b7 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -60,6 +60,125 @@ struct task_struct init_task
+ __init_task_data
+ #endif
+ = {
++#ifdef CONFIG_SCHED_PDS
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ .thread_info = INIT_THREAD_INFO(init_task),
++ .stack_refcount = ATOMIC_INIT(1),
++#endif
++ .state = 0,
++ .stack = init_stack,
++ .usage = ATOMIC_INIT(2),
++ .flags = PF_KTHREAD,
++ .prio = NORMAL_PRIO,
++ .static_prio = MAX_PRIO - 20,
++ .normal_prio = NORMAL_PRIO,
++ .deadline = 0, /* PDS only */
++ .policy = SCHED_NORMAL,
++ .cpus_allowed = CPU_MASK_ALL,
++ .nr_cpus_allowed= NR_CPUS,
++ .mm = NULL,
++ .active_mm = &init_mm,
++ .restart_block = {
++ .fn = do_no_restart_syscall,
++ },
++ .sl_level = 0, /* PDS only */
++ .sl_node = SKIPLIST_NODE_INIT(init_task.sl_node), /* PDS only */
++ .time_slice = HZ, /* PDS only */
++ .tasks = LIST_HEAD_INIT(init_task.tasks),
++#ifdef CONFIG_SMP
++ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
++#endif
++#ifdef CONFIG_CGROUP_SCHED
++ .sched_task_group = &root_task_group,
++#endif
++ .ptraced = LIST_HEAD_INIT(init_task.ptraced),
++ .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
++ .real_parent = &init_task,
++ .parent = &init_task,
++ .children = LIST_HEAD_INIT(init_task.children),
++ .sibling = LIST_HEAD_INIT(init_task.sibling),
++ .group_leader = &init_task,
++ RCU_POINTER_INITIALIZER(real_cred, &init_cred),
++ RCU_POINTER_INITIALIZER(cred, &init_cred),
++ .comm = INIT_TASK_COMM,
++ .thread = INIT_THREAD,
++ .fs = &init_fs,
++ .files = &init_files,
++ .signal = &init_signals,
++ .sighand = &init_sighand,
++ .nsproxy = &init_nsproxy,
++ .pending = {
++ .list = LIST_HEAD_INIT(init_task.pending.list),
++ .signal = {{0}}
++ },
++ .blocked = {{0}},
++ .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
++ .journal_info = NULL,
++ INIT_CPU_TIMERS(init_task)
++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
++ .timer_slack_ns = 50000, /* 50 usec default slack */
++ .thread_pid = &init_struct_pid,
++ .thread_group = LIST_HEAD_INIT(init_task.thread_group),
++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head),
++#ifdef CONFIG_AUDITSYSCALL
++ .loginuid = INVALID_UID,
++ .sessionid = AUDIT_SID_UNSET,
++#endif
++#ifdef CONFIG_PERF_EVENTS
++ .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
++ .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
++#endif
++#ifdef CONFIG_PREEMPT_RCU
++ .rcu_read_lock_nesting = 0,
++ .rcu_read_unlock_special.s = 0,
++ .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
++ .rcu_blocked_node = NULL,
++#endif
++#ifdef CONFIG_TASKS_RCU
++ .rcu_tasks_holdout = false,
++ .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
++ .rcu_tasks_idle_cpu = -1,
++#endif
++#ifdef CONFIG_CPUSETS
++ .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
++#endif
++#ifdef CONFIG_RT_MUTEXES
++ .pi_waiters = RB_ROOT_CACHED,
++ .pi_top_task = NULL,
++#endif
++ INIT_PREV_CPUTIME(init_task)
++#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
++ .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
++ .vtime.starttime = 0,
++ .vtime.state = VTIME_SYS,
++#endif
++#ifdef CONFIG_NUMA_BALANCING
++ .numa_preferred_nid = -1,
++ .numa_group = NULL,
++ .numa_faults = NULL,
++#endif
++#ifdef CONFIG_KASAN
++ .kasan_depth = 1,
++#endif
++#ifdef CONFIG_TRACE_IRQFLAGS
++ .softirqs_enabled = 1,
++#endif
++#ifdef CONFIG_LOCKDEP
++ .lockdep_recursion = 0,
++#endif
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++ .ret_stack = NULL,
++#endif
++#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
++ .trace_recursion = 0,
++#endif
++#ifdef CONFIG_LIVEPATCH
++ .patch_state = KLP_UNDEFINED,
++#endif
++#ifdef CONFIG_SECURITY
++ .security = NULL,
++#endif
++#else /* CONFIG_SCHED_PDS */
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ .thread_info = INIT_THREAD_INFO(init_task),
+ .stack_refcount = REFCOUNT_INIT(1),
+@@ -180,6 +299,7 @@ struct task_struct init_task
+ #ifdef CONFIG_SECURITY
+ .security = NULL,
+ #endif
++#endif /* CONFIG_SCHED_PDS */
+ };
+ EXPORT_SYMBOL(init_task);
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 515525ff1cfd..406c8f07fbe9 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -673,7 +673,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ return ret;
+ }
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS)
+ /*
+ * Helper routine for generate_sched_domains().
+ * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -988,7 +988,7 @@ static void rebuild_sched_domains_locked(void)
+ out:
+ put_online_cpus();
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_PDS */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 27725754ac99..769d773c7182 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ */
+ t1 = tsk->sched_info.pcount;
+ t2 = tsk->sched_info.run_delay;
+- t3 = tsk->se.sum_exec_runtime;
++ t3 = tsk_seruntime(tsk);
+
+ d->cpu_count += t1;
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index a75b6a7f458a..b63fdd44efe1 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -131,7 +131,7 @@ static void __exit_signal(struct task_struct *tsk)
+ sig->curr_target = next_thread(tsk);
+ }
+
+- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ add_device_randomness((const void*) &tsk_seruntime(tsk),
+ sizeof(unsigned long long));
+
+ /*
+@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk)
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++ sig->sum_sched_runtime += tsk_seruntime(tsk);
+ sig->nr_threads--;
+ __unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+index abb2a4a2cbb2..d8a53ca034f5 100644
+--- a/kernel/livepatch/transition.c
++++ b/kernel/livepatch/transition.c
+@@ -300,7 +300,11 @@ static bool klp_try_switch_task(struct task_struct *task)
+ */
+ rq = task_rq_lock(task, &flags);
+
++#ifdef CONFIG_SCHED_PDS
++ if (task_running(task) && task != current) {
++#else
+ if (task_running(rq, task) && task != current) {
++#endif
+ snprintf(err_buf, STACK_ERR_BUF_SIZE,
+ "%s: %s:%d is running\n", __func__, task->comm,
+ task->pid);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 38fbf9fa7f1b..0b86890edb63 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -229,7 +229,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+ * Only use with rt_mutex_waiter_{less,equal}()
+ */
+ #define task_to_waiter(p) \
+- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) }
+
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -681,7 +681,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * the values of the node being removed.
+ */
+ waiter->prio = task->prio;
+- waiter->deadline = task->dl.deadline;
++ waiter->deadline = __tsk_deadline(task);
+
+ rt_mutex_enqueue(lock, waiter);
+
+@@ -955,7 +955,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ waiter->task = task;
+ waiter->lock = lock;
+ waiter->prio = task->prio;
+- waiter->deadline = task->dl.deadline;
++ waiter->deadline = __tsk_deadline(task);
+
+ /* Get the top priority waiter on the lock */
+ if (rt_mutex_has_waiters(lock))
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 21fb5a5662b5..8ebe4e33fb5f 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -16,15 +16,21 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+
+-obj-y += core.o loadavg.o clock.o cputime.o
+-obj-y += idle.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o
+-
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
++ifdef CONFIG_SCHED_PDS
++obj-y += pds.o
++else
++obj-y += core.o
++obj-y += fair.o rt.o deadline.o
++obj-$(CONFIG_SMP) += cpudeadline.o topology.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
++endif
++obj-y += loadavg.o clock.o cputime.o
++obj-y += idle.o
++obj-y += wait.o wait_bit.o swait.o completion.o
++obj-$(CONFIG_SMP) += cpupri.o pelt.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+ obj-$(CONFIG_MEMBARRIER) += membarrier.o
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 962cf343f798..2821ce592b89 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -176,6 +176,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ return cpufreq_driver_resolve_freq(policy, freq);
+ }
+
++#ifndef CONFIG_SCHED_PDS
+ /*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+@@ -283,6 +284,13 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+
+ return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
+ }
++#else /* CONFIG_SCHED_PDS */
++static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
++{
++ sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
++ return sg_cpu->max;
++}
++#endif
+
+ /**
+ * sugov_iowait_reset() - Reset the IO boost status of a CPU.
+@@ -426,7 +434,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+ */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+ {
++#ifndef CONFIG_SCHED_PDS
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
++#endif
+ sg_policy->need_freq_update = true;
+ }
+
+@@ -668,6 +678,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ }
+
+ ret = sched_setattr_nocheck(thread, &attr);
++
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -897,6 +908,7 @@ static int __init sugov_register(void)
+ fs_initcall(sugov_register);
+
+ #ifdef CONFIG_ENERGY_MODEL
++#ifndef CONFIG_SCHED_PDS
+ extern bool sched_energy_update;
+ extern struct mutex sched_energy_mutex;
+
+@@ -927,4 +939,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ }
+
+ }
++#else /* CONFIG_SCHED_PDS */
++void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
++ struct cpufreq_governor *old_gov)
++{
++}
++#endif
+ #endif
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 2305ce89a26c..b730af0e679e 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -122,7 +122,12 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ p->utime += cputime;
+ account_group_user_time(p, cputime);
+
++#ifdef CONFIG_SCHED_PDS
++ index = (task_nice(p) > 0 || task_running_idle(p)) ? CPUTIME_NICE :
++ CPUTIME_USER;
++#else
+ index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++#endif
+
+ /* Add user time to cpustat. */
+ task_group_account_field(p, index, cputime);
+@@ -146,7 +151,11 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ p->gtime += cputime;
+
+ /* Add guest time to cpustat. */
++#ifdef CONFIG_SCHED_PDS
++ if (task_nice(p) > 0 || task_running_idle(p)) {
++#else
+ if (task_nice(p) > 0) {
++#endif
+ cpustat[CPUTIME_NICE] += cputime;
+ cpustat[CPUTIME_GUEST_NICE] += cputime;
+ } else {
+@@ -269,7 +278,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+- return t->se.sum_exec_runtime;
++ return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -279,7 +288,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ struct rq *rq;
+
+ rq = task_rq_lock(t, &rf);
+- ns = t->se.sum_exec_runtime;
++ ns = tsk_seruntime(t);
+ task_rq_unlock(rq, t, &rf);
+
+ return ns;
+@@ -663,7 +672,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ struct task_cputime cputime = {
+- .sum_exec_runtime = p->se.sum_exec_runtime,
++ .sum_exec_runtime = tsk_seruntime(p),
+ };
+
+ task_cputime(p, &cputime.utime, &cputime.stime);
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 80940939b733..3e09d3ed3fa3 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -354,6 +354,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ do_idle();
+ }
+
++#ifndef CONFIG_SCHED_PDS
+ /*
+ * idle-task scheduling class.
+ */
+@@ -466,3 +467,4 @@ const struct sched_class idle_sched_class = {
+ .switched_to = switched_to_idle,
+ .update_curr = update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c
+new file mode 100644
+index 000000000000..3d9cab6ef354
+--- /dev/null
++++ b/kernel/sched/pds.c
+@@ -0,0 +1,6496 @@
++/*
++ * kernel/sched/pds.c, was kernel/sched.c
++ *
++ * PDS-mq Core kernel scheduler code and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
++ * a whole lot of those previous things.
++ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel
++ * scheduler by Alfred Chen.
++ */
++#include "pds_sched.h"
++
++#include <linux/sched/rt.h>
++
++#include <linux/context_tracking.h>
++#include <linux/compat.h>
++#include <linux/blkdev.h>
++#include <linux/delayacct.h>
++#include <linux/freezer.h>
++#include <linux/init_task.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/nmi.h>
++#include <linux/profile.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/wait_bit.h>
++
++#include <linux/kcov.h>
++
++#include <asm/switch_to.h>
++
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#include "pelt.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++
++#define rt_prio(prio) ((prio) < MAX_RT_PRIO)
++#define rt_task(p) rt_prio((p)->prio)
++#define rt_policy(policy) ((policy) == SCHED_FIFO || \
++ (policy) == SCHED_RR || \
++ (policy) == SCHED_ISO)
++#define task_has_rt_policy(p) (rt_policy((p)->policy))
++
++#define idle_policy(policy) ((policy) == SCHED_IDLE)
++#define idleprio_task(p) unlikely(idle_policy((p)->policy))
++
++#define STOP_PRIO (MAX_RT_PRIO - 1)
++
++/*
++ * Some helpers for converting to/from various scales. Use shifts to get
++ * approximate multiples of ten for less overhead.
++ */
++#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
++#define JIFFY_NS (1000000000 / HZ)
++#define HALF_JIFFY_NS (1000000000 / HZ / 2)
++#define HALF_JIFFY_US (1000000 / HZ / 2)
++#define MS_TO_NS(TIME) ((TIME) << 20)
++#define MS_TO_US(TIME) ((TIME) << 10)
++#define NS_TO_MS(TIME) ((TIME) >> 20)
++#define NS_TO_US(TIME) ((TIME) >> 10)
++#define US_TO_NS(TIME) ((TIME) << 10)
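++
++/*
++ * For example, MS_TO_NS(T) = T << 20 multiplies by 1048576 rather than
++ * 10^6 (about 4.9% high); the small error is acceptable here since a shift
++ * is cheaper than a multiply.
++ */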
++
++#define RESCHED_US (100) /* Reschedule if less than this many μs left */
++
++enum {
++ BASE_CPU_AFFINITY_CHK_LEVEL = 1,
++#ifdef CONFIG_SCHED_SMT
++ SMT_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER,
++#endif
++#ifdef CONFIG_SCHED_MC
++ MC_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER,
++#endif
++ NR_CPU_AFFINITY_CHK_LEVEL
++};
++
++static inline void print_scheduler_version(void)
++{
++ printk(KERN_INFO "pds: PDS-mq CPU Scheduler 0.99o by Alfred Chen.\n");
++}
++
++/*
++ * This is the time all tasks within the same priority round robin.
++ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus.
++ * Tunable via /proc interface.
++ */
++#define SCHED_DEFAULT_RR (4)
++int rr_interval __read_mostly = SCHED_DEFAULT_RR;
++
++static int __init rr_interval_set(char *str)
++{
++ u32 rr;
++
++ pr_info("rr_interval: ");
++ if (kstrtouint(str, 0, &rr)) {
++ pr_cont("using default of %u, unable to parse %s\n",
++ rr_interval, str);
++ return 1;
++ }
++
++ rr_interval = rr;
++ pr_cont("%d\n", rr_interval);
++
++ return 1;
++}
++__setup("rr_interval=", rr_interval_set);
++
++
++static const u64 sched_prio2deadline[NICE_WIDTH] = {
++/* -20 */ 6291456, 6920601, 7612661, 8373927, 9211319,
++/* -15 */ 10132450, 11145695, 12260264, 13486290, 14834919,
++/* -10 */ 16318410, 17950251, 19745276, 21719803, 23891783,
++/* -5 */ 26280961, 28909057, 31799962, 34979958, 38477953,
++/* 0 */ 42325748, 46558322, 51214154, 56335569, 61969125,
++/* 5 */ 68166037, 74982640, 82480904, 90728994, 99801893,
++/* 10 */ 109782082, 120760290, 132836319, 146119950, 160731945,
++/* 15 */ 176805139, 194485652, 213934217, 235327638, 258860401
++};
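++
++/*
++ * Each entry above is roughly the previous one multiplied by 1.1, i.e. each
++ * nice level lengthens the deadline offset by ~10%; for example the nice 0
++ * value (42325748) is about 1.1^20 (~6.7x) the nice -20 value (6291456).
++ */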
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Yield only to better priority/deadline tasks. (default)
++ * 2: Expire timeslice and recalculate deadline.
++ */
++int sched_yield_type __read_mostly = 1;
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++ return MS_TO_US(rr_interval);
++}
++
++#ifdef CONFIG_SMP
++enum {
++	SCHED_RQ_EMPTY = 0,
++	SCHED_RQ_IDLE,
++	SCHED_RQ_NORMAL_0,
++	SCHED_RQ_NORMAL_1,
++	SCHED_RQ_NORMAL_2,
++	SCHED_RQ_NORMAL_3,
++	SCHED_RQ_NORMAL_4,
++	SCHED_RQ_NORMAL_5,
++	SCHED_RQ_NORMAL_6,
++	SCHED_RQ_NORMAL_7,
++	SCHED_RQ_ISO,
++	SCHED_RQ_RT,
++	NR_SCHED_RQ_QUEUED_LEVEL
++};
++
++static cpumask_t sched_rq_queued_masks[NR_SCHED_RQ_QUEUED_LEVEL]
++____cacheline_aligned_in_smp;
++
++static DECLARE_BITMAP(sched_rq_queued_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL)
++____cacheline_aligned_in_smp;
++
++static cpumask_t sched_rq_pending_masks[NR_SCHED_RQ_QUEUED_LEVEL]
++____cacheline_aligned_in_smp;
++
++static DECLARE_BITMAP(sched_rq_pending_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL)
++____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_CHK_LEVEL], sched_cpu_affinity_chk_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_start_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_chk_end_masks);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_PER_CPU(int, sched_sibling_cpu);
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++
++static cpumask_t sched_cpu_sg_idle_mask ____cacheline_aligned_in_smp;
++
++#ifdef CONFIG_SMT_NICE
++/*
++ * Preemptible sibling group mask
++ * Set for cpus whose sibling cpus are all running at PRIO_LIMIT or IDLE_PRIO
++ */
++static cpumask_t sched_cpu_psg_mask ____cacheline_aligned_in_smp;
++/*
++ * SMT suppressed mask
++ * When a cpu is running a task with NORMAL/ISO/RT policy, its sibling cpu
++ * will be suppressed from running IDLE priority tasks.
++ */
++static cpumask_t sched_smt_supressed_mask ____cacheline_aligned_in_smp;
++#endif /* CONFIG_SMT_NICE */
++#endif
++
++static int sched_rq_prio[NR_CPUS] ____cacheline_aligned;
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++
++int __weak arch_sd_sibling_asym_packing(void)
++{
++ return 0*SD_ASYM_PACKING;
++}
++#else
++struct rq *uprq;
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next) do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch() do { } while (0)
++#endif
++
++/*
++ * Lock the rq that @p is on (or was last on), guarding against concurrent
++ * wakeup, dequeue and migration. On return, *plock points to the acquired
++ * rq lock, or is NULL when the task is neither on a cpu nor on a run queue
++ * and no rq lock was taken.
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++ struct rq *rq;
++ for (;;) {
++ rq = task_rq(p);
++ if (p->on_cpu || task_on_rq_queued(p)) {
++ raw_spin_lock(&rq->lock);
++ if (likely((p->on_cpu || task_on_rq_queued(p))
++ && rq == task_rq(p))) {
++ *plock = &rq->lock;
++ return rq;
++ }
++ raw_spin_unlock(&rq->lock);
++ } else if (task_on_rq_migrating(p)) {
++ do {
++ cpu_relax();
++ } while (unlikely(task_on_rq_migrating(p)));
++ } else {
++ *plock = NULL;
++ return rq;
++ }
++ }
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++ if (NULL != lock)
++ raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++ unsigned long *flags)
++{
++ struct rq *rq;
++ for (;;) {
++ rq = task_rq(p);
++ if (p->on_cpu || task_on_rq_queued(p)) {
++ raw_spin_lock_irqsave(&rq->lock, *flags);
++ if (likely((p->on_cpu || task_on_rq_queued(p))
++ && rq == task_rq(p))) {
++ *plock = &rq->lock;
++ return rq;
++ }
++ raw_spin_unlock_irqrestore(&rq->lock, *flags);
++ } else if (task_on_rq_migrating(p)) {
++ do {
++ cpu_relax();
++ } while (unlikely(task_on_rq_migrating(p)));
++ } else {
++ raw_spin_lock_irqsave(&p->pi_lock, *flags);
++ if (likely(!p->on_cpu && !p->on_rq &&
++ rq == task_rq(p))) {
++ *plock = &p->pi_lock;
++ return rq;
++ }
++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++ }
++ }
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++ unsigned long *flags)
++{
++ raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ lockdep_assert_held(&p->pi_lock);
++
++ for (;;) {
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++ return rq;
++ raw_spin_unlock(&rq->lock);
++
++ while (unlikely(task_on_rq_migrating(p)))
++ cpu_relax();
++ }
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++ __acquires(p->pi_lock)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ for (;;) {
++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++ rq = task_rq(p);
++ raw_spin_lock(&rq->lock);
++ /*
++ * move_queued_task() task_rq_lock()
++ *
++ * ACQUIRE (rq->lock)
++ * [S] ->on_rq = MIGRATING [L] rq = task_rq()
++ * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
++ * [S] ->cpu = new_cpu [L] task_rq()
++ * [L] ->on_rq
++ * RELEASE (rq->lock)
++ *
++ * If we observe the old CPU in task_rq_lock(), the acquire of
++ * the old rq->lock will fully serialize against the stores.
++ *
++ * If we observe the new CPU in task_rq_lock(), the address
++ * dependency headed by '[L] rq = task_rq()' and the acquire
++ * will pair with the WMB to ensure we then also see migrating.
++ */
++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++ return rq;
++ }
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++ while (unlikely(task_on_rq_migrating(p)))
++ cpu_relax();
++ }
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++ s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++ /*
++ * Since irq_time is only updated on {soft,}irq_exit, we might run into
++ * this case when a previous update_rq_clock() happened inside a
++ * {soft,}irq region.
++ *
++ * When this happens, we stop ->clock_task and only update the
++ * prev_irq_time stamp to account for the part that fit, so that a next
++ * update will consume the rest. This ensures ->clock_task is
++ * monotonic.
++ *
++ * It does however cause some slight miss-attribution of {soft,}irq
++ * time, a more accurate solution would be to update the irq_time using
++ * the current rq->clock timestamp, except that would require using
++ * atomic ops.
++ */
++ if (irq_delta > delta)
++ irq_delta = delta;
++
++ rq->prev_irq_time += irq_delta;
++ delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ if (static_key_false((¶virt_steal_rq_enabled))) {
++ steal = paravirt_steal_clock(cpu_of(rq));
++ steal -= rq->prev_steal_time_rq;
++
++ if (unlikely(steal > delta))
++ steal = delta;
++
++ rq->prev_steal_time_rq += steal;
++
++ delta -= steal;
++ }
++#endif
++
++ rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++ if ((irq_delta + steal))
++ update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++ if (unlikely(delta <= 0))
++ return;
++ rq->clock += delta;
++ update_rq_clock_task(rq, delta);
++}
++
++static inline void update_task_priodl(struct task_struct *p)
++{
++ p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8);
++}
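++
++/*
++ * A sketch of what the packing above buys (the comparison helper is an
++ * editor's illustration, not code from this patch): with the 8-bit prio in
++ * the top bits and the high 56 bits of the deadline below it, one u64
++ * compare orders tasks by priority first, then by deadline:
++ *
++ *	static inline bool task_order_before(struct task_struct *a,
++ *					     struct task_struct *b)
++ *	{
++ *		return a->priodl < b->priodl;
++ *	}
++ */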
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes CPU fairly amongst tasks of the
++ * same nice value, it proportions CPU according to nice level, it means the
++ * task that last woke up the longest ago has the earliest deadline, thus
++ * ensuring that interactive tasks get low latency on wake up. The CPU
++ * proportion works out to the square of the virtual deadline difference, so
++ * this equation will give a nice 19 task about 3% CPU compared to nice 0.
++ */
++static inline u64 task_deadline_diff(const struct task_struct *p)
++{
++ return sched_prio2deadline[TASK_USER_PRIO(p)];
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++ return sched_prio2deadline[USER_PRIO(static_prio)];
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline for non-rt tasks.
++ */
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++ p->time_slice = timeslice();
++ if (p->prio >= NORMAL_PRIO)
++ p->deadline = rq->clock + task_deadline_diff(p);
++
++ update_task_priodl(p);
++}
++
++static inline struct task_struct *rq_first_queued_task(struct rq *rq)
++{
++ struct skiplist_node *node = rq->sl_header.next[0];
++
++ if (node == &rq->sl_header)
++ return rq->idle;
++
++ return skiplist_entry(node, struct task_struct, sl_node);
++}
++
++static inline struct task_struct *rq_second_queued_task(struct rq *rq)
++{
++ struct skiplist_node *node = rq->sl_header.next[0]->next[0];
++
++ if (node == &rq->sl_header)
++ return rq->idle;
++
++ return skiplist_entry(node, struct task_struct, sl_node);
++}
++
++static inline int is_second_in_rq(struct task_struct *p, struct rq *rq)
++{
++ return (p->sl_node.prev[0]->prev[0] == &rq->sl_header);
++}
++
++static const int task_dl_hash_tbl[] = {
++/* 0 4 8 12 */
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
++/* 16 20 24 28 */
++ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 7
++};
++
++static inline int
++task_deadline_level(const struct task_struct *p, const struct rq *rq)
++{
++ u64 delta = (rq->clock + sched_prio2deadline[39] - p->deadline) >> 23;
++
++ delta = min((size_t)delta, ARRAY_SIZE(task_dl_hash_tbl) - 1);
++ return task_dl_hash_tbl[delta];
++}
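++
++/*
++ * Note on the shift above: >> 23 buckets the remaining deadline in units of
++ * 2^23 ns (~8.4 ms), and the min() clamp keeps delta inside the 32-entry
++ * table, which compresses those buckets into the 8 SCHED_RQ_NORMAL_* levels.
++ */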
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask) \
++ ({ \
++ typeof(ptr) _ptr = (ptr); \
++ typeof(mask) _mask = (mask); \
++ typeof(*_ptr) _old, _val = *_ptr; \
++ \
++ for (;;) { \
++ _old = cmpxchg(_ptr, _val, _val | _mask); \
++ if (_old == _val) \
++ break; \
++ _val = _old; \
++ } \
++ _old; \
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++ for (;;) {
++ if (!(val & _TIF_POLLING_NRFLAG))
++ return false;
++ if (val & _TIF_NEED_RESCHED)
++ return true;
++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++ if (old == val)
++ break;
++ val = old;
++ }
++ return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ return false;
++}
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_SMT_NICE
++static void resched_cpu_if_curr_is(int cpu, int priority)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ rcu_read_lock();
++
++ if (rcu_dereference(rq->curr)->prio != priority)
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ if (!do_raw_spin_trylock(&rq->lock))
++ goto out;
++ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++ if (priority == rq->curr->prio)
++ smp_send_reschedule(cpu);
++ /* Else CPU is not idle, do nothing here */
++
++ spin_release(&rq->lock.dep_map, 1, _RET_IP_);
++ do_raw_spin_unlock(&rq->lock);
++ }
++
++out:
++ rcu_read_unlock();
++}
++#endif /* CONFIG_SMT_NICE */
++
++static inline bool
++__update_cpumasks_bitmap(int cpu, unsigned long *plevel, unsigned long level,
++ cpumask_t cpumasks[], unsigned long bitmap[])
++{
++ if (*plevel == level)
++ return false;
++
++ cpumask_clear_cpu(cpu, cpumasks + *plevel);
++ if (cpumask_empty(cpumasks + *plevel))
++ clear_bit(*plevel, bitmap);
++ cpumask_set_cpu(cpu, cpumasks + level);
++ set_bit(level, bitmap);
++
++ *plevel = level;
++
++ return true;
++}
++
++static inline int
++task_running_policy_level(const struct task_struct *p, const struct rq *rq)
++{
++ int prio = p->prio;
++
++ if (NORMAL_PRIO == prio)
++ return SCHED_RQ_NORMAL_0 + task_deadline_level(p, rq);
++
++ if (ISO_PRIO == prio)
++ return SCHED_RQ_ISO;
++ if (prio < MAX_RT_PRIO)
++ return SCHED_RQ_RT;
++ return PRIO_LIMIT - prio;
++}
++
++static inline void update_sched_rq_queued_masks_normal(struct rq *rq)
++{
++ struct task_struct *p = rq_first_queued_task(rq);
++
++ if (p->prio != NORMAL_PRIO)
++ return;
++
++ __update_cpumasks_bitmap(cpu_of(rq), &rq->queued_level,
++ task_running_policy_level(p, rq),
++ &sched_rq_queued_masks[0],
++ &sched_rq_queued_masks_bitmap[0]);
++}
++
++#ifdef CONFIG_SMT_NICE
++static inline void update_sched_cpu_psg_mask(const int cpu)
++{
++ cpumask_t tmp;
++
++ cpumask_or(&tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY],
++ &sched_rq_queued_masks[SCHED_RQ_IDLE]);
++ cpumask_and(&tmp, &tmp, cpu_smt_mask(cpu));
++ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++ cpumask_or(&sched_cpu_psg_mask, &sched_cpu_psg_mask,
++ cpu_smt_mask(cpu));
++ else
++ cpumask_andnot(&sched_cpu_psg_mask, &sched_cpu_psg_mask,
++ cpu_smt_mask(cpu));
++}
++#endif
++
++static inline void update_sched_rq_queued_masks(struct rq *rq)
++{
++ int cpu = cpu_of(rq);
++ struct task_struct *p = rq_first_queued_task(rq);
++ unsigned long level;
++#ifdef CONFIG_SCHED_SMT
++ unsigned long last_level = rq->queued_level;
++#endif
++
++ level = task_running_policy_level(p, rq);
++ sched_rq_prio[cpu] = p->prio;
++
++ if (!__update_cpumasks_bitmap(cpu, &rq->queued_level, level,
++ &sched_rq_queued_masks[0],
++ &sched_rq_queued_masks_bitmap[0]))
++ return;
++
++#ifdef CONFIG_SCHED_SMT
++ if (cpu == per_cpu(sched_sibling_cpu, cpu))
++ return;
++
++ if (SCHED_RQ_EMPTY == last_level) {
++ cpumask_andnot(&sched_cpu_sg_idle_mask, &sched_cpu_sg_idle_mask,
++ cpu_smt_mask(cpu));
++ } else if (SCHED_RQ_EMPTY == level) {
++ cpumask_t tmp;
++
++ cpumask_and(&tmp, cpu_smt_mask(cpu),
++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]);
++ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++ cpumask_or(&sched_cpu_sg_idle_mask, cpu_smt_mask(cpu),
++ &sched_cpu_sg_idle_mask);
++ }
++
++#ifdef CONFIG_SMT_NICE
++ if (level <= SCHED_RQ_IDLE && last_level > SCHED_RQ_IDLE) {
++ cpumask_clear_cpu(per_cpu(sched_sibling_cpu, cpu),
++ &sched_smt_supressed_mask);
++ update_sched_cpu_psg_mask(cpu);
++ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), PRIO_LIMIT);
++ } else if (last_level <= SCHED_RQ_IDLE && level > SCHED_RQ_IDLE) {
++ cpumask_set_cpu(per_cpu(sched_sibling_cpu, cpu),
++ &sched_smt_supressed_mask);
++ update_sched_cpu_psg_mask(cpu);
++ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), IDLE_PRIO);
++ }
++#endif /* CONFIG_SMT_NICE */
++#endif
++}
++
++static inline void update_sched_rq_pending_masks(struct rq *rq)
++{
++ unsigned long level;
++ struct task_struct *p = rq_second_queued_task(rq);
++
++ level = task_running_policy_level(p, rq);
++
++ __update_cpumasks_bitmap(cpu_of(rq), &rq->pending_level, level,
++ &sched_rq_pending_masks[0],
++ &sched_rq_pending_masks_bitmap[0]);
++}
++
++#else /* CONFIG_SMP */
++static inline void update_sched_rq_queued_masks(struct rq *rq) {}
++static inline void update_sched_rq_queued_masks_normal(struct rq *rq) {}
++static inline void update_sched_rq_pending_masks(struct rq *rq) {}
++#endif
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If the tick is needed, let's send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++ int cpu;
++
++ if (!tick_nohz_full_enabled())
++ return;
++
++ cpu = cpu_of(rq);
++
++ if (!tick_nohz_full_cpu(cpu))
++ return;
++
++ if (rq->nr_running < 2)
++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++ else
++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++/*
++ * Removing from the runqueue. Deleting a task from the skip list is done
++ * via the stored node reference in the task struct and does not require a full
++ * look up. Thus it occurs in O(k) time where k is the "level" of the list the
++ * task was stored at - usually < 4, max 16.
++ *
++ * Context: rq->lock
++ */
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++ lockdep_assert_held(&rq->lock);
++
++ WARN_ONCE(task_rq(p) != rq, "pds: dequeue task reside on cpu%d from cpu%d\n",
++ task_cpu(p), cpu_of(rq));
++ if (skiplist_del_init(&rq->sl_header, &p->sl_node)) {
++ update_sched_rq_queued_masks(rq);
++ update_sched_rq_pending_masks(rq);
++ } else if (is_second_in_rq(p, rq))
++ update_sched_rq_pending_masks(rq);
++ rq->nr_running--;
++
++ sched_update_tick_dependency(rq);
++ psi_dequeue(p, flags & DEQUEUE_SLEEP);
++
++ sched_info_dequeued(rq, p);
++}
++
++/*
++ * To determine if it's safe for a task of SCHED_IDLE to actually run as
++ * an idle task, we check that it is not freezing, has no signal pending,
++ * does not contribute to load and is not exiting.
++ */
++static inline bool idleprio_suitable(struct task_struct *p)
++{
++ return (!freezing(p) && !signal_pending(p) &&
++ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));
++}
++
++/*
++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip
++ * list node which is used in PDS run queue.
++ *
++ * In the current implementation, based on testing, the first 8 bits of the
++ * microsecond value of niffies are suitable for populating the random level.
++ * find_first_bit() is used to satisfy p = 0.5 between successive levels, and
++ * most platforms have a hardware instruction (known as ctz/clz) that speeds
++ * this function up.
++ * The skiplist level for a task is populated when the task is created and
++ * doesn't change during the task's lifetime. When the task is inserted into
++ * a run queue, this level is copied to the task's sl_node->level; the
++ * skiplist insert function may change it based on the current level of the
++ * skip list.
++ */
++static inline int pds_skiplist_random_level(const struct task_struct *p)
++{
++ unsigned long randseed;
++
++ /*
++ * 1. Some architectures don't have better than microsecond resolution
++ * so mask out ~microseconds as a factor of the random seed for skiplist
++ * insertion.
++ * 2. Use address of task structure pointer as another factor of the
++ * random seed for task burst forking scenario.
++ */
++ randseed = (task_rq(p)->clock ^ (unsigned long)p) >> 10;
++
++ return find_first_bit(&randseed, NUM_SKIPLIST_LEVEL - 1);
++}
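++
++/*
++ * Distribution sketch (illustrative only): find_first_bit() on a uniformly
++ * random seed returns level 0 with probability 1/2, level 1 with 1/4, and
++ * level k with 2^-(k+1) -- the geometric p = 0.5 distribution a skip list
++ * wants. E.g. a randseed ending in binary ...1000 yields level 3.
++ */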
++
++/**
++ * pds_skiplist_task_search -- search function used in PDS run queue skip list
++ * node insert operation.
++ * @it: iterator pointer to the node in the skip list
++ * @node: pointer to the skiplist_node to be inserted
++ *
++ * Returns true if the key of @it is less than or equal to the key of
++ * @node, otherwise false.
++ */
++static inline bool
++pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node)
++{
++ return (skiplist_entry(it, struct task_struct, sl_node)->priodl <=
++ skiplist_entry(node, struct task_struct, sl_node)->priodl);
++}
++
++/*
++ * Define the skip list insert function for PDS
++ */
++DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search);
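++
++/*
++ * Design note: the search predicate uses <=, so a newly inserted task is
++ * placed *after* any existing tasks with an equal priodl key. Among equal
++ * keys the queue therefore stays FIFO, preserving round-robin behaviour
++ * for tasks with the same priority and deadline.
++ */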
++
++/*
++ * Adding task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++ lockdep_assert_held(&rq->lock);
++
++ WARN_ONCE(task_rq(p) != rq, "pds: enqueue task reside on cpu%d to cpu%d\n",
++ task_cpu(p), cpu_of(rq));
++
++ p->sl_node.level = p->sl_level;
++ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node)) {
++ update_sched_rq_queued_masks(rq);
++ update_sched_rq_pending_masks(rq);
++ } else if (is_second_in_rq(p, rq))
++ update_sched_rq_pending_masks(rq);
++ rq->nr_running++;
++
++ sched_update_tick_dependency(rq);
++
++ sched_info_queued(rq, p);
++ psi_enqueue(p, flags);
++
++ /*
++ * If in_iowait is set, the code below may not trigger any cpufreq
++ * utilization updates, so do it here explicitly with the IOWAIT flag
++ * passed.
++ */
++ if (p->in_iowait)
++ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq)
++{
++ bool b_first, b_second;
++
++ lockdep_assert_held(&rq->lock);
++
++ WARN_ONCE(task_rq(p) != rq, "pds: cpu[%d] requeue task reside on cpu%d\n",
++ cpu_of(rq), task_cpu(p));
++
++ b_first = skiplist_del_init(&rq->sl_header, &p->sl_node);
++ b_second = is_second_in_rq(p, rq);
++
++ p->sl_node.level = p->sl_level;
++ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) {
++ update_sched_rq_queued_masks(rq);
++ update_sched_rq_pending_masks(rq);
++ } else if (is_second_in_rq(p, rq) || b_second)
++ update_sched_rq_pending_masks(rq);
++}
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++ struct task_struct *curr = rq->curr;
++ int cpu;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (test_tsk_need_resched(curr))
++ return;
++
++ cpu = cpu_of(rq);
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(curr);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(curr))
++ smp_send_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
++{
++ struct task_struct *curr = rq->curr;
++
++ if (curr->prio == PRIO_LIMIT)
++ resched_curr(rq);
++
++ if (task_running_idle(p))
++ return;
++
++ if (p->priodl < curr->priodl)
++ resched_curr(rq);
++}
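++
++/*
++ * Note: priodl (maintained by update_task_priodl() elsewhere in this patch)
++ * is assumed to pack the priority into the high bits and the deadline into
++ * the low bits, so the single u64 comparison above orders tasks by priority
++ * first and by earlier deadline second.
++ */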
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++ if (hrtimer_active(&rq->hrtick_timer))
++ hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++ struct task_struct *p;
++
++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++ raw_spin_lock(&rq->lock);
++ p = rq->curr;
++ p->time_slice = 0;
++ resched_curr(rq);
++ raw_spin_unlock(&rq->lock);
++
++ return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ * - enabled by features
++ * - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++ /**
++ * PDS doesn't support sched_feat yet
++ if (!sched_feat(HRTICK))
++ return 0;
++ */
++ if (!cpu_active(cpu_of(rq)))
++ return 0;
++ return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++ struct hrtimer *timer = &rq->hrtick_timer;
++
++ hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++ struct rq *rq = arg;
++
++ raw_spin_lock(&rq->lock);
++ __hrtick_restart(rq);
++ rq->hrtick_csd_pending = 0;
++ raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++ struct hrtimer *timer = &rq->hrtick_timer;
++ ktime_t time;
++ s64 delta;
++
++ /*
++ * Don't schedule slices shorter than 10000ns, that just
++ * doesn't make sense and can cause timer DoS.
++ */
++ delta = max_t(s64, delay, 10000LL);
++ time = ktime_add_ns(timer->base->get_time(), delta);
++
++ hrtimer_set_expires(timer, time);
++
++ if (rq == this_rq()) {
++ __hrtick_restart(rq);
++ } else if (!rq->hrtick_csd_pending) {
++ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++ rq->hrtick_csd_pending = 1;
++ }
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++ /*
++ * Don't schedule slices shorter than 10000ns, that just
++ * doesn't make sense. Rely on vruntime for fairness.
++ */
++ delay = max_t(u64, delay, 10000LL);
++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++ HRTIMER_MODE_REL_PINNED);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ rq->hrtick_csd_pending = 0;
++
++ rq->hrtick_csd.flags = 0;
++ rq->hrtick_csd.func = __hrtick_start;
++ rq->hrtick_csd.info = rq;
++#endif
++
++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrtick_timer.function = hrtick;
++}
++
++static inline int rq_dither(struct rq *rq)
++{
++ if ((rq->clock - rq->last_tick > HALF_JIFFY_NS) || hrtick_enabled(rq))
++ return 0;
++
++ return HALF_JIFFY_NS;
++}
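++
++/*
++ * Example, assuming HZ=1000: a jiffy is 1000000ns, so HALF_JIFFY_NS is
++ * ~500000ns. rq_dither() thus reports up to half a tick of timing slack
++ * when the last tick is recent and no high-resolution timer is available,
++ * and zero slack otherwise.
++ */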
++
++#else /* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++ return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++
++static inline int rq_dither(struct rq *rq)
++{
++ return (rq->clock - rq->last_tick > HALF_JIFFY_NS) ? 0 : HALF_JIFFY_NS;
++}
++#endif /* CONFIG_SCHED_HRTICK */
++
++static inline int normal_prio(struct task_struct *p)
++{
++ static const int policy_to_prio[] = {
++ NORMAL_PRIO, /* SCHED_NORMAL */
++ 0, /* SCHED_FIFO */
++ 0, /* SCHED_RR */
++ IDLE_PRIO, /* SCHED_BATCH */
++ ISO_PRIO, /* SCHED_ISO */
++ IDLE_PRIO /* SCHED_IDLE */
++ };
++
++ if (task_has_rt_policy(p))
++ return MAX_RT_PRIO - 1 - p->rt_priority;
++ return policy_to_prio[p->policy];
++}
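++
++/*
++ * Worked example (assuming the usual MAX_RT_PRIO == 100): a SCHED_FIFO
++ * task with rt_priority 50 maps to prio 100 - 1 - 50 = 49, so a higher
++ * rt_priority yields a numerically lower (stronger) prio, while the
++ * non-RT policies use the fixed table above.
++ */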
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++ enqueue_task(p, rq, ENQUEUE_WAKEUP);
++ p->on_rq = 1;
++ cpufreq_update_this_cpu(rq, 0);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++ dequeue_task(p, rq, DEQUEUE_SLEEP);
++ p->on_rq = 0;
++ cpufreq_update_this_cpu(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->cpu is set to a new value, task_access_lock(p, ...) can be
++ * successfully executed on another CPU. We must ensure that updates of
++ * per-task data have been completed by this moment.
++ */
++ smp_wmb();
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++ WRITE_ONCE(p->cpu, cpu);
++#else
++ WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++#endif
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++ /*
++ * We should never call set_task_cpu() on a blocked task,
++ * ttwu() will sort out the placement.
++ */
++ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
++ !p->on_rq);
++#ifdef CONFIG_LOCKDEP
++ /*
++ * The caller should hold either p->pi_lock or rq->lock, when changing
++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++ *
++ * sched_move_task() holds both and thus holding either pins the cgroup,
++ * see task_group().
++ */
++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++ lockdep_is_held(&task_rq(p)->lock)));
++#endif
++ /*
++ * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++ */
++ WARN_ON_ONCE(!cpu_online(new_cpu));
++#endif
++ if (task_cpu(p) == new_cpu)
++ return;
++ trace_sched_migrate_task(p, new_cpu);
++ rseq_migrate(p);
++ perf_event_task_migrate(p);
++
++ __set_task_cpu(p, new_cpu);
++}
++
++static inline bool is_per_cpu_kthread(struct task_struct *p)
++{
++ return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++ if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++ return false;
++
++ if (is_per_cpu_kthread(p))
++ return cpu_online(cpu);
++
++ return cpu_active(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ * stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ * off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ * it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ * is done.
++ */
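++
++/*
++ * Trigger-side sketch (the real callers, e.g. __set_cpus_allowed_ptr(),
++ * live elsewhere in this patch):
++ *
++ *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
++ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ */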
++
++/*
++ * detach_task() -- detach the task for the migration specified in @target_cpu
++ */
++static void detach_task(struct rq *rq, struct task_struct *p, int target_cpu)
++{
++ lockdep_assert_held(&rq->lock);
++
++ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++ dequeue_task(p, rq, 0);
++
++ set_task_cpu(p, target_cpu);
++}
++
++/*
++ * attach_task() -- attach the task detached by detach_task() to its new rq.
++ */
++static void attach_task(struct rq *rq, struct task_struct *p)
++{
++ lockdep_assert_held(&rq->lock);
++
++ BUG_ON(task_rq(p) != rq);
++
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++ enqueue_task(p, rq, 0);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++ cpufreq_update_this_cpu(rq, 0);
++}
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++ new_cpu)
++{
++ detach_task(rq, p, new_cpu);
++ raw_spin_unlock(&rq->lock);
++
++ rq = cpu_rq(new_cpu);
++
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++
++ attach_task(rq, p);
++
++ check_preempt_curr(rq, p);
++
++ return rq;
++}
++
++struct migration_arg {
++ struct task_struct *task;
++ int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++ dest_cpu)
++{
++ /* Affinity changed (again). */
++ if (!is_cpu_allowed(p, dest_cpu))
++ return rq;
++
++ update_rq_clock(rq);
++ return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++ struct migration_arg *arg = data;
++ struct task_struct *p = arg->task;
++ struct rq *rq = this_rq();
++
++ /*
++ * The original target CPU might have gone down and we might
++ * be on another CPU but it doesn't matter.
++ */
++ local_irq_disable();
++
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++ /*
++ * If task_rq(p) != rq, it cannot be migrated here, because we're
++ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++ * we're holding p->pi_lock.
++ */
++ if (task_rq(p) == rq)
++ if (task_on_rq_queued(p))
++ rq = __migrate_task(rq, p, arg->dest_cpu);
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock(&p->pi_lock);
++
++ local_irq_enable();
++ return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
++{
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ set_cpus_allowed_common(p, new_mask);
++}
++#endif
++
++/* Enter with rq lock held. We know p is on the local CPU */
++static inline void __set_tsk_resched(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ set_preempt_need_resched();
++}
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ unsigned long flags;
++ bool running, on_rq;
++ unsigned long ncsw;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++
++ for (;;) {
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since this will return false
++ * if the runqueue has changed and p is actually now
++ * running somewhere else!
++ */
++ while (task_running(p) && p == rq->curr) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ task_access_lock_irqsave(p, &lock, &flags);
++ trace_sched_wait_task(p);
++ running = task_running(p);
++ on_rq = p->on_rq;
++ ncsw = 0;
++ if (!match_state || p->state == match_state)
++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++ task_access_unlock_irqrestore(p, lock, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(on_rq)) {
++ ktime_t to = NSEC_PER_SEC / HZ;
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
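++
++/*
++ * Usage sketch (illustrative only): a typical caller, such as ptrace,
++ * passes the state it just observed and treats a zero return as "the task
++ * woke up again":
++ *
++ *	ncsw = wait_task_inactive(p, __TASK_TRACED);
++ *	if (!ncsw)
++ *		goto unlock;	// p left __TASK_TRACED before descheduling
++ */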
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_send_reschedule(cpu);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_allowed is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ * - cpu_active must be a subset of cpu_online
++ *
++ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ * see __set_cpus_allowed_ptr(). At this point the newly online
++ * CPU isn't yet part of the sched domains, and balancing will not
++ * see it.
++ *
++ * - on cpu-down we clear cpu_active() to mask the sched domains and
++ * avoid the load balancer to place new tasks on the to be removed
++ * CPU. Existing tasks will remain running there and will be taken
++ * off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++ int nid = cpu_to_node(cpu);
++ const struct cpumask *nodemask = NULL;
++ enum { cpuset, possible, fail } state = cpuset;
++ int dest_cpu;
++
++ /*
++ * If the node that the CPU is on has been offlined, cpu_to_node()
++ * will return -1. There is no CPU on the node, and we should
++ * select a CPU on another node.
++ */
++ if (nid != -1) {
++ nodemask = cpumask_of_node(nid);
++
++ /* Look for allowed, online CPU in same node. */
++ for_each_cpu(dest_cpu, nodemask) {
++ if (!cpu_active(dest_cpu))
++ continue;
++ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++ return dest_cpu;
++ }
++ }
++
++ for (;;) {
++ /* Any allowed, online CPU? */
++ for_each_cpu(dest_cpu, &p->cpus_allowed) {
++ if (!is_cpu_allowed(p, dest_cpu))
++ continue;
++ goto out;
++ }
++
++ /* No more Mr. Nice Guy. */
++ switch (state) {
++ case cpuset:
++ if (IS_ENABLED(CONFIG_CPUSETS)) {
++ cpuset_cpus_allowed_fallback(p);
++ state = possible;
++ break;
++ }
++ /* Fall-through */
++ case possible:
++ do_set_cpus_allowed(p, cpu_possible_mask);
++ state = fail;
++ break;
++
++ case fail:
++ BUG();
++ break;
++ }
++ }
++
++out:
++ if (state != cpuset) {
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++ * leave kernel.
++ */
++ if (p->mm && printk_ratelimit()) {
++ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++ task_pid_nr(p), p->comm, cpu);
++ }
++ }
++
++ return dest_cpu;
++}
++
++static inline int best_mask_cpu(int cpu, cpumask_t *cpumask)
++{
++ cpumask_t *mask;
++
++ if (cpumask_test_cpu(cpu, cpumask))
++ return cpu;
++
++ mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]);
++ while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++ mask++;
++
++ return cpu;
++}
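++
++/*
++ * Selection sketch: sched_cpu_affinity_chk_masks (set up elsewhere in this
++ * patch) is assumed to order the per-CPU masks from topologically closest
++ * to farthest -- e.g. SMT siblings, then cores sharing a cache, then other
++ * nodes -- so the loop above returns the allowed CPU nearest to @cpu.
++ */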
++
++/*
++ * task_preemptible_rq - return the cpu of the rq which the given task can
++ * preempt on
++ * @p: the task that wants to preempt a CPU
++ * @chk_mask: mask of candidate CPUs to check
++ */
++static inline int
++task_preemptible_rq_idle(struct task_struct *p, cpumask_t *chk_mask)
++{
++ cpumask_t tmp;
++
++#ifdef CONFIG_SCHED_SMT
++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask))
++ return best_mask_cpu(task_cpu(p), &tmp);
++#endif
++
++#ifdef CONFIG_SMT_NICE
++ /* Only ttwu on a CPU which is not SMT-suppressed */
++ if (cpumask_andnot(&tmp, chk_mask, &sched_smt_supressed_mask)) {
++ cpumask_t t;
++ if (cpumask_and(&t, &tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ return best_mask_cpu(task_cpu(p), &t);
++ return best_mask_cpu(task_cpu(p), &tmp);
++ }
++#endif
++
++ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ return best_mask_cpu(task_cpu(p), &tmp);
++ return best_mask_cpu(task_cpu(p), chk_mask);
++}
++
++static inline int
++task_preemptible_rq(struct task_struct *p, cpumask_t *chk_mask,
++ int preempt_level)
++{
++ cpumask_t tmp;
++ int level;
++
++#ifdef CONFIG_SCHED_SMT
++#ifdef CONFIG_SMT_NICE
++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_psg_mask))
++ return best_mask_cpu(task_cpu(p), &tmp);
++#else
++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask))
++ return best_mask_cpu(task_cpu(p), &tmp);
++#endif
++#endif
++
++ level = find_first_bit(sched_rq_queued_masks_bitmap,
++ NR_SCHED_RQ_QUEUED_LEVEL);
++
++ while (level < preempt_level) {
++ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[level]))
++ return best_mask_cpu(task_cpu(p), &tmp);
++
++ level = find_next_bit(sched_rq_queued_masks_bitmap,
++ NR_SCHED_RQ_QUEUED_LEVEL,
++ level + 1);
++ }
++
++ if (unlikely(SCHED_RQ_RT == level &&
++ level == preempt_level &&
++ cpumask_and(&tmp, chk_mask,
++ &sched_rq_queued_masks[SCHED_RQ_RT]))) {
++ unsigned int cpu;
++
++ for_each_cpu (cpu, &tmp)
++ if (p->prio < sched_rq_prio[cpu])
++ return cpu;
++ }
++
++ return best_mask_cpu(task_cpu(p), chk_mask);
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
++#define WF_FORK 0x02 /* child wakeup after fork */
++#define WF_MIGRATED 0x04 /* internal use, task got migrated */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++ cpumask_t chk_mask;
++
++ if (unlikely(!cpumask_and(&chk_mask, &p->cpus_allowed, cpu_online_mask)))
++ return select_fallback_rq(task_cpu(p), p);
++
++ /* Check whether an IDLE task is suitable to run at normal priority */
++ if (idleprio_task(p)) {
++ if (idleprio_suitable(p)) {
++ p->prio = p->normal_prio;
++ update_task_priodl(p);
++ return task_preemptible_rq_idle(p, &chk_mask);
++ }
++ p->prio = NORMAL_PRIO;
++ update_task_priodl(p);
++ }
++
++ return task_preemptible_rq(p, &chk_mask,
++ task_running_policy_level(p, this_rq()));
++}
++#else /* CONFIG_SMP */
++static inline int select_task_rq(struct task_struct *p)
++{
++ return 0;
++}
++#endif /* CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++ struct rq *rq;
++
++ if (!schedstat_enabled())
++ return;
++
++ rq = this_rq();
++
++#ifdef CONFIG_SMP
++ if (cpu == rq->cpu)
++ __schedstat_inc(rq->ttwu_local);
++ else {
++ /* PDS ToDo: how to do ttwu_wake_remote */
++ }
++#endif /* CONFIG_SMP */
++
++ __schedstat_inc(rq->ttwu_count);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ p->state = TASK_RUNNING;
++ trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++#ifdef CONFIG_SMP
++ if (p->sched_contributes_to_load)
++ rq->nr_uninterruptible--;
++#endif
++
++ activate_task(p, rq);
++ ttwu_do_wakeup(rq, p, 0);
++}
++
++static int ttwu_remote(struct task_struct *p, int wake_flags)
++{
++ struct rq *rq;
++ raw_spinlock_t *lock;
++ int ret = 0;
++
++ rq = __task_access_lock(p, &lock);
++ if (task_on_rq_queued(p)) {
++ ttwu_do_wakeup(rq, p, wake_flags);
++ ret = 1;
++ }
++ __task_access_unlock(p, lock);
++
++ return ret;
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ * MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ * A) UNLOCK of the rq(c0)->lock scheduling out task t
++ * B) migration for t is required to synchronize *both* rq(c0)->lock and
++ * rq(c1)->lock (if not at the same time, then in that order).
++ * C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ * CPU0 CPU1 CPU2
++ *
++ * LOCK rq(0)->lock
++ * sched-out X
++ * sched-in Y
++ * UNLOCK rq(0)->lock
++ *
++ * LOCK rq(0)->lock // orders against CPU0
++ * dequeue X
++ * UNLOCK rq(0)->lock
++ *
++ * LOCK rq(1)->lock
++ * enqueue X
++ * UNLOCK rq(1)->lock
++ *
++ * LOCK rq(1)->lock // orders against CPU2
++ * sched-out Z
++ * sched-in X
++ * UNLOCK rq(1)->lock
++ *
++ *
++ * BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ * 1) smp_store_release(X->on_cpu, 0)
++ * 2) smp_cond_load_acquire(!X->on_cpu)
++ *
++ * Example:
++ *
++ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ * LOCK rq(0)->lock LOCK X->pi_lock
++ * dequeue X
++ * sched-out X
++ * smp_store_release(X->on_cpu, 0);
++ *
++ * smp_cond_load_acquire(&X->on_cpu, !VAL);
++ * X->state = WAKING
++ * set_task_cpu(X,2)
++ *
++ * LOCK rq(2)->lock
++ * enqueue X
++ * X->state = RUNNING
++ * UNLOCK rq(2)->lock
++ *
++ * LOCK rq(2)->lock // orders against CPU1
++ * sched-out Z
++ * sched-in X
++ * UNLOCK rq(2)->lock
++ *
++ * UNLOCK X->pi_lock
++ * UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/***
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running
++ * or @state didn't match @p's state.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++ int wake_flags)
++{
++ unsigned long flags;
++ struct rq *rq;
++ int cpu, success = 0;
++
++ /*
++ * If we are going to wake up a thread waiting for CONDITION we
++ * need to ensure that CONDITION=1 done by the caller can not be
++ * reordered with p->state check below. This pairs with mb() in
++ * set_current_state() the waiting thread does.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ smp_mb__after_spinlock();
++ if (!(p->state & state))
++ goto out;
++
++ trace_sched_waking(p);
++
++ /* We're going to change ->state: */
++ success = 1;
++ cpu = task_cpu(p);
++
++ /*
++ * Ensure we load p->on_rq _after_ p->state, otherwise it would
++ * be possible to, falsely, observe p->on_rq == 0 and get stuck
++ * in smp_cond_load_acquire() below.
++ *
++ * sched_ttwu_pending() try_to_wake_up()
++ * STORE p->on_rq = 1 LOAD p->state
++ * UNLOCK rq->lock
++ *
++ * __schedule() (switch to task 'p')
++ * LOCK rq->lock smp_rmb();
++ * smp_mb__after_spinlock();
++ * UNLOCK rq->lock
++ *
++ * [task p]
++ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
++ *
++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++ * __schedule(). See the comment for smp_mb__after_spinlock().
++ */
++ smp_rmb();
++ if (p->on_rq && ttwu_remote(p, wake_flags))
++ goto stat;
++
++#ifdef CONFIG_SMP
++ /*
++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++ * possible to, falsely, observe p->on_cpu == 0.
++ *
++ * One must be running (->on_cpu == 1) in order to remove oneself
++ * from the runqueue.
++ *
++ * __schedule() (switch to task 'p') try_to_wake_up()
++ * STORE p->on_cpu = 1 LOAD p->on_rq
++ * UNLOCK rq->lock
++ *
++ * __schedule() (put 'p' to sleep)
++ * LOCK rq->lock smp_rmb();
++ * smp_mb__after_spinlock();
++ * STORE p->on_rq = 0 LOAD p->on_cpu
++ *
++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++ * __schedule(). See the comment for smp_mb__after_spinlock().
++ */
++ smp_rmb();
++
++ /*
++ * If the owning (remote) CPU is still in the middle of schedule() with
++ * this task as prev, wait until it's done referencing the task.
++ *
++ * Pairs with the smp_store_release() in finish_task().
++ *
++ * This ensures that tasks getting woken will be fully ordered against
++ * their previous state and preserve Program Order.
++ */
++ smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++ p->sched_contributes_to_load = !!task_contributes_to_load(p);
++ p->state = TASK_WAKING;
++
++ if (p->in_iowait) {
++ delayacct_blkio_end(p);
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++
++ if (SCHED_ISO == p->policy && ISO_PRIO != p->prio) {
++ p->prio = ISO_PRIO;
++ p->deadline = 0UL;
++ update_task_priodl(p);
++ }
++
++ cpu = select_task_rq(p);
++
++ if (cpu != task_cpu(p)) {
++ wake_flags |= WF_MIGRATED;
++ psi_ttwu_dequeue(p);
++ set_task_cpu(p, cpu);
++ }
++#else /* CONFIG_SMP */
++ if (p->in_iowait) {
++ delayacct_blkio_end(p);
++ atomic_dec(&task_rq(p)->nr_iowait);
++ }
++#endif
++
++ rq = cpu_rq(cpu);
++ raw_spin_lock(&rq->lock);
++
++ update_rq_clock(rq);
++ ttwu_do_activate(rq, p, wake_flags);
++ check_preempt_curr(rq, p);
++
++ raw_spin_unlock(&rq->lock);
++
++stat:
++ ttwu_stat(p, cpu, wake_flags);
++out:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return success;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
++{
++ unsigned long flags;
++ int cpu = get_cpu();
++ struct rq *rq = this_rq();
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++ /* Should be reset in fork.c but done here for ease of PDS patching */
++ p->on_cpu =
++ p->on_rq =
++ p->utime =
++ p->stime =
++ p->sched_time = 0;
++
++ p->sl_level = pds_skiplist_random_level(p);
++ INIT_SKIPLIST_NODE(&p->sl_node);
++
++#ifdef CONFIG_COMPACTION
++ p->capture_control = NULL;
++#endif
++
++ /*
++ * We mark the process as NEW here. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_NEW;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child.
++ */
++ p->prio = current->normal_prio;
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */
++ if (unlikely(p->sched_reset_on_fork)) {
++ if (task_has_rt_policy(p)) {
++ p->policy = SCHED_NORMAL;
++ p->static_prio = NICE_TO_PRIO(0);
++ p->rt_priority = 0;
++ } else if (PRIO_TO_NICE(p->static_prio) < 0)
++ p->static_prio = NICE_TO_PRIO(0);
++
++ p->prio = p->normal_prio = normal_prio(p);
++
++ /*
++ * We don't need the reset flag anymore after the fork. It has
++ * fulfilled its duty:
++ */
++ p->sched_reset_on_fork = 0;
++ }
++
++ /*
++ * Share the timeslice between parent and child, thus the
++ * total amount of pending timeslices in the system doesn't change,
++ * resulting in more scheduling fairness.
++ */
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ rq->curr->time_slice /= 2;
++ p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++ hrtick_start(rq, US_TO_NS(rq->curr->time_slice));
++#endif
++
++ if (p->time_slice < RESCHED_US) {
++ update_rq_clock(rq);
++ time_slice_expired(p, rq);
++ resched_curr(rq);
++ } else
++ update_task_priodl(p);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ /*
++ * The child is not yet in the pid-hash so no cgroup attach races,
++ * and the cgroup is pinned to this child because cgroup_fork()
++ * is run before sched_fork().
++ *
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ /*
++ * We're setting the CPU for the first time, we don't migrate,
++ * so use __set_task_cpu().
++ */
++ __set_task_cpu(p, cpu);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++ if (unlikely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++ init_task_preempt_count(p);
++
++ put_cpu();
++ return 0;
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++ if (enabled)
++ static_branch_enable(&sched_schedstats);
++ else
++ static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++ if (!schedstat_enabled()) {
++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++ static_branch_enable(&sched_schedstats);
++ }
++}
++
++static int __init setup_schedstats(char *str)
++{
++ int ret = 0;
++ if (!str)
++ goto out;
++
++ /*
++ * This code is called before jump labels have been set up, so we can't
++ * change the static branch directly just yet. Instead set a temporary
++ * variable so init_schedstats() can do it later.
++ */
++ if (!strcmp(str, "enable")) {
++ __sched_schedstats = true;
++ ret = 1;
++ } else if (!strcmp(str, "disable")) {
++ __sched_schedstats = false;
++ ret = 1;
++ }
++out:
++ if (!ret)
++ pr_warn("Unable to parse schedstats=\n");
++
++ return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++static void __init init_schedstats(void)
++{
++ set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table t;
++ int err;
++ int state = static_branch_likely(&sched_schedstats);
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ t = *table;
++ t.data = &state;
++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++ if (err < 0)
++ return err;
++ if (write)
++ set_schedstats(state);
++ return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++ p->state = TASK_RUNNING;
++
++ rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++ /*
++ * Fork balancing, do it here and not earlier because:
++ * - cpus_allowed can change in the fork path
++ * - any previously selected CPU might disappear through hotplug
++ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++ * as we're not fully set-up yet.
++ */
++ __set_task_cpu(p, cpu_of(rq));
++#endif
++
++ raw_spin_lock(&rq->lock);
++
++ update_rq_clock(rq);
++ activate_task(p, rq);
++ trace_sched_wakeup_new(p);
++ check_preempt_curr(rq, p);
++
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++ static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++ static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ if (!static_branch_unlikely(&preempt_notifier_key))
++ WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(¬ifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ if (static_branch_unlikely(&preempt_notifier_key))
++ __fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ if (static_branch_unlikely(&preempt_notifier_key))
++ __fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++ /*
++ * Claim the task as running, we do this before switching to it
++ * such that any running task will have this set.
++ */
++ next->on_cpu = 1;
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++ /*
++ * After ->on_cpu is cleared, the task can be moved to a different CPU.
++ * We must ensure this doesn't happen until the switch is completely
++ * finished.
++ *
++ * In particular, the load of prev->state in finish_task_switch() must
++ * happen before this.
++ *
++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++ */
++ smp_store_release(&prev->on_cpu, 0);
++#else
++ prev->on_cpu = 0;
++#endif
++}
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++ /*
++ * The runqueue lock will be released by the next
++ * task (which is an invalid locking op but in the case
++ * of the scheduler it's an obvious special-case), so we
++ * do an early lockdep release here:
++ */
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /* this is a valid case when another task releases the spinlock */
++ rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++ /*
++ * If we are tracking spinlock dependencies then we have to
++ * fix up the runqueue lock - which gets 'carried over' from
++ * prev into current:
++ */
++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++ raw_spin_unlock_irq(&rq->lock);
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ kcov_prepare_switch(prev);
++ sched_info_switch(rq, prev, next);
++ perf_event_task_sched_out(prev, next);
++ rseq_preempt(prev);
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_task(next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ /*
++ * The previous task will have left us with a preempt_count of 2
++ * because it left us after:
++ *
++ * schedule()
++ * preempt_disable(); // 1
++ * __schedule()
++ * raw_spin_lock_irq(&rq->lock) // 2
++ *
++ * Also, see FORK_PREEMPT_COUNT.
++ */
++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++ "corrupted preempt_count: %s/%d/0x%x\n",
++ current->comm, current->pid, preempt_count()))
++ preempt_count_set(FORK_PREEMPT_COUNT);
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ *
++ * We must observe prev->state before clearing prev->on_cpu (in
++ * finish_task), otherwise a concurrent wakeup can get prev
++ * running on another CPU and we could race with its RUNNING -> DEAD
++ * transition, resulting in a double drop.
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ finish_task(prev);
++ finish_lock_switch(rq);
++ finish_arch_post_lock_switch();
++ kcov_finish_switch(current);
++
++ fire_sched_in_preempt_notifiers(current);
++ /*
++ * When switching through a kernel thread, the loop in
++ * membarrier_{private,global}_expedited() may have observed that
++ * kernel thread and not issued an IPI. It is therefore possible to
++ * schedule between user->kernel->user threads without passing through
++ * switch_mm(). Membarrier requires a barrier after storing to
++ * rq->curr, before returning to userspace, so provide them here:
++ *
++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++ * provided by mmdrop(),
++ * - a sync_core for SYNC_CORE.
++ */
++ if (mm) {
++ membarrier_mm_sync_core_before_usermode(mm);
++ mmdrop(mm);
++ }
++ if (unlikely(prev_state == TASK_DEAD)) {
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++
++ /* Task is done with its stack. */
++ put_task_stack(prev);
++
++ put_task_struct(prev);
++ }
++
++ tick_nohz_task_switch();
++ return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq;
++
++ /*
++ * New tasks start with FORK_PREEMPT_COUNT, see there and
++ * finish_task_switch() for details.
++ *
++ * finish_task_switch() will drop rq->lock and lower the preempt_count
++ * and the preempt_enable() will end up enabling preemption (on
++ * PREEMPT_COUNT kernels).
++ */
++
++ rq = finish_task_switch(prev);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++
++ calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ /*
++ * If mm is non-NULL, we pass through switch_mm(). If mm is
++ * NULL, we will pass through mmdrop() in finish_task_switch().
++ * Both of these contain the full memory barrier required by
++ * membarrier after storing to rq->curr, before returning to
++ * user-space.
++ */
++ if (!mm) {
++ next->active_mm = oldmm;
++ mmgrab(oldmm);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm_irqs_off(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++
++ prepare_lock_switch(rq, next);
++
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++ barrier();
++
++ return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++ return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data: they prefer shallow idle state
++ * selection for a CPU that has IO-wait, even though the blocked task might
++ * not end up running there when it does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we
++ * could have spent running if it were not for IO. That is, if we were to
++ * improve the storage performance, we'd have a proportional reduction in
++ * IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += nr_iowait_cpu(i);
++
++ return sum;
++}
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
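++/*
++ * pds_update_curr - update the current task's run time statistics.
++ * The elapsed nanoseconds since @p last ran are added to p->sched_time
++ * and (converted to usecs) consumed from p->time_slice.
++ */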
++static inline void pds_update_curr(struct rq *rq, struct task_struct *p)
++{
++ s64 ns = rq->clock_task - p->last_ran;
++
++ p->sched_time += ns;
++ account_group_exec_runtime(p, ns);
++
++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
++ p->time_slice -= NS_TO_US(ns);
++ p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, also include its pending runtime
++ * that has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++ u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++ /*
++ * 64-bit doesn't need locks to atomically read a 64-bit value.
++ * So we have an optimization chance when the task's delta_exec is 0.
++ * Reading ->on_cpu is racy, but this is ok.
++ *
++ * If we race with it leaving CPU, we'll take a lock. So we're correct.
++ * If we race with it entering CPU, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return tsk_seruntime(p);
++#endif
++
++ rq = task_access_lock_irqsave(p, &lock, &flags);
++ /*
++ * Must be ->curr _and_ ->on_rq. If dequeued, we would
++ * project cycles that may never be accounted to this
++ * thread, breaking clock_gettime().
++ */
++ if (p == rq->curr && task_on_rq_queued(p)) {
++ update_rq_clock(rq);
++ pds_update_curr(rq, p);
++ }
++ ns = tsk_seruntime(p);
++ task_access_unlock_irqrestore(p, lock, &flags);
++
++ return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void pds_scheduler_task_tick(struct rq *rq)
++{
++ struct task_struct *p = rq->curr;
++
++ if (is_idle_task(p))
++ return;
++
++ pds_update_curr(rq, p);
++
++ cpufreq_update_util(rq, 0);
++
++ /*
++ * Tasks that were scheduled in the first half of a tick are not
++ * allowed to run into the 2nd half of the next tick if they will
++ * run out of time slice in the interim. Otherwise, if they have
++ * less than RESCHED_US μs of time slice left they will be rescheduled.
++ */
++ if (p->time_slice - rq->dither >= RESCHED_US)
++ return;
++
++ /*
++ * p->time_slice < RESCHED_US. We will modify the task_struct under
++ * the rq lock as p is rq->curr
++ */
++ __set_tsk_resched(p);
++}
++
++#ifdef CONFIG_SMP
++
++#ifdef CONFIG_SCHED_SMT
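++/*
++ * active_load_balance_cpu_stop - run on the busy CPU by the cpu stopper.
++ * Re-checks that the task passed in @data is still queued on this rq and,
++ * if so, migrates it to an idle CPU in sched_cpu_sg_idle_mask that its
++ * affinity allows.
++ */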
++static int active_load_balance_cpu_stop(void *data)
++{
++ struct rq *rq = this_rq();
++ struct task_struct *p = data;
++ int cpu;
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++
++ rq->active_balance = 0;
++ /*
++ * _something_ may have changed the task, double check again
++ */
++ if (task_on_rq_queued(p) && task_rq(p) == rq &&
++ (cpu = cpumask_any_and(&p->cpus_allowed, &sched_cpu_sg_idle_mask)) < nr_cpu_ids)
++ rq = __migrate_task(rq, p, cpu);
++
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock(&p->pi_lock);
++
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++/* pds_sg_balance_trigger - trigger sibling group balance for @cpu */
++static void pds_sg_balance_trigger(const int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++ struct task_struct *curr;
++
++ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++ return;
++ curr = rq->curr;
++ if (!is_idle_task(curr) &&
++ cpumask_intersects(&curr->cpus_allowed, &sched_cpu_sg_idle_mask)) {
++ int active_balance = 0;
++
++ if (likely(!rq->active_balance)) {
++ rq->active_balance = 1;
++ active_balance = 1;
++ }
++
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ if (likely(active_balance))
++ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
++ curr, &rq->active_balance_work);
++ } else
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++/*
++ * pds_sg_balance_check - sibling group balance check for run queue @rq
++ */
++static inline void pds_sg_balance_check(const struct rq *rq)
++{
++ cpumask_t chk;
++ int i;
++
++ /* Only online CPUs will do sg balance checking */
++ if (unlikely(!rq->online))
++ return;
++
++ /* Only CPUs in the sibling idle group will do the checking */
++ if (!cpumask_test_cpu(cpu_of(rq), &sched_cpu_sg_idle_mask))
++ return;
++
++ /* Find potential cpus which can migrate the currently running task */
++ if (!cpumask_andnot(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY],
++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ return;
++
++ for_each_cpu(i, &chk) {
++ /* skip the cpu whose sibling cpu is idle */
++ if (cpumask_test_cpu(per_cpu(sched_sibling_cpu, i),
++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ continue;
++ pds_sg_balance_trigger(i);
++ }
++}
++#endif /* CONFIG_SCHED_SMT */
++#endif /* CONFIG_SMP */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu __maybe_unused = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++
++ sched_clock_tick();
++
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++
++ pds_scheduler_task_tick(rq);
++ update_sched_rq_queued_masks_normal(rq);
++ calc_global_load_tick(rq);
++ psi_task_tick(rq);
++
++ rq->last_tick = rq->clock;
++ raw_spin_unlock(&rq->lock);
++
++ perf_event_task_tick();
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++struct tick_work {
++ int cpu;
++ struct delayed_work work;
++};
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct tick_work *twork = container_of(dwork, struct tick_work, work);
++ int cpu = twork->cpu;
++ struct rq *rq = cpu_rq(cpu);
++ struct task_struct *curr;
++ unsigned long flags;
++ u64 delta;
++
++ /*
++ * Handle the tick only if it appears the remote CPU is running in full
++ * dynticks mode. The check is racy by nature, but missing a tick or
++ * having one too much is no big deal because the scheduler tick updates
++ * statistics and checks timeslices in a time-independent way, regardless
++ * of when exactly it is running.
++ */
++ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
++ goto out_requeue;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ curr = rq->curr;
++
++ if (is_idle_task(curr))
++ goto out_unlock;
++
++ update_rq_clock(rq);
++ delta = rq_clock_task(rq) - curr->last_ran;
++
++ /*
++ * Make sure the next tick runs within a reasonable
++ * amount of time.
++ */
++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++ pds_scheduler_task_tick(rq);
++ update_sched_rq_queued_masks_normal(rq);
++
++out_unlock:
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++ /*
++ * Run the remote tick once per second (1Hz). This arbitrary
++ * frequency is large enough to avoid overload but short enough
++ * to keep scheduler internal stats reasonably up to date.
++ */
++ queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++ struct tick_work *twork;
++
++ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++ return;
++
++ WARN_ON_ONCE(!tick_work_cpu);
++
++ twork = per_cpu_ptr(tick_work_cpu, cpu);
++ twork->cpu = cpu;
++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++ queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++ struct tick_work *twork;
++
++ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++ return;
++
++ WARN_ON_ONCE(!tick_work_cpu);
++
++ twork = per_cpu_ptr(tick_work_cpu, cpu);
++ cancel_delayed_work_sync(&twork->work);
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++ tick_work_cpu = alloc_percpu(struct tick_work);
++ BUG_ON(!tick_work_cpu);
++
++ return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++ if (preempt_count() == val) {
++ unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ __preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ preempt_latency_stop(val);
++ __preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound, so every time they're rescheduled they have their time_slice
++ * refilled, but get a new, later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++ if (rq->idle == p)
++ return;
++
++ pds_update_curr(rq, p);
++
++ if (p->time_slice < RESCHED_US) {
++ time_slice_expired(p, rq);
++ if (SCHED_ISO == p->policy && ISO_PRIO == p->prio) {
++ p->prio = NORMAL_PRIO;
++ p->deadline = rq->clock + task_deadline_diff(p);
++ update_task_priodl(p);
++ }
++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++ requeue_task(p, rq);
++ }
++}
++
++#ifdef CONFIG_SMP
++
++#define SCHED_RQ_NR_MIGRATION (32UL)
++/*
++ * Migrate pending tasks in @rq to @dest_rq.
++ * Will try to migrate at most the minimum of half of @rq's nr_running
++ * tasks and SCHED_RQ_NR_MIGRATION tasks to the destination CPU.
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, int filter_prio)
++{
++ struct task_struct *p;
++ int dest_cpu = cpu_of(dest_rq);
++ int nr_migrated = 0;
++ int nr_tries = min((rq->nr_running + 1) / 2, SCHED_RQ_NR_MIGRATION);
++ struct skiplist_node *node = rq->sl_header.next[0];
++
++ while (nr_tries && node != &rq->sl_header) {
++ p = skiplist_entry(node, struct task_struct, sl_node);
++ node = node->next[0];
++
++ if (task_running(p))
++ continue;
++ if (p->prio >= filter_prio)
++ break;
++ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) {
++ detach_task(rq, p, dest_cpu);
++ attach_task(dest_rq, p);
++ nr_migrated++;
++ }
++ nr_tries--;
++ /* make a jump: skip over the next task as well */
++ if (node == &rq->sl_header)
++ break;
++ node = node->next[0];
++ }
++
++ return nr_migrated;
++}
++
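++/*
++ * take_queued_task_cpumask - scan the CPUs in @chk_mask, trylocking each
++ * source runqueue and pulling its pending tasks with priority below
++ * @filter_prio onto @rq. Returns the number of tasks migrated.
++ */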
++static inline int
++take_queued_task_cpumask(struct rq *rq, cpumask_t *chk_mask, int filter_prio)
++{
++ int src_cpu;
++
++ for_each_cpu(src_cpu, chk_mask) {
++ int nr_migrated;
++ struct rq *src_rq = cpu_rq(src_cpu);
++
++ if (!do_raw_spin_trylock(&src_rq->lock)) {
++ if (PRIO_LIMIT == filter_prio)
++ continue;
++ return 0;
++ }
++ spin_acquire(&src_rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++ update_rq_clock(src_rq);
++ nr_migrated = migrate_pending_tasks(src_rq, rq, filter_prio);
++
++ spin_release(&src_rq->lock.dep_map, 1, _RET_IP_);
++ do_raw_spin_unlock(&src_rq->lock);
++
++ if (nr_migrated || PRIO_LIMIT != filter_prio)
++ return nr_migrated;
++ }
++ return 0;
++}
++
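++/*
++ * take_other_rq_task - build the mask of candidate source CPUs for
++ * @filter_prio, then walk the affinity masks outwards from @cpu (LLC
++ * first) trying to pull tasks onto @rq. Returns 1 when tasks were taken.
++ */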
++static inline int take_other_rq_task(struct rq *rq, int cpu, int filter_prio)
++{
++ struct cpumask *affinity_mask, *end;
++ struct cpumask chk;
++
++ if (PRIO_LIMIT == filter_prio) {
++ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]);
++#ifdef CONFIG_SMT_NICE
++ {
++ /* also try to take IDLE priority tasks from SMT suppressed CPUs */
++ struct cpumask t;
++ if (cpumask_and(&t, &sched_smt_supressed_mask,
++ &sched_rq_queued_masks[SCHED_RQ_IDLE]))
++ cpumask_or(&chk, &chk, &t);
++ }
++#endif
++ } else if (NORMAL_PRIO == filter_prio) {
++ cpumask_or(&chk, &sched_rq_pending_masks[SCHED_RQ_RT],
++ &sched_rq_pending_masks[SCHED_RQ_ISO]);
++ } else if (IDLE_PRIO == filter_prio) {
++ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]);
++ cpumask_andnot(&chk, &chk, &sched_rq_pending_masks[SCHED_RQ_IDLE]);
++ } else
++ cpumask_copy(&chk, &sched_rq_pending_masks[SCHED_RQ_RT]);
++
++ if (cpumask_empty(&chk))
++ return 0;
++
++ affinity_mask = per_cpu(sched_cpu_llc_start_mask, cpu);
++ end = per_cpu(sched_cpu_affinity_chk_end_masks, cpu);
++ do {
++ struct cpumask tmp;
++
++ if (cpumask_and(&tmp, &chk, affinity_mask) &&
++ take_queued_task_cpumask(rq, &tmp, filter_prio))
++ return 1;
++ } while (++affinity_mask < end);
++
++ return 0;
++}
++#endif
++
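++/*
++ * choose_next_task - pick the first queued task on @rq, after trying to
++ * pull pending tasks from other runqueues when they would preempt the
++ * local candidate.
++ */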
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++{
++ struct task_struct *next = rq_first_queued_task(rq);
++
++#ifdef CONFIG_SMT_NICE
++ if (cpumask_test_cpu(cpu, &sched_smt_supressed_mask)) {
++ if (next->prio >= IDLE_PRIO) {
++ if (rq->online &&
++ take_other_rq_task(rq, cpu, IDLE_PRIO))
++ return rq_first_queued_task(rq);
++ return rq->idle;
++ }
++ }
++#endif
++
++#ifdef CONFIG_SMP
++ if (likely(rq->online))
++ if (take_other_rq_task(rq, cpu, next->prio)) {
++ resched_curr(rq);
++ return rq_first_queued_task(rq);
++ }
++#endif
++ return next;
++}
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ return p->preempt_disable_ip;
++#else
++ return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ /* Save this before calling printk(), since that will clobber it */
++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++ if (oops_in_progress)
++ return;
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++ && in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++ if (panic_on_warn)
++ panic("scheduling while atomic\n");
++
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++ if (task_stack_end_corrupted(prev))
++ panic("corrupted stack end detected inside scheduler\n");
++#endif
++
++ if (unlikely(in_atomic_preempt_off())) {
++ __schedule_bug(prev);
++ preempt_count_set(PREEMPT_DISABLED);
++ }
++ rcu_sleep_check();
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq()->sched_count);
++}
++
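++/*
++ * set_rq_task - install @p as the task about to run on @rq: stamp its
++ * last_ran time, arm the hrtick for its remaining time slice (unless
++ * idle) and refresh rq->dither.
++ */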
++static inline void set_rq_task(struct rq *rq, struct task_struct *p)
++{
++ p->last_ran = rq->clock_task;
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (p != rq->idle)
++ hrtick_start(rq, US_TO_NS(p->time_slice));
++#endif
++ /* update rq->dither */
++ rq->dither = rq_dither(rq);
++}
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ * paths. For example, see arch/x86/entry_64.S.
++ *
++ * To drive preemption between tasks, the scheduler sets the flag in timer
++ * interrupt handler scheduler_tick().
++ *
++ * 3. Wakeups don't really cause entry into schedule(). They add a
++ * task to the run-queue and that's it.
++ *
++ * Now, if the new task added to the run-queue preempts the current
++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ * called on the nearest possible occasion:
++ *
++ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ * - in syscall or exception context, at the next outermost
++ * preempt_enable(). (this might be as soon as the wake_up()'s
++ * spin_unlock()!)
++ *
++ * - in IRQ context, return from interrupt-handler to
++ * preemptible context
++ *
++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ * then at the next:
++ *
++ * - cond_resched() call
++ * - explicit schedule() call
++ * - return from syscall or exception to user-space
++ * - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++ struct task_struct *prev, *next;
++ unsigned long *switch_count;
++ struct rq *rq;
++ int cpu;
++
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ prev = rq->curr;
++
++ schedule_debug(prev);
++
++ /* bypass the sched_feat(HRTICK) check, which PDS doesn't support */
++ hrtick_clear(rq);
++
++ local_irq_disable();
++ rcu_note_context_switch(preempt);
++
++ /*
++ * Make sure that signal_pending_state()->signal_pending() below
++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++ * done by the caller to avoid the race with signal_wake_up().
++ *
++ * The membarrier system call requires a full memory barrier
++ * after coming from user-space, before storing to rq->curr.
++ */
++ raw_spin_lock(&rq->lock);
++ smp_mb__after_spinlock();
++
++ update_rq_clock(rq);
++
++ switch_count = &prev->nivcsw;
++ if (!preempt && prev->state) {
++ if (signal_pending_state(prev->state, prev)) {
++ prev->state = TASK_RUNNING;
++ } else {
++ deactivate_task(prev, rq);
++
++ if (prev->in_iowait) {
++ atomic_inc(&rq->nr_iowait);
++ delayacct_blkio_start();
++ }
++ }
++ switch_count = &prev->nvcsw;
++ }
++
++ clear_tsk_need_resched(prev);
++ clear_preempt_need_resched();
++
++ check_deadline(prev, rq);
++
++ next = choose_next_task(rq, cpu, prev);
++
++ set_rq_task(rq, next);
++
++ if (prev != next) {
++ if (next->prio == PRIO_LIMIT)
++ schedstat_inc(rq->sched_goidle);
++
++ rq->curr = next;
++ /*
++ * The membarrier system call requires each architecture
++ * to have a full memory barrier after updating
++ * rq->curr, before returning to user-space.
++ *
++ * Here are the schemes providing that barrier on the
++ * various architectures:
++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++ * - finish_lock_switch() for weakly-ordered
++ * architectures where spin_unlock is a full barrier,
++ * - switch_to() for arm64 (weakly-ordered, spin_unlock
++ * is a RELEASE barrier),
++ */
++ ++*switch_count;
++ rq->nr_switches++;
++
++ trace_sched_switch(preempt, prev, next);
++
++ /* Also unlocks the rq: */
++ rq = context_switch(rq, prev, next);
++#ifdef CONFIG_SCHED_SMT
++ pds_sg_balance_check(rq);
++#endif
++ } else
++ raw_spin_unlock_irq(&rq->lock);
++}
++
++void __noreturn do_task_dead(void)
++{
++ /* Causes final put_task_struct in finish_task_switch(): */
++ set_special_state(TASK_DEAD);
++
++ /* Tell freezer to ignore us: */
++ current->flags |= PF_NOFREEZE;
++ __schedule(false);
++
++ BUG();
++
++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++ for (;;)
++ cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
++ signal_pending_state(tsk->state, tsk))
++ return;
++
++ /*
++ * If a worker went to sleep, notify and ask workqueue whether
++ * it wants to wake up a task to maintain concurrency.
++ * As this function is called inside the schedule() context,
++ * we disable preemption to avoid it calling schedule() again
++ * in the possible wakeup of a kworker.
++ */
++ if (tsk->flags & PF_WQ_WORKER) {
++ preempt_disable();
++ wq_worker_sleeping(tsk);
++ preempt_enable_no_resched();
++ }
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_running(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ preempt_disable();
++ __schedule(false);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++ sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++ /*
++ * As this skips calling sched_submit_work(), which the idle task does
++ * regardless because that function is a nop when the task is in a
++ * TASK_RUNNING state, make sure this isn't used someplace that the
++ * current task can be in any other state. Note, idle is always in the
++ * TASK_RUNNING state.
++ */
++ WARN_ON_ONCE(current->state);
++ do {
++ __schedule(false);
++ } while (need_resched());
++}
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != CONTEXT_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ sched_preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ __schedule(true);
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ } while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * this is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemptions off return from interrupt
++ * occur there and call schedule directly.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(!preemptible()))
++ return;
++
++ preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++ enum ctx_state prev_ctx;
++
++ if (likely(!preemptible()))
++ return;
++
++ do {
++ /*
++ * Because the function tracer can trace preempt_count_sub()
++ * and it also uses preempt_enable/disable_notrace(), if
++ * NEED_RESCHED is set, the preempt_enable_notrace() called
++ * by the function tracer will call this function again and
++ * cause infinite recursion.
++ *
++ * Preemption must be disabled here before the function
++ * tracer can trace. Break up preempt_disable() into two
++ * calls. One to disable preemption without fear of being
++ * traced. The other to still record the preemption latency,
++ * which can also be traced by the function tracer.
++ */
++ preempt_disable_notrace();
++ preempt_latency_start(1);
++ /*
++ * Needs preempt disabled in case user_exit() is traced
++ * and the tracer calls preempt_enable_notrace() causing
++ * an infinite recursion.
++ */
++ prev_ctx = exception_enter();
++ __schedule(true);
++ exception_exit(prev_ctx);
++
++ preempt_latency_stop(1);
++ preempt_enable_no_resched_notrace();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * this is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ preempt_disable();
++ local_irq_enable();
++ __schedule(true);
++ local_irq_disable();
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void
++check_task_changed(struct rq *rq, struct task_struct *p)
++{
++ /*
++ * Trigger changes when the task's priority/deadline is modified.
++ */
++ if (task_on_rq_queued(p)) {
++ struct task_struct *first;
++
++ requeue_task(p, rq);
++
++ /* Resched if first queued task not running and not IDLE */
++ if ((first = rq_first_queued_task(rq)) != rq->curr &&
++ !task_running_idle(first))
++ resched_curr(rq);
++ }
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_access_lock(p, &lock);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that a load of trickery is required to make this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a nono in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ p->prio = prio;
++ update_task_priodl(p);
++
++ check_task_changed(rq, p);
++
++out_unlock:
++ __task_access_unlock(p, lock);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static;
++ unsigned long flags;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ rq = __task_access_lock(p, &lock);
++
++ /* the rq lock may not be held!! */
++ update_rq_clock(rq);
++
++ p->static_prio = new_static;
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling until the task becomes
++ * SCHED_NORMAL/SCHED_BATCH again:
++ */
++ if (task_has_rt_policy(p))
++ goto out_unlock;
++
++ p->deadline -= task_deadline_diff(p);
++ p->deadline += static_deadline_diff(new_static);
++ p->prio = effective_prio(p);
++ update_task_priodl(p);
++
++ check_task_changed(rq, p);
++out_unlock:
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* Convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0(SCHED_ISO) up to 82 (nice +19 SCHED_IDLE).
++ */
++int task_prio(const struct task_struct *p)
++{
++ int level, prio = p->prio - MAX_RT_PRIO;
++ static const int level_to_nice_prio[] = {39, 33, 26, 20, 14, 7, 0, 0};
++
++ /* rt tasks */
++ if (prio <= 0)
++ goto out;
++
++ preempt_disable();
++ level = task_deadline_level(p, this_rq());
++ preempt_enable();
++ prio += level_to_nice_prio[level];
++ if (idleprio_task(p))
++ prio += NICE_WIDTH;
++out:
++ return prio;
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++#ifdef CONFIG_SMP
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++ struct sched_param start_param = { .sched_priority = 0 };
++ struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++ if (stop) {
++ /*
++ * Make it appear like a SCHED_FIFO task, it's something
++ * userspace knows about and won't get confused about.
++ *
++ * Also, it will make PI more or less work without too
++ * much confusion -- but then, stop work should not
++ * rely on PI working anyway.
++ */
++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++ }
++
++ cpu_rq(cpu)->stop = stop;
++
++ if (old_stop) {
++ /*
++ * Reset it back to a normal scheduling policy so that
++ * it can die in pieces.
++ */
++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++ }
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ int dest_cpu;
++ unsigned long flags;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++ int ret = 0;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ rq = __task_access_lock(p, &lock);
++
++ if (p->flags & PF_KTHREAD) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++
++ /*
++ * Must re-check here, to close a race against __kthread_bind(),
++ * sched_setaffinity() is not guaranteed to observe the flag.
++ */
++ if (check && (p->flags & PF_NO_SETAFFINITY)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (cpumask_equal(&p->cpus_allowed, new_mask))
++ goto out;
++
++ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ do_set_cpus_allowed(p, new_mask);
++
++ if (p->flags & PF_KTHREAD) {
++ /*
++ * For kernel threads that do indeed end up on online &&
++ * !active we want to ensure they are strict per-CPU threads.
++ */
++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
++ !cpumask_intersects(new_mask, cpu_active_mask) &&
++ p->nr_cpus_allowed != 1);
++ }
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpumask_test_cpu(task_cpu(p), new_mask))
++ goto out;
++
++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++ if (task_running(p) || p->state == TASK_WAKING) {
++ struct migration_arg arg = { p, dest_cpu };
++
++ /* Need help from migration thread: drop lock and wait. */
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ return 0;
++ }
++ if (task_on_rq_queued(p)) {
++ /*
++ * OK, since we're going to drop the lock immediately
++ * afterwards anyway.
++ */
++ update_rq_clock(rq);
++ rq = move_queued_task(rq, p, dest_cpu);
++ lock = &rq->lock;
++ }
++
++out:
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++ return __set_cpus_allowed_ptr(p, new_mask, false);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++#else
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++ const struct cpumask *new_mask, bool check)
++{
++ return set_cpus_allowed_ptr(p, new_mask);
++}
++#endif
++
++static u64 task_init_deadline(const struct task_struct *p)
++{
++ return task_rq(p)->clock + task_deadline_diff(p);
++}
++
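++/*
++ * Per-policy hooks used to (re)initialise a task's deadline when its
++ * policy changes; the NULL entries (the RT policies) cause the deadline
++ * to be reset to 0 instead.
++ */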
++u64 (* task_init_deadline_func_tbl[])(const struct task_struct *p) = {
++ task_init_deadline, /* SCHED_NORMAL */
++ NULL, /* SCHED_FIFO */
++ NULL, /* SCHED_RR */
++ task_init_deadline, /* SCHED_BATCH */
++ NULL, /* SCHED_ISO */
++ task_init_deadline /* SCHED_IDLE */
++};
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++ const struct sched_attr *attr)
++{
++ int old_policy = p->policy;
++ int policy = attr->sched_policy;
++
++ if (policy == SETPARAM_POLICY)
++ policy = p->policy;
++
++ p->policy = policy;
++
++ /*
++ * allow the normal nice value to be set, but it will not have any
++ * effect on scheduling until the task becomes SCHED_NORMAL/
++ * SCHED_BATCH again
++ */
++ p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++ /*
++ * __sched_setscheduler() ensures attr->sched_priority == 0 when
++ * !rt_policy. Always setting this ensures that things like
++ * getparam()/getattr() don't report silly values for !rt tasks.
++ */
++ p->rt_priority = attr->sched_priority;
++ p->normal_prio = normal_prio(p);
++
++ if (old_policy != policy)
++ p->deadline = (task_init_deadline_func_tbl[p->policy])?
++ task_init_deadline_func_tbl[p->policy](p):0ULL;
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct rq *rq, struct task_struct *p,
++ const struct sched_attr *attr, bool keep_boost)
++{
++ __setscheduler_params(p, attr);
++
++ /*
++ * Keep a potential priority boosting if called from
++ * sched_setscheduler().
++ */
++ p->prio = normal_prio(p);
++ if (keep_boost)
++ p->prio = rt_effective_prio(p, p->prio);
++ update_task_priodl(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
++static int
++__sched_setscheduler(struct task_struct *p,
++ const struct sched_attr *attr, bool user, bool pi)
++{
++ const struct sched_attr dl_squash_attr = {
++ .size = sizeof(struct sched_attr),
++ .sched_policy = SCHED_FIFO,
++ .sched_nice = 0,
++ .sched_priority = 99,
++ };
++ int newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++ int retval, oldpolicy = -1;
++ int policy = attr->sched_policy;
++ unsigned long flags;
++ struct rq *rq;
++ int reset_on_fork;
++ raw_spinlock_t *lock;
++
++ /* The pi code expects interrupts enabled */
++ BUG_ON(pi && in_interrupt());
++
++ /*
++ * PDS supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++ */
++ if (unlikely(SCHED_DEADLINE == policy)) {
++ attr = &dl_squash_attr;
++ policy = attr->sched_policy;
++ newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++ }
++recheck:
++ /* Double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++ if (policy > SCHED_IDLE)
++ return -EINVAL;
++ }
++
++ if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++ return -EINVAL;
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH and SCHED_IDLE is 0.
++ */
++ if (attr->sched_priority < 0 ||
++ (p->mm && attr->sched_priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++ (attr->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (SCHED_FIFO == policy || SCHED_RR == policy) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (attr->sched_priority > p->rt_priority &&
++ attr->sched_priority > rlim_rtprio)
++ return -EPERM;
++ }
++
++ /* Can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++ /*
++ * To be able to change p->policy safely, task_access_lock()
++ * must be called.
++ * If task_access_lock() is used here:
++ * for a task p which is not running, reading rq->stop is
++ * racy but acceptable, as ->stop doesn't change much.
++ * An enhancement could be made to read rq->stop safely.
++ */
++ rq = __task_access_lock(p, &lock);
++
++ /*
++ * Changing the policy of the stop thread is a very bad idea
++ */
++ if (p == rq->stop) {
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further:
++ */
++ if (unlikely(policy == p->policy)) {
++ if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++ goto change;
++ if (!rt_policy(policy) &&
++ NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++ goto change;
++
++ p->sched_reset_on_fork = reset_on_fork;
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ return 0;
++ }
++change:
++
++ /* Re-check policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ goto recheck;
++ }
++
++ p->sched_reset_on_fork = reset_on_fork;
++
++ if (pi) {
++ /*
++ * Take priority boosted tasks into account. If the new
++ * effective priority is unchanged, we just store the new
++ * normal parameters and do not touch the scheduler class and
++ * the runqueue. This will be done when the task deboost
++ * itself.
++ */
++ if (rt_effective_prio(p, newprio) == p->prio) {
++ __setscheduler_params(p, attr);
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ return 0;
++ }
++ }
++
++ __setscheduler(rq, p, attr, pi);
++
++ check_task_changed(rq, p);
++
++ /* Avoid rq from going away on us: */
++ preempt_disable();
++ __task_access_unlock(p, lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ if (pi)
++ rt_mutex_adjust_pi(p);
++
++ preempt_enable();
++
++ return 0;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool check)
++{
++ struct sched_attr attr = {
++ .sched_policy = policy,
++ .sched_priority = param->sched_priority,
++ .sched_nice = PRIO_TO_NICE(p->static_prio),
++ };
++
++ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++ policy &= ~SCHED_RESET_ON_FORK;
++ attr.sched_policy = policy;
++ }
++
++ return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, true);
++}
++
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, false, true);
++}
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, false);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
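++/*
++ * For example (sketch only): an in-kernel caller such as stop_machine()
++ * could promote a kthread to RT without a capability check via
++ *
++ * struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 };
++ * sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
++ */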
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /* Zero the full structure, so that a short copy will be nice: */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ /* Bail out on silly large: */
++ if (size > PAGE_SIZE)
++ goto err_size;
++
++ /* ABI compatibility quirk: */
++ if (!size)
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: Do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
++ */
++ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (pid < 0)
++ goto out_nounlock;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy;
++ }
++ rcu_read_unlock();
++
++out_nounlock:
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval = -EINVAL;
++
++ if (!param || pid < 0)
++ goto out_nounlock;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (task_has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
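++/*
++ * sched_read_attr - copy @attr back to a user buffer of @usize bytes,
++ * shrinking the reported size for old user-space; fails with -EFBIG if
++ * non-zero fields would be lost.
++ */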
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ /* sched/core.c uses zero here but we already know ret is zero */
++ return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (rt_task(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ get_online_cpus();
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ put_online_cpus();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_unlock;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++ retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ cpumask_copy(new_mask, cpus_allowed);
++ goto again;
++ }
++ }
++out_unlock:
++ free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++ free_cpumask_var(cpus_allowed);
++out_put_task:
++ put_task_struct(p);
++ put_online_cpus();
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ struct cpumask *new_mask)
++{
++ if (len < cpumask_size())
++ cpumask_clear(new_mask);
++ else if (len > cpumask_size())
++ len = cpumask_size();
++
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_var_t new_mask;
++ int retval;
++
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++ if (retval == 0)
++ retval = sched_setaffinity(pid, new_mask);
++ free_cpumask_var(new_mask);
++ return retval;
++}
++
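[Editor's aside: userspace reaches this through the glibc wrapper; an illustrative pinning sketch, not part of the patch.]

    /* Illustrative only: pin the calling thread to CPU 0. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	cpu_set_t set;

    	CPU_ZERO(&set);
    	CPU_SET(0, &set);
    	if (sched_setaffinity(0, sizeof(set), &set) != 0)
    		perror("sched_setaffinity");
    	return 0;
    }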
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++ struct task_struct *p;
++ raw_spinlock_t *lock;
++ unsigned long flags;
++ int retval;
++
++ rcu_read_lock();
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ task_access_lock_irqsave(p, &lock, &flags);
++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_var_t mask;
++
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
++ return -EINVAL;
++
++ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sched_getaffinity(pid, mask);
++ if (ret == 0) {
++ unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
++ ret = -EFAULT;
++ else
++ ret = retlen;
++ }
++ free_cpumask_var(mask);
++
++ return ret;
++}
++
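[Editor's aside: note the raw syscall returns the number of bytes copied, while the glibc wrapper translates that to 0 on success. Illustrative probe, sketch only.]

    /* Illustrative only: count the CPUs in the caller's affinity mask. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	cpu_set_t set;

    	CPU_ZERO(&set);
    	if (sched_getaffinity(0, sizeof(set), &set) == 0)
    		printf("CPUs in affinity mask: %d\n", CPU_COUNT(&set));
    	return 0;
    }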
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. It does this by
++ * scheduling away the current task. If it still has the earliest deadline
++ * it will be scheduled again as the next task.
++ *
++ * Return: 0.
++ */
++static void do_sched_yield(void)
++{
++ struct rq *rq;
++ struct rq_flags rf;
++
++ if (!sched_yield_type)
++ return;
++
++ rq = this_rq_lock_irq(&rf);
++
++ if (sched_yield_type > 1) {
++ time_slice_expired(current, rq);
++ requeue_task(current, rq);
++ }
++ schedstat_inc(rq->yld_count);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ preempt_disable();
++ raw_spin_unlock(&rq->lock);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++}
++
++SYSCALL_DEFINE0(sched_yield)
++{
++ do_sched_yield();
++ return 0;
++}
++
++#ifndef CONFIG_PREEMPT
++int __sched _cond_resched(void)
++{
++ if (should_resched(0)) {
++ preempt_schedule_common();
++ return 1;
++ }
++ rcu_all_qs();
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run; if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
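[Editor's aside: for contrast with the broken loop shown in the comment above, the recommended wait_event() shape looks roughly like this. Kernel-side sketch; the wait queue and flag are hypothetical.]

    /* Sketch only; my_wq/my_event_flag are hypothetical (<linux/wait.h>). */
    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static int my_event_flag;

    /* waiter side: sleep until the condition holds */
    wait_event(my_wq, my_event_flag != 0);

    /* producer side: make the condition true, then wake the waiters */
    my_event_flag = 1;
    wake_up(&my_wq);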
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In PDS, yield_to() is not supported; it is a no-op that always returns 0.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++ int old_iowait = current->in_iowait;
++
++ current->in_iowait = 1;
++ blk_schedule_flush_plug(current);
++
++ return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++ current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++ int token;
++ long ret;
++
++ token = io_schedule_prepare();
++ ret = schedule_timeout(timeout);
++ io_schedule_finish(token);
++
++ return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void io_schedule(void)
++{
++ int token;
++
++ token = io_schedule_prepare();
++ schedule();
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLE:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_ISO:
++ case SCHED_IDLE:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
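[Editor's aside: these two syscalls keep the same RT range as mainline (min 1, max MAX_USER_RT_PRIO-1). A quick userspace check, sketch only.]

    /* Illustrative only: print the SCHED_FIFO priority range. */
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	printf("SCHED_FIFO priority range: %d..%d\n",
    	       sched_get_priority_min(SCHED_FIFO),
    	       sched_get_priority_max(SCHED_FIFO));
    	return 0;
    }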
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++ struct task_struct *p;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++ rcu_read_unlock();
++
++ *t = ns_to_timespec64(MS_TO_NS(rr_interval));
++ return 0;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct __kernel_timespec __user *, interval)
++{
++ struct timespec64 t;
++ int retval = sched_rr_get_interval(pid, &t);
++
++ if (retval == 0)
++ retval = put_timespec64(&t, interval);
++
++ return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++ struct old_timespec32 __user *, interval)
++{
++ struct timespec64 t;
++ int retval = sched_rr_get_interval(pid, &t);
++
++ if (retval == 0)
++ retval = put_old_timespec32(&t, interval);
++ return retval;
++}
++#endif
++
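[Editor's aside: worth noting that PDS reports the global rr_interval here for every task, not a per-policy timeslice as mainline does. Userspace probe, sketch only, glibc wrapper assumed.]

    /* Illustrative only: read the reported timeslice. */
    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
    	struct timespec ts;

    	if (sched_rr_get_interval(0, &ts) == 0)
    		printf("timeslice: %ld.%09ld s\n",
    		       (long)ts.tv_sec, ts.tv_nsec);
    	return 0;
    }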
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ int ppid;
++
++ if (!try_get_task_stack(p))
++ return;
++
++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
++
++ if (p->state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ free = stack_not_used(p);
++#endif
++ ppid = 0;
++ rcu_read_lock();
++ if (pid_alive(p))
++ ppid = task_pid_nr(rcu_dereference(p->real_parent));
++ rcu_read_unlock();
++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
++ task_pid_nr(p), ppid,
++ (unsigned long)task_thread_info(p)->flags);
++
++ print_worker_info(KERN_INFO, p);
++ show_stack(p, NULL);
++ put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++ /* no filter, everything matches */
++ if (!state_filter)
++ return true;
++
++ /* filter, but doesn't match */
++ if (!(p->state & state_filter))
++ return false;
++
++ /*
++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++ * TASK_KILLABLE).
++ */
++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++ return false;
++
++ return true;
++}
++
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ "  task                PC stack   pid father\n");
++#else
++ printk(KERN_INFO
++ "  task                        PC stack   pid father\n");
++#endif
++ rcu_read_lock();
++ for_each_process_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all files on a slow
++ * console might take a lot of time:
++ * Also, reset softlockup watchdogs on all CPUs, because
++ * another CPU might be blocked waiting for us to process
++ * an IPI.
++ */
++ touch_nmi_watchdog();
++ touch_all_softlockup_watchdogs();
++ if (state_filter_match(state_filter, p))
++ sched_show_task(p);
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ /* PDS TODO: should support this
++ if (!state_filter)
++ sysrq_sched_debug_show();
++ */
++#endif
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&idle->pi_lock, flags);
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++
++ idle->last_ran = rq->clock_task;
++ idle->state = TASK_RUNNING;
++ idle->flags |= PF_IDLE;
++ /* Setting prio to illegal value shouldn't matter when never queued */
++ idle->prio = PRIO_LIMIT;
++ idle->deadline = rq_clock(rq) + task_deadline_diff(idle);
++ update_task_priodl(idle);
++
++ kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++ /*
++ * It's possible that init_idle() gets called multiple times on a task,
++ * in that case do_set_cpus_allowed() will not do the right thing.
++ *
++ * And since this is boot we can forgo the serialisation.
++ */
++ set_cpus_allowed_common(idle, cpumask_of(cpu));
++#endif
++
++ /* Silence PROVE_RCU */
++ rcu_read_lock();
++ __set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_cpu = 1;
++
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (cpu_online(cpu) || cpu == smp_processor_id())
++ resched_curr(cpu_rq(cpu));
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++ struct wake_q_node *node = &task->wake_q;
++
++ /*
++ * Atomically grab the task; if ->wake_q is already non-NULL it means
++ * it's already queued (either by us or someone else) and will get the
++ * wakeup due to that.
++ *
++ * In order to ensure that a pending wakeup will observe our pending
++ * state, even in the failed case, an explicit smp_mb() must be used.
++ */
++ smp_mb__before_atomic();
++ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++ return false;
++
++ /*
++ * The head is context local, there can be no concurrency.
++ */
++ *head->lastp = node;
++ head->lastp = &node->next;
++ return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++ if (__wake_q_add(head, task))
++ get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++ if (!__wake_q_add(head, task))
++ put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++ struct wake_q_node *node = head->first;
++
++ while (node != WAKE_Q_TAIL) {
++ struct task_struct *task;
++
++ task = container_of(node, struct task_struct, wake_q);
++ BUG_ON(!task);
++ /* task can safely be re-inserted now: */
++ node = node->next;
++ task->wake_q.next = NULL;
++
++ /*
++ * wake_up_process() executes a full barrier, which pairs with
++ * the queueing in wake_q_add() so as not to miss wakeups.
++ */
++ wake_up_process(task);
++ put_task_struct(task);
++ }
++}
++
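[Editor's aside: the typical in-kernel usage pattern for the wake_q API above is to collect wakeups while a lock is held, then issue them after dropping it. Sketch; the object, lock and waiter list are hypothetical.]

    /* Sketch only; obj/waiters are hypothetical (<linux/sched/wake_q.h>). */
    DEFINE_WAKE_Q(wake_q);
    struct my_waiter *w;

    spin_lock(&obj->lock);
    list_for_each_entry(w, &obj->waiters, node)
    	wake_q_add(&wake_q, w->task);	/* takes a task reference */
    spin_unlock(&obj->lock);

    wake_up_q(&wake_q);	/* wakes each task and drops the reference */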
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++ const struct cpumask __maybe_unused *trial)
++{
++ return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their CPU
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary. Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY)
++ ret = -EINVAL;
++
++ return ret;
++}
++
++static bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu)
++{
++}
++
++void select_nohz_load_balancer(int stop_tick)
++{
++}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU would add more delay to the timers than intended
++ * (as that CPU's timer base may not be up to date with jiffies etc.).
++ */
++int get_nohz_timer_target(void)
++{
++ int i, cpu = smp_processor_id();
++ struct cpumask *mask;
++
++ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ return cpu;
++
++ for (mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]);
++ mask < per_cpu(sched_cpu_affinity_chk_end_masks, cpu); mask++)
++ for_each_cpu(i, mask)
++ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER))
++ return i;
++
++ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
++ cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++
++ return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++ if (cpu == smp_processor_id())
++ return;
++
++ set_tsk_need_resched(cpu_rq(cpu)->idle);
++ smp_send_reschedule(cpu);
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++ wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm) {
++ switch_mm(mm, &init_mm, current);
++ current->active_mm = &init_mm;
++ finish_arch_post_lock_switch();
++ }
++ mmdrop(mm);
++}
++
++/*
++ * Migrate all tasks from the rq, sleeping tasks will be migrated by
++ * try_to_wake_up()->select_task_rq().
++ *
++ * Called with rq->lock held even though we're in stop_machine() and
++ * there's no concurrency possible, we hold the required locks anyway
++ * because of lock validation efforts.
++ */
++static void migrate_tasks(struct rq *dead_rq)
++{
++ struct rq *rq = dead_rq;
++ struct task_struct *p, *stop = rq->stop;
++ struct skiplist_node *node;
++ int count = 0;
++
++ /*
++ * Fudge the rq selection such that the below task selection loop
++ * doesn't get stuck on the currently eligible stop task.
++ *
++ * We're currently inside stop_machine() and the rq is either stuck
++ * in the stop_machine_cpu_stop() loop, or we're executing this code,
++ * either way we should never end up calling schedule() until we're
++ * done here.
++ */
++ rq->stop = NULL;
++
++ node = &rq->sl_header;
++ while ((node = node->next[0]) != &rq->sl_header) {
++ int dest_cpu;
++
++ p = skiplist_entry(node, struct task_struct, sl_node);
++
++ /* skip the running task */
++ if (task_running(p))
++ continue;
++
++ /*
++ * Rules for changing task_struct::cpus_allowed are holding
++ * both pi_lock and rq->lock, such that holding either
++ * stabilizes the mask.
++ *
++ * Dropping rq->lock is not quite as disastrous as it usually is
++ * because !cpu_active at this point, which means load-balancing
++ * will not interfere. Also, we're inside stop-machine.
++ */
++ raw_spin_unlock(&rq->lock);
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++
++ /*
++ * Since we're inside stop-machine, _nothing_ should have
++ * changed the task, WARN if weird stuff happened, because in
++ * that case the above rq->lock drop is a fail too.
++ */
++ if (WARN_ON(task_rq(p) != rq || !task_on_rq_queued(p))) {
++ raw_spin_unlock(&p->pi_lock);
++ continue;
++ }
++
++ count++;
++ /* Find suitable destination for @next, with force if needed. */
++ dest_cpu = select_fallback_rq(dead_rq->cpu, p);
++
++ rq = __migrate_task(rq, p, dest_cpu);
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock(&p->pi_lock);
++
++ rq = dead_rq;
++ raw_spin_lock(&rq->lock);
++ /* Re-scan the queued tasks from the skiplist header */
++ node = &rq->sl_header;
++ }
++
++ rq->stop = stop;
++}
++
++static void set_rq_offline(struct rq *rq)
++{
++ if (rq->online)
++ rq->online = false;
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_online(struct rq *rq)
++{
++ if (!rq->online)
++ rq->online = true;
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++
++static __read_mostly int sched_debug_enabled;
++
++static int __init sched_debug_setup(char *str)
++{
++ sched_debug_enabled = 1;
++
++ return 0;
++}
++early_param("sched_debug", sched_debug_setup);
++
++static inline bool sched_debug(void)
++{
++ return sched_debug_enabled;
++}
++#else /* !CONFIG_SCHED_DEBUG */
++static inline bool sched_debug(void)
++{
++ return false;
++}
++#endif /* CONFIG_SCHED_DEBUG */
++
++#ifdef CONFIG_SMP
++void scheduler_ipi(void)
++{
++ /*
++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
++ * TIF_NEED_RESCHED remotely (for the first time) will also send
++ * this IPI.
++ */
++ preempt_fold_need_resched();
++
++ if (!idle_cpu(smp_processor_id()) || need_resched())
++ return;
++
++ irq_enter();
++ irq_exit();
++}
++
++void wake_up_if_idle(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rcu_read_lock();
++
++ if (!is_idle_task(rcu_dereference(rq->curr)))
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (is_idle_task(rq->curr))
++ smp_send_reschedule(cpu);
++ /* Else CPU is not idle, do nothing here */
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ }
++
++out:
++ rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Topology list, bottom-up.
++ */
++static struct sched_domain_topology_level default_topology[] = {
++#ifdef CONFIG_SCHED_SMT
++ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
++#endif
++#ifdef CONFIG_SCHED_MC
++ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
++#endif
++ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
++ { NULL, },
++};
++
++static struct sched_domain_topology_level *sched_domain_topology =
++ default_topology;
++
++#define for_each_sd_topology(tl) \
++ for (tl = sched_domain_topology; tl->mask; tl++)
++
++void set_sched_topology(struct sched_domain_topology_level *tl)
++{
++ if (WARN_ON_ONCE(sched_smp_initialized))
++ return;
++
++ sched_domain_topology = tl;
++}
++
++/*
++ * Initializers for schedule domains
++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
++ */
++
++int sched_domain_level_max;
++
++/*
++ * Partition sched domains as specified by the 'ndoms_new'
++ * cpumasks in the array doms_new[] of cpumasks. This compares
++ * doms_new[] to the current sched domain partitioning, doms_cur[].
++ * It destroys each deleted domain and builds each new domain.
++ *
++ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
++ * The masks don't intersect (don't overlap). We should set up one
++ * sched domain for each mask. CPUs not in any of the cpumasks will
++ * not be load balanced. If the same cpumask appears both in the
++ * current 'doms_cur' domains and in the new 'doms_new', we can leave
++ * it as it is.
++ *
++ * The passed in 'doms_new' should be allocated using
++ * alloc_sched_domains. This routine takes ownership of it and will
++ * free_sched_domains it when done with it. If the caller failed the
++ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
++ * and partition_sched_domains() will fall back to the single partition
++ * 'fallback_doms', it also forces the domains to be rebuilt.
++ *
++ * If doms_new == NULL it will be replaced with cpu_online_mask.
++ * ndoms_new == 0 is a special case for destroying existing domains,
++ * and it will not create the default domain.
++ *
++ * Call with hotplug lock held
++ */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++ struct sched_domain_attr *dattr_new)
++{
++ /*
++ * PDS doesn't depend on sched domains; keep this API as an empty stub.
++ */
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask. If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++ if (cpuhp_tasks_frozen) {
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ partition_sched_domains(1, NULL, NULL);
++ if (--num_cpus_frozen)
++ return;
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++ cpuset_force_rebuild();
++ }
++
++ cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++ if (!cpuhp_tasks_frozen) {
++ cpuset_update_active_cpus();
++ } else {
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ }
++ return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * When going up, increment the number of cores with SMT present.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++ set_cpu_active(cpu, true);
++
++ if (sched_smp_initialized)
++ cpuset_cpu_active();
++
++ /*
++ * Put the rq online, if not already. This happens:
++ *
++ * 1) In the early boot process, because we build the real domains
++ * after all cpus have been brought up.
++ *
++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++ * domains.
++ */
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ set_rq_online(rq);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++ int ret;
++
++ set_cpu_active(cpu, false);
++ /*
++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++ * users of this state to go away such that all new such users will
++ * observe it.
++ *
++ * Do sync before park smpboot threads to take care the rcu boost case.
++ */
++ synchronize_rcu();
++
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * When going down, decrement the number of cores with SMT present.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++
++ if (!sched_smp_initialized)
++ return 0;
++
++ ret = cpuset_cpu_inactive(cpu);
++ if (ret) {
++ set_cpu_active(cpu, true);
++ return ret;
++ }
++ return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++ sched_rq_cpu_starting(cpu);
++ sched_tick_start(cpu);
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++int sched_cpu_dying(unsigned int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ sched_tick_stop(cpu);
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ set_rq_offline(rq);
++ migrate_tasks(rq);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ hrtick_clear(rq);
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++ int cpu, level;
++ cpumask_t *tmp;
++
++ for_each_possible_cpu(cpu) {
++ for (level = 0; level < NR_CPU_AFFINITY_CHK_LEVEL; level++) {
++ tmp = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[level]);
++ cpumask_copy(tmp, cpu_possible_mask);
++ cpumask_clear_cpu(cpu, tmp);
++ }
++ per_cpu(sched_cpu_llc_start_mask, cpu) =
++ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]);
++ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) =
++ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[1]);
++ }
++}
++
++static void sched_init_topology_cpumask(void)
++{
++ int cpu;
++ cpumask_t *chk;
++
++ for_each_online_cpu(cpu) {
++ chk = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]);
++
++#ifdef CONFIG_SCHED_SMT
++ cpumask_setall(chk);
++ cpumask_clear_cpu(cpu, chk);
++ if (cpumask_and(chk, chk, topology_sibling_cpumask(cpu))) {
++ per_cpu(sched_sibling_cpu, cpu) = cpumask_first(chk);
++ printk(KERN_INFO "pds: cpu #%d affinity check mask - smt 0x%08lx",
++ cpu, (chk++)->bits[0]);
++ }
++#endif
++#ifdef CONFIG_SCHED_MC
++ cpumask_setall(chk);
++ cpumask_clear_cpu(cpu, chk);
++ if (cpumask_and(chk, chk, cpu_coregroup_mask(cpu))) {
++ per_cpu(sched_cpu_llc_start_mask, cpu) = chk;
++ printk(KERN_INFO "pds: cpu #%d affinity check mask - coregroup 0x%08lx",
++ cpu, (chk++)->bits[0]);
++ }
++ cpumask_complement(chk, cpu_coregroup_mask(cpu));
++
++ /* Set up sd_llc_id per CPU */
++ per_cpu(sd_llc_id, cpu) =
++ cpumask_first(cpu_coregroup_mask(cpu));
++#else
++ per_cpu(sd_llc_id, cpu) =
++ cpumask_first(topology_core_cpumask(cpu));
++
++ per_cpu(sched_cpu_llc_start_mask, cpu) = chk;
++
++ cpumask_setall(chk);
++ cpumask_clear_cpu(cpu, chk);
++#endif /* NOT CONFIG_SCHED_MC */
++ if (cpumask_and(chk, chk, topology_core_cpumask(cpu)))
++ printk(KERN_INFO "pds: cpu #%d affinity check mask - core 0x%08lx",
++ cpu, (chk++)->bits[0]);
++ cpumask_complement(chk, topology_core_cpumask(cpu));
++
++ if (cpumask_and(chk, chk, cpu_online_mask))
++ printk(KERN_INFO "pds: cpu #%d affinity check mask - others 0x%08lx",
++ cpu, (chk++)->bits[0]);
++
++ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) = chk;
++ }
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
++ BUG();
++
++ cpumask_copy(&sched_rq_queued_masks[SCHED_RQ_EMPTY], cpu_online_mask);
++
++ sched_init_topology_cpumask();
++
++ sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++ struct cgroup_subsys_state css;
++
++ struct rcu_head rcu;
++ struct list_head list;
++
++ struct task_group *parent;
++ struct list_head siblings;
++ struct list_head children;
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++ int i;
++ struct rq *rq;
++
++ print_scheduler_version();
++
++ wait_bit_init();
++
++#ifdef CONFIG_SMP
++ for (i = 0; i < NR_SCHED_RQ_QUEUED_LEVEL; i++)
++ cpumask_clear(&sched_rq_queued_masks[i]);
++ cpumask_setall(&sched_rq_queued_masks[SCHED_RQ_EMPTY]);
++ set_bit(SCHED_RQ_EMPTY, sched_rq_queued_masks_bitmap);
++
++ cpumask_setall(&sched_rq_pending_masks[SCHED_RQ_EMPTY]);
++ set_bit(SCHED_RQ_EMPTY, sched_rq_pending_masks_bitmap);
++#else
++ uprq = &per_cpu(runqueues, 0);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++ task_group_cache = KMEM_CACHE(task_group, 0);
++
++ list_add(&root_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&root_task_group.children);
++ INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++ for_each_possible_cpu(i) {
++ rq = cpu_rq(i);
++ FULL_INIT_SKIPLIST_NODE(&rq->sl_header);
++ raw_spin_lock_init(&rq->lock);
++ rq->dither = 0;
++ rq->nr_running = rq->nr_uninterruptible = 0;
++ rq->calc_load_active = 0;
++ rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++ rq->online = false;
++ rq->cpu = i;
++
++ rq->queued_level = SCHED_RQ_EMPTY;
++ rq->pending_level = SCHED_RQ_EMPTY;
++#ifdef CONFIG_SCHED_SMT
++ per_cpu(sched_sibling_cpu, i) = i;
++ rq->active_balance = 0;
++#endif
++#endif
++ rq->nr_switches = 0;
++ atomic_set(&rq->nr_iowait, 0);
++ hrtick_rq_init(rq);
++ }
++#ifdef CONFIG_SMP
++ /* Set rq->online for cpu 0 */
++ cpu_rq(0)->online = true;
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ mmgrab(&init_mm);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread, however somewhere below it might be,
++ * but because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++
++ calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++ idle_thread_set_boot_cpu();
++
++ sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++ init_schedstats();
++
++ psi_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++ int nested = preempt_count() + rcu_preempt_depth();
++
++ return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++ /*
++ * Blocking primitives will set (and therefore destroy) current->state,
++ * since we will exit with TASK_RUNNING make sure we enter with it,
++ * otherwise we will destroy state.
++ */
++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++ "do not call blocking ops when !TASK_RUNNING; "
++ "state=%lx set at [<%p>] %pS\n",
++ current->state,
++ (void *)current->task_state_change,
++ (void *)current->task_state_change);
++
++ ___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++ /* Ratelimiting timestamp: */
++ static unsigned long prev_jiffy;
++
++ unsigned long preempt_disable_ip;
++
++ /* WARN_ON_ONCE() by default, no rate limit required: */
++ rcu_sleep_check();
++
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++ oops_in_progress)
++ return;
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ /* Save this before calling printk(), since that will clobber it: */
++ preempt_disable_ip = get_preempt_disable_ip(current);
++
++ printk(KERN_ERR
++ "BUG: sleeping function called from invalid context at %s:%d\n",
++ file, line);
++ printk(KERN_ERR
++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ if (task_stack_end_corrupted(current))
++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++#ifdef CONFIG_DEBUG_PREEMPT
++ if (!preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++ static unsigned long prev_jiffy;
++
++ if (irqs_disabled())
++ return;
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++ return;
++
++ if (preempt_count() > preempt_offset)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ debug_show_held_locks(current);
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ struct sched_attr attr = {
++ .sched_policy = SCHED_NORMAL,
++ };
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (p->flags & PF_KTHREAD)
++ continue;
++
++ if (!rt_task(p)) {
++ /*
++ * Renice negative nice level userspace
++ * tasks back to 0:
++ */
++ if (task_nice(p) < 0)
++ set_user_nice(p, 0);
++ continue;
++ }
++
++ __sched_setscheduler(p, &attr, false, false);
++ }
++ read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPU's synchronised, and interrupts disabled, the
++ * and caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++ struct seq_file *m)
++{}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++ kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++ /* Now it should be safe to free those cfs_rqs */
++ sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++ /* Wait for possible concurrent references to cfs_rqs to complete */
++ call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++ return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ /*
++ * Relies on the RCU grace period between css_released() and this.
++ */
++ sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++static struct cftype cpu_legacy_files[] = {
++ { } /* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++ { } /* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++ struct cgroup_subsys_state *css)
++{
++ return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_online = cpu_cgroup_css_online,
++ .css_released = cpu_cgroup_css_released,
++ .css_free = cpu_cgroup_css_free,
++ .css_extra_stat_show = cpu_extra_stat_show,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .legacy_cftypes = cpu_legacy_files,
++ .dfl_cftypes = cpu_files,
++ .early_init = true,
++ .threaded = true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/pds_sched.h b/kernel/sched/pds_sched.h
+new file mode 100644
+index 000000000000..be88ffeab6e4
+--- /dev/null
++++ b/kernel/sched/pds_sched.h
+@@ -0,0 +1,432 @@
++#ifndef PDS_SCHED_H
++#define PDS_SCHED_H
++
++#include <linux/sched.h>
++
++#include <linux/sched/clock.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/sysctl.h>
++#include <linux/sched/task.h>
++#include <linux/sched/topology.h>
++#include <linux/sched/wake_q.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <linux/cgroup.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
++#include <linux/cpuset.h>
++#include <linux/ctype.h>
++#include <linux/kthread.h>
++#include <linux/livepatch.h>
++#include <linux/membarrier.h>
++#include <linux/proc_fs.h>
++#include <linux/psi.h>
++#include <linux/slab.h>
++#include <linux/stop_machine.h>
++#include <linux/suspend.h>
++#include <linux/swait.h>
++#include <linux/syscalls.h>
++#include <linux/tsacct_kern.h>
++
++#include <asm/tlb.h>
++
++#ifdef CONFIG_PARAVIRT
++# include <asm/paravirt.h>
++#endif
++
++#include "cpupri.h"
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED 1
++#define TASK_ON_RQ_MIGRATING 2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++ return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++ /* runqueue lock: */
++ raw_spinlock_t lock;
++
++ struct task_struct *curr, *idle, *stop;
++ struct mm_struct *prev_mm;
++
++ struct skiplist_node sl_header;
++
++ /* switch count */
++ u64 nr_switches;
++
++ atomic_t nr_iowait;
++
++#ifdef CONFIG_SMP
++ int cpu; /* cpu of this runqueue */
++ bool online;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++ struct sched_avg avg_irq;
++#endif
++
++ unsigned long queued_level;
++ unsigned long pending_level;
++
++#ifdef CONFIG_SCHED_SMT
++ int active_balance;
++ struct cpu_stop_work active_balance_work;
++#endif
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++ u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++ /* calc_load related fields */
++ unsigned long calc_load_update;
++ long calc_load_active;
++
++ u64 clock, last_tick;
++ u64 clock_task;
++ int dither;
++
++ unsigned long nr_running;
++ unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++ int hrtick_csd_pending;
++ call_single_data_t hrtick_csd;
++#endif
++ struct hrtimer hrtick_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++ /* latency stats */
++ struct sched_info rq_sched_info;
++ unsigned long long rq_cpu_time;
++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++ /* sys_sched_yield() stats */
++ unsigned int yld_count;
++
++ /* schedule() stats */
++ unsigned int sched_switch;
++ unsigned int sched_count;
++ unsigned int sched_goidle;
++
++ /* try_to_wake_up() stats */
++ unsigned int ttwu_count;
++ unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++#ifdef CONFIG_CPU_IDLE
++ /* Must be inspected within a rcu lock section */
++ struct cpuidle_state *idle_state;
++#endif
++};
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++#ifndef CONFIG_SMP
++extern struct rq *uprq;
++#define cpu_rq(cpu) (uprq)
++#define this_rq() (uprq)
++#define raw_rq() (uprq)
++#define task_rq(p) (uprq)
++#define cpu_curr(cpu) ((uprq)->curr)
++#else /* CONFIG_SMP */
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
++#define this_rq() this_cpu_ptr(&runqueues)
++#define raw_rq() raw_cpu_ptr(&runqueues)
++#define task_rq(p) cpu_rq(task_cpu(p))
++#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++#endif /* CONFIG_SMP */
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++ return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++ return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++ /*
++ * Relax the lockdep_assert_held() check: as in VRQ, callers of
++ * sched_info_xxxx() may not hold rq->lock:
++ * lockdep_assert_held(&rq->lock);
++ */
++ return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++ /*
++ * Relax the lockdep_assert_held() check: as in VRQ, callers of
++ * sched_info_xxxx() may not hold rq->lock:
++ * lockdep_assert_held(&rq->lock);
++ */
++ return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP 0x01
++
++#define ENQUEUE_WAKEUP 0x01
++
++
++/*
++ * Below are the scheduler APIs used by other kernel code.
++ * They take a dummy rq_flags.
++ * TODO: PDS needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++ unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++ __acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++ __acquires(p->pi_lock)
++ __acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++ __releases(rq->lock)
++{
++ raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++ __releases(rq->lock)
++ __releases(p->pi_lock)
++{
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++ __releases(rq->lock)
++{
++ raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ local_irq_disable();
++ rq = this_rq();
++ raw_spin_lock(&rq->lock);
++
++ return rq;
++}
++
++static inline bool task_running(struct task_struct *p)
++{
++ return p->on_cpu;
++}
++
++extern struct static_key_false sched_schedstats;
++
++static inline void sched_ttwu_pending(void) { }
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++ rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ WARN_ON(!rcu_read_lock_held());
++ return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++ struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++ return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ return rq->cpu;
++#else
++ return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++ u64 total;
++ u64 tick_delta;
++ u64 irq_start_time;
++ struct u64_stats_sync sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++ unsigned int seq;
++ u64 total;
++
++ do {
++ seq = __u64_stats_fetch_begin(&irqtime->sync);
++ total = irqtime->total;
++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++ return total;
++}
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid. Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++ struct update_util_data *data;
++
++ data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
++ if (data)
++ data->func(data, rq_clock(rq), flags);
++}
++
++static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
++{
++ if (cpu_of(rq) == smp_processor_id())
++ cpufreq_update_util(rq, flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
++static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
++#endif /* CONFIG_CPU_FREQ */
++
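[Editor's aside: for context, the per-CPU hook dereferenced above is installed by a cpufreq governor through the mainline registration API. Sketch; the governor names are hypothetical.]

    /* Sketch only; my_gov_* are hypothetical (<linux/sched/cpufreq.h>). */
    static void my_gov_update(struct update_util_data *data, u64 time,
    			  unsigned int flags)
    {
    	/* re-evaluate this CPU's frequency/voltage here */
    }

    /* at governor start, for each CPU in the policy: */
    cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_data, cpu),
    			     my_gov_update);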
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant() (true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant() (false)
++#endif
++
++extern void schedule_idle(void);
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV 0x10000000
++
++#endif /* PDS_SCHED_H */
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index befce29bd882..48ef3e62e7d4 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -234,6 +234,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
+ WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+
++#ifndef CONFIG_SCHED_PDS
+ /*
+ * sched_entity:
+ *
+@@ -345,6 +346,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+
+ return 0;
+ }
++#endif
+
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ /*
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 7489d5f56960..6dc3c79da1ec 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,11 +1,13 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+
++#ifndef CONFIG_SCHED_PDS
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+
+ #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ int update_irq_load_avg(struct rq *rq, u64 running);
+@@ -17,6 +19,7 @@ update_irq_load_avg(struct rq *rq, u64 running)
+ }
+ #endif
+
++#ifndef CONFIG_SCHED_PDS
+ /*
+ * When a task is dequeued, its estimated utilization should not be update if
+ * its util_avg has not been updated at least once.
+@@ -137,9 +140,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_PDS */
+
+ #else
+
++#ifndef CONFIG_SCHED_PDS
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -157,6 +162,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ return 0;
+ }
++#endif
+
+ static inline int
+ update_irq_load_avg(struct rq *rq, u64 running)
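The pelt.h hunks above all follow one idiom: each PELT entry point is either the real declaration (when PELT is built) or a static inline stub returning 0, so shared call sites compile unchanged whether CONFIG_SCHED_PDS is set or not. A generic sketch of the pattern, with illustrative names rather than the patch's own:

    /* Hedged sketch of the compile-out-with-stubs idiom used above. */
    #ifdef CONFIG_MY_FAST_PATH
    int my_update(struct rq *rq, u64 now);  /* real version in a .c file */
    #else
    static inline int my_update(struct rq *rq, u64 now)
    {
            return 0;       /* inlines away; callers need no #ifdefs */
    }
    #endif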
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index b52ed1ada0be..b2d67e75c361 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2,6 +2,10 @@
+ /*
+ * Scheduler internal types and methods:
+ */
++#ifdef CONFIG_SCHED_PDS
++#include "pds_sched.h"
++#else
++
+ #include <linux/sched.h>
+
+ #include <linux/sched/autogroup.h>
+@@ -2341,3 +2345,4 @@ static inline bool sched_energy_enabled(void)
+ static inline bool sched_energy_enabled(void) { return false; }
+
+ #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
++#endif /* !CONFIG_SCHED_PDS */
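The two sched.h hunks are easy to misread in isolation: together they bracket the entire mainline header, reducing the file to a compile-time switch. Condensed shape (not the literal file contents):

    /* kernel/sched/sched.h after this patch, condensed: */
    #ifdef CONFIG_SCHED_PDS
    #include "pds_sched.h"
    #else
    /* ... the ~2300 lines of the mainline header ... */
    #endif /* !CONFIG_SCHED_PDS */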
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 750fb3c67eed..45bd43942575 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ } else {
+ struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_PDS
+ struct sched_domain *sd;
+ int dcount = 0;
++#endif
+ #endif
+ cpu = (unsigned long)(v - 2);
+ rq = cpu_rq(cpu);
+@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ seq_printf(seq, "\n");
+
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_PDS
+ /* domain-specific stats */
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ sd->ttwu_move_balance);
+ }
+ rcu_read_unlock();
++#endif
+ #endif
+ }
+ return 0;
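With the sched-domain walk compiled out, /proc/schedstat on a PDS kernel keeps its version and per-cpu lines but should show no domain lines. A quick check (hedged expectation, assuming CONFIG_SCHEDSTATS=y):

    $ grep -c '^domain' /proc/schedstat    # expect 0 on CONFIG_SCHED_PDS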
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 1beca96fb625..53739e0782bf 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -133,8 +133,12 @@ static int __maybe_unused four = 4;
+ static unsigned long zero_ul;
+ static unsigned long one_ul = 1;
+ static unsigned long long_max = LONG_MAX;
+-static int one_hundred = 100;
+-static int one_thousand = 1000;
++static int __read_mostly one_hundred = 100;
++static int __read_mostly one_thousand = 1000;
++#ifdef CONFIG_SCHED_PDS
++extern int rr_interval;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand = 10000;
+ #endif
+@@ -302,7 +306,7 @@ static struct ctl_table sysctl_base_table[] = {
+ { }
+ };
+
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_PDS)
+ static int min_sched_granularity_ns = 100000; /* 100 usecs */
+ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+ static int min_wakeup_granularity_ns; /* 0 usecs */
+@@ -319,6 +323,7 @@ static int max_extfrag_threshold = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#ifndef CONFIG_SCHED_PDS
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -484,6 +489,7 @@ static struct ctl_table kern_table[] = {
+ .extra2 = &one,
+ },
+ #endif
++#endif /* !CONFIG_SCHED_PDS */
+ #ifdef CONFIG_PROVE_LOCKING
+ {
+ .procname = "prove_locking",
+@@ -1056,6 +1062,26 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = proc_dointvec,
+ },
+ #endif
++#ifdef CONFIG_SCHED_PDS
++ {
++ .procname = "rr_interval",
++ .data = &rr_interval,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one_thousand,
++ },
++ {
++ .procname = "yield_type",
++ .data = &sched_yield_type,
++ .maxlen = sizeof (int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &two,
++ },
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ {
+ .procname = "spin_retry",
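The two entries added to kern_table surface at runtime as kernel.rr_interval (clamped to 1..1000 by the extra1/extra2 bounds above) and kernel.yield_type (0..2). A hedged usage example; changes do not persist across reboots:

    $ sysctl kernel.rr_interval
    $ sudo sysctl -w kernel.yield_type=0
    $ echo 2 | sudo tee /proc/sys/kernel/rr_interval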
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 0a426f4e3125..2692b89a70d5 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -791,6 +791,7 @@ check_timers_list(struct list_head *timers,
+ return 0;
+ }
+
++#ifndef CONFIG_SCHED_PDS
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ if (tsk->dl.dl_overrun) {
+@@ -798,6 +799,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ }
+ }
++#endif
+
+ /*
+ * Check for any per-thread CPU timers that have fired and move them off
+@@ -812,8 +814,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ u64 expires;
+ unsigned long soft;
+
++#ifndef CONFIG_SCHED_PDS
+ if (dl_task(tsk))
+ check_dl_overrun(tsk);
++#endif
+
+ /*
+ * If cputime_expires is zero, then there are no active
+@@ -829,7 +833,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ tsk_expires->virt_exp = expires;
+
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+
+ /*
+ * Check for the special case thread timers.
+@@ -839,7 +843,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -851,7 +855,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+ return;
+ }
+- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ /*
+ * At the soft limit, send a SIGXCPU every second.
+ */
+@@ -1091,7 +1095,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ struct task_cputime task_sample;
+
+ task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
++ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+@@ -1121,8 +1125,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ return 1;
+ }
+
++#ifndef CONFIG_SCHED_PDS
+ if (dl_task(tsk) && tsk->dl.dl_overrun)
+ return 1;
++#endif
+
+ return 0;
+ }
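These hunks swap direct reads of tsk->se.sum_exec_runtime and tsk->rt.timeout for the tsk_seruntime()/tsk_rttimeout() accessors, whose definitions live elsewhere in the patch and are not shown in this excerpt. On !PDS builds they must reduce to exactly the expressions they replace, so the fallback side can be inferred:

    /* Hedged sketch of the !CONFIG_SCHED_PDS fallback, inferred from the
     * expressions replaced above; the PDS-side definitions are elsewhere
     * in the patch. */
    #ifndef CONFIG_SCHED_PDS
    #define tsk_seruntime(t)        ((t)->se.sum_exec_runtime)
    #define tsk_rttimeout(t)        ((t)->rt.timeout)
    #endif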
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 69ee8ef12cee..3eaa2a21caa4 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1048,10 +1048,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ /* Make this a -deadline thread */
+ static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_PDS
++ /* No deadline on BFS, use RR */
++ .sched_policy = SCHED_RR,
++#else
+ .sched_policy = SCHED_DEADLINE,
+ .sched_runtime = 100000ULL,
+ .sched_deadline = 10000000ULL,
+ .sched_period = 10000000ULL
++#endif
+ };
+ struct wakeup_test_data *x = data;
+
diff --git a/PKGBUILD b/PKGBUILD
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -65,7 +65,7 @@
 _subarch=
 _localmodcfg=
 pkgbase=linux-pds
-_srcver_tag=5.1.16-arch1
+_srcver_tag=5.2-arch2
 pkgver="${_srcver_tag//-/.}"
 pkgrel=1
 arch=(x86_64)
@@ -78,6 +78,10 @@ makedepends=(
   bc
   libelf
   git
+  python-sphinx
+  python-sphinx_rtd_theme
+  graphviz
+  imagemagick
 )
 options=('!strip')
@@ -101,17 +105,17 @@ source=(
   02-Glitched-PDS-by-TkG.patch
 )
 validpgpkeys=(
-  'ABAF11C65A2970B130ABE3C479BE3E4300411886' # Linus Torvalds
-  '647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
-  '8218F88849AAC522E94CF470A5E9288C4FA415FA' # Jan Alexander Steffens (heftig)
+  'ABAF11C65A2970B130ABE3C479BE3E4300411886'  # Linus Torvalds
+  '647F28654894E3BD457199BE38DBBDC86092693E'  # Greg Kroah-Hartman
+  '8218F88849AAC522E94CF470A5E9288C4FA415FA'  # Jan Alexander Steffens (heftig)
 )
 sha512sums=('SKIP'
             'SKIP'
-            '5d78839a3df30a667d1e64458add6a8c01557a21a4eb8b25ff084b95bc5efbbfb1e02f5dd2b5485dc6829647d7024450ab4886a7b11e2f5c334caeddd05810af'
+            '951ad205d211ef965e6136e0b8447aeeb522229a74b45ce497f5ff31f2059db23b36fb8035f72798d15be45440396f901a98857f961ab0acf9bfbe25c955bb32'
             '7ad5be75ee422dda3b80edd2eb614d8a9181e2c8228cd68b3881e2fb95953bf2dea6cbe7900ce1013c9de89b2802574b7b24869fc5d7a95d3cc3112c4d27063a'
             '2718b58dbbb15063bacb2bde6489e5b3c59afac4c0e0435b97fe720d42c711b6bcba926f67a8687878bd51373c9cf3adb1915a11666d79ccb220bf36e0788ab7'
             '2dc6b0ba8f7dbf19d2446c5c5f1823587de89f4e28e9595937dd51a87755099656f2acec50e3e2546ea633ad1bfd1c722e0c2b91eef1d609103d8abdc0a7cbaf'
-            'cdfa59b9f369a5795c93ced526e7f480851ef439f3379e6c1a32b9cf29232cd4671fe4b0ddb50c5d996e23db71582844e233fee96bb551827eaf70b0be1d18dc'
+            '3ba2ea015485795930fe17231f0ba7755522ea675f149b4d42b056827196b4f98aea3cd027c3bd9a5934ff4b541aec30ff62c179dd38de908c0ce884af8560c9'
             '3ff796cbc213ae5f43a55f1ba92406bba04703db3459040beacacd9baceb3138021e908f440bd101cc76cb725e418ebdc8ab776327801690da30a1477bc84753')

 _kernelname=${pkgbase#linux}
@@ -125,7 +129,7 @@ prepare() {
   echo "-$pkgrel" > localversion.10-pkgrel
   echo "$_kernelname" > localversion.20-pkgname

-  msg2 "Patching Undead PDS 0.99o 5.1 rebase by TkG"
+  msg2 "Patching Undead PDS 0.99o 5.2 rebase by TkG"
   patch -Np1 -i "$srcdir/01-Undead-PDS-0.99o-rebase-by-TkG.patch"
   patch -Np1 -i "$srcdir/02-Glitched-PDS-by-TkG.patch"
@@ -155,19 +159,19 @@ prepare() {
     fi
   fi

-  if [ -z "$_sched_yield_type" ]; then
+  if [ -z "${_sched_yield_type}" ]; then
     plain ""
     plain "CPU sched_yield_type - Choose what sort of yield sched_yield will perform."
     plain ""
+    plain "For PDS:"
     plain "0: No yield."
     plain "1: Yield only to better priority/deadline tasks."
     plain "2: Expire timeslice and recalculate deadline."
-    read -rp "`echo $'\n> 0 (Recommended option for gaming on PDS - "TkG" default)\n 1 (Default, but can lead to stability issues on some platforms)\n 2 (Usually the slowest option for PDS, not recommended) - Using this option on BMQ will fallback to "1"\n [0-2?]: '`" _sched_yield_type;
+    read -rp "`echo $'\n> 0 (Recommended option for gaming on PDS - "tkg" default)\n 1 (Default, but can lead to stability issues on some platforms)\n 2 (Usually the slowest option for PDS, not recommended unless you have issues with 0 or 1)\n [0-2?]: '`" _sched_yield_type;
   fi
-
-  if [ "$_sched_yield_type" == "1" ]; then
+  if [ "${_sched_yield_type}" == "1" ]; then
     msg2 "Using default CPU sched yield type (1)"
-  elif [ "$_sched_yield_type" == "2" ]; then
+  elif [ "${_sched_yield_type}" == "2" ]; then
     sed -i -e 's/int sched_yield_type __read_mostly = 1;/int sched_yield_type __read_mostly = 2;/' ./kernel/sched/pds.c
   else
     sed -i -e 's/int sched_yield_type __read_mostly = 1;/int sched_yield_type __read_mostly = 0;/' ./kernel/sched/pds.c
@@ -187,8 +191,7 @@ prepare() {
 build() {
   cd ${_reponame}
-
-  make bzImage modules
+  make bzImage modules htmldocs
 }

 _package() {
diff --git a/config b/config
--- a/config
+++ b/config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.1.14-arch1 Kernel Configuration
+# Linux/x86 5.2.0-arch2 Kernel Configuration
 #

 #
@@ -67,6 +67,8 @@ CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
 # CONFIG_GENERIC_IRQ_DEBUGFS is not set
+# end of IRQ subsystem
+
 CONFIG_CLOCKSOURCE_WATCHDOG=y
 CONFIG_ARCH_CLOCKSOURCE_DATA=y
 CONFIG_ARCH_CLOCKSOURCE_INIT=y
@@ -89,6 +91,8 @@ CONFIG_CONTEXT_TRACKING=y
 # CONFIG_CONTEXT_TRACKING_FORCE is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+# end of Timers subsystem
+
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
 CONFIG_PREEMPT=y
@@ -109,6 +113,8 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_PSI=y
 # CONFIG_PSI_DEFAULT_DISABLED is not set
+# end of CPU/Task time and stats accounting
+
 CONFIG_CPU_ISOLATION=y

 #
@@ -127,9 +133,12 @@ CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_BOOST=y
 CONFIG_RCU_BOOST_DELAY=500
 CONFIG_RCU_NOCB_CPU=y
+# end of RCU Subsystem
+
 CONFIG_BUILD_BIN2C=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+# CONFIG_IKHEADERS is not set
 CONFIG_LOG_BUF_SHIFT=17
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
 CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
@@ -179,7 +188,6 @@ CONFIG_RD_LZ4=y
 CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
-CONFIG_ANON_INODES=y
 CONFIG_HAVE_UID16=y
 CONFIG_SYSCTL_EXCEPTION_TRACE=y
 CONFIG_HAVE_PCSPKR_PLATFORM=y
@@ -228,6 +236,8 @@ CONFIG_HAVE_PERF_EVENTS=y
 #
 CONFIG_PERF_EVENTS=y
 # CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# end of Kernel Performance Events And Counters
+
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_SLUB_MEMCG_SYSFS_ON is not set
@@ -238,10 +248,13 @@ CONFIG_SLUB=y
 CONFIG_SLAB_MERGE_DEFAULT=y
 CONFIG_SLAB_FREELIST_RANDOM=y
 CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
 # CONFIG_SLUB_CPU_PARTIAL is not set
 CONFIG_SYSTEM_DATA_VERIFICATION=y
 CONFIG_PROFILING=y
 CONFIG_TRACEPOINTS=y
+# end of General setup
+
 CONFIG_64BIT=y
 CONFIG_X86_64=y
 CONFIG_X86=y
@@ -258,9 +271,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_GENERIC_BUG=y
 CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_HWEIGHT=y
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_ARCH_HAS_CPU_RELAX=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
@@ -274,7 +285,6 @@ CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
 CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
 CONFIG_ZONE_DMA32=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
 CONFIG_HAVE_INTEL_TXT=y
 CONFIG_X86_64_SMP=y
@@ -307,7 +317,6 @@ CONFIG_PARAVIRT=y
 CONFIG_PARAVIRT_XXL=y
 # CONFIG_PARAVIRT_DEBUG is not set
 CONFIG_PARAVIRT_SPINLOCKS=y
-# CONFIG_QUEUED_LOCK_STAT is not set
 CONFIG_XEN=y
 CONFIG_XEN_PV=y
 CONFIG_XEN_PV_SMP=y
@@ -374,6 +383,8 @@ CONFIG_PERF_EVENTS_INTEL_UNCORE=m
 CONFIG_PERF_EVENTS_INTEL_RAPL=m
 CONFIG_PERF_EVENTS_INTEL_CSTATE=m
 CONFIG_PERF_EVENTS_AMD_POWER=m
+# end of Performance monitoring
+
 CONFIG_X86_16BIT=y
 CONFIG_X86_ESPFIX64=y
 CONFIG_X86_VSYSCALL_EMULATION=y
@@ -424,11 +435,11 @@ CONFIG_EFI_MIXED=y
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_250 is not set
-CONFIG_HZ_300=y
-# CONFIG_HZ_500 is not set
+# CONFIG_HZ_300 is not set
+CONFIG_HZ_500=y
 # CONFIG_HZ_750 is not set
 # CONFIG_HZ_1000 is not set
-CONFIG_HZ=300
+CONFIG_HZ=500
 CONFIG_SCHED_HRTICK=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
@@ -454,6 +465,8 @@ CONFIG_LEGACY_VSYSCALL_NONE=y
 CONFIG_MODIFY_LDT_SYSCALL=y
 CONFIG_HAVE_LIVEPATCH=y
 # CONFIG_LIVEPATCH is not set
+# end of Processor type and features
+
 CONFIG_ARCH_HAS_ADD_PAGES=y
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
@@ -535,6 +548,7 @@ CONFIG_ACPI_BGRT=y
 # CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
 CONFIG_ACPI_NFIT=m
 # CONFIG_NFIT_SECURITY_DEBUG is not set
+CONFIG_ACPI_HMAT=y
 CONFIG_HAVE_ACPI_APEI=y
 CONFIG_HAVE_ACPI_APEI_NMI=y
 CONFIG_ACPI_APEI=y
@@ -596,6 +610,7 @@ CONFIG_X86_P4_CLOCKMOD=m
 # shared options
 #
 CONFIG_X86_SPEEDSTEP_LIB=m
+# end of CPU Frequency scaling

 #
 # CPU Idle
 #
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_IDLE_GOV_LADDER=y
 CONFIG_CPU_IDLE_GOV_MENU=y
 CONFIG_CPU_IDLE_GOV_TEO=y
+# end of CPU Idle
+
 CONFIG_INTEL_IDLE=y
+# end of Power management and ACPI options

 #
 # Bus options (PCI etc.)
 #
@@ -618,6 +636,7 @@ CONFIG_MMCONF_FAM10H=y
 CONFIG_ISA_DMA_API=y
 CONFIG_AMD_NB=y
 # CONFIG_X86_SYSFB is not set
+# end of Bus options (PCI etc.)
 #
 # Binary Emulations
@@ -628,6 +647,8 @@ CONFIG_COMPAT_32=y
 CONFIG_COMPAT=y
 CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
 CONFIG_SYSVIPC_COMPAT=y
+# end of Binary Emulations
+
 CONFIG_X86_DEV_DMA_OPS=y
 CONFIG_HAVE_GENERIC_GUP=y
@@ -658,6 +679,8 @@ CONFIG_EFI_CAPSULE_LOADER=m
 # CONFIG_EFI_TEST is not set
 CONFIG_APPLE_PROPERTIES=y
 # CONFIG_RESET_ATTACK_MITIGATION is not set
+# end of EFI (Extensible Firmware Interface) Support
+
 CONFIG_UEFI_CPER=y
 CONFIG_UEFI_CPER_X86=y
 CONFIG_EFI_DEV_PATH_PARSER=y
@@ -666,6 +689,9 @@ CONFIG_EFI_EARLYCON=y

 #
 # Tegra firmware driver
 #
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
 CONFIG_HAVE_KVM=y
 CONFIG_HAVE_KVM_IRQCHIP=y
 CONFIG_HAVE_KVM_IRQFD=y
@@ -723,6 +749,7 @@ CONFIG_HAVE_DMA_CONTIGUOUS=y
 CONFIG_GENERIC_SMP_IDLE_THREAD=y
 CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
 CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
 CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
 CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
 CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
@@ -739,7 +766,6 @@ CONFIG_HAVE_PERF_USER_STACK_DUMP=y
 CONFIG_HAVE_ARCH_JUMP_LABEL=y
 CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
 CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_RCU_TABLE_INVALIDATE=y
 CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
 CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
 CONFIG_HAVE_CMPXCHG_LOCAL=y
@@ -790,15 +816,28 @@ CONFIG_ARCH_HAS_REFCOUNT=y
 # CONFIG_REFCOUNT_FULL is not set
 CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
 CONFIG_ARCH_USE_MEMREMAP_PROT=y
+CONFIG_LOCK_EVENT_COUNTS=y

 #
 # GCOV-based kernel profiling
 #
 # CONFIG_GCOV_KERNEL is not set
 CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+# end of GCOV-based kernel profiling
+
 CONFIG_PLUGIN_HOSTCC="g++"
 CONFIG_HAVE_GCC_PLUGINS=y
-# CONFIG_GCC_PLUGINS is not set
+CONFIG_GCC_PLUGINS=y
+
+#
+# GCC plugins
+#
+# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set
+# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
+# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set
+# end of GCC plugins
+# end of General architecture-dependent options
+
 CONFIG_RT_MUTEXES=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
@@ -860,6 +899,8 @@ CONFIG_KARMA_PARTITION=y
 CONFIG_EFI_PARTITION=y
 # CONFIG_SYSV68_PARTITION is not set
 # CONFIG_CMDLINE_PARTITION is not set
+# end of Partition Types
+
 CONFIG_BLOCK_COMPAT=y
 CONFIG_BLK_MQ_PCI=y
 CONFIG_BLK_MQ_VIRTIO=y
@@ -873,6 +914,8 @@ CONFIG_MQ_IOSCHED_DEADLINE=y
 CONFIG_MQ_IOSCHED_KYBER=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BFQ_GROUP_IOSCHED=y
+# end of IO Schedulers
+
 CONFIG_PREEMPT_NOTIFIERS=y
 CONFIG_PADATA=y
 CONFIG_ASN1=y
@@ -899,6 +942,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_BINFMT_SCRIPT=y
 CONFIG_BINFMT_MISC=y
 CONFIG_COREDUMP=y
+# end of Executable file formats

 #
 # Memory Management options
@@ -912,7 +956,6 @@ CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
 CONFIG_SPARSEMEM_VMEMMAP=y
 CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_ARCH_DISCARD_MEMBLOCK=y
 CONFIG_MEMORY_ISOLATION=y
 CONFIG_HAVE_BOOTMEM_INFO_NODE=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -924,6 +967,7 @@ CONFIG_MEMORY_BALLOON=y
 CONFIG_BALLOON_COMPACTION=y
 CONFIG_COMPACTION=y
 CONFIG_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
 CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
@@ -955,6 +999,8 @@ CONFIG_GENERIC_EARLY_IOREMAP=y
 # CONFIG_IDLE_PAGE_TRACKING is not set
 CONFIG_ARCH_HAS_ZONE_DEVICE=y
 CONFIG_ZONE_DEVICE=y
+CONFIG_ARCH_HAS_HMM_MIRROR=y
+CONFIG_ARCH_HAS_HMM_DEVICE=y
 CONFIG_ARCH_HAS_HMM=y
 CONFIG_MIGRATE_VMA_HELPER=y
 CONFIG_DEV_PAGEMAP_OPS=y
@@ -968,6 +1014,8 @@ CONFIG_ARCH_HAS_PKEYS=y
 # CONFIG_PERCPU_STATS is not set
 # CONFIG_GUP_BENCHMARK is not set
 CONFIG_ARCH_HAS_PTE_SPECIAL=y
+# end of Memory Management options
+
 CONFIG_NET=y
 CONFIG_COMPAT_NETLINK_MESSAGES=y
 CONFIG_NET_INGRESS=y
@@ -1029,9 +1077,6 @@ CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_TUNNEL=m
 CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_TCP_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -1070,10 +1115,6 @@ CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_ILA=m
 CONFIG_INET6_XFRM_TUNNEL=m
 CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_SIT_6RD=y
@@ -1141,7 +1182,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_CT_NETLINK_HELPER=m
 CONFIG_NETFILTER_NETLINK_GLUE_CT=y
 CONFIG_NF_NAT=m
-CONFIG_NF_NAT_NEEDED=y
 CONFIG_NF_NAT_AMANDA=m
 CONFIG_NF_NAT_FTP=m
 CONFIG_NF_NAT_IRC=m
@@ -1216,6 +1256,7 @@ CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_RATEEST=m
 CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -1272,6 +1313,8 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+# end of Core Netfilter Configuration
+
 CONFIG_IP_SET=m
 CONFIG_IP_SET_MAX=256
 CONFIG_IP_SET_BITMAP_IP=m
@@ -1346,7 +1389,6 @@ CONFIG_NF_DEFRAG_IPV4=m
 CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NF_TPROXY_IPV4=m
 CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -1380,6 +1422,7 @@ CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration

 #
 # IPv6: Netfilter Configuration
@@ -1387,7 +1430,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NF_TPROXY_IPV6=m
 CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_REJECT_IPV6=m
 CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_FIB_IPV6=m
@@ -1416,6 +1458,8 @@ CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
+# end of IPv6: Netfilter Configuration
+
 CONFIG_NF_DEFRAG_IPV6=m
 CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_NFT_BRIDGE_REJECT=m
@@ -1452,11 +1496,14 @@ CONFIG_INET_DCCP_DIAG=m
 CONFIG_IP_DCCP_CCID3=y
 # CONFIG_IP_DCCP_CCID3_DEBUG is not set
 CONFIG_IP_DCCP_TFRC_LIB=y
+# end of DCCP CCIDs Configuration

 #
 # DCCP Kernel Hacking
 #
 # CONFIG_IP_DCCP_DEBUG is not set
+# end of DCCP Kernel Hacking
+
 CONFIG_IP_SCTP=m
 # CONFIG_SCTP_DBG_OBJCNT is not set
 # CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
@@ -1493,17 +1540,21 @@ CONFIG_BRIDGE_IGMP_SNOOPING=y
 CONFIG_BRIDGE_VLAN_FILTERING=y
 CONFIG_HAVE_NET_DSA=y
 CONFIG_NET_DSA=m
-# CONFIG_NET_DSA_LEGACY is not set
-CONFIG_NET_DSA_TAG_BRCM=y
-CONFIG_NET_DSA_TAG_BRCM_PREPEND=y
-CONFIG_NET_DSA_TAG_DSA=y
-CONFIG_NET_DSA_TAG_EDSA=y
-CONFIG_NET_DSA_TAG_GSWIP=y
-CONFIG_NET_DSA_TAG_KSZ=y
-CONFIG_NET_DSA_TAG_KSZ9477=y
-CONFIG_NET_DSA_TAG_LAN9303=y
-CONFIG_NET_DSA_TAG_MTK=y
-CONFIG_NET_DSA_TAG_QCA=y
+CONFIG_NET_DSA_TAG_8021Q=m
+CONFIG_NET_DSA_TAG_BRCM_COMMON=m
+CONFIG_NET_DSA_TAG_BRCM=m
+CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
+CONFIG_NET_DSA_TAG_GSWIP=m
+CONFIG_NET_DSA_TAG_DSA=m
+CONFIG_NET_DSA_TAG_EDSA=m
+CONFIG_NET_DSA_TAG_MTK=m
+CONFIG_NET_DSA_TAG_KSZ_COMMON=m
+CONFIG_NET_DSA_TAG_KSZ=m
+CONFIG_NET_DSA_TAG_KSZ9477=m
+CONFIG_NET_DSA_TAG_QCA=m
+CONFIG_NET_DSA_TAG_LAN9303=m
+CONFIG_NET_DSA_TAG_SJA1105=m
+CONFIG_NET_DSA_TAG_TRAILER=m
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_VLAN_8021Q_MVRP=y
@@ -1639,6 +1690,7 @@ CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_BATMAN_ADV_DEBUGFS=y
 # CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_BATMAN_ADV_SYSFS=y
 # CONFIG_BATMAN_ADV_TRACING is not set
 CONFIG_OPENVSWITCH=m
 CONFIG_OPENVSWITCH_GRE=m
@@ -1677,6 +1729,9 @@ CONFIG_NET_FLOW_LIMIT=y
 #
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_DROP_MONITOR=m
+# end of Network testing
+# end of Networking options
+
 CONFIG_HAMRADIO=y

 #
@@ -1697,6 +1752,8 @@ CONFIG_BAYCOM_SER_FDX=m
 CONFIG_BAYCOM_SER_HDX=m
 CONFIG_BAYCOM_PAR=m
 CONFIG_YAM=m
+# end of AX.25 network device drivers
+
 CONFIG_CAN=m
 CONFIG_CAN_RAW=m
 CONFIG_CAN_BCM=m
@@ -1740,6 +1797,7 @@ CONFIG_CAN_SOFTING_CS=m
 #
 CONFIG_CAN_HI311X=m
 CONFIG_CAN_MCP251X=m
+# end of CAN SPI interfaces

 #
 # CAN USB interfaces
@@ -1752,7 +1810,11 @@ CONFIG_CAN_KVASER_USB=m
 CONFIG_CAN_MCBA_USB=m
 CONFIG_CAN_PEAK_USB=m
 CONFIG_CAN_UCAN=m
+# end of CAN USB interfaces
+
 # CONFIG_CAN_DEBUG_DEVICES is not set
+# end of CAN Device Drivers
+
 CONFIG_BT=m
 CONFIG_BT_BREDR=y
 CONFIG_BT_RFCOMM=m
@@ -1806,8 +1868,11 @@ CONFIG_BT_MRVL=m
 CONFIG_BT_MRVL_SDIO=m
 CONFIG_BT_ATH3K=m
 CONFIG_BT_WILINK=m
+CONFIG_BT_MTKSDIO=m
 CONFIG_BT_MTKUART=m
 CONFIG_BT_HCIRSI=m
+# end of Bluetooth device drivers
+
 CONFIG_AF_RXRPC=m
 CONFIG_AF_RXRPC_IPV6=y
 # CONFIG_AF_RXRPC_INJECT_LOSS is not set
@@ -1908,6 +1973,8 @@ CONFIG_NFC_NXP_NCI_I2C=m
 CONFIG_NFC_S3FWRN5=m
 CONFIG_NFC_S3FWRN5_I2C=m
 CONFIG_NFC_ST95HF=m
+# end of Near Field Communication (NFC) devices
+
 CONFIG_PSAMPLE=m
 CONFIG_NET_IFE=m
 CONFIG_LWTUNNEL=y
@@ -1979,6 +2046,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y
 CONFIG_PCIE_CADENCE=y
 CONFIG_PCIE_CADENCE_HOST=y
 CONFIG_PCIE_CADENCE_EP=y
+# end of Cadence PCIe controllers support
+
 CONFIG_PCI_FTPCI100=y
 CONFIG_PCI_HOST_COMMON=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -1995,6 +2064,8 @@ CONFIG_PCIE_DW_PLAT=y
 CONFIG_PCIE_DW_PLAT_HOST=y
 CONFIG_PCIE_DW_PLAT_EP=y
 CONFIG_PCI_MESON=y
+# end of DesignWare PCI Core Support
+# end of PCI controller drivers

 #
 # PCI Endpoint
@@ -2002,11 +2073,14 @@ CONFIG_PCI_MESON=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 # CONFIG_PCI_EPF_TEST is not set
+# end of PCI Endpoint

 #
 # PCI switch controller drivers
 #
 CONFIG_PCI_SW_SWITCHTEC=m
+# end of PCI switch controller drivers
+
 CONFIG_PCCARD=m
 CONFIG_PCMCIA=m
 CONFIG_PCMCIA_LOAD_CIS=y
@@ -2042,6 +2116,7 @@ CONFIG_RAPIDIO_CPS_XX=m
 CONFIG_RAPIDIO_TSI568=m
 CONFIG_RAPIDIO_CPS_GEN2=m
 CONFIG_RAPIDIO_RXS_GEN3=m
+# end of RapidIO Switch drivers

 #
 # Generic Driver Options
@@ -2059,12 +2134,15 @@ CONFIG_FW_LOADER=y
 CONFIG_EXTRA_FIRMWARE=""
 CONFIG_FW_LOADER_USER_HELPER=y
 # CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+# end of Firmware loader
+
 CONFIG_WANT_DEV_COREDUMP=y
 CONFIG_ALLOW_DEV_COREDUMP=y
 CONFIG_DEV_COREDUMP=y
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+CONFIG_HMEM_REPORTING=y
 # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
 CONFIG_SYS_HYPERVISOR=y
 CONFIG_GENERIC_CPU_AUTOPROBE=y
@@ -2080,11 +2158,14 @@ CONFIG_REGMAP_IRQ=y
 CONFIG_REGMAP_SOUNDWIRE=m
 CONFIG_DMA_SHARED_BUFFER=y
 # CONFIG_DMA_FENCE_TRACE is not set
+# end of Generic Driver Options

 #
 # Bus devices
 #
 CONFIG_SIMPLE_PM_BUS=y
+# end of Bus devices
+
 CONFIG_CONNECTOR=y
 CONFIG_PROC_EVENTS=y
 CONFIG_GNSS=m
@@ -2105,6 +2186,7 @@ CONFIG_MTD_REDBOOT_PARTS=m
 CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
 # CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
 # CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# end of Partition parsers

 #
 # User Modules And Translation Layers
@@ -2142,6 +2224,7 @@ CONFIG_MTD_CFI_UTIL=m
 CONFIG_MTD_RAM=m
 CONFIG_MTD_ROM=m
 CONFIG_MTD_ABSENT=m
+# end of RAM/ROM/Flash chip drivers

 #
 # Mapping drivers for chip access
@@ -2166,6 +2249,7 @@ CONFIG_MTD_PCMCIA=m
 # CONFIG_MTD_PCMCIA_ANONYMOUS is not set
 CONFIG_MTD_INTEL_VR_NOR=m
 CONFIG_MTD_PLATRAM=m
+# end of Mapping drivers for chip access

 #
 # Self-contained MTD device drivers
@@ -2192,30 +2276,39 @@ CONFIG_MTD_BLOCK2MTD=m
 CONFIG_MTD_DOCG3=m
 CONFIG_BCH_CONST_M=14
 CONFIG_BCH_CONST_T=4
+# end of Self-contained MTD device drivers
+
 CONFIG_MTD_NAND_CORE=m
 CONFIG_MTD_ONENAND=m
 # CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
 CONFIG_MTD_ONENAND_GENERIC=m
 CONFIG_MTD_ONENAND_OTP=y
 CONFIG_MTD_ONENAND_2X_PROGRAM=y
-CONFIG_MTD_NAND_ECC=m
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_BCH=m
-CONFIG_MTD_NAND_ECC_BCH=y
-CONFIG_MTD_SM_COMMON=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+
+#
+# Raw/parallel NAND flash controllers
+#
 CONFIG_MTD_NAND_DENALI=m
 CONFIG_MTD_NAND_DENALI_PCI=m
 CONFIG_MTD_NAND_DENALI_DT=m
+CONFIG_MTD_NAND_CAFE=m
 CONFIG_MTD_NAND_GPIO=m
+CONFIG_MTD_NAND_PLATFORM=m
+
+#
+# Misc
+#
+CONFIG_MTD_SM_COMMON=m
+CONFIG_MTD_NAND_NANDSIM=m
 CONFIG_MTD_NAND_RICOH=m
 CONFIG_MTD_NAND_DISKONCHIP=m
 # CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
 CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
 CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
-CONFIG_MTD_NAND_CAFE=m
-CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_PLATFORM=m
 CONFIG_MTD_SPI_NAND=m

 #
@@ -2223,6 +2316,8 @@ CONFIG_MTD_SPI_NAND=m
 #
 CONFIG_MTD_LPDDR=m
 CONFIG_MTD_QINFO_PROBE=m
+# end of LPDDR & LPDDR2 PCM memory drivers
+
 CONFIG_MTD_SPI_NOR=m
 CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
 CONFIG_SPI_MTK_QUADSPI=m
@@ -2312,6 +2407,7 @@ CONFIG_NVME_TARGET_RDMA=m
 CONFIG_NVME_TARGET_FC=m
 CONFIG_NVME_TARGET_FCLOOP=m
 CONFIG_NVME_TARGET_TCP=m
+# end of NVME Support

 #
 # Misc devices
@@ -2358,6 +2454,8 @@ CONFIG_EEPROM_93CX6=m
 # CONFIG_EEPROM_93XX46 is not set
 CONFIG_EEPROM_IDT_89HPESX=m
 CONFIG_EEPROM_EE1004=m
+# end of EEPROM support
+
 CONFIG_CB710_CORE=m
 # CONFIG_CB710_DEBUG is not set
 CONFIG_CB710_DEBUG_ASSUMPTIONS=y
@@ -2366,6 +2464,8 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y
 # Texas Instruments shared transport line discipline
 #
 CONFIG_TI_ST=m
+# end of Texas Instruments shared transport line discipline
+
 CONFIG_SENSORS_LIS3_I2C=m
 CONFIG_ALTERA_STAPL=m
 CONFIG_INTEL_MEI=m
@@ -2418,6 +2518,8 @@ CONFIG_MIC_COSM=m
 #
 CONFIG_VOP=m
 CONFIG_VHOST_RING=m
+# end of Intel MIC & related support
+
 CONFIG_GENWQE=m
 CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
 CONFIG_ECHO=m
@@ -2425,6 +2527,8 @@ CONFIG_MISC_ALCOR_PCI=m
 CONFIG_MISC_RTSX_PCI=m
 CONFIG_MISC_RTSX_USB=m
 CONFIG_HABANA_AI=m
+# end of Misc devices
+
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
@@ -2464,6 +2568,8 @@ CONFIG_SCSI_SAS_LIBSAS=m
 CONFIG_SCSI_SAS_ATA=y
 CONFIG_SCSI_SAS_HOST_SMP=y
 CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
 CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 CONFIG_ISCSI_BOOT_SYSFS=m
@@ -2577,6 +2683,8 @@ CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_ALUA=m
+# end of SCSI device support
+
 CONFIG_ATA=m
 CONFIG_ATA_VERBOSE_ERROR=y
 CONFIG_ATA_ACPI=y
@@ -2712,6 +2820,7 @@ CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_DELAY=m
+CONFIG_DM_DUST=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
@@ -2747,6 +2856,8 @@ CONFIG_FIREWIRE_OHCI=m
 CONFIG_FIREWIRE_SBP2=m
 CONFIG_FIREWIRE_NET=m
 CONFIG_FIREWIRE_NOSY=m
+# end of IEEE 1394 (FireWire) support
+
 CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=m
 CONFIG_NETDEVICES=y
@@ -2843,18 +2954,22 @@ CONFIG_NET_DSA_BCM_SF2=m
 CONFIG_NET_DSA_LOOP=m
 CONFIG_NET_DSA_LANTIQ_GSWIP=m
 CONFIG_NET_DSA_MT7530=m
+CONFIG_NET_DSA_MV88E6060=m
 CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m
 CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
 CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
 CONFIG_NET_DSA_MV88E6XXX=m
 CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
 CONFIG_NET_DSA_MV88E6XXX_PTP=y
+CONFIG_NET_DSA_SJA1105=m
 CONFIG_NET_DSA_QCA8K=m
 CONFIG_NET_DSA_REALTEK_SMI=m
 CONFIG_NET_DSA_SMSC_LAN9303=m
 CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
 CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
 CONFIG_NET_DSA_VITESSE_VSC73XX=m
+# end of Distributed Switch Architecture drivers
+
 CONFIG_ETHERNET=y
 CONFIG_MDIO=m
 CONFIG_NET_VENDOR_3COM=y
@@ -3152,7 +3267,6 @@ CONFIG_NET_VENDOR_TEHUTI=y
 CONFIG_TEHUTI=m
 CONFIG_NET_VENDOR_TI=y
 # CONFIG_TI_CPSW_PHY_SEL is not set
-CONFIG_TI_CPSW_ALE=m
 CONFIG_TLAN=m
 CONFIG_NET_VENDOR_VIA=y
 CONFIG_VIA_RHINE=m
@@ -3165,6 +3279,8 @@ CONFIG_WIZNET_W5300=m
 # CONFIG_WIZNET_BUS_INDIRECT is not set
 CONFIG_WIZNET_BUS_ANY=y
 CONFIG_WIZNET_W5100_SPI=m
+CONFIG_NET_VENDOR_XILINX=y
+CONFIG_XILINX_LL_TEMAC=m
 CONFIG_NET_VENDOR_XIRCOM=y
 CONFIG_PCMCIA_XIRC2PS=m
 CONFIG_FDDI=m
@@ -3199,7 +3315,7 @@ CONFIG_LED_TRIGGER_PHY=y
 CONFIG_SFP=m
 CONFIG_AMD_PHY=m
 CONFIG_AQUANTIA_PHY=m
-CONFIG_ASIX_PHY=m
+CONFIG_AX88796B_PHY=m
 CONFIG_AT803X_PHY=m
 CONFIG_BCM7XXX_PHY=m
 CONFIG_BCM87XX_PHY=m
@@ -3422,6 +3538,8 @@ CONFIG_IWL3945=m
 #
 CONFIG_IWLEGACY_DEBUG=y
 CONFIG_IWLEGACY_DEBUGFS=y
+# end of iwl3945 / iwl4965 Debugging Options
+
 CONFIG_IWLWIFI=m
 CONFIG_IWLWIFI_LEDS=y
 CONFIG_IWLDVM=m
@@ -3436,6 +3554,8 @@ CONFIG_IWLWIFI_OPMODE_MODULAR=y
 CONFIG_IWLWIFI_DEBUG=y
 CONFIG_IWLWIFI_DEBUGFS=y
 CONFIG_IWLWIFI_DEVICE_TRACING=y
+# end of Debugging Options
+
 CONFIG_WLAN_VENDOR_INTERSIL=y
 CONFIG_HOSTAP=m
 CONFIG_HOSTAP_FIRMWARE=y
@@ -3490,6 +3610,7 @@ CONFIG_MT76x2_COMMON=m
 CONFIG_MT76x2E=m
 CONFIG_MT76x2U=m
 CONFIG_MT7603E=m
+CONFIG_MT7615E=m
 CONFIG_WLAN_VENDOR_RALINK=y
 CONFIG_RT2X00=m
 CONFIG_RT2400PCI=m
@@ -3543,6 +3664,13 @@ CONFIG_RTL8723_COMMON=m
 CONFIG_RTLBTCOEXIST=m
 CONFIG_RTL8XXXU=m
 CONFIG_RTL8XXXU_UNTESTED=y
+CONFIG_RTW88=m
+CONFIG_RTW88_CORE=m
+CONFIG_RTW88_PCI=m
+CONFIG_RTW88_8822BE=y
+CONFIG_RTW88_8822CE=y
+CONFIG_RTW88_DEBUG=y
+CONFIG_RTW88_DEBUGFS=y
 CONFIG_WLAN_VENDOR_RSI=y
 CONFIG_RSI_91X=m
 CONFIG_RSI_DEBUGFS=y
@@ -3582,6 +3710,8 @@ CONFIG_VIRT_WIFI=m
 CONFIG_WIMAX_I2400M=m
 CONFIG_WIMAX_I2400M_USB=m
 CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
+# end of WiMAX Wireless Broadband devices
+
 # CONFIG_WAN is not set
 CONFIG_IEEE802154_DRIVERS=m
 CONFIG_IEEE802154_FAKELB=m
@@ -3617,6 +3747,7 @@ CONFIG_ISDN_TTY_FAX=y
 # ISDN feature submodules
 #
 CONFIG_ISDN_DIVERSION=m
+# end of ISDN feature submodules

 #
 # ISDN4Linux hardware drivers
@@ -3677,6 +3808,8 @@ CONFIG_HISAX_ST5481=m
 CONFIG_HISAX_HFCUSB=m
 CONFIG_HISAX_HFC4S8S=m
 CONFIG_HISAX_FRITZ_PCIPNP=m
+# end of Passive cards
+
 CONFIG_ISDN_CAPI=m
 CONFIG_CAPI_TRACE=y
 CONFIG_ISDN_CAPI_CAPI20=m
@@ -3754,6 +3887,7 @@ CONFIG_KEYBOARD_ADP5520=m
 CONFIG_KEYBOARD_ADP5588=m
 CONFIG_KEYBOARD_ADP5589=m
 CONFIG_KEYBOARD_ATKBD=m
+CONFIG_KEYBOARD_QT1050=m
 CONFIG_KEYBOARD_QT1070=m
 CONFIG_KEYBOARD_QT2160=m
 CONFIG_KEYBOARD_DLINK_DIR685=m
@@ -3959,6 +4093,7 @@ CONFIG_TOUCHSCREEN_ZET6223=m
 CONFIG_TOUCHSCREEN_ZFORCE=m
 CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
 CONFIG_TOUCHSCREEN_ROHM_BU21023=m
+CONFIG_TOUCHSCREEN_IQS5XX=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_88PM860X_ONKEY=m
 CONFIG_INPUT_88PM80X_ONKEY=m
@@ -3971,6 +4106,7 @@ CONFIG_INPUT_BMA150=m
 CONFIG_INPUT_E3X0_BUTTON=m
 CONFIG_INPUT_MSM_VIBRATOR=m
 CONFIG_INPUT_PCSPKR=m
+CONFIG_INPUT_MAX77650_ONKEY=m
 CONFIG_INPUT_MAX77693_HAPTIC=m
 CONFIG_INPUT_MAX8925_ONKEY=m
 CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -3980,6 +4116,7 @@ CONFIG_INPUT_APANEL=m
 CONFIG_INPUT_GP2A=m
 CONFIG_INPUT_GPIO_BEEPER=m
 CONFIG_INPUT_GPIO_DECODER=m
+CONFIG_INPUT_GPIO_VIBRA=m
 CONFIG_INPUT_CPCAP_PWRBUTTON=m
 CONFIG_INPUT_ATLAS_BTNS=m
 CONFIG_INPUT_ATI_REMOTE2=m
@@ -4053,7 +4190,6 @@ CONFIG_SERIO_ALTERA_PS2=m
 CONFIG_SERIO_PS2MULT=m
 CONFIG_SERIO_ARC_PS2=m
 # CONFIG_SERIO_APBPS2 is not set
-CONFIG_SERIO_OLPC_APSP=m
 CONFIG_HYPERV_KEYBOARD=m
 CONFIG_SERIO_GPIO_PS2=m
 CONFIG_USERIO=m
@@ -4062,6 +4198,8 @@ CONFIG_GAMEPORT_NS558=m
 CONFIG_GAMEPORT_L4=m
 CONFIG_GAMEPORT_EMU10K1=m
 CONFIG_GAMEPORT_FM801=m
+# end of Hardware I/O ports
+# end of Input device support

 #
 # Character devices
@@ -4090,6 +4228,7 @@ CONFIG_N_HDLC=m
 CONFIG_N_GSM=m
 CONFIG_TRACE_ROUTER=m
 CONFIG_TRACE_SINK=m
+CONFIG_NULL_TTY=m
 CONFIG_LDISC_AUTOLOAD=y
 CONFIG_DEVMEM=y
 # CONFIG_DEVKMEM is not set
@@ -4133,6 +4272,7 @@ CONFIG_SERIAL_UARTLITE_NR_UARTS=1
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_JSM=m
+CONFIG_SERIAL_SIFIVE=m
 CONFIG_SERIAL_SCCNXP=m
 CONFIG_SERIAL_SC16IS7XX_CORE=m
 CONFIG_SERIAL_SC16IS7XX=m
@@ -4151,6 +4291,8 @@ CONFIG_SERIAL_RP2_NR_UARTS=32
 CONFIG_SERIAL_FSL_LPUART=m
 CONFIG_SERIAL_CONEXANT_DIGICOLOR=m
 CONFIG_SERIAL_MEN_Z135=m
+# end of Serial drivers
+
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 # CONFIG_TTY_PRINTK is not set
@@ -4188,6 +4330,8 @@ CONFIG_CARDMAN_4000=m
 CONFIG_CARDMAN_4040=m
 CONFIG_SCR24X=m
 CONFIG_IPWIRELESS=m
+# end of PCMCIA character devices
+
 CONFIG_MWAVE=m
 CONFIG_RAW_DRIVER=m
 CONFIG_MAX_RAW_DEVS=256
@@ -4217,6 +4361,8 @@ CONFIG_DEVPORT=y
 CONFIG_XILLYBUS=m
 CONFIG_XILLYBUS_PCIE=m
 CONFIG_XILLYBUS_OF=m
+# end of Character devices
+
 # CONFIG_RANDOM_TRUST_CPU is not set

 #
@@ -4242,6 +4388,8 @@ CONFIG_I2C_MUX_PINCTRL=m
 CONFIG_I2C_MUX_REG=m
 CONFIG_I2C_DEMUX_PINCTRL=m
 CONFIG_I2C_MUX_MLXCPLD=m
+# end of Multiplexer I2C Chip support
+
 CONFIG_I2C_HELPER_AUTO=y
 CONFIG_I2C_SMBUS=m
 CONFIG_I2C_ALGOBIT=m
@@ -4260,6 +4408,7 @@ CONFIG_I2C_ALI15X3=m
 CONFIG_I2C_AMD756=m
 CONFIG_I2C_AMD756_S4882=m
 CONFIG_I2C_AMD8111=m
+CONFIG_I2C_AMD_MP2=m
 CONFIG_I2C_I801=m
 CONFIG_I2C_ISCH=m
 CONFIG_I2C_ISMT=m
@@ -4316,12 +4465,16 @@ CONFIG_I2C_VIPERBOARD=m
 CONFIG_I2C_MLXCPLD=m
 CONFIG_I2C_CROS_EC_TUNNEL=m
 CONFIG_I2C_FSI=m
+# end of I2C Hardware Bus support
+
 # CONFIG_I2C_STUB is not set
 CONFIG_I2C_SLAVE=y
 CONFIG_I2C_SLAVE_EEPROM=m
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
 CONFIG_I3C=m
 CONFIG_CDNS_I3C_MASTER=m
 CONFIG_DW_I3C_MASTER=m
@@ -4401,6 +4554,8 @@ CONFIG_PPS_CLIENT_GPIO=m
 CONFIG_PTP_1588_CLOCK=y
 CONFIG_DP83640_PHY=m
 CONFIG_PTP_1588_CLOCK_KVM=m
+# end of PTP clock support
+
 CONFIG_PINCTRL=y
 CONFIG_GENERIC_PINCTRL_GROUPS=y
 CONFIG_PINMUX=y
@@ -4414,6 +4569,7 @@ CONFIG_PINCTRL_AMD=m
 CONFIG_PINCTRL_MCP23S08=m
 CONFIG_PINCTRL_SINGLE=m
 CONFIG_PINCTRL_SX150X=y
+CONFIG_PINCTRL_STMFX=m
 CONFIG_PINCTRL_MAX77620=m
 CONFIG_PINCTRL_PALMAS=m
 CONFIG_PINCTRL_RK805=m
@@ -4429,6 +4585,7 @@ CONFIG_PINCTRL_GEMINILAKE=y
 CONFIG_PINCTRL_ICELAKE=y
 CONFIG_PINCTRL_LEWISBURG=y
 CONFIG_PINCTRL_SUNRISEPOINT=y
+CONFIG_PINCTRL_LOCHNAGAR=m
 CONFIG_PINCTRL_MADERA=m
 CONFIG_PINCTRL_CS47L35=y
 CONFIG_PINCTRL_CS47L85=y
@@ -4460,13 +4617,13 @@ CONFIG_GPIO_ICH=m
 CONFIG_GPIO_LYNXPOINT=m
 CONFIG_GPIO_MB86S7X=m
 CONFIG_GPIO_MENZ127=m
-CONFIG_GPIO_MOCKUP=m
 CONFIG_GPIO_SAMA5D2_PIOBU=m
 CONFIG_GPIO_SIOX=m
 CONFIG_GPIO_SYSCON=m
 CONFIG_GPIO_VX855=m
 CONFIG_GPIO_XILINX=m
 CONFIG_GPIO_AMD_FCH=m
+# end of Memory mapped GPIO drivers

 #
 # Port-mapped I/O GPIO drivers
@@ -4477,6 +4634,7 @@ CONFIG_GPIO_SCH=m
 CONFIG_GPIO_SCH311X=m
 CONFIG_GPIO_WINBOND=m
 CONFIG_GPIO_WS16C48=m
+# end of Port-mapped I/O GPIO drivers

 #
 # I2C GPIO expanders
@@ -4489,6 +4647,7 @@ CONFIG_GPIO_MAX732X=m
 CONFIG_GPIO_PCA953X=m
 CONFIG_GPIO_PCF857X=m
 CONFIG_GPIO_TPIC2810=m
+# end of I2C GPIO expanders

 #
 # MFD GPIO expanders
@@ -4507,6 +4666,7 @@ CONFIG_GPIO_LP873X=m
 CONFIG_GPIO_LP87565=m
 CONFIG_GPIO_MADERA=m
 CONFIG_GPIO_MAX77620=m
+CONFIG_GPIO_MAX77650=m
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_RC5T583=y
 CONFIG_GPIO_STMPE=y
@@ -4525,6 +4685,7 @@ CONFIG_GPIO_WHISKEY_COVE=m
 CONFIG_GPIO_WM831X=m
 CONFIG_GPIO_WM8350=m
 CONFIG_GPIO_WM8994=m
+# end of MFD GPIO expanders

 #
 # PCI GPIO expanders
@@ -4535,6 +4696,7 @@ CONFIG_GPIO_PCI_IDIO_16=m
 CONFIG_GPIO_PCIE_IDIO_24=m
 CONFIG_GPIO_RDC321X=m
 CONFIG_GPIO_SODAVILLE=y
+# end of PCI GPIO expanders

 #
 # SPI GPIO expanders
@@ -4545,11 +4707,15 @@ CONFIG_GPIO_MAX7301=m
 CONFIG_GPIO_MC33880=m
 CONFIG_GPIO_PISOSR=m
 CONFIG_GPIO_XRA1403=m
+# end of SPI GPIO expanders

 #
 # USB GPIO expanders
 #
 CONFIG_GPIO_VIPERBOARD=m
+# end of USB GPIO expanders
+
+CONFIG_GPIO_MOCKUP=m
 CONFIG_W1=m
 CONFIG_W1_CON=y
@@ -4561,6 +4727,7 @@ CONFIG_W1_MASTER_DS2490=m
 CONFIG_W1_MASTER_DS2482=m
 CONFIG_W1_MASTER_DS1WM=m
 CONFIG_W1_MASTER_GPIO=m
+# end of 1-wire Bus Masters

 #
 # 1-wire Slaves
@@ -4582,6 +4749,8 @@ CONFIG_W1_SLAVE_DS2780=m
 CONFIG_W1_SLAVE_DS2781=m
 CONFIG_W1_SLAVE_DS28E04=m
 CONFIG_W1_SLAVE_DS28E17=m
+# end of 1-wire Slaves
+
 CONFIG_POWER_AVS=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_AS3722=y
@@ -4641,9 +4810,10 @@ CONFIG_CHARGER_LP8727=m
 CONFIG_CHARGER_LP8788=m
 CONFIG_CHARGER_GPIO=m
 CONFIG_CHARGER_MANAGER=y
-CONFIG_CHARGER_LTC3651=m
+CONFIG_CHARGER_LT3651=m
 CONFIG_CHARGER_MAX14577=m
 CONFIG_CHARGER_DETECTOR_MAX14656=m
+CONFIG_CHARGER_MAX77650=m
 CONFIG_CHARGER_MAX77693=m
 CONFIG_CHARGER_MAX8997=m
 CONFIG_CHARGER_MAX8998=m
@@ -4659,6 +4829,7 @@ CONFIG_BATTERY_GAUGE_LTC2941=m
 CONFIG_BATTERY_RT5033=m
 CONFIG_CHARGER_RT9455=m
 CONFIG_CHARGER_CROS_USBPD=m
+CONFIG_CHARGER_UCS1002=m
 CONFIG_HWMON=y
 CONFIG_HWMON_VID=m
 # CONFIG_HWMON_DEBUG_CHIP is not set
@@ -4719,6 +4890,7 @@ CONFIG_SENSORS_IT87=m
 CONFIG_SENSORS_JC42=m
 CONFIG_SENSORS_POWR1220=m
 CONFIG_SENSORS_LINEAGE=m
+CONFIG_SENSORS_LOCHNAGAR=m
 CONFIG_SENSORS_LTC2945=m
 CONFIG_SENSORS_LTC2990=m
 CONFIG_SENSORS_LTC4151=m
@@ -4768,15 +4940,14 @@ CONFIG_SENSORS_NCT6775=m
 CONFIG_SENSORS_NCT7802=m
 CONFIG_SENSORS_NCT7904=m
 CONFIG_SENSORS_NPCM7XX=m
-CONFIG_SENSORS_OCC_P8_I2C=m
-CONFIG_SENSORS_OCC_P9_SBE=m
-CONFIG_SENSORS_OCC=y
 CONFIG_SENSORS_PCF8591=m
 CONFIG_PMBUS=m
 CONFIG_SENSORS_PMBUS=m
 CONFIG_SENSORS_ADM1275=m
 CONFIG_SENSORS_IBM_CFFPS=m
 CONFIG_SENSORS_IR35221=m
+CONFIG_SENSORS_IR38064=m
+CONFIG_SENSORS_ISL68137=m
 CONFIG_SENSORS_LM25066=m
 CONFIG_SENSORS_LTC2978=m
 # CONFIG_SENSORS_LTC2978_REGULATOR is not set
@@ -4867,6 +5038,7 @@ CONFIG_CPU_THERMAL=y
 CONFIG_CLOCK_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 # CONFIG_THERMAL_EMULATION is not set
+CONFIG_THERMAL_MMIO=m
 CONFIG_MAX77620_THERMAL=m
 CONFIG_QORIQ_THERMAL=m
 CONFIG_DA9062_THERMAL=m
@@ -4885,8 +5057,12 @@ CONFIG_INTEL_SOC_DTS_THERMAL=m
 CONFIG_INT340X_THERMAL=m
 CONFIG_ACPI_THERMAL_REL=m
 CONFIG_INT3406_THERMAL=m
+# end of ACPI INT340X thermal drivers
+
 CONFIG_INTEL_BXT_PMIC_THERMAL=m
 CONFIG_INTEL_PCH_THERMAL=m
+# end of Intel thermal drivers
+
 CONFIG_GENERIC_ADC_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_CORE=y
@@ -4895,6 +5071,16 @@ CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
 CONFIG_WATCHDOG_SYSFS=y

 #
+# Watchdog Pretimeout Governors
+#
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m
+CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
+# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set
+CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
+
+#
 # Watchdog Device Drivers
 #
 CONFIG_SOFT_WATCHDOG=m
@@ -4940,8 +5126,8 @@ CONFIG_ITCO_VENDOR_SUPPORT=y
 CONFIG_IT8712F_WDT=m
 CONFIG_IT87_WDT=m
 CONFIG_HP_WATCHDOG=m
-CONFIG_KEMPLD_WDT=m
 CONFIG_HPWDT_NMI_DECODING=y
+CONFIG_KEMPLD_WDT=m
 CONFIG_SC1200_WDT=m
 CONFIG_PC87413_WDT=m
 CONFIG_NV_TCO=m
@@ -4972,15 +5158,6 @@ CONFIG_WDTPCI=m
 # USB-based Watchdog Cards
 #
 CONFIG_USBPCWATCHDOG=m
-
-#
-# Watchdog Pretimeout Governors
-#
-CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
-# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set
-CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
 CONFIG_SSB_POSSIBLE=y
 CONFIG_SSB=m
 CONFIG_SSB_SPROM=y
@@ -5061,6 +5238,7 @@ CONFIG_MFD_88PM805=m
 CONFIG_MFD_88PM860X=y
 CONFIG_MFD_MAX14577=m
 CONFIG_MFD_MAX77620=y
+CONFIG_MFD_MAX77650=m
 CONFIG_MFD_MAX77686=m
 CONFIG_MFD_MAX77693=m
 CONFIG_MFD_MAX77843=y
@@ -5099,6 +5277,8 @@ CONFIG_MFD_STMPE=y
 #
 CONFIG_STMPE_I2C=y
 CONFIG_STMPE_SPI=y
+# end of STMicroelectronics STMPE Interface Drivers
+
 CONFIG_MFD_SYSCON=y
 CONFIG_MFD_TI_AM335X_TSCADC=m
 CONFIG_MFD_LP3943=m
@@ -5147,7 +5327,10 @@ CONFIG_MFD_WM8350_I2C=y
 CONFIG_MFD_WM8994=m
 CONFIG_MFD_ROHM_BD718XX=m
 CONFIG_MFD_STPMIC1=m
+CONFIG_MFD_STMFX=m
 CONFIG_RAVE_SP_CORE=m
+# end of Multifunction device drivers
+
 CONFIG_REGULATOR=y
 # CONFIG_REGULATOR_DEBUG is not set
 CONFIG_REGULATOR_FIXED_VOLTAGE=m
@@ -5198,6 +5381,7 @@ CONFIG_REGULATOR_LTC3676=m
 CONFIG_REGULATOR_MAX14577=m
 CONFIG_REGULATOR_MAX1586=m
 CONFIG_REGULATOR_MAX77620=m
+CONFIG_REGULATOR_MAX77650=m
 CONFIG_REGULATOR_MAX8649=m
 CONFIG_REGULATOR_MAX8660=m
 CONFIG_REGULATOR_MAX8907=m
@@ -5312,7 +5496,7 @@ CONFIG_MEDIA_SDR_SUPPORT=y
 CONFIG_MEDIA_CEC_SUPPORT=y
 # CONFIG_CEC_PIN_ERROR_INJ is not set
 CONFIG_MEDIA_CONTROLLER=y
-# CONFIG_MEDIA_CONTROLLER_DVB is not set
+CONFIG_MEDIA_CONTROLLER_DVB=y
 # CONFIG_MEDIA_CONTROLLER_REQUEST_API is not set
 CONFIG_VIDEO_DEV=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
@@ -5641,6 +5825,7 @@ CONFIG_RADIO_WL1273=m
 # Texas Instruments WL128x FM driver (ST based)
 #
 CONFIG_RADIO_WL128X=m
+# end of Texas Instruments WL128x FM driver (ST based)

 #
 # Supported FireWire (IEEE 1394) Adapters
@@ -5705,17 +5890,11 @@ CONFIG_VIDEO_SAA6588=m
 #
 CONFIG_VIDEO_ADV7604=m
 CONFIG_VIDEO_ADV7842=m
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_BT866=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
 CONFIG_VIDEO_SAA711X=m
 CONFIG_VIDEO_TVP5150=m
 CONFIG_VIDEO_TW2804=m
 CONFIG_VIDEO_TW9903=m
 CONFIG_VIDEO_TW9906=m
-CONFIG_VIDEO_VPX3220=m

 #
 # Video and audio decoders
@@ -5727,9 +5906,6 @@ CONFIG_VIDEO_CX25840=m
 # Video encoders
 #
 CONFIG_VIDEO_SAA7127=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
 CONFIG_VIDEO_ADV7511=m

 #
@@ -5741,6 +5917,10 @@ CONFIG_VIDEO_OV7670=m
 CONFIG_VIDEO_MT9V011=m

 #
+# Lens drivers
+#
+
+#
 # Flash devices
 #

@@ -5768,6 +5948,8 @@ CONFIG_VIDEO_M52790=m
 # Media SPI Adapters
 #
 CONFIG_CXD2880_SPI_DRV=m
+# end of Media SPI Adapters
+
 CONFIG_MEDIA_TUNER=m
 CONFIG_MEDIA_TUNER_SIMPLE=m
 CONFIG_MEDIA_TUNER_TDA18250=m
@@ -5992,7 +6174,7 @@ CONFIG_DRM_DP_CEC=y
 CONFIG_DRM_TTM=m
 CONFIG_DRM_GEM_CMA_HELPER=y
 CONFIG_DRM_KMS_CMA_HELPER=y
-CONFIG_DRM_VM=y
+CONFIG_DRM_GEM_SHMEM_HELPER=y
 CONFIG_DRM_SCHED=m

 #
@@ -6002,11 +6184,14 @@ CONFIG_DRM_I2C_CH7006=m
 CONFIG_DRM_I2C_SIL164=m
 CONFIG_DRM_I2C_NXP_TDA998X=m
 CONFIG_DRM_I2C_NXP_TDA9950=m
+# end of I2C encoder or helper chips

 #
 # ARM devices
 #
 CONFIG_DRM_KOMEDA=m
+# end of ARM devices
+
 CONFIG_DRM_RADEON=m
 CONFIG_DRM_RADEON_USERPTR=y
 CONFIG_DRM_AMDGPU=m
@@ -6019,6 +6204,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y
 # ACP (Audio CoProcessor) Configuration
 #
 CONFIG_DRM_AMD_ACP=y
+# end of ACP (Audio CoProcessor) Configuration

 #
 # Display Engine Configuration
@@ -6027,21 +6213,16 @@ CONFIG_DRM_AMD_DC=y
 CONFIG_DRM_AMD_DC_DCN1_0=y
 CONFIG_DRM_AMD_DC_DCN1_01=y
 # CONFIG_DEBUG_KERNEL_DC is not set
-CONFIG_HSA_AMD=y
+# end of Display Engine Configuration

-#
-# AMD Library routines
-#
-CONFIG_CHASH=m
-# CONFIG_CHASH_STATS is not set
-# CONFIG_CHASH_SELFTEST is not set
+CONFIG_HSA_AMD=y
 CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
 CONFIG_NOUVEAU_DEBUG=5
 CONFIG_NOUVEAU_DEBUG_DEFAULT=3
 # CONFIG_NOUVEAU_DEBUG_MMU is not set
 CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-# CONFIG_DRM_NOUVEAU_SVM is not set
+CONFIG_DRM_NOUVEAU_SVM=y
 CONFIG_DRM_I915=m
 CONFIG_DRM_I915_ALPHA_SUPPORT=y
 CONFIG_DRM_I915_CAPTURE_ERROR=y
@@ -6062,6 +6243,8 @@ CONFIG_DRM_I915_GVT_KVMGT=m
 # CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
 # CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
 # CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
+# end of drm/i915 Debugging
+
 CONFIG_DRM_VGEM=m
 CONFIG_DRM_VKMS=m
 CONFIG_DRM_VMWGFX=m
@@ -6086,6 +6269,7 @@ CONFIG_DRM_PANEL=y
 CONFIG_DRM_PANEL_ARM_VERSATILE=m
 CONFIG_DRM_PANEL_LVDS=m
 CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D=m
 CONFIG_DRM_PANEL_ILITEK_IL9322=m
 CONFIG_DRM_PANEL_ILITEK_ILI9881C=m
 CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m
@@ -6098,6 +6282,8 @@ CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
 CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m
 CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
 CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
+CONFIG_DRM_PANEL_ROCKTECH_JH057N00900=m
+CONFIG_DRM_PANEL_RONBO_RB070D30=m
 CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
@@ -6109,6 +6295,8 @@ CONFIG_DRM_PANEL_SITRONIX_ST7701=m
 CONFIG_DRM_PANEL_SITRONIX_ST7789V=m
 CONFIG_DRM_PANEL_TPO_TPG110=m
 CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
+# end of Display Panels
+
 CONFIG_DRM_BRIDGE=y
 CONFIG_DRM_PANEL_BRIDGE=y
@@ -6138,6 +6326,8 @@ CONFIG_DRM_DW_HDMI=m
 CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
 CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
 CONFIG_DRM_DW_HDMI_CEC=m
+# end of Display Interface Bridges
+
 # CONFIG_DRM_ETNAVIV is not set
 CONFIG_DRM_ARCPGU=m
 CONFIG_DRM_HISI_HIBMC=m
@@ -6154,6 +6344,7 @@ CONFIG_TINYDRM_ST7586=m
 CONFIG_TINYDRM_ST7735R=m
 CONFIG_DRM_XEN=y
 CONFIG_DRM_XEN_FRONTEND=m
+CONFIG_DRM_VBOXVIDEO=m
 # CONFIG_DRM_LEGACY is not set
 CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
@@ -6229,7 +6420,11 @@ CONFIG_FB_HYPERV=m
 CONFIG_FB_SIMPLE=y
 # CONFIG_FB_SSD1307 is not set
 # CONFIG_FB_SM712 is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
 CONFIG_LCD_CLASS_DEVICE=m
 CONFIG_LCD_L4F00242T03=m
 CONFIG_LCD_LMS283GF05=m
@@ -6273,6 +6468,8 @@ CONFIG_BACKLIGHT_LV5207LP=m
 CONFIG_BACKLIGHT_BD6107=m
 CONFIG_BACKLIGHT_ARCXCNN=m
 CONFIG_BACKLIGHT_RAVE_SP=m
+# end of Backlight & LCD device support
+
 CONFIG_VIDEOMODE_HELPERS=y
 CONFIG_HDMI=y

@@ -6290,7 +6487,11 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
+# end of Console display driver support
+
 # CONFIG_LOGO is not set
+# end of Graphics support
+
 CONFIG_SOUND=m
 CONFIG_SOUND_OSS_CORE=y
 # CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
@@ -6447,6 +6648,8 @@ CONFIG_SND_HDA_CODEC_CMEDIA=m
 CONFIG_SND_HDA_CODEC_SI3054=m
 CONFIG_SND_HDA_GENERIC=m
 CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+# end of HD-Audio
+
 CONFIG_SND_HDA_CORE=m
 CONFIG_SND_HDA_DSP_LOADER=y
 CONFIG_SND_HDA_COMPONENT=y
@@ -6456,6 +6659,7 @@ CONFIG_SND_HDA_PREALLOC_SIZE=4096
 CONFIG_SND_SPI=y
 CONFIG_SND_USB=y
 CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
 CONFIG_SND_USB_UA101=m
 CONFIG_SND_USB_USX2Y=m
 CONFIG_SND_USB_CAIAQ=m
@@ -6507,11 +6711,14 @@ CONFIG_SND_DESIGNWARE_PCM=y
 #
 # CONFIG_SND_SOC_FSL_ASRC is not set
 # CONFIG_SND_SOC_FSL_SAI is not set
+CONFIG_SND_SOC_FSL_AUDMIX=m
 # CONFIG_SND_SOC_FSL_SSI is not set
 # CONFIG_SND_SOC_FSL_SPDIF is not set
 # CONFIG_SND_SOC_FSL_ESAI is not set
 CONFIG_SND_SOC_FSL_MICFIL=m
 # CONFIG_SND_SOC_IMX_AUDMUX is not set
+# end of SoC Audio for Freescale CPUs
+
 CONFIG_SND_I2S_HI6210_I2S=m
 CONFIG_SND_SOC_IMG=y
 CONFIG_SND_SOC_IMG_I2S_IN=m
@@ -6568,11 +6775,51 @@ CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
 CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m
 CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m
 CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m
+CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m
 CONFIG_SND_SOC_MTK_BTCVSD=m
+CONFIG_SND_SOC_SOF_TOPLEVEL=y
+CONFIG_SND_SOC_SOF_PCI=m
+CONFIG_SND_SOC_SOF_ACPI=m
+CONFIG_SND_SOC_SOF_OPTIONS=m
+# CONFIG_SND_SOC_SOF_NOCODEC_SUPPORT is not set
+# CONFIG_SND_SOC_SOF_STRICT_ABI_CHECKS is not set
+# CONFIG_SND_SOC_SOF_DEBUG is not set
+CONFIG_SND_SOC_SOF=m
+CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
+CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
+CONFIG_SND_SOC_SOF_INTEL_ACPI=m
+CONFIG_SND_SOC_SOF_INTEL_PCI=m
+CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
+CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
+CONFIG_SND_SOC_SOF_INTEL_COMMON=m
+CONFIG_SND_SOC_SOF_BAYTRAIL_SUPPORT=y
+CONFIG_SND_SOC_SOF_BAYTRAIL=m
+CONFIG_SND_SOC_SOF_BROADWELL_SUPPORT=y
+CONFIG_SND_SOC_SOF_BROADWELL=m
+CONFIG_SND_SOC_SOF_MERRIFIELD_SUPPORT=y
+CONFIG_SND_SOC_SOF_MERRIFIELD=m
+CONFIG_SND_SOC_SOF_APOLLOLAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_APOLLOLAKE=m
+CONFIG_SND_SOC_SOF_GEMINILAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_GEMINILAKE=m
+CONFIG_SND_SOC_SOF_CANNONLAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_CANNONLAKE=m
+CONFIG_SND_SOC_SOF_COFFEELAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_COFFEELAKE=m
+CONFIG_SND_SOC_SOF_ICELAKE_SUPPORT=y
+CONFIG_SND_SOC_SOF_ICELAKE=m
+CONFIG_SND_SOC_SOF_HDA_COMMON=m
+CONFIG_SND_SOC_SOF_HDA_LINK=y
+CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
+CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
+CONFIG_SND_SOC_SOF_HDA=m
+CONFIG_SND_SOC_SOF_XTENSA=m

 #
 # STMicroelectronics STM32 SOC audio support
 #
+# end of STMicroelectronics STM32 SOC audio support
+
 CONFIG_SND_SOC_XILINX_I2S=m
 CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
 CONFIG_SND_SOC_XILINX_SPDIF=m
@@ -6640,6 +6887,7 @@ CONFIG_SND_SOC_GTM601=m
 CONFIG_SND_SOC_HDAC_HDMI=m
 CONFIG_SND_SOC_HDAC_HDA=m
 CONFIG_SND_SOC_INNO_RK3036=m
+CONFIG_SND_SOC_LOCHNAGAR_SC=m
 CONFIG_SND_SOC_MAX98088=m
 CONFIG_SND_SOC_MAX98090=m
 CONFIG_SND_SOC_MAX98357A=m
@@ -6755,6 +7003,8 @@ CONFIG_SND_SOC_NAU8822=m
 CONFIG_SND_SOC_NAU8824=m
 CONFIG_SND_SOC_NAU8825=m
 CONFIG_SND_SOC_TPA6130A2=m
+# end of CODEC drivers
+
 CONFIG_SND_SIMPLE_CARD_UTILS=m
 CONFIG_SND_SIMPLE_CARD=m
 CONFIG_SND_AUDIO_GRAPH_CARD=m
@@ -6791,6 +7041,7 @@ CONFIG_HID_CHERRY=m
 CONFIG_HID_CHICONY=m
 CONFIG_HID_CORSAIR=m
 CONFIG_HID_COUGAR=m
+CONFIG_HID_MACALLY=m
 CONFIG_HID_PRODIKEYS=m
 CONFIG_HID_CMEDIA=m
 CONFIG_HID_CP2112=m
@@ -6873,6 +7124,7 @@ CONFIG_HID_THINGM=m
 CONFIG_HID_THRUSTMASTER=m
 CONFIG_THRUSTMASTER_FF=y
 CONFIG_HID_UDRAW_PS3=m
+CONFIG_HID_U2FZERO=m
 CONFIG_HID_WACOM=m
 CONFIG_HID_WIIMOTE=m
 CONFIG_HID_XINMO=m
@@ -6882,6 +7134,7 @@ CONFIG_HID_ZYDACRON=m
 CONFIG_HID_SENSOR_HUB=m
 # CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set
 CONFIG_HID_ALPS=m
+# end of Special HID drivers

 #
 # USB HID support
@@ -6895,16 +7148,23 @@ CONFIG_USB_HIDDEV=y
 #
 # CONFIG_USB_KBD is not set
 # CONFIG_USB_MOUSE is not set
+# end of USB HID Boot Protocol drivers
+# end of USB HID support

 #
 # I2C HID support
 #
 CONFIG_I2C_HID=m
+# end of I2C HID support

 #
 # Intel ISH HID support
 #
 CONFIG_INTEL_ISH_HID=m
+CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
+# end of Intel ISH HID support
+# end of HID support
+
 CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_COMMON=y
@@ -7162,6 +7422,8 @@ CONFIG_USB_GPIO_VBUS=m
 CONFIG_TAHVO_USB=m
 # CONFIG_TAHVO_USB_HOST_BY_DEFAULT is not set
 CONFIG_USB_ISP1301=m
+# end of USB Physical Layer drivers
+
 CONFIG_USB_GADGET=m
 # CONFIG_USB_GADGET_DEBUG is not set
 # CONFIG_USB_GADGET_DEBUG_FILES is not set
@@ -7196,6 +7458,8 @@ CONFIG_USB_GOKU=m
 CONFIG_USB_EG20T=m
 CONFIG_USB_GADGET_XILINX=m
 CONFIG_USB_DUMMY_HCD=m
+# end of USB Peripheral Controller
+
 CONFIG_USB_LIBCOMPOSITE=m
 CONFIG_USB_F_ACM=m
 CONFIG_USB_F_SS_LB=m
@@ -7284,11 +7548,15 @@ CONFIG_TYPEC_TPS6598X=m
 # USB Type-C Multiplexer/DeMultiplexer Switch support
 #
 CONFIG_TYPEC_MUX_PI3USB30532=m
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support

 #
 # USB Type-C Alternate Mode drivers
 #
 CONFIG_TYPEC_DP_ALTMODE=m
+CONFIG_TYPEC_NVIDIA_ALTMODE=m
+# end of USB Type-C Alternate Mode drivers
+
 CONFIG_USB_ROLE_SWITCH=m
 CONFIG_USB_ROLES_INTEL_XHCI=m
 CONFIG_USB_LED_TRIG=y
@@ -7311,6 +7579,7 @@ CONFIG_MMC_TEST=m
 #
 # CONFIG_MMC_DEBUG is not set
 CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
 CONFIG_MMC_SDHCI_PCI=m
 CONFIG_MMC_RICOH_MMC=y
 CONFIG_MMC_SDHCI_ACPI=m
@@ -7374,6 +7643,7 @@ CONFIG_LEDS_BCM6358=m
 CONFIG_LEDS_CPCAP=m
 CONFIG_LEDS_CR0014114=m
 CONFIG_LEDS_LM3530=m
+CONFIG_LEDS_LM3532=m
 CONFIG_LEDS_LM3533=m
 CONFIG_LEDS_LM3642=m
 CONFIG_LEDS_LM3692X=m
@@ -7409,6 +7679,7 @@ CONFIG_LEDS_ADP5520=m
 CONFIG_LEDS_MC13783=m
 CONFIG_LEDS_TCA6507=m
 CONFIG_LEDS_TLC591XX=m
+CONFIG_LEDS_MAX77650=m
 CONFIG_LEDS_MAX77693=m
 CONFIG_LEDS_MAX8997=m
 CONFIG_LEDS_LM355x=m
@@ -7467,6 +7738,7 @@ CONFIG_INFINIBAND_QIB=m
 CONFIG_INFINIBAND_QIB_DCA=y
 CONFIG_INFINIBAND_CXGB3=m
 CONFIG_INFINIBAND_CXGB4=m
+CONFIG_INFINIBAND_EFA=m
 CONFIG_INFINIBAND_I40IW=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
@@ -7702,6 +7974,8 @@ CONFIG_DMA_ENGINE_RAID=y
 CONFIG_SYNC_FILE=y
 # CONFIG_SW_SYNC is not set
 CONFIG_UDMABUF=y
+# end of DMABUF options
+
 CONFIG_DCA=m
 CONFIG_AUXDISPLAY=y
 CONFIG_HD44780=m
@@ -7762,6 +8036,7 @@ CONFIG_HYPERV=m
 CONFIG_HYPERV_TSCPAGE=y
 CONFIG_HYPERV_UTILS=m
 CONFIG_HYPERV_BALLOON=m
+# end of Microsoft Hyper-V guest support

 #
 # Xen driver support
@@ -7797,6 +8072,8 @@ CONFIG_XEN_ACPI=y
 CONFIG_XEN_SYMS=y
 CONFIG_XEN_HAVE_VPMU=y
 CONFIG_XEN_FRONT_PGDIR_SHBUF=m
+# end of Xen driver support
+
 CONFIG_STAGING=y
 CONFIG_PRISM2_USB=m
 CONFIG_COMEDI=m
@@ -7900,8 +8177,6 @@ CONFIG_RTL8723BS=m
 CONFIG_R8712U=m
 CONFIG_R8188EU=m
 CONFIG_88EU_AP_MODE=y
-CONFIG_R8822BE=m
-CONFIG_RTLWIFI_DEBUG_ST=y
 CONFIG_RTS5208=m
 CONFIG_VT6655=m
 CONFIG_VT6656=m
@@ -7915,14 +8190,15 @@ CONFIG_VT6656=m
 #
 CONFIG_ADIS16203=m
 CONFIG_ADIS16240=m
+# end of Accelerometers

 #
 # Analog to digital converters
 #
-CONFIG_AD7780=m
 CONFIG_AD7816=m
 CONFIG_AD7192=m
 CONFIG_AD7280=m
+# end of Analog to digital converters

 #
 # Analog digital bi-direction converters
@@ -7930,23 +8206,27 @@ CONFIG_AD7280=m
 CONFIG_ADT7316=m
 CONFIG_ADT7316_SPI=m
 CONFIG_ADT7316_I2C=m
+# end of Analog digital bi-direction converters

 #
 # Capacitance to digital converters
 #
 CONFIG_AD7150=m
 CONFIG_AD7746=m
+# end of Capacitance to digital converters

 #
 # Direct Digital Synthesis
 #
 CONFIG_AD9832=m
 CONFIG_AD9834=m
+# end of Direct Digital Synthesis

 #
 # Network Analyzer, Impedance Converters
 #
 CONFIG_AD5933=m
+# end of Network Analyzer, Impedance Converters

 #
 # Active energy metering IC
@@ -7954,12 +8234,16 @@ CONFIG_AD5933=m
 CONFIG_ADE7854=m
 CONFIG_ADE7854_I2C=m
 CONFIG_ADE7854_SPI=m
+# end of Active energy metering IC

 #
 # Resolver to digital converters
 #
 CONFIG_AD2S1210=m
-CONFIG_FB_SM750=m
+# end of Resolver to digital converters
+# end of IIO staging drivers
+
+# CONFIG_FB_SM750 is not set

 #
 # Speakup console speech
@@ -7976,16 +8260,10 @@ CONFIG_SPEAKUP_SYNTH_SOFT=m
 CONFIG_SPEAKUP_SYNTH_SPKOUT=m
 CONFIG_SPEAKUP_SYNTH_TXPRT=m
 CONFIG_SPEAKUP_SYNTH_DUMMY=m
+# end of Speakup console speech
+
 CONFIG_STAGING_MEDIA=y
 CONFIG_I2C_BCM2048=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
 CONFIG_VIDEO_IPU3_IMGU=m

 #
@@ -7995,6 +8273,8 @@ CONFIG_VIDEO_IPU3_IMGU=m
 #
 # Android
 #
+# end of Android
+
 CONFIG_STAGING_BOARD=y
 CONFIG_LTE_GDM724X=m
 CONFIG_FIREWIRE_SERIAL=m
@@ -8002,6 +8282,9 @@ CONFIG_FWTTY_MAX_TOTAL_PORTS=64
 CONFIG_FWTTY_MAX_CARD_PORTS=32
 CONFIG_GS_FPGABOOT=m
 CONFIG_UNISYSSPAR=y
+CONFIG_UNISYS_VISORNIC=m
+CONFIG_UNISYS_VISORINPUT=m
+CONFIG_UNISYS_VISORHBA=m
 CONFIG_COMMON_CLK_XLNX_CLKWZRD=m
 # CONFIG_FB_TFT is not set
 CONFIG_WILC1000=m
@@ -8018,7 +8301,6 @@ CONFIG_MOST_I2C=m
 CONFIG_MOST_USB=m
 CONFIG_KS7010=m
 # CONFIG_GREYBUS is not set
-CONFIG_DRM_VBOXVIDEO=m
 CONFIG_PI433=m

 #
@@ -8026,6 +8308,8 @@ CONFIG_PI433=m
 #
 CONFIG_STAGING_GASKET_FRAMEWORK=m
 CONFIG_STAGING_APEX_DRIVER=m
+# end of Gasket devices
+
 CONFIG_XIL_AXIS_FIFO=m
 CONFIG_EROFS_FS=m
 # CONFIG_EROFS_FS_DEBUG is not set
@@ -8040,6 +8324,15 @@ CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=2
 # CONFIG_EROFS_FS_ZIP_NO_CACHE is not set
 # CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR is not set
 CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR=y
+CONFIG_FIELDBUS_DEV=m
+CONFIG_HMS_ANYBUSS_BUS=m
+CONFIG_ARCX_ANYBUS_CONTROLLER=m
+CONFIG_HMS_PROFINET=m
+CONFIG_KPC2000=y
+CONFIG_KPC2000_CORE=m
+CONFIG_KPC2000_SPI=m
+CONFIG_KPC2000_I2C=m
+CONFIG_KPC2000_DMA=m
 CONFIG_X86_PLATFORM_DEVICES=y
 CONFIG_ACER_WMI=m
 CONFIG_ACER_WIRELESS=m
@@ -8097,7 +8390,6 @@ CONFIG_ACPI_TOSHIBA=m
 CONFIG_TOSHIBA_BT_RFKILL=m
 CONFIG_TOSHIBA_HAPS=m
 CONFIG_TOSHIBA_WMI=m
-CONFIG_ACPI_CMPC=m
 CONFIG_INTEL_CHT_INT33FE=m
 CONFIG_INTEL_INT0002_VGPIO=m
 CONFIG_INTEL_HID_EVENT=m
@@ -8108,7 +8400,6 @@ CONFIG_IBM_RTL=m
 CONFIG_SAMSUNG_LAPTOP=m
 CONFIG_MXM_WMI=m
 CONFIG_INTEL_OAKTRAIL=m
-CONFIG_SAMSUNG_Q10=m
 CONFIG_APPLE_GMUX=m
 CONFIG_INTEL_RST=m
 CONFIG_INTEL_SMARTCONNECT=m
@@ -8132,6 +8423,7 @@ CONFIG_CHROMEOS_LAPTOP=m
 CONFIG_CHROMEOS_PSTORE=m
 CONFIG_CHROMEOS_TBMC=m
 CONFIG_CROS_EC_I2C=m
+CONFIG_CROS_EC_RPMSG=m
 CONFIG_CROS_EC_SPI=m
 CONFIG_CROS_EC_LPC=m
 CONFIG_CROS_EC_LPC_MEC=y
@@ -8141,6 +8433,7 @@ CONFIG_CROS_EC_LIGHTBAR=m
 CONFIG_CROS_EC_VBC=m
 CONFIG_CROS_EC_DEBUGFS=m
 CONFIG_CROS_EC_SYSFS=m
+CONFIG_CROS_USBPD_LOGGER=m
 CONFIG_WILCO_EC=m
 CONFIG_WILCO_EC_DEBUGFS=m
 CONFIG_MELLANOX_PLATFORM=y
@@ -8167,11 +8460,14 @@ CONFIG_COMMON_CLK_CDCE925=m
 CONFIG_COMMON_CLK_CS2000_CP=m
 CONFIG_COMMON_CLK_S2MPS11=m
 CONFIG_CLK_TWL6040=m
+CONFIG_COMMON_CLK_LOCHNAGAR=m
 CONFIG_COMMON_CLK_PALMAS=m
 CONFIG_COMMON_CLK_PWM=m
 CONFIG_COMMON_CLK_VC5=m
CONFIG_COMMON_CLK_BD718XX=m CONFIG_COMMON_CLK_FIXED_MMIO=y +# end of Common Clock Framework + CONFIG_HWSPINLOCK=y # @@ -8180,6 +8476,8 @@ CONFIG_HWSPINLOCK=y CONFIG_CLKEVT_I8253=y CONFIG_I8253_LOCK=y CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + CONFIG_MAILBOX=y CONFIG_PLATFORM_MHU=m CONFIG_PCC=y @@ -8192,6 +8490,8 @@ CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # +# end of Generic IOMMU Pagetable Support + # CONFIG_IOMMU_DEBUGFS is not set # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y @@ -8209,6 +8509,7 @@ CONFIG_HYPERV_IOMMU=y # Remoteproc drivers # CONFIG_REMOTEPROC=m +# end of Remoteproc drivers # # Rpmsg drivers @@ -8218,6 +8519,8 @@ CONFIG_RPMSG_CHAR=m CONFIG_RPMSG_QCOM_GLINK_NATIVE=m CONFIG_RPMSG_QCOM_GLINK_RPM=m CONFIG_RPMSG_VIRTIO=m +# end of Rpmsg drivers + CONFIG_SOUNDWIRE=y # @@ -8234,28 +8537,49 @@ CONFIG_SOUNDWIRE_INTEL=m # # Amlogic SoC drivers # +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers # # Broadcom SoC drivers # +# end of Broadcom SoC drivers # # NXP/Freescale QorIQ SoC drivers # +# end of NXP/Freescale QorIQ SoC drivers # # i.MX SoC drivers # +# end of i.MX SoC drivers + +# +# IXP4xx SoC drivers +# +CONFIG_IXP4XX_QMGR=m +CONFIG_IXP4XX_NPE=m +# end of IXP4xx SoC drivers # # Qualcomm SoC drivers # +# end of Qualcomm SoC drivers + CONFIG_SOC_TI=y # # Xilinx SoC drivers # CONFIG_XILINX_VCU=m +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + CONFIG_PM_DEVFREQ=y # @@ -8348,6 +8672,7 @@ CONFIG_MXC6255=m CONFIG_SCA3000=m CONFIG_STK8312=m CONFIG_STK8BA50=m +# end of Accelerometers # # Analog to digital converters @@ -8363,6 +8688,7 @@ CONFIG_AD7606_IFACE_PARALLEL=m CONFIG_AD7606_IFACE_SPI=m CONFIG_AD7766=m CONFIG_AD7768_1=m +CONFIG_AD7780=m CONFIG_AD7791=m CONFIG_AD7793=m CONFIG_AD7887=m @@ -8409,6 +8735,7 @@ CONFIG_TI_ADC128S052=m CONFIG_TI_ADC161S626=m CONFIG_TI_ADS1015=m CONFIG_TI_ADS7950=m +CONFIG_TI_ADS8344=m CONFIG_TI_ADS8688=m CONFIG_TI_ADS124S08=m CONFIG_TI_AM335X_ADC=m @@ -8417,16 +8744,19 @@ CONFIG_TWL4030_MADC=m CONFIG_TWL6030_GPADC=m CONFIG_VF610_ADC=m CONFIG_VIPERBOARD_ADC=m +# end of Analog to digital converters # # Analog Front Ends # CONFIG_IIO_RESCALE=m +# end of Analog Front Ends # # Amplifiers # CONFIG_AD8366=m +# end of Amplifiers # # Chemical Sensors @@ -8441,6 +8771,8 @@ CONFIG_PMS7003=m CONFIG_SENSIRION_SGP30=m CONFIG_SPS30=m CONFIG_VZ89X=m +# end of Chemical Sensors + CONFIG_IIO_CROS_EC_SENSORS_CORE=m CONFIG_IIO_CROS_EC_SENSORS=m @@ -8449,6 +8781,8 @@ CONFIG_IIO_CROS_EC_SENSORS=m # CONFIG_HID_SENSOR_IIO_COMMON=m CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + CONFIG_IIO_MS_SENSORS_I2C=m # @@ -8456,15 +8790,13 @@ CONFIG_IIO_MS_SENSORS_I2C=m # CONFIG_IIO_SSP_SENSORS_COMMONS=m CONFIG_IIO_SSP_SENSORHUB=m +# end of SSP Sensor Common + CONFIG_IIO_ST_SENSORS_I2C=m CONFIG_IIO_ST_SENSORS_SPI=m CONFIG_IIO_ST_SENSORS_CORE=m # -# Counters -# - -# # Digital to analog converters # CONFIG_AD5064=m @@ -8502,11 +8834,13 @@ CONFIG_TI_DAC5571=m CONFIG_TI_DAC7311=m CONFIG_TI_DAC7612=m CONFIG_VF610_DAC=m +# end of Digital to analog converters # # IIO dummy driver # # CONFIG_IIO_SIMPLE_DUMMY is not set +# end of IIO dummy driver # # Frequency Synthesizers DDS/PLL @@ -8516,11 +8850,14 @@ CONFIG_VF610_DAC=m # Clock Generator/Distribution # CONFIG_AD9523=m +# end of Clock Generator/Distribution # # Phase-Locked Loop (PLL) frequency synthesizers # CONFIG_ADF4350=m +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency 
Synthesizers DDS/PLL # # Digital gyroscope sensors @@ -8533,6 +8870,9 @@ CONFIG_ADXRS450=m CONFIG_BMG160=m CONFIG_BMG160_I2C=m CONFIG_BMG160_SPI=m +CONFIG_FXAS21002C=m +CONFIG_FXAS21002C_I2C=m +CONFIG_FXAS21002C_SPI=m CONFIG_HID_SENSOR_GYRO_3D=m CONFIG_MPU3050=m CONFIG_MPU3050_I2C=m @@ -8540,6 +8880,7 @@ CONFIG_IIO_ST_GYRO_3AXIS=m CONFIG_IIO_ST_GYRO_I2C_3AXIS=m CONFIG_IIO_ST_GYRO_SPI_3AXIS=m CONFIG_ITG3200=m +# end of Digital gyroscope sensors # # Health Sensors @@ -8552,6 +8893,8 @@ CONFIG_AFE4403=m CONFIG_AFE4404=m CONFIG_MAX30100=m CONFIG_MAX30102=m +# end of Heart Rate Monitors +# end of Health Sensors # # Humidity sensors @@ -8566,6 +8909,7 @@ CONFIG_HTS221_SPI=m CONFIG_HTU21=m CONFIG_SI7005=m CONFIG_SI7020=m +# end of Humidity sensors # # Inertial measurement units @@ -8582,6 +8926,8 @@ CONFIG_INV_MPU6050_SPI=m CONFIG_IIO_ST_LSM6DSX=m CONFIG_IIO_ST_LSM6DSX_I2C=m CONFIG_IIO_ST_LSM6DSX_SPI=m +# end of Inertial measurement units + CONFIG_IIO_ADIS_LIB=m CONFIG_IIO_ADIS_LIB_BUFFER=y @@ -8634,6 +8980,7 @@ CONFIG_VCNL4035=m CONFIG_VEML6070=m CONFIG_VL6180=m CONFIG_ZOPT2201=m +# end of Light sensors # # Magnetometer sensors @@ -8656,17 +9003,20 @@ CONFIG_SENSORS_HMC5843_SPI=m CONFIG_SENSORS_RM3100=m CONFIG_SENSORS_RM3100_I2C=m CONFIG_SENSORS_RM3100_SPI=m +# end of Magnetometer sensors # # Multiplexers # CONFIG_IIO_MUX=m +# end of Multiplexers # # Inclinometer sensors # CONFIG_HID_SENSOR_INCLINOMETER_3D=m CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors # # Triggers - standalone @@ -8675,6 +9025,7 @@ CONFIG_IIO_HRTIMER_TRIGGER=m CONFIG_IIO_INTERRUPT_TRIGGER=m CONFIG_IIO_TIGHTLOOP_TRIGGER=m CONFIG_IIO_SYSFS_TRIGGER=m +# end of Triggers - standalone # # Digital potentiometers @@ -8688,11 +9039,13 @@ CONFIG_MCP4131=m CONFIG_MCP4531=m CONFIG_MCP41010=m CONFIG_TPL0102=m +# end of Digital potentiometers # # Digital potentiostats # CONFIG_LMP91000=m +# end of Digital potentiostats # # Pressure sensors @@ -8720,28 +9073,33 @@ CONFIG_HP206C=m CONFIG_ZPA2326=m CONFIG_ZPA2326_I2C=m CONFIG_ZPA2326_SPI=m +# end of Pressure sensors # # Lightning sensors # CONFIG_AS3935=m +# end of Lightning sensors # # Proximity and distance sensors # CONFIG_ISL29501=m CONFIG_LIDAR_LITE_V2=m +CONFIG_MB1232=m CONFIG_RFD77402=m CONFIG_SRF04=m CONFIG_SX9500=m CONFIG_SRF08=m CONFIG_VL53L0X_I2C=m +# end of Proximity and distance sensors # # Resolver to digital converters # CONFIG_AD2S90=m CONFIG_AD2S1200=m +# end of Resolver to digital converters # # Temperature sensors @@ -8754,6 +9112,9 @@ CONFIG_TMP006=m CONFIG_TMP007=m CONFIG_TSYS01=m CONFIG_TSYS02D=m +CONFIG_MAX31856=m +# end of Temperature sensors + CONFIG_NTB=m CONFIG_NTB_AMD=m CONFIG_NTB_IDT=m @@ -8802,6 +9163,8 @@ CONFIG_PWM_TWL_LED=m CONFIG_IRQCHIP=y CONFIG_ARM_GIC_MAX_NR=1 CONFIG_MADERA_IRQ=m +# end of IRQ chip support + CONFIG_IPACK_BUS=m CONFIG_BOARD_TPCI200=m CONFIG_SERIAL_IPOCTAL=m @@ -8832,6 +9195,8 @@ CONFIG_PHY_QCOM_USB_HS=m CONFIG_PHY_QCOM_USB_HSIC=m CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_TUSB1210=m +# end of PHY Subsystem + CONFIG_POWERCAP=y CONFIG_INTEL_RAPL=m CONFIG_IDLE_INJECT=y @@ -8842,6 +9207,8 @@ CONFIG_MCB_LPC=m # # Performance monitor support # +# end of Performance monitor support + CONFIG_RAS=y CONFIG_RAS_CEC=y CONFIG_THUNDERBOLT=m @@ -8850,6 +9217,8 @@ CONFIG_THUNDERBOLT=m # Android # # CONFIG_ANDROID is not set +# end of Android + CONFIG_LIBNVDIMM=y CONFIG_BLK_DEV_PMEM=m CONFIG_ND_BLK=m @@ -8867,6 +9236,7 @@ CONFIG_DEV_DAX_PMEM=m CONFIG_DEV_DAX_KMEM=m CONFIG_DEV_DAX_PMEM_COMPAT=m CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y 
CONFIG_RAVE_SP_EEPROM=m # @@ -8887,6 +9257,8 @@ CONFIG_INTEL_TH_STH=m CONFIG_INTEL_TH_MSU=m CONFIG_INTEL_TH_PTI=m # CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + CONFIG_FPGA=m CONFIG_ALTERA_PR_IP_CORE=m CONFIG_ALTERA_PR_IP_CORE_PLAT=m @@ -8923,13 +9295,18 @@ CONFIG_MUX_ADG792A=m CONFIG_MUX_ADGS1408=m CONFIG_MUX_GPIO=m CONFIG_MUX_MMIO=m +# end of Multiplexer drivers + CONFIG_PM_OPP=y -# CONFIG_UNISYS_VISORBUS is not set +CONFIG_UNISYS_VISORBUS=m CONFIG_SIOX=m CONFIG_SIOX_BUS_GPIO=m CONFIG_SLIMBUS=m CONFIG_SLIM_QCOM_CTRL=m CONFIG_INTERCONNECT=m +CONFIG_COUNTER=m +CONFIG_FTM_QUADDEC=m +# end of Device Drivers # # File systems @@ -9034,6 +9411,7 @@ CONFIG_FSCACHE_HISTOGRAM=y CONFIG_CACHEFILES=m # CONFIG_CACHEFILES_DEBUG is not set # CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches # # CD-ROM/DVD Filesystems @@ -9042,6 +9420,7 @@ CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems # # DOS/FAT/NT Filesystems @@ -9055,6 +9434,7 @@ CONFIG_FAT_DEFAULT_UTF8=y CONFIG_NTFS_FS=m # CONFIG_NTFS_DEBUG is not set CONFIG_NTFS_RW=y +# end of DOS/FAT/NT Filesystems # # Pseudo filesystems @@ -9077,6 +9457,8 @@ CONFIG_MEMFD_CREATE=y CONFIG_ARCH_HAS_GIGANTIC_PAGE=y CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + CONFIG_MISC_FILESYSTEMS=y CONFIG_ORANGEFS_FS=m # CONFIG_ADFS_FS is not set @@ -9281,6 +9663,9 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=m CONFIG_DLM=m # CONFIG_DLM_DEBUG is not set +CONFIG_UNICODE=y +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set +# end of File systems # # Security options @@ -9324,6 +9709,7 @@ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024 # CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init" CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init" +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_APPARMOR_HASH=y CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y @@ -9338,6 +9724,28 @@ CONFIG_SECURITY_SAFESETID=y # CONFIG_DEFAULT_SECURITY_APPARMOR is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_LSM="yama" + +# +# Kernel hardening options +# +CONFIG_GCC_PLUGIN_STRUCTLEAK=y + +# +# Memory initialization +# +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set +CONFIG_GCC_PLUGIN_STACKLEAK=y +CONFIG_STACKLEAK_TRACK_MIN_SIZE=100 +CONFIG_STACKLEAK_METRICS=y +CONFIG_STACKLEAK_RUNTIME_DISABLE=y +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + CONFIG_XOR_BLOCKS=m CONFIG_ASYNC_CORE=m CONFIG_ASYNC_MEMCPY=m @@ -9365,9 +9773,6 @@ CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y CONFIG_CRYPTO_KPP=y CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_USER=m @@ -9385,6 +9790,15 @@ CONFIG_CRYPTO_GLUE_HELPER_X86=m CONFIG_CRYPTO_ENGINE=m # +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_ECRDSA=m + +# # Authenticated Encryption with Associated Data # CONFIG_CRYPTO_CCM=m @@ -9575,6 +9989,8 @@ CONFIG_SYSTEM_TRUSTED_KEYS="" CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# end of Certificates for signature checking + CONFIG_BINARY_PRINTF=y # @@ -9582,12 +9998,14 @@ 
CONFIG_BINARY_PRINTF=y # CONFIG_RAID6_PQ=m CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y CONFIG_BITREVERSE=y -CONFIG_RATIONAL=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y CONFIG_GENERIC_NET_UTILS=y CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_CORDIC=m +CONFIG_RATIONAL=y CONFIG_GENERIC_PCI_IOMAP=y CONFIG_GENERIC_IOMAP=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y @@ -9641,7 +10059,6 @@ CONFIG_REED_SOLOMON_ENC8=y CONFIG_REED_SOLOMON_DEC8=y CONFIG_REED_SOLOMON_DEC16=y CONFIG_BCH=m -CONFIG_BCH_CONST_PARAMS=y CONFIG_TEXTSEARCH=y CONFIG_TEXTSEARCH_KMP=m CONFIG_TEXTSEARCH_BM=m @@ -9670,7 +10087,6 @@ CONFIG_GLOB=y CONFIG_NLATTR=y CONFIG_LRU_CACHE=m CONFIG_CLZ_TAB=y -CONFIG_CORDIC=m CONFIG_DDR=y CONFIG_IRQ_POLL=y CONFIG_MPILIB=y @@ -9696,9 +10112,12 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y CONFIG_ARCH_HAS_UACCESS_MCSAFE=y +CONFIG_ARCH_STACKWALK=y CONFIG_SBITMAP=y CONFIG_PARMAN=m # CONFIG_STRING_SELFTEST is not set +# end of Library routines + CONFIG_OBJAGG=m # @@ -9715,6 +10134,7 @@ CONFIG_CONSOLE_LOGLEVEL_QUIET=1 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_BOOT_PRINTK_DELAY is not set CONFIG_DYNAMIC_DEBUG=y +# end of printk and dmesg options # # Compile-time checks and compiler options @@ -9727,14 +10147,18 @@ CONFIG_STRIP_ASM_SYMS=y CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +# CONFIG_OPTIMIZE_INLINING is not set # CONFIG_DEBUG_SECTION_MISMATCH is not set CONFIG_SECTION_MISMATCH_WARN_ONLY=y CONFIG_STACK_VALIDATION=y # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 CONFIG_MAGIC_SYSRQ_SERIAL=y CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y # # Memory Debugging @@ -9756,12 +10180,12 @@ CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y # CONFIG_DEBUG_VIRTUAL is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set CONFIG_HAVE_ARCH_KASAN=y CONFIG_CC_HAS_KASAN_GENERIC=y # CONFIG_KASAN is not set CONFIG_KASAN_STACK=1 +# end of Memory Debugging + CONFIG_ARCH_HAS_KCOV=y CONFIG_CC_HAS_SANCOV_TRACE_PC=y # CONFIG_KCOV is not set @@ -9784,6 +10208,8 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 # CONFIG_WQ_WATCHDOG is not set +# end of Debug Lockups and Hangs + # CONFIG_PANIC_ON_OOPS is not set CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=0 @@ -9810,12 +10236,14 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_LOCK_TORTURE_TEST is not set # CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ CONFIG_STACKTRACE=y # CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_PLIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set # CONFIG_DEBUG_CREDENTIALS is not set @@ -9828,6 +10256,8 @@ CONFIG_DEBUG_BUGVERBOSE=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set # CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + # CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set @@ -9890,7 +10320,6 @@ CONFIG_HIST_TRIGGERS=y # CONFIG_RING_BUFFER_STARTUP_TEST is not set # CONFIG_PREEMPTIRQ_DELAY_TEST is not set # CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y # CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set CONFIG_RUNTIME_TESTING_MENU=y CONFIG_LKDTM=m @@ -9905,6 +10334,7 @@ CONFIG_LKDTM=m # CONFIG_ASYNC_RAID6_TEST is not set # CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set # CONFIG_TEST_KSTRTOX is not set # CONFIG_TEST_PRINTF is not set # CONFIG_TEST_BITMAP is not set @@ -9965,7 +10395,6 @@ CONFIG_IO_DELAY_0X80=y CONFIG_DEFAULT_IO_DELAY_TYPE=0 CONFIG_DEBUG_BOOT_PARAMS=y # CONFIG_CPA_DEBUG is not set -# CONFIG_OPTIMIZE_INLINING is not set # CONFIG_DEBUG_ENTRY is not set # CONFIG_DEBUG_NMI_SELFTEST is not set # CONFIG_X86_DEBUG_FPU is not set @@ -9973,3 +10402,4 @@ CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_UNWINDER_ORC=y # CONFIG_UNWINDER_FRAME_POINTER is not set # CONFIG_UNWINDER_GUESS is not set +# end of Kernel hacking |
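Reviewing a `config` diff this large by eye is error-prone; the kernel tree ships a helper, scripts/diffconfig, that summarizes what changed between two config files, and the same comparison is easy to sketch by hand. Below is a minimal Python sketch of that idea, under the assumption that you have the old and new configs saved locally (the file names passed on the command line are placeholders, not files from this repository). It parses the two standard line forms, `CONFIG_FOO=value` and `# CONFIG_FOO is not set`, and prints removed, added, and changed options.

#!/usr/bin/env python3
"""Summarize the differences between two kernel .config files.

A hand-rolled approximation of the kernel's scripts/diffconfig;
file names are supplied by the caller.
"""
import re
import sys

def parse_config(path):
    """Return {option: value}; '# CONFIG_FOO is not set' maps to 'n'."""
    opts = {}
    set_re = re.compile(r'^(CONFIG_\w+)=(.*)$')
    unset_re = re.compile(r'^# (CONFIG_\w+) is not set$')
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            m = set_re.match(line)
            if m:
                opts[m.group(1)] = m.group(2)
                continue
            m = unset_re.match(line)
            if m:
                opts[m.group(1)] = 'n'
    return opts

def diff_configs(old_path, new_path):
    old, new = parse_config(old_path), parse_config(new_path)
    # Options present only in the old config (dropped in the new kernel).
    for opt in sorted(old.keys() - new.keys()):
        print(f'-{opt} {old[opt]}')
    # Options present only in the new config (new in this kernel release).
    for opt in sorted(new.keys() - old.keys()):
        print(f'+{opt} {new[opt]}')
    # Options whose value changed, e.g. 'n -> m'.
    for opt in sorted(old.keys() & new.keys()):
        if old[opt] != new[opt]:
            print(f' {opt} {old[opt]} -> {new[opt]}')

if __name__ == '__main__':
    # Usage (hypothetical file names): ./configdiff.py config.5.1.16 config.5.2
    diff_configs(sys.argv[1], sys.argv[2])

Run against the 5.1.16 and 5.2 configs, this would report, for example, `+CONFIG_UNISYS_VISORBUS m` style changes corresponding to the hunks above; the `# end of ...` marker comments that dominate this diff are ignored by the parser, since they carry no option state.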