-rw-r--r--  .SRCINFO                                                         |   12
-rw-r--r--  0009-prjc_v5.13-r1.patch (renamed from 0009-prjc_v5.12-r1.patch) | 1406
-rw-r--r--  PKGBUILD                                                         |   10
-rw-r--r--  config                                                           |  216
4 files changed, 730 insertions(+), 914 deletions(-)
diff --git a/.SRCINFO b/.SRCINFO
index a1938f2f942b..2c5f22297ca0 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,8 +1,8 @@
pkgbase = linux-pds
pkgdesc = Linux
- pkgver = 5.12.15.arch1
+ pkgver = 5.13.1.arch1
pkgrel = 1
- url = https://github.com/archlinux/linux/commits/v5.12.15-arch1
+ url = https://github.com/archlinux/linux/commits/v5.13.1-arch1
arch = x86_64
license = GPL2
makedepends = bc
@@ -20,18 +20,18 @@ pkgbase = linux-pds
makedepends = imagemagick
makedepends = git
options = !strip
- source = linux-archlinux::git+https://github.com/archlinux/linux.git?signed#tag=v5.12.15-arch1
+ source = linux-archlinux::git+https://github.com/archlinux/linux.git?signed#tag=v5.13.1-arch1
source = git+https://github.com/graysky2/kernel_compiler_patch.git
source = config
- source = 0009-prjc_v5.12-r1.patch
+ source = 0009-prjc_v5.13-r1.patch
source = 0005-glitched-pds.patch
validpgpkeys = ABAF11C65A2970B130ABE3C479BE3E4300411886
validpgpkeys = 647F28654894E3BD457199BE38DBBDC86092693E
validpgpkeys = A2FF3A36AAA56654109064AB19802F8B0D70FC30
sha512sums = SKIP
sha512sums = SKIP
- sha512sums = 3753e46e43c574d921418f738f7359d792629f02711c449ec485951419252bc1c27d4594a4dbca674e16e4b31763b65e3b595b4424525a9270b4f8d6dda6cbd0
- sha512sums = be1c86baa2dd5f10314817100d908763ef23d6e1bcf9869a79ecd3250fefe0f3c662d72a3b9237e3e965c72042c791570957c12257d3031ba8a439cb1b22561f
+ sha512sums = 12323ce737071f6ebc37a5c6d6cca90ae690803b58685d6091d5f2de6781d51a1f2ae1d84443e8bb18130484ef1182ceb5b982b3e7842d0c097e76723ecc7ed9
+ sha512sums = ad9276a80e28eec461a307ad44a1ed5acebf810b14ce8c9e6f1dc211be6ed7e72f535175fb65f3115fa217f8b635122c65c2c002ff00ba458c867d8bb6257f36
sha512sums = 889f0a49f326de3f119290256393b09a9e9241c2a297ca0b7967a2884e4e35d71388d2a559e4c206f55f67228b65e8f2013a1ec61f6ff8f1de3b6a725fd5fa57
pkgname = linux-pds
diff --git a/0009-prjc_v5.12-r1.patch b/0009-prjc_v5.13-r1.patch
index 693bfb761388..82d7f5a36fdb 100644
--- a/0009-prjc_v5.12-r1.patch
+++ b/0009-prjc_v5.13-r1.patch
@@ -1,8 +1,8 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 04545725f187..d560166b0cf1 100644
+index cb89dbdedc46..37192ffbd3f8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -4725,6 +4725,12 @@
+@@ -4878,6 +4878,12 @@
sbni= [NET] Granch SBNI12 leased line adapter
@@ -12,14 +12,14 @@ index 04545725f187..d560166b0cf1 100644
+ Default: 4000
+ See Documentation/scheduler/sched-BMQ.txt
+
- sched_debug [KNL] Enables verbose scheduler debug messages.
+ sched_verbose [KNL] Enables verbose scheduler debug messages.
schedstats= [KNL,X86] Enable or disable scheduled statistics.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
-index 1d56a6b73a4e..e08ffb857277 100644
+index 68b21395a743..0c14a4544fd6 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
-@@ -1515,3 +1515,13 @@ is 10 seconds.
+@@ -1527,3 +1527,13 @@ is 10 seconds.
The softlockup threshold is (``2 * watchdog_thresh``). Setting this
tunable to zero will disable lockup detection altogether.
@@ -150,7 +150,7 @@ index 000000000000..05c84eec0f31
+priority boost from unblocking while background threads that do most of the
+processing receive the priority penalty for using their entire timeslice.
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 3851bfcdba56..732636ac3fd3 100644
+index 9cbd915025ad..f4f05b4cb2af 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -476,7 +476,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
@@ -176,18 +176,10 @@ index 8874f681b056..59eb72bf7d5f 100644
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ef00bb22164c..2290806d8af2 100644
+index 32813c345115..35f7cfe6539a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -35,6 +35,7 @@
- #include <linux/rseq.h>
- #include <linux/seqlock.h>
- #include <linux/kcsan.h>
-+#include <linux/skip_list.h>
- #include <asm/kmap_size.h>
-
- /* task_struct member predeclarations (sorted alphabetically): */
-@@ -670,12 +671,18 @@ struct task_struct {
+@@ -678,12 +678,18 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_SMP
@@ -207,7 +199,7 @@ index ef00bb22164c..2290806d8af2 100644
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
-@@ -689,6 +696,7 @@ struct task_struct {
+@@ -697,6 +703,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
@@ -215,25 +207,20 @@ index ef00bb22164c..2290806d8af2 100644
#endif
int on_rq;
-@@ -697,13 +705,33 @@ struct task_struct {
+@@ -705,13 +712,28 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_ALT
+ u64 last_ran;
+ s64 time_slice;
++ int sq_idx;
++ struct list_head sq_node;
+#ifdef CONFIG_SCHED_BMQ
+ int boost_prio;
-+ int bmq_idx;
-+ struct list_head bmq_node;
+#endif /* CONFIG_SCHED_BMQ */
+#ifdef CONFIG_SCHED_PDS
+ u64 deadline;
-+ u64 priodl;
-+ /* skip list level */
-+ int sl_level;
-+ /* skip list node */
-+ struct skiplist_node sl_node;
+#endif /* CONFIG_SCHED_PDS */
+ /* sched_clock time spent running */
+ u64 sched_time;
@@ -250,7 +237,7 @@ index ef00bb22164c..2290806d8af2 100644
#ifdef CONFIG_UCLAMP_TASK
/*
-@@ -1388,6 +1416,15 @@ struct task_struct {
+@@ -1407,6 +1429,15 @@ struct task_struct {
*/
};
@@ -267,7 +254,7 @@ index ef00bb22164c..2290806d8af2 100644
{
return task->thread_pid;
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 1aff00b65f3c..179d77c8360e 100644
+index 1aff00b65f3c..216fdf2fe90c 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,5 +1,24 @@
@@ -285,7 +272,7 @@ index 1aff00b65f3c..179d77c8360e 100644
+#endif
+
+#ifdef CONFIG_SCHED_PDS
-+#define __tsk_deadline(p) ((p)->priodl)
++#define __tsk_deadline(p) ((((u64) ((p)->prio))<<56) | (p)->deadline)
+#endif
+
+#else
@@ -304,21 +291,39 @@ index 1aff00b65f3c..179d77c8360e 100644
static inline bool dl_time_before(u64 a, u64 b)
{
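The PDS variant of __tsk_deadline above is worth unpacking: it builds the rt-mutex ordering key by placing the task's priority in the top 8 bits of a 64-bit word and the virtual deadline in the low 56 bits, so a single integer comparison orders first by priority and only then by deadline. A minimal user-space sketch of that packing, with invented prio and deadline values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t prio = 120;              /* hypothetical task->prio */
        uint64_t deadline = 123456789ULL; /* hypothetical task->deadline, in ns */

        /* Same shape as __tsk_deadline(p) in the hunk above. */
        uint64_t key = (prio << 56) | deadline;

        /* Priority dominates; the deadline only breaks ties inside one
         * priority level, which assumes deadlines fit in 56 bits. */
        printf("key = 0x%016llx\n", (unsigned long long)key);
        return 0;
    }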
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
-index ab83d85e1183..4d4f92bffeea 100644
+index ab83d85e1183..6af9ae681116 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
-@@ -18,6 +18,14 @@
+@@ -18,6 +18,32 @@
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
+/* +/- priority levels from the base priority */
+#ifdef CONFIG_SCHED_BMQ
-+#define MAX_PRIORITY_ADJ 7
++#define MAX_PRIORITY_ADJ (7)
++
++#define MIN_NORMAL_PRIO (MAX_RT_PRIO)
++#define MAX_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH / 2)
+#endif
++
+#ifdef CONFIG_SCHED_PDS
-+#define MAX_PRIORITY_ADJ 0
++#define MAX_PRIORITY_ADJ (0)
++
++#define MIN_NORMAL_PRIO (128)
++#define NORMAL_PRIO_NUM (64)
++#define MAX_PRIO (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO (MAX_PRIO - NICE_WIDTH / 2)
+#endif
+
++#endif /* CONFIG_SCHED_ALT */
++
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
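To make the new ranges concrete, the following self-check plugs in mainline's MAX_RT_PRIO (100) and NICE_WIDTH (40); those two constants are assumed from the mainline headers, they are not shown in this diff:

    #include <assert.h>

    int main(void)
    {
        /* BMQ keeps the mainline layout plus a +/-7 boost band. */
        assert(100 + 40 == 140);     /* MAX_PRIO = MIN_NORMAL_PRIO + NICE_WIDTH */
        assert(100 + 40 / 2 == 120); /* DEFAULT_PRIO */

        /* PDS reserves 100..127 and maps the nice range into 64 slots. */
        assert(128 + 64 == 192);     /* MAX_PRIO = MIN_NORMAL_PRIO + NORMAL_PRIO_NUM */
        assert(192 - 40 / 2 == 172); /* DEFAULT_PRIO = MAX_PRIO - NICE_WIDTH / 2 */
        return 0;
    }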
@@ -337,192 +342,11 @@ index e5af028c08b4..0a7565d0d3cf 100644
return false;
}
-diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
-new file mode 100644
-index 000000000000..637c83ecbd6b
---- /dev/null
-+++ b/include/linux/skip_list.h
-@@ -0,0 +1,175 @@
-+/*
-+ * Copyright (C) 2016 Alfred Chen.
-+ *
-+ * Code based on Con Kolivas's skip list implementation for BFS, and
-+ * which is based on example originally by William Pugh.
-+ *
-+ * Skip Lists are a probabilistic alternative to balanced trees, as
-+ * described in the June 1990 issue of CACM and were invented by
-+ * William Pugh in 1987.
-+ *
-+ * A couple of comments about this implementation:
-+ *
-+ * This file only provides a infrastructure of skip list.
-+ *
-+ * skiplist_node is embedded into container data structure, to get rid
-+ * the dependency of kmalloc/kfree operation in scheduler code.
-+ *
-+ * A customized search function should be defined using DEFINE_SKIPLIST_INSERT
-+ * macro and be used for skip list insert operation.
-+ *
-+ * Random Level is also not defined in this file, instead, it should be
-+ * customized implemented and set to node->level then pass to the customized
-+ * skiplist_insert function.
-+ *
-+ * Levels start at zero and go up to (NUM_SKIPLIST_LEVEL -1)
-+ *
-+ * NUM_SKIPLIST_LEVEL in this implementation is 8 instead of origin 16,
-+ * considering that there will be 256 entries to enable the top level when using
-+ * random level p=0.5, and that number is more than enough for a run queue usage
-+ * in a scheduler usage. And it also help to reduce the memory usage of the
-+ * embedded skip list node in task_struct to about 50%.
-+ *
-+ * The insertion routine has been implemented so as to use the
-+ * dirty hack described in the CACM paper: if a random level is
-+ * generated that is more than the current maximum level, the
-+ * current maximum level plus one is used instead.
-+ *
-+ * BFS Notes: In this implementation of skiplists, there are bidirectional
-+ * next/prev pointers and the insert function returns a pointer to the actual
-+ * node the value is stored. The key here is chosen by the scheduler so as to
-+ * sort tasks according to the priority list requirements and is no longer used
-+ * by the scheduler after insertion. The scheduler lookup, however, occurs in
-+ * O(1) time because it is always the first item in the level 0 linked list.
-+ * Since the task struct stores a copy of the node pointer upon skiplist_insert,
-+ * it can also remove it much faster than the original implementation with the
-+ * aid of prev<->next pointer manipulation and no searching.
-+ */
-+#ifndef _LINUX_SKIP_LIST_H
-+#define _LINUX_SKIP_LIST_H
-+
-+#include <linux/kernel.h>
-+
-+#define NUM_SKIPLIST_LEVEL (4)
-+
-+struct skiplist_node {
-+ int level; /* Levels in this node */
-+ struct skiplist_node *next[NUM_SKIPLIST_LEVEL];
-+ struct skiplist_node *prev[NUM_SKIPLIST_LEVEL];
-+};
-+
-+#define SKIPLIST_NODE_INIT(name) { 0,\
-+ {&name, &name, &name, &name},\
-+ {&name, &name, &name, &name},\
-+ }
-+
-+/**
-+ * INIT_SKIPLIST_NODE -- init a skiplist_node, expecially for header
-+ * @node: the skip list node to be inited.
-+ */
-+static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node)
-+{
-+ int i;
-+
-+ node->level = 0;
-+ for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) {
-+ WRITE_ONCE(node->next[i], node);
-+ node->prev[i] = node;
-+ }
-+}
-+
-+/**
-+ * skiplist_entry - get the struct for this entry
-+ * @ptr: the &struct skiplist_node pointer.
-+ * @type: the type of the struct this is embedded in.
-+ * @member: the name of the skiplist_node within the struct.
-+ */
-+#define skiplist_entry(ptr, type, member) \
-+ container_of(ptr, type, member)
-+
-+/**
-+ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert
-+ * function, which takes two parameters, first one is the header node of the
-+ * skip list, second one is the skip list node to be inserted
-+ * @func_name: the customized skip list insert function name
-+ * @search_func: the search function to be used, which takes two parameters,
-+ * 1st one is the itrator of skiplist_node in the list, the 2nd is the skip list
-+ * node to be inserted, the function should return true if search should be
-+ * continued, otherwise return false.
-+ * Returns 1 if @node is inserted as the first item of skip list at level zero,
-+ * otherwise 0
-+ */
-+#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\
-+static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\
-+{\
-+ struct skiplist_node *p, *q;\
-+ unsigned int k = head->level;\
-+ unsigned int l = node->level;\
-+\
-+ p = head;\
-+ if (l > k) {\
-+ l = node->level = ++head->level;\
-+\
-+ node->next[l] = head;\
-+ node->prev[l] = head;\
-+ head->next[l] = node;\
-+ head->prev[l] = node;\
-+\
-+ do {\
-+ while (q = p->next[k], q != head && search_func(q, node))\
-+ p = q;\
-+\
-+ node->prev[k] = p;\
-+ node->next[k] = q;\
-+ q->prev[k] = node;\
-+ p->next[k] = node;\
-+ } while (k--);\
-+\
-+ return (p == head);\
-+ }\
-+\
-+ while (k > l) {\
-+ while (q = p->next[k], q != head && search_func(q, node))\
-+ p = q;\
-+ k--;\
-+ }\
-+\
-+ do {\
-+ while (q = p->next[k], q != head && search_func(q, node))\
-+ p = q;\
-+\
-+ node->prev[k] = p;\
-+ node->next[k] = q;\
-+ q->prev[k] = node;\
-+ p->next[k] = node;\
-+ } while (k--);\
-+\
-+ return (p == head);\
-+}
-+
-+/**
-+ * skiplist_del_init -- delete skip list node from a skip list and reset it's
-+ * init state
-+ * @head: the header node of the skip list to be deleted from.
-+ * @node: the skip list node to be deleted, the caller need to ensure @node is
-+ * in skip list which @head represent.
-+ * Returns 1 if @node is the first item of skip level at level zero, otherwise 0
-+ */
-+static inline int
-+skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node)
-+{
-+ unsigned int i, level = node->level;
-+
-+ for (i = 0; i <= level; i++) {
-+ node->prev[i]->next[i] = node->next[i];
-+ node->next[i]->prev[i] = node->prev[i];
-+ }
-+ if (level == head->level && level) {
-+ while (head->next[level] == head && level)
-+ level--;
-+ head->level = level;
-+ }
-+
-+ return (node->prev[0] == head);
-+}
-+#endif /* _LINUX_SKIP_LIST_H */
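For context on what this revision drops: the deleted header was consumed roughly as sketched below. The container type, ordering rule, and names are invented for illustration and assume the removed <linux/skip_list.h>; only the skiplist_* identifiers come from the header above.

    struct item {
        u64 key;
        struct skiplist_node sl_node;  /* embedded node, no kmalloc needed */
    };

    /* Keep walking while the iterated entry sorts before the new node. */
    static inline bool item_search_fn(struct skiplist_node *it,
                                      struct skiplist_node *node)
    {
        return skiplist_entry(it, struct item, sl_node)->key <=
               skiplist_entry(node, struct item, sl_node)->key;
    }

    DEFINE_SKIPLIST_INSERT_FUNC(item_insert, item_search_fn)

    static struct skiplist_node item_head = SKIPLIST_NODE_INIT(item_head);

    static void item_add(struct item *i, int random_level)
    {
        i->sl_node.level = random_level;       /* caller supplies the level */
        item_insert(&item_head, &i->sl_node);  /* 1 if now first at level 0 */
    }

    static void item_del(struct item *i)
    {
        skiplist_del_init(&item_head, &i->sl_node);
    }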
diff --git a/init/Kconfig b/init/Kconfig
-index 5f5c776ef192..2529408ce0b5 100644
+index a61c92066c2e..7746c8d4610b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -779,9 +779,39 @@ config GENERIC_SCHED_CLOCK
+@@ -783,9 +783,39 @@ config GENERIC_SCHED_CLOCK
menu "Scheduler features"
@@ -562,7 +386,7 @@ index 5f5c776ef192..2529408ce0b5 100644
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -867,6 +897,7 @@ config NUMA_BALANCING
+@@ -871,6 +901,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
@@ -570,7 +394,7 @@ index 5f5c776ef192..2529408ce0b5 100644
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
-@@ -959,6 +990,7 @@ config FAIR_GROUP_SCHED
+@@ -963,6 +994,7 @@ config FAIR_GROUP_SCHED
depends on CGROUP_SCHED
default CGROUP_SCHED
@@ -578,7 +402,7 @@ index 5f5c776ef192..2529408ce0b5 100644
config CFS_BANDWIDTH
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
depends on FAIR_GROUP_SCHED
-@@ -981,6 +1013,7 @@ config RT_GROUP_SCHED
+@@ -985,6 +1017,7 @@ config RT_GROUP_SCHED
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information.
@@ -586,7 +410,7 @@ index 5f5c776ef192..2529408ce0b5 100644
endif #CGROUP_SCHED
config UCLAMP_TASK_GROUP
-@@ -1210,6 +1243,7 @@ config CHECKPOINT_RESTORE
+@@ -1228,6 +1261,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
@@ -595,22 +419,17 @@ index 5f5c776ef192..2529408ce0b5 100644
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
-index 3711cdaafed2..47f57d2cc488 100644
+index 8b08c2e19cbb..0dfa1a63dc4e 100644
--- a/init/init_task.c
+++ b/init/init_task.c
-@@ -75,9 +75,20 @@ struct task_struct init_task
+@@ -75,9 +75,15 @@ struct task_struct init_task
.stack = init_stack,
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
-+#ifdef CONFIG_SCHED_BMQ
++#ifdef CONFIG_SCHED_ALT
+ .prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
+ .static_prio = DEFAULT_PRIO,
+ .normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+ .prio = MAX_RT_PRIO,
-+ .static_prio = DEFAULT_PRIO,
-+ .normal_prio = MAX_RT_PRIO,
+#else
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
@@ -619,27 +438,25 @@ index 3711cdaafed2..47f57d2cc488 100644
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.cpus_mask = CPU_MASK_ALL,
-@@ -87,6 +98,19 @@ struct task_struct init_task
+@@ -87,6 +93,17 @@ struct task_struct init_task
.restart_block = {
.fn = do_no_restart_syscall,
},
+#ifdef CONFIG_SCHED_ALT
++ .sq_node = LIST_HEAD_INIT(init_task.sq_node),
+#ifdef CONFIG_SCHED_BMQ
+ .boost_prio = 0,
-+ .bmq_idx = 15,
-+ .bmq_node = LIST_HEAD_INIT(init_task.bmq_node),
++ .sq_idx = 15,
+#endif
+#ifdef CONFIG_SCHED_PDS
+ .deadline = 0,
-+ .sl_level = 0,
-+ .sl_node = SKIPLIST_NODE_INIT(init_task.sl_node),
+#endif
+ .time_slice = HZ,
+#else
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
-@@ -94,6 +118,7 @@ struct task_struct init_task
+@@ -94,6 +111,7 @@ struct task_struct init_task
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
@@ -648,7 +465,7 @@ index 3711cdaafed2..47f57d2cc488 100644
#ifdef CONFIG_SMP
.pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
-index 5258b68153e0..3eb670b1bb76 100644
+index adb5190c4429..8c02bce63146 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
@@ -683,7 +500,7 @@ index 27725754ac99..769d773c7182 100644
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
-index 04029e35e69a..5ee0dc0b9175 100644
+index 65809fac3038..9504db57d878 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -705,10 +522,10 @@ index 04029e35e69a..5ee0dc0b9175 100644
__unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
-index f6310f848f34..4176ad070bc9 100644
+index 3a4beb9395c4..98a709628cb3 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
-@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task)
+@@ -307,7 +307,11 @@ static bool klp_try_switch_task(struct task_struct *task)
*/
rq = task_rq_lock(task, &flags);
@@ -721,19 +538,18 @@ index f6310f848f34..4176ad070bc9 100644
"%s: %s:%d is running\n", __func__, task->comm,
task->pid);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 48fff6437901..40506d5b5a2e 100644
+index 406818196a9f..31c46750fa94 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -227,15 +227,19 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+@@ -227,14 +227,18 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
* Only use with rt_mutex_waiter_{less,equal}()
*/
#define task_to_waiter(p) \
- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) }
- static inline int
- rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- struct rt_mutex_waiter *right)
+ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ struct rt_mutex_waiter *right)
{
+#ifdef CONFIG_SCHED_PDS
+ return (left->deadline < right->deadline);
@@ -745,7 +561,7 @@ index 48fff6437901..40506d5b5a2e 100644
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
-@@ -244,17 +248,23 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -243,16 +247,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline);
@@ -755,9 +571,8 @@ index 48fff6437901..40506d5b5a2e 100644
+#endif
}
- static inline int
- rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- struct rt_mutex_waiter *right)
+ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ struct rt_mutex_waiter *right)
{
+#ifdef CONFIG_SCHED_PDS
+ return (left->deadline == right->deadline);
@@ -769,7 +584,7 @@ index 48fff6437901..40506d5b5a2e 100644
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
-@@ -263,8 +273,10 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+@@ -261,8 +271,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return left->deadline == right->deadline;
@@ -780,7 +595,7 @@ index 48fff6437901..40506d5b5a2e 100644
}
#define __node_2_waiter(node) \
-@@ -660,7 +672,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -654,7 +666,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
* the values of the node being removed.
*/
waiter->prio = task->prio;
@@ -789,7 +604,7 @@ index 48fff6437901..40506d5b5a2e 100644
rt_mutex_enqueue(lock, waiter);
-@@ -933,7 +945,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+@@ -925,7 +937,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock,
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
@@ -799,10 +614,10 @@ index 48fff6437901..40506d5b5a2e 100644
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
-index 5fc9c9b70862..eb6d7d87779f 100644
+index 5fc9c9b70862..06b60d612535 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
-@@ -22,14 +22,20 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+@@ -22,14 +22,21 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
endif
@@ -812,15 +627,16 @@ index 5fc9c9b70862..eb6d7d87779f 100644
-
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
+ifdef CONFIG_SCHED_ALT
-+obj-y += alt_core.o alt_debug.o
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
+else
+obj-y += core.o
+obj-y += fair.o rt.o deadline.o
+obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
-obj-$(CONFIG_SCHEDSTATS) += stats.o
- obj-$(CONFIG_SCHED_DEBUG) += debug.o
+endif
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
+obj-y += loadavg.o clock.o cputime.o
+obj-y += idle.o
+obj-y += wait.o wait_bit.o swait.o completion.o
@@ -831,10 +647,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..c85e3ccf9302
+index 000000000000..b65b12c6014f
--- /dev/null
+++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7138 @@
+@@ -0,0 +1,7249 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -889,7 +705,22 @@ index 000000000000..c85e3ccf9302
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
+
-+#define ALT_SCHED_VERSION "v5.12-r1"
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x) (1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x) (0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v5.13-r1"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -899,22 +730,33 @@ index 000000000000..c85e3ccf9302
+#define STOP_PRIO (MAX_RT_PRIO - 1)
+
+/* Default time slice is 4 in ms, can be set via kernel parameter "sched_timeslice" */
-+u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000);
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
+
+static int __init sched_timeslice(char *str)
+{
-+ int timeslice_us;
++ int timeslice_ms;
+
-+ get_option(&str, &timeslice_us);
-+ if (timeslice_us >= 1000)
-+ sched_timeslice_ns = timeslice_us * 1000;
++ get_option(&str, &timeslice_ms);
++ if (2 != timeslice_ms)
++ timeslice_ms = 4;
++ sched_timeslice_ns = timeslice_ms << 20;
++ sched_timeslice_imp(timeslice_ms);
+
+ return 0;
+}
+early_param("sched_timeslice", sched_timeslice);
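A note on the shifted constants in this hunk: the millisecond values are binary approximations, so 4 << 20 is 4 x 2^20 = 4,194,304 ns (about 4.19 ms), and RESCHED_NS just below, despite the µs wording of its comment, is 100 << 10 = 102,400 ns (about 102 µs); the shifts trade a few percent of precision for cheaper arithmetic. Note also that the rewritten parser accepts only sched_timeslice=2 and silently falls back to 4 ms for every other value.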
+
+/* Reschedule if less than this many μs left */
-+#define RESCHED_NS (100 * 1000)
++#define RESCHED_NS (100 << 10)
+
+/**
+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
@@ -964,22 +806,36 @@ index 000000000000..c85e3ccf9302
+#endif
+static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
+
-+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq_imp.h"
-+#endif
-+#ifdef CONFIG_SCHED_PDS
-+#include "pds_imp.h"
-+#endif
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++ int i;
++
++ bitmap_zero(q->bitmap, SCHED_BITS);
++ for(i = 0; i < SCHED_BITS; i++)
++ INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Initialize the idle task and put it into the rq's queue structure
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++ struct task_struct *idle)
++{
++ idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++ INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++ list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
+
++/* watermark related functions */
+static inline void update_sched_rq_watermark(struct rq *rq)
+{
-+ unsigned long watermark = sched_queue_watermark(rq);
++ unsigned long watermark = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
+ unsigned long last_wm = rq->watermark;
+ unsigned long i;
+ int cpu;
+
-+ /*printk(KERN_INFO "sched: watermark(%d) %d, last %d\n",
-+ cpu_of(rq), watermark, last_wm);*/
+ if (watermark == last_wm)
+ return;
+
@@ -990,9 +846,8 @@ index 000000000000..c85e3ccf9302
+ cpumask_andnot(&sched_rq_watermark[i],
+ &sched_rq_watermark[i], cpumask_of(cpu));
+#ifdef CONFIG_SCHED_SMT
-+ if (!static_branch_likely(&sched_smt_present))
-+ return;
-+ if (IDLE_WM == last_wm)
++ if (static_branch_likely(&sched_smt_present) &&
++ IDLE_WM == last_wm)
+ cpumask_andnot(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+#endif
@@ -1002,10 +857,9 @@ index 000000000000..c85e3ccf9302
+ for (i = last_wm + 1; i <= watermark; i++)
+ cpumask_set_cpu(cpu, &sched_rq_watermark[i]);
+#ifdef CONFIG_SCHED_SMT
-+ if (!static_branch_likely(&sched_smt_present))
-+ return;
-+ if (IDLE_WM == watermark) {
++ if (static_branch_likely(&sched_smt_present) && IDLE_WM == watermark) {
+ cpumask_t tmp;
++
+ cpumask_and(&tmp, cpu_smt_mask(cpu), &sched_rq_watermark[IDLE_WM]);
+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
+ cpumask_or(&sched_sg_idle_mask, cpu_smt_mask(cpu),
@@ -1014,6 +868,34 @@ index 000000000000..c85e3ccf9302
+#endif
+}
+
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++ unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++ const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++ return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++ unsigned long idx = p->sq_idx;
++ struct list_head *head = &rq->queue.heads[idx];
++
++ if (list_is_last(&p->sq_node, head)) {
++ idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++ sched_idx2prio(idx, rq) + 1);
++ head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++ return list_first_entry(head, struct task_struct, sq_node);
++ }
++
++ return list_next_entry(p, sq_node);
++}
++
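The two helpers above are the core of the O(1) pick: one bit per non-empty priority level, find_first_bit() to locate the best level, and a plain FIFO list per level. A compressed user-space model of the same idea, shrunk to 64 levels so a single word can hold the bitmap (counters stand in for the list heads):

    #include <stdio.h>

    #define NPRIO 64
    static unsigned long long bitmap; /* bit n set => level n is non-empty */
    static int nr_tasks[NPRIO];       /* stand-in for the per-level lists */

    static void enqueue(int prio)
    {
        nr_tasks[prio]++;
        bitmap |= 1ULL << prio;
    }

    static void dequeue(int prio)
    {
        if (--nr_tasks[prio] == 0)
            bitmap &= ~(1ULL << prio);
    }

    static int first_prio(void)
    {
        return __builtin_ffsll(bitmap) - 1; /* find_first_bit() analogue */
    }

    int main(void)
    {
        enqueue(NPRIO - 1);            /* the always-queued idle task */
        enqueue(35);
        enqueue(20);
        printf("%d\n", first_prio());  /* 20: lowest set bit wins */
        dequeue(20);
        printf("%d\n", first_prio());  /* 35 */
        return 0;
    }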
+static inline struct task_struct *rq_runnable_task(struct rq *rq)
+{
+ struct task_struct *next = sched_rq_first_task(rq);
@@ -1331,6 +1213,7 @@ index 000000000000..c85e3ccf9302
+ if (unlikely(delta <= 0))
+ return;
+ rq->clock += delta;
++ update_rq_time_edge(rq);
+ update_rq_clock_task(rq, delta);
+}
+
@@ -1360,6 +1243,25 @@ index 000000000000..c85e3ccf9302
+ * Add/Remove/Requeue task to/from the runqueue routines
+ * Context: rq->lock
+ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
++ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
++ sched_info_dequeued(rq, p); \
++ \
++ list_del(&p->sq_node); \
++ if (list_empty(&rq->queue.heads[p->sq_idx])) { \
++ clear_bit(sched_idx2prio(p->sq_idx, rq), \
++ rq->queue.bitmap); \
++ func; \
++ }
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
++ sched_info_queued(rq, p); \
++ psi_enqueue(p, flags); \
++ \
++ p->sq_idx = task_sched_prio_idx(p, rq); \
++ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
++ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ lockdep_assert_held(&rq->lock);
@@ -1399,12 +1301,25 @@ index 000000000000..c85e3ccf9302
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq)
+{
++ int idx;
++
+ lockdep_assert_held(&rq->lock);
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
+ cpu_of(rq), task_cpu(p));
+
-+ __SCHED_REQUEUE_TASK(p, rq, update_sched_rq_watermark(rq));
++ idx = task_sched_prio_idx(p, rq);
++
++ list_del(&p->sq_node);
++ list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++ if (idx != p->sq_idx) {
++ if (list_empty(&rq->queue.heads[p->sq_idx]))
++ clear_bit(sched_idx2prio(p->sq_idx, rq),
++ rq->queue.bitmap);
++ p->sq_idx = idx;
++ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++ update_sched_rq_watermark(rq);
++ }
+}
+
+/*
@@ -1739,13 +1654,10 @@ index 000000000000..c85e3ccf9302
+static enum hrtimer_restart hrtick(struct hrtimer *timer)
+{
+ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-+ struct task_struct *p;
+
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+ raw_spin_lock(&rq->lock);
-+ p = rq->curr;
-+ p->time_slice = 0;
+ resched_curr(rq);
+ raw_spin_unlock(&rq->lock);
+
@@ -1858,6 +1770,19 @@ index 000000000000..c85e3ccf9302
+#endif /* CONFIG_SCHED_HRTICK */
+
+/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++ return task_has_rt_policy(p) ? (MAX_RT_PRIO - 1 - p->rt_priority) :
++ p->static_prio + MAX_PRIORITY_ADJ;
++}
++
++/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks as it will be RT if the task got
@@ -2084,7 +2009,7 @@ index 000000000000..c85e3ccf9302
+ return cpu_online(cpu);
+
+ /* Regular kernel threads don't get to stay during offline. */
-+ if (cpu_rq(cpu)->balance_push)
++ if (cpu_dying(cpu))
+ return false;
+
+ /* But are allowed during online. */
@@ -2124,6 +2049,7 @@ index 000000000000..c85e3ccf9302
+
+ raw_spin_lock(&rq->lock);
+ BUG_ON(task_cpu(p) != new_cpu);
++ sched_task_sanity_check(p, rq);
+ enqueue_task(p, rq, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ check_preempt_curr(rq);
@@ -2166,12 +2092,13 @@ index 000000000000..c85e3ccf9302
+ struct migration_arg *arg = data;
+ struct task_struct *p = arg->task;
+ struct rq *rq = this_rq();
++ unsigned long flags;
+
+ /*
+ * The original target CPU might have gone down and we might
+ * be on another CPU but it doesn't matter.
+ */
-+ local_irq_disable();
++ local_irq_save(flags);
+ /*
+ * We need to explicitly wake pending tasks before running
+ * __migrate_task() such that we will not miss enforcing cpus_ptr
@@ -2189,9 +2116,8 @@ index 000000000000..c85e3ccf9302
+ if (task_rq(p) == rq && task_on_rq_queued(p))
+ rq = __migrate_task(rq, p, arg->dest_cpu);
+ raw_spin_unlock(&rq->lock);
-+ raw_spin_unlock(&p->pi_lock);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
-+ local_irq_enable();
+ return 0;
+}
+
@@ -2452,7 +2378,7 @@ index 000000000000..c85e3ccf9302
+ return dest_cpu;
+}
+
-+static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++static inline int select_task_rq(struct task_struct *p)
+{
+ cpumask_t chk_mask, tmp;
+
@@ -2465,7 +2391,7 @@ index 000000000000..c85e3ccf9302
+#endif
+ cpumask_and(&tmp, &chk_mask, &sched_rq_watermark[IDLE_WM]) ||
+ cpumask_and(&tmp, &chk_mask,
-+ &sched_rq_watermark[task_sched_prio(p, rq) + 1]))
++ &sched_rq_watermark[task_sched_prio(p) + 1]))
+ return best_mask_cpu(task_cpu(p), &tmp);
+
+ return best_mask_cpu(task_cpu(p), &chk_mask);
@@ -2619,7 +2545,7 @@ index 000000000000..c85e3ccf9302
+
+#else /* CONFIG_SMP */
+
-+static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++static inline int select_task_rq(struct task_struct *p)
+{
+ return 0;
+}
@@ -3156,7 +3082,7 @@ index 000000000000..c85e3ccf9302
+
+ sched_task_ttwu(p);
+
-+ cpu = select_task_rq(p, this_rq());
++ cpu = select_task_rq(p);
+
+ if (cpu != task_cpu(p)) {
+ if (p->in_iowait) {
@@ -3455,16 +3381,15 @@ index 000000000000..c85e3ccf9302
+ struct rq *rq;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+
+ p->state = TASK_RUNNING;
-+
-+ rq = cpu_rq(select_task_rq(p, this_rq()));
++ rq = cpu_rq(select_task_rq(p));
+#ifdef CONFIG_SMP
+ rseq_migrate(p);
+ /*
+ * Fork balancing, do it here and not earlier because:
+ * - cpus_ptr can change in the fork path
+ * - any previously selected CPU might disappear through hotplug
++ *
+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+ * as we're not fully set-up yet.
+ */
@@ -3472,8 +3397,8 @@ index 000000000000..c85e3ccf9302
+#endif
+
+ raw_spin_lock(&rq->lock);
-+
+ update_rq_clock(rq);
++
+ activate_task(p, rq);
+ trace_sched_wakeup_new(p);
+ check_preempt_curr(rq);
@@ -3866,8 +3791,6 @@ index 000000000000..c85e3ccf9302
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
+ __releases(rq->lock)
+{
-+ struct rq *rq;
-+
+ /*
+ * New tasks start with FORK_PREEMPT_COUNT, see there and
+ * finish_task_switch() for details.
@@ -3877,7 +3800,7 @@ index 000000000000..c85e3ccf9302
+ * PREEMPT_COUNT kernels).
+ */
+
-+ rq = finish_task_switch(prev);
++ finish_task_switch(prev);
+ preempt_enable();
+
+ if (current->set_child_tid)
@@ -4063,7 +3986,7 @@ index 000000000000..c85e3ccf9302
+ if (rq != task_rq(p) || rq->nr_running < 2)
+ goto unlock;
+
-+ dest_cpu = select_task_rq(p, task_rq(p));
++ dest_cpu = select_task_rq(p);
+ if (dest_cpu == smp_processor_id())
+ goto unlock;
+
@@ -4162,6 +4085,55 @@ index 000000000000..c85e3ccf9302
+ set_preempt_need_resched();
+}
+
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++ int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++ u64 resched_latency, now = rq_clock(rq);
++ static bool warned_once;
++
++ if (sysctl_resched_latency_warn_once && warned_once)
++ return 0;
++
++ if (!need_resched() || !latency_warn_ms)
++ return 0;
++
++ if (system_state == SYSTEM_BOOTING)
++ return 0;
++
++ if (!rq->last_seen_need_resched_ns) {
++ rq->last_seen_need_resched_ns = now;
++ rq->ticks_without_resched = 0;
++ return 0;
++ }
++
++ rq->ticks_without_resched++;
++ resched_latency = now - rq->last_seen_need_resched_ns;
++ if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++ return 0;
++
++ warned_once = true;
++
++ return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++ long val;
++
++ if ((kstrtol(str, 0, &val))) {
++ pr_warn("Unable to set resched_latency_warn_ms\n");
++ return 1;
++ }
++
++ sysctl_resched_latency_warn_ms = val;
++ return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
@@ -4170,6 +4142,7 @@ index 000000000000..c85e3ccf9302
+{
+ int cpu __maybe_unused = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
++ u64 resched_latency;
+
+ arch_scale_freq_tick();
+ sched_clock_tick();
@@ -4178,12 +4151,16 @@ index 000000000000..c85e3ccf9302
+ update_rq_clock(rq);
+
+ scheduler_task_tick(rq);
++ if (sched_feat(LATENCY_WARN))
++ resched_latency = cpu_resched_latency(rq);
+ calc_global_load_tick(rq);
-+ psi_task_tick(rq);
+
+ rq->last_tick = rq->clock;
+ raw_spin_unlock(&rq->lock);
+
++ if (sched_feat(LATENCY_WARN) && resched_latency)
++ resched_latency_warn(cpu, resched_latency);
++
+ perf_event_task_tick();
+}
+
@@ -4858,7 +4835,9 @@ index 000000000000..c85e3ccf9302
+ next = choose_next_task(rq, cpu, prev);
+ clear_tsk_need_resched(prev);
+ clear_preempt_need_resched();
-+
++#ifdef CONFIG_SCHED_DEBUG
++ rq->last_seen_need_resched_ns = 0;
++#endif
+
+ if (likely(prev != next)) {
+ next->last_ran = rq->clock_task;
@@ -5187,23 +5166,23 @@ index 000000000000..c85e3ccf9302
+ preempt_dynamic_full,
+};
+
-+static int preempt_dynamic_mode = preempt_dynamic_full;
++int preempt_dynamic_mode = preempt_dynamic_full;
+
-+static int sched_dynamic_mode(const char *str)
++int sched_dynamic_mode(const char *str)
+{
+ if (!strcmp(str, "none"))
-+ return 0;
++ return preempt_dynamic_none;
+
+ if (!strcmp(str, "voluntary"))
-+ return 1;
++ return preempt_dynamic_voluntary;
+
+ if (!strcmp(str, "full"))
-+ return 2;
++ return preempt_dynamic_full;
+
-+ return -1;
++ return -EINVAL;
+}
+
-+static void sched_dynamic_update(int mode)
++void sched_dynamic_update(int mode)
+{
+ /*
+ * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
@@ -5218,25 +5197,25 @@ index 000000000000..c85e3ccf9302
+ switch (mode) {
+ case preempt_dynamic_none:
+ static_call_update(cond_resched, __cond_resched);
-+ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
-+ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
-+ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
-+ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
++ static_call_update(might_resched, (void *)&__static_call_return0);
++ static_call_update(preempt_schedule, NULL);
++ static_call_update(preempt_schedule_notrace, NULL);
++ static_call_update(irqentry_exit_cond_resched, NULL);
+ pr_info("Dynamic Preempt: none\n");
+ break;
+
+ case preempt_dynamic_voluntary:
+ static_call_update(cond_resched, __cond_resched);
+ static_call_update(might_resched, __cond_resched);
-+ static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
-+ static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
-+ static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
++ static_call_update(preempt_schedule, NULL);
++ static_call_update(preempt_schedule_notrace, NULL);
++ static_call_update(irqentry_exit_cond_resched, NULL);
+ pr_info("Dynamic Preempt: voluntary\n");
+ break;
+
+ case preempt_dynamic_full:
-+ static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
-+ static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
++ static_call_update(cond_resched, (void *)&__static_call_return0);
++ static_call_update(might_resched, (void *)&__static_call_return0);
+ static_call_update(preempt_schedule, __preempt_schedule_func);
+ static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+ static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
@@ -5260,77 +5239,8 @@ index 000000000000..c85e3ccf9302
+}
+__setup("preempt=", setup_preempt_mode);
+
-+#ifdef CONFIG_SCHED_DEBUG
-+
-+static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ char buf[16];
-+ int mode;
-+
-+ if (cnt > 15)
-+ cnt = 15;
-+
-+ if (copy_from_user(&buf, ubuf, cnt))
-+ return -EFAULT;
-+
-+ buf[cnt] = 0;
-+ mode = sched_dynamic_mode(strstrip(buf));
-+ if (mode < 0)
-+ return mode;
-+
-+ sched_dynamic_update(mode);
-+
-+ *ppos += cnt;
-+
-+ return cnt;
-+}
-+
-+static int sched_dynamic_show(struct seq_file *m, void *v)
-+{
-+ static const char * preempt_modes[] = {
-+ "none", "voluntary", "full"
-+ };
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
-+ if (preempt_dynamic_mode == i)
-+ seq_puts(m, "(");
-+ seq_puts(m, preempt_modes[i]);
-+ if (preempt_dynamic_mode == i)
-+ seq_puts(m, ")");
-+
-+ seq_puts(m, " ");
-+ }
-+
-+ seq_puts(m, "\n");
-+ return 0;
-+}
-+
-+static int sched_dynamic_open(struct inode *inode, struct file *filp)
-+{
-+ return single_open(filp, sched_dynamic_show, NULL);
-+}
-+
-+static const struct file_operations sched_dynamic_fops = {
-+ .open = sched_dynamic_open,
-+ .write = sched_dynamic_write,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static __init int sched_init_debug_dynamic(void)
-+{
-+ debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
-+ return 0;
-+}
-+late_initcall(sched_init_debug_dynamic);
-+
-+#endif /* CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
-+
+/*
+ * This is the entry point to schedule() from kernel preemption
+ * off of irq context.
@@ -5365,10 +5275,10 @@ index 000000000000..c85e3ccf9302
+}
+EXPORT_SYMBOL(default_wake_function);
+
-+static inline void check_task_changed(struct rq *rq, struct task_struct *p)
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
+{
+ /* Trigger resched if task sched_prio has been modified. */
-+ if (task_on_rq_queued(p) && sched_task_need_requeue(p, rq)) {
++ if (task_on_rq_queued(p) && task_sched_prio_idx(p, rq) != p->sq_idx) {
+ requeue_task(p, rq);
+ check_preempt_curr(rq);
+ }
@@ -5456,9 +5366,8 @@ index 000000000000..c85e3ccf9302
+
+ trace_sched_pi_setprio(p, pi_task);
+ p->prio = prio;
-+ update_task_priodl(p);
+
-+ check_task_changed(rq, p);
++ check_task_changed(p, rq);
+out_unlock:
+ /* Avoid rq from going away on us: */
+ preempt_disable();
@@ -5501,9 +5410,8 @@ index 000000000000..c85e3ccf9302
+ goto out_unlock;
+
+ p->prio = effective_prio(p);
-+ update_task_priodl(p);
+
-+ check_task_changed(rq, p);
++ check_task_changed(p, rq);
+out_unlock:
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -5561,6 +5469,24 @@ index 000000000000..c85e3ccf9302
+#endif
+
+/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy              return value    kernel prio    user prio/nice
++ *
++ * (BMQ)normal, batch, idle  [0 ... 53]      [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
++ * (PDS)normal, batch, idle  [0 ... 39]      100            0/[-20 ... 19]
++ * fifo, rr                  [-1 ... -100]   [99 ... 0]     [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++ return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++ task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
+ * idle_cpu - is a given CPU idle currently?
+ * @cpu: the processor in question.
+ *
@@ -5651,7 +5577,6 @@ index 000000000000..c85e3ccf9302
+ p->prio = normal_prio(p);
+ if (keep_boost)
+ p->prio = rt_effective_prio(p, p->prio);
-+ update_task_priodl(p);
+}
+
+/*
@@ -5833,7 +5758,7 @@ index 000000000000..c85e3ccf9302
+
+ __setscheduler(rq, p, attr, pi);
+
-+ check_task_changed(rq, p);
++ check_task_changed(p, rq);
+
+ /* Avoid rq from going away on us: */
+ preempt_disable();
@@ -6898,7 +6823,7 @@ index 000000000000..c85e3ccf9302
+ idle->last_ran = rq->clock_task;
+ idle->state = TASK_RUNNING;
+ idle->flags |= PF_IDLE;
-+ sched_queue_init_idle(rq, idle);
++ sched_queue_init_idle(&rq->queue, idle);
+
+ scs_task_reset(idle);
+ kasan_unpoison_task_stack(idle);
@@ -7012,7 +6937,8 @@ index 000000000000..c85e3ccf9302
+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
+
+/*
-+ * Ensure we only run per-cpu kthreads once the CPU goes !active.
++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
++ * effective when the hotplug motion is down.
+ */
+static void balance_push(struct rq *rq)
+{
@@ -7020,12 +6946,19 @@ index 000000000000..c85e3ccf9302
+
+ lockdep_assert_held(&rq->lock);
+ SCHED_WARN_ON(rq->cpu != smp_processor_id());
++
+ /*
+ * Ensure the thing is persistent until balance_push_set(.on = false);
+ */
+ rq->balance_callback = &balance_push_callback;
+
+ /*
++ * Only active while going offline.
++ */
++ if (!cpu_dying(rq->cpu))
++ return;
++
++ /*
+ * Both the cpu-hotplug and stop task are in this case and are
+ * required to complete the hotplug process.
+ *
@@ -7033,7 +6966,7 @@ index 000000000000..c85e3ccf9302
+ * histerical raisins.
+ */
+ if (rq->idle == push_task ||
-+ ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
++ kthread_is_per_cpu(push_task) ||
+ is_migration_disabled(push_task)) {
+
+ /*
@@ -7078,7 +7011,6 @@ index 000000000000..c85e3ccf9302
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
-+ rq->balance_push = on;
+ if (on) {
+ WARN_ON_ONCE(rq->balance_callback);
+ rq->balance_callback = &balance_push_callback;
@@ -7183,8 +7115,8 @@ index 000000000000..c85e3ccf9302
+ unsigned long flags;
+
+ /*
-+ * Make sure that when the hotplug state machine does a roll-back
-+ * we clear balance_push. Ideally that would happen earlier...
++ * Clear the balance_push callback and prepare to schedule
++ * regular tasks.
+ */
+ balance_push_set(cpu, false);
+
@@ -7357,12 +7289,6 @@ index 000000000000..c85e3ccf9302
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
-+ /*
-+ * Now that the CPU is offline, make sure we're welcome
-+ * to new tasks once we come back up.
-+ */
-+ balance_push_set(cpu, false);
-+
+ calc_load_migrate(rq);
+ hrtick_clear(rq);
+ return 0;
@@ -7512,7 +7438,7 @@ index 000000000000..c85e3ccf9302
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
-+ sched_queue_init(rq);
++ sched_queue_init(&rq->queue);
+ rq->watermark = IDLE_WM;
+ rq->skip = NULL;
+
@@ -7531,7 +7457,7 @@ index 000000000000..c85e3ccf9302
+#ifdef CONFIG_NO_HZ_COMMON
+ INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
+#endif
-+ rq->balance_callback = NULL;
++ rq->balance_callback = &balance_push_callback;
+#ifdef CONFIG_HOTPLUG_CPU
+ rcuwait_init(&rq->hotplug_wait);
+#endif
@@ -7563,6 +7489,7 @@ index 000000000000..c85e3ccf9302
+
+#ifdef CONFIG_SMP
+ idle_thread_set_boot_cpu();
++ balance_push_set(smp_processor_id(), false);
+
+ sched_init_topology_cpumask_early();
+#endif /* SMP */
@@ -8012,10 +7939,10 @@ index 000000000000..1212a031700e
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
-index 000000000000..2a6a0530fbb7
+index 000000000000..f9f79422bf0e
--- /dev/null
+++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,686 @@
+@@ -0,0 +1,710 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -8068,16 +7995,24 @@ index 000000000000..2a6a0530fbb7
+#include <trace/events/sched.h>
+
+#ifdef CONFIG_SCHED_BMQ
-+#include "bmq.h"
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
+#endif
++
+#ifdef CONFIG_SCHED_PDS
-+#include "pds.h"
-+#endif
++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
++#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
+#else
+# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
+#endif
+
+/*
@@ -8147,6 +8082,13 @@ index 000000000000..2a6a0530fbb7
+#define WF_MIGRATED 0x04 /* internal use, task got migrated */
+#define WF_ON_CPU 0x08 /* Wakee is on_rq */
+
++#define SCHED_QUEUE_BITS (SCHED_BITS - 1)
++
++struct sched_queue {
++ DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++ struct list_head heads[SCHED_BITS];
++};
++
+/*
+ * This is the main, per-CPU runqueue data structure.
+ * This data should only be modified by the local cpu.
@@ -8159,11 +8101,9 @@ index 000000000000..2a6a0530fbb7
+ struct task_struct *idle, *stop, *skip;
+ struct mm_struct *prev_mm;
+
-+#ifdef CONFIG_SCHED_BMQ
-+ struct bmq queue;
-+#endif
++ struct sched_queue queue;
+#ifdef CONFIG_SCHED_PDS
-+ struct skiplist_node sl_header;
++ u64 time_edge;
+#endif
+ unsigned long watermark;
+
@@ -8172,6 +8112,11 @@ index 000000000000..2a6a0530fbb7
+
+ atomic_t nr_iowait;
+
++#ifdef CONFIG_SCHED_DEBUG
++ u64 last_seen_need_resched_ns;
++ int ticks_without_resched;
++#endif
++
+#ifdef CONFIG_MEMBARRIER
+ int membarrier_state;
+#endif
@@ -8193,7 +8138,6 @@ index 000000000000..2a6a0530fbb7
+ struct cpu_stop_work active_balance_work;
+#endif
+ struct callback_head *balance_callback;
-+ unsigned char balance_push;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct rcuwait hotplug_wait;
+#endif
@@ -8698,36 +8642,23 @@ index 000000000000..2a6a0530fbb7
+}
+#endif
+
-+void swake_up_all_locked(struct swait_queue_head *q);
-+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
+
++static inline void nohz_run_idle_balance(int cpu) { }
+#endif /* ALT_SCHED_H */
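Plugging mainline's MAX_RT_PRIO (100) and NICE_WIDTH (40) into the SCHED_BITS definitions above (both constants assumed from mainline, not shown in this diff) gives the sizes each flavour allocates:

    #include <assert.h>

    int main(void)
    {
        /* BMQ: 100 RT levels + 40/2 + 7 + idle slot = 128, idle prio 127. */
        assert(100 + 40 / 2 + 7 + 1 == 128);

        /* PDS: 128 (RT plus reserved) + 64 normal levels + idle = 193. */
        assert(128 + 64 + 1 == 193);
        return 0;
    }

A nice detail of the layout: the runnable bitmap is only SCHED_QUEUE_BITS = SCHED_BITS - 1 wide, so when no runnable level has its bit set, find_first_bit() returns SCHED_QUEUE_BITS itself, which is exactly IDLE_TASK_SCHED_PRIO, and the lookup falls through to the idle task's list head.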
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
-index 000000000000..aba3c98759f8
+index 000000000000..7635c00dde7f
--- /dev/null
+++ b/kernel/sched/bmq.h
-@@ -0,0 +1,14 @@
-+#ifndef BMQ_H
-+#define BMQ_H
-+
-+/* bits:
-+ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
-+#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
-+#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
-+
-+struct bmq {
-+ DECLARE_BITMAP(bitmap, SCHED_BITS);
-+ struct list_head heads[SCHED_BITS];
-+};
-+
-+#endif
-diff --git a/kernel/sched/bmq_imp.h b/kernel/sched/bmq_imp.h
-new file mode 100644
-index 000000000000..7c71f1141d00
---- /dev/null
-+++ b/kernel/sched/bmq_imp.h
-@@ -0,0 +1,203 @@
+@@ -0,0 +1,111 @@
+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
+/*
@@ -8766,127 +8697,51 @@ index 000000000000..7c71f1141d00
+/*
+ * Common interfaces
+ */
-+static inline int normal_prio(struct task_struct *p)
-+{
-+ if (task_has_rt_policy(p))
-+ return MAX_RT_PRIO - 1 - p->rt_priority;
-+
-+ return p->static_prio + MAX_PRIORITY_ADJ;
-+}
-+
-+static inline int task_sched_prio(struct task_struct *p, struct rq *rq)
-+{
-+ return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
-+}
-+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq);
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+ p->time_slice = sched_timeslice_ns;
-+
-+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
-+ if (SCHED_RR != p->policy)
-+ deboost_task(p);
-+ requeue_task(p, rq);
-+ }
-+}
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
+
-+inline int task_running_nice(struct task_struct *p)
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
-+ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++ return p->prio + p->boost_prio - MAX_RT_PRIO;
+}
+
-+static inline void update_task_priodl(struct task_struct *p) {}
-+
-+static inline unsigned long sched_queue_watermark(struct rq *rq)
++static inline int task_sched_prio(const struct task_struct *p)
+{
-+ return find_first_bit(rq->queue.bitmap, SCHED_BITS);
++ return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
+}
+
-+static inline void sched_queue_init(struct rq *rq)
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
-+ struct bmq *q = &rq->queue;
-+ int i;
-+
-+ bitmap_zero(q->bitmap, SCHED_BITS);
-+ for(i = 0; i < SCHED_BITS; i++)
-+ INIT_LIST_HEAD(&q->heads[i]);
++ return task_sched_prio(p);
+}
+
-+static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++static inline int sched_prio2idx(int prio, struct rq *rq)
+{
-+ struct bmq *q = &rq->queue;
-+
-+ idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
-+ INIT_LIST_HEAD(&q->heads[idle->bmq_idx]);
-+ list_add(&idle->bmq_node, &q->heads[idle->bmq_idx]);
-+ set_bit(idle->bmq_idx, q->bitmap);
++ return prio;
+}
+
-+/*
-+ * This routine used in bmq scheduler only which assume the idle task in the bmq
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++static inline int sched_idx2prio(int idx, struct rq *rq)
+{
-+ unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_BITS);
-+ const struct list_head *head = &rq->queue.heads[idx];
-+
-+ return list_first_entry(head, struct task_struct, bmq_node);
++ return idx;
+}
+
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
+{
-+ unsigned long idx = p->bmq_idx;
-+ struct list_head *head = &rq->queue.heads[idx];
-+
-+ if (list_is_last(&p->bmq_node, head)) {
-+ idx = find_next_bit(rq->queue.bitmap, SCHED_BITS, idx + 1);
-+ head = &rq->queue.heads[idx];
++ p->time_slice = sched_timeslice_ns;
+
-+ return list_first_entry(head, struct task_struct, bmq_node);
++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++ if (SCHED_RR != p->policy)
++ deboost_task(p);
++ requeue_task(p, rq);
+ }
-+
-+ return list_next_entry(p, bmq_node);
+}
+
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
-+ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
-+ sched_info_dequeued(rq, p); \
-+ \
-+ list_del(&p->bmq_node); \
-+ if (list_empty(&rq->queue.heads[p->bmq_idx])) { \
-+ clear_bit(p->bmq_idx, rq->queue.bitmap);\
-+ func; \
-+ }
-+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
-+ sched_info_queued(rq, p); \
-+ psi_enqueue(p, flags); \
-+ \
-+ p->bmq_idx = task_sched_prio(p, rq); \
-+ list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]); \
-+ set_bit(p->bmq_idx, rq->queue.bitmap)
-+
-+#define __SCHED_REQUEUE_TASK(p, rq, func) \
-+{ \
-+ int idx = task_sched_prio(p, rq); \
-+\
-+ list_del(&p->bmq_node); \
-+ list_add_tail(&p->bmq_node, &rq->queue.heads[idx]); \
-+ if (idx != p->bmq_idx) { \
-+ if (list_empty(&rq->queue.heads[p->bmq_idx])) \
-+ clear_bit(p->bmq_idx, rq->queue.bitmap); \
-+ p->bmq_idx = idx; \
-+ set_bit(p->bmq_idx, rq->queue.bitmap); \
-+ func; \
-+ } \
-+}
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
+
-+static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++inline int task_running_nice(struct task_struct *p)
+{
-+ return (task_sched_prio(p, rq) != p->bmq_idx);
++ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
+}
+
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
@@ -8895,24 +8750,6 @@ index 000000000000..7c71f1141d00
+ p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
+}
+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ *
-+ * sched policy return value kernel prio user prio/nice/boost
-+ *
-+ * normal, batch, idle [0 ... 53] [100 ... 139] 0/[-20 ... 19]/[-7 ... 7]
-+ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99]
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+ if (p->prio < MAX_RT_PRIO)
-+ return (p->prio - MAX_RT_PRIO);
-+ return (p->prio - MAX_RT_PRIO + p->boost_prio);
-+}
-+
+static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
+{
+ p->boost_prio = MAX_PRIORITY_ADJ;
@@ -8931,8 +8768,10 @@ index 000000000000..7c71f1141d00
+ if (rq_switch_time(rq) < boost_threshold(p))
+ boost_task(p);
+}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 50cbad89f7fa..41946f19468b 100644
+index 4f09afd2f321..805b54e517ff 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -57,6 +57,13 @@ struct sugov_cpu {
@@ -8949,7 +8788,7 @@ index 50cbad89f7fa..41946f19468b 100644
/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
unsigned long saved_idle_calls;
-@@ -171,6 +178,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+@@ -160,6 +167,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
@@ -8957,7 +8796,7 @@ index 50cbad89f7fa..41946f19468b 100644
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
-@@ -182,6 +190,55 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+@@ -171,6 +179,55 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
FREQUENCY_UTIL, NULL);
}
@@ -9013,7 +8852,7 @@ index 50cbad89f7fa..41946f19468b 100644
/**
* sugov_iowait_reset() - Reset the IO boost status of a CPU.
* @sg_cpu: the sugov data for the CPU to boost
-@@ -322,13 +379,19 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+@@ -311,13 +368,19 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
@@ -9033,7 +8872,7 @@ index 50cbad89f7fa..41946f19468b 100644
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
-@@ -446,6 +509,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
+@@ -438,6 +501,10 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
raw_spin_lock(&sg_policy->update_lock);
@@ -9044,7 +8883,7 @@ index 50cbad89f7fa..41946f19468b 100644
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
-@@ -603,6 +670,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+@@ -598,6 +665,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
ret = sched_setattr_nocheck(thread, &attr);
@@ -9052,7 +8891,7 @@ index 50cbad89f7fa..41946f19468b 100644
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -837,7 +905,9 @@ cpufreq_governor_init(schedutil_gov);
+@@ -832,7 +900,9 @@ cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
@@ -9063,7 +8902,7 @@ index 50cbad89f7fa..41946f19468b 100644
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index 5f611658eeab..631276f56ba0 100644
+index 872e481d5098..f920c8b48ec1 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
@@ -9111,11 +8950,85 @@ index 5f611658eeab..631276f56ba0 100644
};
task_cputime(p, &cputime.utime, &cputime.stime);
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index c5aacbd492a1..105433c36b5f 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -8,6 +8,7 @@
+ */
+ #include "sched.h"
+
++#ifndef CONFIG_SCHED_ALT
+ /*
+ * This allows printing both to /proc/sched_debug and
+ * to the console
+@@ -210,6 +211,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+
+@@ -273,6 +275,7 @@ static const struct file_operations sched_dynamic_fops = {
+
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+
+ static const struct seq_operations sched_debug_sops;
+@@ -288,6 +291,7 @@ static const struct file_operations sched_debug_fops = {
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+
+ static struct dentry *debugfs_sched;
+
+@@ -297,12 +301,15 @@ static __init int sched_init_debug(void)
+
+ debugfs_sched = debugfs_create_dir("sched", NULL);
+
++#ifndef CONFIG_SCHED_ALT
+ debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+
++#ifndef CONFIG_SCHED_ALT
+ debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
+@@ -330,11 +337,13 @@ static __init int sched_init_debug(void)
+ #endif
+
+ debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+
+ return 0;
+ }
+ late_initcall(sched_init_debug);
+
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+
+ static cpumask_var_t sd_sysctl_cpus;
+@@ -1047,6 +1056,7 @@ void proc_sched_set_task(struct task_struct *p)
+ memset(&p->se.statistics, 0, sizeof(p->se.statistics));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+
+ void resched_latency_warn(int cpu, u64 latency)
+ {
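Net effect of the CONFIG_SCHED_ALT guards above: sched_init_debug() now creates only the "preempt" entry (when CONFIG_PREEMPT_DYNAMIC is set), while "features", "verbose", "debug" and the CFS *_ns tunables are compiled out, along with the sysctl and proc_sched_set_task machinery below them. A quick probe of what remains, assuming debugfs is mounted at the usual path and readable (typically root only):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	/* List /sys/kernel/debug/sched; on a CONFIG_SCHED_ALT kernel this
	 * should show little more than "preempt". */
	DIR *d = opendir("/sys/kernel/debug/sched");
	struct dirent *e;

	if (!d) {
		perror("/sys/kernel/debug/sched (debugfs mounted? root?)");
		return 1;
	}
	while ((e = readdir(d)))
		if (e->d_name[0] != '.')
			printf("%s\n", e->d_name);
	closedir(d);
	return 0;
}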
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
-index 7199e6f23789..bbdd227da3a4 100644
+index 7ca3d3d86c2a..23e890141939 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
-@@ -397,6 +397,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+@@ -403,6 +403,7 @@ void cpu_startup_entry(enum cpuhp_state state)
do_idle();
}
@@ -9123,300 +9036,135 @@ index 7199e6f23789..bbdd227da3a4 100644
/*
* idle-task scheduling class.
*/
-@@ -510,3 +511,4 @@ DEFINE_SCHED_CLASS(idle) = {
+@@ -516,3 +517,4 @@ DEFINE_SCHED_CLASS(idle) = {
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
};
+#endif
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
new file mode 100644
-index 000000000000..623908cf4380
+index 000000000000..06d88e72b543
--- /dev/null
+++ b/kernel/sched/pds.h
-@@ -0,0 +1,9 @@
-+#ifndef PDS_H
-+#define PDS_H
-+
-+/* bits:
-+ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
-+#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + 1)
-+#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
-+
-+#endif
-diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h
-new file mode 100644
-index 000000000000..335ce3a8e3ec
---- /dev/null
-+++ b/kernel/sched/pds_imp.h
-@@ -0,0 +1,279 @@
+@@ -0,0 +1,129 @@
+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
-+static const u64 user_prio2deadline[NICE_WIDTH] = {
-+/* -20 */ 4194304, 4613734, 5075107, 5582617, 6140878,
-+/* -15 */ 6754965, 7430461, 8173507, 8990857, 9889942,
-+/* -10 */ 10878936, 11966829, 13163511, 14479862, 15927848,
-+/* -5 */ 17520632, 19272695, 21199964, 23319960, 25651956,
-+/* 0 */ 28217151, 31038866, 34142752, 37557027, 41312729,
-+/* 5 */ 45444001, 49988401, 54987241, 60485965, 66534561,
-+/* 10 */ 73188017, 80506818, 88557499, 97413248, 107154572,
-+/* 15 */ 117870029, 129657031, 142622734, 156885007, 172573507
-+};
++static int sched_timeslice_shift = 22;
+
-+static const unsigned char dl_level_map[] = {
-+/* 0 4 8 12 */
-+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18,
-+/* 16 20 24 28 */
-+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 17, 17, 17, 17,
-+/* 32 36 40 44 */
-+ 17, 17, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15,
-+/* 48 52 56 60 */
-+ 15, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
-+/* 64 68 72 76 */
-+ 12, 11, 11, 11, 10, 10, 10, 9, 9, 8, 7, 6, 5, 4, 3, 2,
-+/* 80 84 88 92 */
-+ 1, 0
-+};
++#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+
-+/* DEFAULT_SCHED_PRIO:
-+ * dl_level_map[(user_prio2deadline[39] - user_prio2deadline[0]) >> 21] =
-+ * dl_level_map[68] =
-+ * 10
++/*
++ * Common interfaces
+ */
-+#define DEFAULT_SCHED_PRIO (MAX_RT_PRIO + 10)
-+
-+static inline int normal_prio(struct task_struct *p)
++static inline void sched_timeslice_imp(const int timeslice_ms)
+{
-+ if (task_has_rt_policy(p))
-+ return MAX_RT_PRIO - 1 - p->rt_priority;
-+
-+ return MAX_RT_PRIO;
++ if (2 == timeslice_ms)
++ sched_timeslice_shift = 21;
+}
+
+static inline int
-+task_sched_prio(const struct task_struct *p, const struct rq *rq)
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
-+ size_t delta;
++ s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+
-+ if (p == rq->idle)
-+ return IDLE_TASK_SCHED_PRIO;
-+
-+ if (p->prio < MAX_RT_PRIO)
-+ return p->prio;
-+
-+ delta = (rq->clock + user_prio2deadline[39] - p->deadline) >> 21;
-+ delta = min((size_t)delta, ARRAY_SIZE(dl_level_map) - 1);
++ if (unlikely(delta > NORMAL_PRIO_NUM - 1)) {
++ pr_info("pds: task_sched_prio_normal delta %lld, deadline %llu, time_edge %llu\n",
++ delta, p->deadline, rq->time_edge);
++ return NORMAL_PRIO_NUM - 1;
++ }
+
-+ return MAX_RT_PRIO + dl_level_map[delta];
++ return (delta < 0) ? 0 : delta;
+}
+
-+int task_running_nice(struct task_struct *p)
++static inline int task_sched_prio(const struct task_struct *p)
+{
-+ return task_sched_prio(p, task_rq(p)) > DEFAULT_SCHED_PRIO;
++ return (p->prio < MAX_RT_PRIO) ? p->prio :
++ MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+}
+
-+static inline void update_task_priodl(struct task_struct *p)
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
-+ p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8);
++ return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++ NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
+}
+
-+static inline void requeue_task(struct task_struct *p, struct rq *rq);
-+
-+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++static inline int sched_prio2idx(int prio, struct rq *rq)
+{
-+ /*printk(KERN_INFO "sched: time_slice_expired(%d) - %px\n", cpu_of(rq), p);*/
-+ p->time_slice = sched_timeslice_ns;
-+
-+ if (p->prio >= MAX_RT_PRIO)
-+ p->deadline = rq->clock +
-+ user_prio2deadline[p->static_prio - MAX_RT_PRIO];
-+ update_task_priodl(p);
-+
-+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
-+ requeue_task(p, rq);
++ return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
++ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++ rq->time_edge);
+}
+
-+/*
-+ * pds_skiplist_task_search -- search function used in PDS run queue skip list
-+ * node insert operation.
-+ * @it: iterator pointer to the node in the skip list
-+ * @node: pointer to the skiplist_node to be inserted
-+ *
-+ * Returns true if key of @it is less or equal to key value of @node, otherwise
-+ * false.
-+ */
-+static inline bool
-+pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node)
++static inline int sched_idx2prio(int idx, struct rq *rq)
+{
-+ return (skiplist_entry(it, struct task_struct, sl_node)->priodl <=
-+ skiplist_entry(node, struct task_struct, sl_node)->priodl);
++ return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
++ NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++ NORMAL_PRIO_MOD(rq->time_edge));
+}
+
-+/*
-+ * Define the skip list insert function for PDS
-+ */
-+DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search);
-+
-+/*
-+ * Init the queue structure in rq
-+ */
-+static inline void sched_queue_init(struct rq *rq)
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
+{
-+ INIT_SKIPLIST_NODE(&rq->sl_header);
++ if (p->prio >= MAX_RT_PRIO)
++ p->deadline = (rq->clock >> sched_timeslice_shift) +
++ p->static_prio - (MAX_PRIO - NICE_WIDTH);
+}
+
-+/*
-+ * Init idle task and put into queue structure of rq
-+ * IMPORTANT: may be called multiple times for a single cpu
-+ */
-+static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++int task_running_nice(struct task_struct *p)
+{
-+ /*printk(KERN_INFO "sched: init(%d) - %px\n", cpu_of(rq), idle);*/
-+ int default_prio = idle->prio;
-+
-+ idle->prio = MAX_PRIO;
-+ idle->deadline = 0ULL;
-+ update_task_priodl(idle);
-+
-+ INIT_SKIPLIST_NODE(&rq->sl_header);
-+
-+ idle->sl_node.level = idle->sl_level;
-+ pds_skiplist_insert(&rq->sl_header, &idle->sl_node);
-+
-+ idle->prio = default_prio;
++ return (p->prio > DEFAULT_PRIO);
+}
+
-+/*
-+ * This routine assume that the idle task always in queue
-+ */
-+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++static inline void update_rq_time_edge(struct rq *rq)
+{
-+ struct skiplist_node *node = rq->sl_header.next[0];
-+
-+ BUG_ON(node == &rq->sl_header);
-+ return skiplist_entry(node, struct task_struct, sl_node);
-+}
++ struct list_head head;
++ u64 old = rq->time_edge;
++ u64 now = rq->clock >> sched_timeslice_shift;
++ u64 prio, delta;
+
-+static inline struct task_struct *
-+sched_rq_next_task(struct task_struct *p, struct rq *rq)
-+{
-+ struct skiplist_node *next = p->sl_node.next[0];
++ if (now == old)
++ return;
+
-+ BUG_ON(next == &rq->sl_header);
-+ return skiplist_entry(next, struct task_struct, sl_node);
-+}
++ delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
++ INIT_LIST_HEAD(&head);
+
-+static inline unsigned long sched_queue_watermark(struct rq *rq)
-+{
-+ return task_sched_prio(sched_rq_first_task(rq), rq);
-+}
++ for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
++ list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
++ NORMAL_PRIO_MOD(prio + old), &head);
+
-+#define __SCHED_DEQUEUE_TASK(p, rq, flags, func) \
-+ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
-+ sched_info_dequeued(rq, p); \
-+ \
-+ if (skiplist_del_init(&rq->sl_header, &p->sl_node)) { \
-+ func; \
-+ }
++ rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
++ rq->queue.bitmap[2] >> delta;
++ rq->time_edge = now;
++ if (!list_empty(&head)) {
++ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++ struct task_struct *p;
+
-+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
-+ sched_info_queued(rq, p); \
-+ psi_enqueue(p, flags); \
-+ \
-+ p->sl_node.level = p->sl_level; \
-+ pds_skiplist_insert(&rq->sl_header, &p->sl_node)
++ list_for_each_entry(p, &head, sq_node)
++ p->sq_idx = idx;
+
-+/*
-+ * Requeue a task @p to @rq
-+ */
-+#define __SCHED_REQUEUE_TASK(p, rq, func) \
-+{\
-+ bool b_first = skiplist_del_init(&rq->sl_header, &p->sl_node); \
-+\
-+ p->sl_node.level = p->sl_level; \
-+ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) { \
-+ func; \
-+ } \
++ list_splice(&head, rq->queue.heads + idx);
++ rq->queue.bitmap[2] |= 1UL;
++ }
+}
+
-+static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
+{
-+ struct skiplist_node *node;
-+
-+ node = p->sl_node.prev[0];
-+ if (node != &rq->sl_header &&
-+ skiplist_entry(node, struct task_struct, sl_node)->priodl > p->priodl)
-+ return true;
-+
-+ node = p->sl_node.next[0];
-+ if (node != &rq->sl_header &&
-+ skiplist_entry(node, struct task_struct, sl_node)->priodl < p->priodl)
-+ return true;
-+
-+ return false;
++ p->time_slice = sched_timeslice_ns;
++ sched_renew_deadline(p, rq);
++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++ requeue_task(p, rq);
+}
+
-+/*
-+ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip
-+ * list node which is used in PDS run queue.
-+ *
-+ * __ffs() is used to satisfy p = 0.5 between each levels, and there should be
-+ * platform instruction(known as ctz/clz) for acceleration.
-+ *
-+ * The skiplist level for a task is populated when task is created and doesn't
-+ * change in task's life time. When task is being inserted into run queue, this
-+ * skiplist level is set to task's sl_node->level, the skiplist insert function
-+ * may change it based on current level of the skip lsit.
-+ */
-+static inline int pds_skiplist_random_level(const struct task_struct *p)
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
+{
-+ /*
-+ * 1. Some architectures don't have better than microsecond resolution
-+ * so mask out ~microseconds as a factor of the random seed for skiplist
-+ * insertion.
-+ * 2. Use address of task structure pointer as another factor of the
-+ * random seed for task burst forking scenario.
-+ */
-+ unsigned long randseed = (task_rq(p)->clock ^ (unsigned long)p) >> 10;
-+
-+ randseed &= __GENMASK(NUM_SKIPLIST_LEVEL - 1, 0);
-+ if (randseed)
-+ return __ffs(randseed);
-+
-+ return (NUM_SKIPLIST_LEVEL - 1);
++ u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
++ if (unlikely(p->deadline > max_dl))
++ p->deadline = max_dl;
+}
+
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
+{
-+ p->sl_level = pds_skiplist_random_level(p);
-+ if (p->prio >= MAX_RT_PRIO)
-+ p->deadline = rq->clock +
-+ user_prio2deadline[p->static_prio - MAX_RT_PRIO];
-+ update_task_priodl(p);
-+}
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ *
-+ * sched policy return value kernel prio user prio/nice
-+ *
-+ * normal, batch, idle [0 ... 39] 100 0/[-20 ... 19]
-+ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99]
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+ int ret;
-+
-+ if (p->prio < MAX_RT_PRIO)
-+ return (p->prio - MAX_RT_PRIO);
-+
-+ preempt_disable();
-+ ret = task_sched_prio(p, this_rq()) - MAX_RT_PRIO;
-+ preempt_enable();
-+
-+ return ret;
++ sched_renew_deadline(p, rq);
+}
+
+static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
@@ -9429,7 +9177,7 @@ index 000000000000..335ce3a8e3ec
+#endif
+static void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
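Two arithmetic notes on the new pds.h, for orientation. With sched_timeslice_shift at 22, one rq->time_edge tick is 2^22 ns ~= 4.19 ms, and sched_timeslice_imp() drops the shift to 21 (2^21 ns ~= 2.10 ms) when the 2 ms timeslice is chosen; sched_renew_deadline() then places a nice-0 task (static_prio 120, taking mainline MAX_PRIO = 140 and NICE_WIDTH = 40) 20 ticks past the current clock. The prio2idx/idx2prio helpers rotate those deadline levels through a ring of queue slots so that update_rq_time_edge() can expire levels by shifting one bitmap word instead of re-sorting tasks. A user-space sketch of the round trip, with MIN_NORMAL_PRIO and NORMAL_PRIO_NUM assumed to be 128 and 64 to match the rq->queue.bitmap[2] handling (both are defined outside this hunk):

#include <assert.h>
#include <stdio.h>

#define MAX_RT_PRIO	     100	/* mainline value */
#define MIN_NORMAL_PRIO	     128	/* assumed: start of bitmap word 2 */
#define NORMAL_PRIO_NUM	      64	/* assumed: one 64-bit bitmap word */
#define IDLE_TASK_SCHED_PRIO (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM) /* assumed */
#define NORMAL_PRIO_MOD(x)   ((x) & (NORMAL_PRIO_NUM - 1))

/* Mirrors sched_prio2idx(): rotate a deadline-relative priority into a
 * queue slot, offset by the run queue's current time_edge. */
static int prio2idx(int prio, unsigned long long edge)
{
	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) + edge);
}

/* Mirrors sched_idx2prio(): undo the rotation. */
static int idx2prio(int idx, unsigned long long edge)
{
	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
				NORMAL_PRIO_MOD(edge));
}

int main(void)
{
	/* The two mappings must stay inverses for every normal priority
	 * and any time_edge value; that invariant is what makes the
	 * shifted-bitmap expiry in update_rq_time_edge() safe. */
	for (unsigned long long edge = 0; edge < 3 * NORMAL_PRIO_NUM; edge++)
		for (int p = MIN_NORMAL_PRIO; p < MIN_NORMAL_PRIO + NORMAL_PRIO_NUM; p++)
			assert(idx2prio(prio2idx(p, edge), edge) == p);
	printf("prio <-> idx round-trip holds\n");
	return 0;
}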
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
-index 2c613e1cff3a..0103b2a7201d 100644
+index a554e3bbab2b..3e56f5e6ff5c 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -270,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
@@ -9452,7 +9200,7 @@ index 2c613e1cff3a..0103b2a7201d 100644
* thermal:
*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index 795e43e02afc..856163dac896 100644
+index cfe94ffd2b38..8a33dc6124aa 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,13 +1,15 @@
@@ -9480,7 +9228,7 @@ index 795e43e02afc..856163dac896 100644
static inline void cfs_se_util_change(struct sched_avg *avg)
{
unsigned int enqueued;
-@@ -162,9 +165,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+@@ -153,9 +156,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
@@ -9492,7 +9240,7 @@ index 795e43e02afc..856163dac896 100644
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
-@@ -182,6 +187,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+@@ -173,6 +178,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
return 0;
}
@@ -9501,7 +9249,7 @@ index 795e43e02afc..856163dac896 100644
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 10a1522b1e30..1a74a266340b 100644
+index a189bec13729..02e4234cbc1f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,10 @@
@@ -9515,18 +9263,17 @@ index 10a1522b1e30..1a74a266340b 100644
#include <linux/sched.h>
#include <linux/sched/autogroup.h>
-@@ -2720,3 +2724,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+@@ -2749,3 +2753,8 @@ extern int sched_dynamic_mode(const char *str);
+ extern void sched_dynamic_update(int mode);
+ #endif
- void swake_up_all_locked(struct swait_queue_head *q);
- void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-+
+static inline int task_running_nice(struct task_struct *p)
+{
+ return (task_nice(p) > 0);
+}
+#endif /* !CONFIG_SCHED_ALT */
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
-index 750fb3c67eed..108422ebc7bf 100644
+index 3f93fc3b5648..528b71e144e9 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
@@ -9557,7 +9304,7 @@ index 750fb3c67eed..108422ebc7bf 100644
}
return 0;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 09d35044bd88..2c146b042b51 100644
+index 55a0a243e871..fda2e8fe6ffe 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@
@@ -9568,7 +9315,7 @@ index 09d35044bd88..2c146b042b51 100644
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
-@@ -1241,8 +1242,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
+@@ -1272,8 +1273,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
*/
static int default_relax_domain_level = -1;
@@ -9579,7 +9326,7 @@ index 09d35044bd88..2c146b042b51 100644
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1472,6 +1475,7 @@ sd_init(struct sched_domain_topology_level *tl,
+@@ -1503,6 +1506,7 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}
@@ -9587,7 +9334,7 @@ index 09d35044bd88..2c146b042b51 100644
/*
* Topology list, bottom-up.
-@@ -1501,6 +1505,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+@@ -1532,6 +1536,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
sched_domain_topology = tl;
}
@@ -9595,7 +9342,7 @@ index 09d35044bd88..2c146b042b51 100644
#ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2371,3 +2376,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+@@ -2398,3 +2403,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex);
}
@@ -9614,7 +9361,7 @@ index 09d35044bd88..2c146b042b51 100644
+#endif /* CONFIG_NUMA */
+#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 62fbd09b5dc1..19f9a6185db3 100644
+index d4a78e08f6d8..403bd33e5880 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -120,6 +120,10 @@ static unsigned long long_max = LONG_MAX;
@@ -9628,16 +9375,7 @@ index 62fbd09b5dc1..19f9a6185db3 100644
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
-@@ -184,7 +188,7 @@ static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
- int sysctl_legacy_va_layout;
- #endif
-
--#ifdef CONFIG_SCHED_DEBUG
-+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_ALT)
- static int min_sched_granularity_ns = 100000; /* 100 usecs */
- static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
- static int min_wakeup_granularity_ns; /* 0 usecs */
-@@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
+@@ -1729,6 +1733,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
}
static struct ctl_table kern_table[] = {
@@ -9662,7 +9400,7 @@ index 62fbd09b5dc1..19f9a6185db3 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -1854,6 +1876,7 @@ static struct ctl_table kern_table[] = {
+@@ -1848,6 +1870,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
@@ -9670,7 +9408,7 @@ index 62fbd09b5dc1..19f9a6185db3 100644
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
-@@ -2430,6 +2453,17 @@ static struct ctl_table kern_table[] = {
+@@ -2424,6 +2447,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -9689,7 +9427,7 @@ index 62fbd09b5dc1..19f9a6185db3 100644
{
.procname = "spin_retry",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 5c9d968187ae..fe47db46303c 100644
+index 4a66725b1d4a..cb80ed5c1f5c 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1940,8 +1940,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
@@ -9704,7 +9442,7 @@ index 5c9d968187ae..fe47db46303c 100644
hrtimer_init_sleeper_on_stack(&t, clockid, mode);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
-index 9abe15255bc4..691db8192ddb 100644
+index 3bb96a8b49c9..11509fcf1d8a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -216,7 +216,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
@@ -9764,7 +9502,7 @@ index 9abe15255bc4..691db8192ddb 100644
return false;
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
-index 73ef12092250..24bf8ef1249a 100644
+index adf7ef194005..11c8f36e281b 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1052,10 +1052,15 @@ static int trace_wakeup_test_thread(void *data)
diff --git a/PKGBUILD b/PKGBUILD
index 71a294a5f16a..84c530920177 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -60,7 +60,7 @@ _subarch=
_localmodcfg=
pkgbase=linux-pds
-pkgver=5.12.15.arch1
+pkgver=5.13.1.arch1
pkgrel=1
pkgdesc="Linux"
_srcver_tag=v${pkgver%.*}-${pkgver##*.}
@@ -98,7 +98,7 @@ source=(
"${_reponame}::git+${_repo_url}?signed#tag=$_srcver_tag"
"git+${_repo_url_kernel_patch}"
config # kernel config file
- 0009-prjc_v5.12-r1.patch
+ 0009-prjc_v5.13-r1.patch
0005-glitched-pds.patch
)
validpgpkeys=(
@@ -108,8 +108,8 @@ validpgpkeys=(
)
sha512sums=('SKIP'
'SKIP'
- '3753e46e43c574d921418f738f7359d792629f02711c449ec485951419252bc1c27d4594a4dbca674e16e4b31763b65e3b595b4424525a9270b4f8d6dda6cbd0'
- 'be1c86baa2dd5f10314817100d908763ef23d6e1bcf9869a79ecd3250fefe0f3c662d72a3b9237e3e965c72042c791570957c12257d3031ba8a439cb1b22561f'
+ '12323ce737071f6ebc37a5c6d6cca90ae690803b58685d6091d5f2de6781d51a1f2ae1d84443e8bb18130484ef1182ceb5b982b3e7842d0c097e76723ecc7ed9'
+ 'ad9276a80e28eec461a307ad44a1ed5acebf810b14ce8c9e6f1dc211be6ed7e72f535175fb65f3115fa217f8b635122c65c2c002ff00ba458c867d8bb6257f36'
'889f0a49f326de3f119290256393b09a9e9241c2a297ca0b7967a2884e4e35d71388d2a559e4c206f55f67228b65e8f2013a1ec61f6ff8f1de3b6a725fd5fa57')
export KBUILD_BUILD_HOST=archlinux
@@ -126,7 +126,7 @@ prepare() {
PatchesArray=(
$_reponame_kernel_patch/$_kernel_patch_name
- 0009-prjc_v5.12-r1.patch
+ 0009-prjc_v5.13-r1.patch
0005-glitched-pds.patch
)
diff --git a/config b/config
index 8ad3c3f49360..152c809587c6 100644
--- a/config
+++ b/config
@@ -1,11 +1,13 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.12.15-arch1 Kernel Configuration
+# Linux/x86 5.13.1-arch1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 11.1.0"
CONFIG_CC_IS_GCC=y
CONFIG_GCC_VERSION=110100
CONFIG_CLANG_VERSION=0
+CONFIG_AS_IS_GNU=y
+CONFIG_AS_VERSION=23601
CONFIG_LD_IS_BFD=y
CONFIG_LD_VERSION=23601
CONFIG_LLD_VERSION=0
@@ -101,6 +103,24 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
# end of Timers subsystem
+CONFIG_BPF=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+
+#
+# BPF subsystem
+#
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_USERMODE_DRIVER=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=m
+CONFIG_BPF_LSM=y
+# end of BPF subsystem
+
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
@@ -190,6 +210,7 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_SOCK_CGROUP_DATA=y
CONFIG_NAMESPACES=y
@@ -220,7 +241,6 @@ CONFIG_SYSCTL=y
CONFIG_HAVE_UID16=y
CONFIG_SYSCTL_EXCEPTION_TRACE=y
CONFIG_HAVE_PCSPKR_PLATFORM=y
-CONFIG_BPF=y
# CONFIG_EXPERT is not set
CONFIG_UID16=y
CONFIG_MULTIUSER=y
@@ -245,19 +265,12 @@ CONFIG_AIO=y
CONFIG_IO_URING=y
CONFIG_ADVISE_SYSCALLS=y
CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
+CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
CONFIG_MEMBARRIER=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
-CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_BPF_JIT_DEFAULT_ON=y
-CONFIG_USERMODE_DRIVER=y
-CONFIG_BPF_PRELOAD=y
-CONFIG_BPF_PRELOAD_UMD=m
CONFIG_USERFAULTFD=y
CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
CONFIG_KCMP=y
@@ -305,7 +318,6 @@ CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_ARCH_HAS_FILTER_PGPROT=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
@@ -446,6 +458,7 @@ CONFIG_X86_PMEM_LEGACY_DEVICE=y
CONFIG_X86_PMEM_LEGACY=m
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_RESERVE_LOW=64
CONFIG_MTRR=y
CONFIG_MTRR_SANITIZER=y
CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
@@ -499,12 +512,8 @@ CONFIG_HAVE_LIVEPATCH=y
# end of Processor type and features
CONFIG_ARCH_HAS_ADD_PAGES=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
-CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
-CONFIG_ARCH_ENABLE_THP_MIGRATION=y
#
# Power management and ACPI options
@@ -748,6 +757,7 @@ CONFIG_KVM_XFER_TO_GUEST_WORK=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_KVM_INTEL=m
+CONFIG_X86_SGX_KVM=y
CONFIG_KVM_AMD=m
CONFIG_KVM_AMD_SEV=y
CONFIG_KVM_XEN=y
@@ -854,6 +864,8 @@ CONFIG_COMPAT_OLD_SIGACTION=y
CONFIG_COMPAT_32BIT_TIME=y
CONFIG_HAVE_ARCH_VMAP_STACK=y
CONFIG_VMAP_STACK=y
+CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
+# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set
CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
@@ -900,10 +912,12 @@ CONFIG_MODULE_SIG_ALL=y
# CONFIG_MODULE_SIG_SHA384 is not set
CONFIG_MODULE_SIG_SHA512=y
CONFIG_MODULE_SIG_HASH="sha512"
-CONFIG_MODULE_COMPRESS=y
+# CONFIG_MODULE_COMPRESS_NONE is not set
# CONFIG_MODULE_COMPRESS_GZIP is not set
-CONFIG_MODULE_COMPRESS_XZ=y
+# CONFIG_MODULE_COMPRESS_XZ is not set
+CONFIG_MODULE_COMPRESS_ZSTD=y
CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
CONFIG_MODULES_TREE_LOOKUP=y
CONFIG_BLOCK=y
CONFIG_BLK_RQ_ALLOC_TIME=y
@@ -1012,19 +1026,24 @@ CONFIG_HAVE_FAST_GUP=y
CONFIG_NUMA_KEEP_MEMINFO=y
CONFIG_MEMORY_ISOLATION=y
CONFIG_HAVE_BOOTMEM_INFO_NODE=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_SPARSE=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MHP_MEMMAP_ON_MEMORY=y
CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
CONFIG_MEMORY_BALLOON=y
CONFIG_BALLOON_COMPACTION=y
CONFIG_COMPACTION=y
CONFIG_PAGE_REPORTING=y
CONFIG_MIGRATION=y
+CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
+CONFIG_ARCH_ENABLE_THP_MIGRATION=y
CONFIG_CONTIG_ALLOC=y
CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
@@ -1042,6 +1061,7 @@ CONFIG_FRONTSWAP=y
CONFIG_CMA=y
# CONFIG_CMA_DEBUG is not set
CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
@@ -1065,6 +1085,7 @@ CONFIG_ZSMALLOC=y
CONFIG_GENERIC_EARLY_IOREMAP=y
# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_ARCH_HAS_PTE_DEVMAP=y
CONFIG_ZONE_DEVICE=y
CONFIG_DEV_PAGEMAP_OPS=y
@@ -1227,8 +1248,7 @@ CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_NETLINK_OSF=m
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_LOG_SYSLOG=m
CONFIG_NETFILTER_CONNCOUNT=m
CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_SECMARK=y
@@ -1302,6 +1322,7 @@ CONFIG_NFT_REJECT_NETDEV=m
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XTABLES_COMPAT=y
#
# Xtables combined modules
@@ -1542,7 +1563,6 @@ CONFIG_NF_DEFRAG_IPV6=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -1604,12 +1624,12 @@ CONFIG_BRIDGE_IGMP_SNOOPING=y
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE_MRP=y
CONFIG_BRIDGE_CFM=y
-CONFIG_HAVE_NET_DSA=y
CONFIG_NET_DSA=m
CONFIG_NET_DSA_TAG_8021Q=m
CONFIG_NET_DSA_TAG_AR9331=m
CONFIG_NET_DSA_TAG_BRCM_COMMON=m
CONFIG_NET_DSA_TAG_BRCM=m
+CONFIG_NET_DSA_TAG_BRCM_LEGACY=m
CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
CONFIG_NET_DSA_TAG_HELLCREEK=m
CONFIG_NET_DSA_TAG_GSWIP=m
@@ -1797,6 +1817,7 @@ CONFIG_QRTR_TUN=m
CONFIG_QRTR_MHI=m
CONFIG_NET_NCSI=y
CONFIG_NCSI_OEM_CMD_GET_MAC=y
+CONFIG_PCPU_DEV_REFCNT=y
CONFIG_RPS=y
CONFIG_RFS_ACCEL=y
CONFIG_SOCK_RX_QUEUE_MAPPING=y
@@ -1805,7 +1826,6 @@ CONFIG_CGROUP_NET_PRIO=y
CONFIG_CGROUP_NET_CLASSID=y
CONFIG_NET_RX_BUSY_POLL=y
CONFIG_BQL=y
-CONFIG_BPF_JIT=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_NET_FLOW_LIMIT=y
@@ -1897,6 +1917,7 @@ CONFIG_CAN_MCP251XFD=m
CONFIG_CAN_8DEV_USB=m
CONFIG_CAN_EMS_USB=m
CONFIG_CAN_ESD_USB2=m
+CONFIG_CAN_ETAS_ES58X=m
CONFIG_CAN_GS_USB=m
CONFIG_CAN_KVASER_USB=m
CONFIG_CAN_MCBA_USB=m
@@ -1921,6 +1942,7 @@ CONFIG_BT_LE=y
CONFIG_BT_6LOWPAN=m
CONFIG_BT_LEDS=y
CONFIG_BT_MSFTEXT=y
+CONFIG_BT_AOSPEXT=y
CONFIG_BT_DEBUGFS=y
# CONFIG_BT_SELFTEST is not set
@@ -1964,6 +1986,7 @@ CONFIG_BT_ATH3K=m
CONFIG_BT_MTKSDIO=m
CONFIG_BT_MTKUART=m
CONFIG_BT_HCIRSI=m
+CONFIG_BT_VIRTIO=m
# end of Bluetooth device drivers
CONFIG_AF_RXRPC=m
@@ -2072,12 +2095,12 @@ CONFIG_LWTUNNEL_BPF=y
CONFIG_DST_CACHE=y
CONFIG_GRO_CELLS=y
CONFIG_SOCK_VALIDATE_XMIT=y
+CONFIG_NET_SELFTESTS=m
CONFIG_NET_SOCK_MSG=y
CONFIG_NET_DEVLINK=y
CONFIG_PAGE_POOL=y
CONFIG_FAILOVER=m
CONFIG_ETHTOOL_NETLINK=y
-CONFIG_HAVE_EBPF_JIT=y
#
# Device Drivers
@@ -2226,6 +2249,7 @@ CONFIG_REGMAP_W1=m
CONFIG_REGMAP_MMIO=y
CONFIG_REGMAP_IRQ=y
CONFIG_REGMAP_SOUNDWIRE=m
+CONFIG_REGMAP_SOUNDWIRE_MBQ=m
CONFIG_REGMAP_SCCB=m
CONFIG_REGMAP_SPI_AVMM=m
CONFIG_DMA_SHARED_BUFFER=y
@@ -2407,7 +2431,6 @@ CONFIG_ZRAM_DEF_COMP_LZORLE=y
CONFIG_ZRAM_DEF_COMP="lzo-rle"
CONFIG_ZRAM_WRITEBACK=y
# CONFIG_ZRAM_MEMORY_TRACKING is not set
-CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -2477,10 +2500,10 @@ CONFIG_DS1682=m
CONFIG_VMWARE_BALLOON=m
CONFIG_LATTICE_ECP3_CONFIG=m
# CONFIG_SRAM is not set
+CONFIG_DW_XDATA_PCIE=m
CONFIG_PCI_ENDPOINT_TEST=m
CONFIG_XILINX_SDFEC=m
CONFIG_MISC_RTSX=m
-CONFIG_PVPANIC=m
CONFIG_C2PORT=m
CONFIG_C2PORT_DURAMAR_2150=m
@@ -2524,6 +2547,9 @@ CONFIG_MISC_RTSX_PCI=m
CONFIG_MISC_RTSX_USB=m
CONFIG_HABANA_AI=m
CONFIG_UACCE=m
+CONFIG_PVPANIC=y
+CONFIG_PVPANIC_MMIO=m
+CONFIG_PVPANIC_PCI=m
# end of Misc devices
CONFIG_HAVE_IDE=y
@@ -2960,6 +2986,7 @@ CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m
CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
+CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI=m
CONFIG_NET_DSA_MV88E6XXX=m
CONFIG_NET_DSA_MV88E6XXX_PTP=y
CONFIG_NET_DSA_MSCC_SEVILLE=m
@@ -3122,6 +3149,8 @@ CONFIG_I40EVF=m
CONFIG_ICE=m
CONFIG_FM10K=m
CONFIG_IGC=m
+CONFIG_NET_VENDOR_MICROSOFT=y
+CONFIG_MICROSOFT_MANA=m
CONFIG_JME=m
CONFIG_NET_VENDOR_MARVELL=y
CONFIG_MVMDIO=m
@@ -3148,6 +3177,7 @@ CONFIG_MLX5_MPFS=y
CONFIG_MLX5_ESWITCH=y
CONFIG_MLX5_CLS_ACT=y
CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
CONFIG_MLX5_CORE_EN_DCB=y
CONFIG_MLX5_CORE_IPOIB=y
CONFIG_MLX5_FPGA_IPSEC=y
@@ -3306,7 +3336,6 @@ CONFIG_NET_VENDOR_XIRCOM=y
CONFIG_PCMCIA_XIRC2PS=m
CONFIG_FDDI=m
CONFIG_DEFXX=m
-CONFIG_DEFXX_MMIO=y
CONFIG_SKFP=m
# CONFIG_HIPPI is not set
CONFIG_NET_SB1000=m
@@ -3339,11 +3368,13 @@ CONFIG_INTEL_XWAY_PHY=m
CONFIG_LSI_ET1011C_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=m
+CONFIG_MARVELL_88X2222_PHY=m
CONFIG_MICREL_PHY=m
CONFIG_MICROCHIP_PHY=m
CONFIG_MICROCHIP_T1_PHY=m
CONFIG_MICROSEMI_PHY=m
CONFIG_NATIONAL_PHY=m
+CONFIG_NXP_C45_TJA11XX_PHY=m
CONFIG_NXP_TJA11XX_PHY=m
CONFIG_AT803X_PHY=m
CONFIG_QSEMI_PHY=m
@@ -3774,6 +3805,9 @@ CONFIG_IEEE802154_CA8210=m
# CONFIG_IEEE802154_CA8210_DEBUGFS is not set
CONFIG_IEEE802154_MCR20A=m
CONFIG_IEEE802154_HWSIM=m
+CONFIG_WWAN=y
+CONFIG_WWAN_CORE=m
+CONFIG_MHI_WWAN_CTRL=m
CONFIG_XEN_NETDEV_FRONTEND=m
CONFIG_XEN_NETDEV_BACKEND=m
CONFIG_VMXNET3=m
@@ -3936,7 +3970,6 @@ CONFIG_TABLET_USB_KBTAB=m
CONFIG_TABLET_USB_PEGASUS=m
CONFIG_TABLET_SERIAL_WACOM4=m
CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_PROPERTIES=y
CONFIG_TOUCHSCREEN_88PM860X=m
CONFIG_TOUCHSCREEN_ADS7846=m
CONFIG_TOUCHSCREEN_AD7877=m
@@ -3968,7 +4001,9 @@ CONFIG_TOUCHSCREEN_EXC3000=m
CONFIG_TOUCHSCREEN_FUJITSU=m
CONFIG_TOUCHSCREEN_GOODIX=m
CONFIG_TOUCHSCREEN_HIDEEP=m
+CONFIG_TOUCHSCREEN_HYCON_HY46XX=m
CONFIG_TOUCHSCREEN_ILI210X=m
+CONFIG_TOUCHSCREEN_ILITEK=m
CONFIG_TOUCHSCREEN_S6SY761=m
CONFIG_TOUCHSCREEN_GUNZE=m
CONFIG_TOUCHSCREEN_EKTF2127=m
@@ -3980,6 +4015,7 @@ CONFIG_TOUCHSCREEN_MAX11801=m
CONFIG_TOUCHSCREEN_MCS5000=m
CONFIG_TOUCHSCREEN_MMS114=m
CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
+CONFIG_TOUCHSCREEN_MSG2638=m
CONFIG_TOUCHSCREEN_MTOUCH=m
CONFIG_TOUCHSCREEN_INEXIO=m
CONFIG_TOUCHSCREEN_MK712=m
@@ -4045,6 +4081,7 @@ CONFIG_INPUT_AD714X=m
CONFIG_INPUT_AD714X_I2C=m
CONFIG_INPUT_AD714X_SPI=m
CONFIG_INPUT_ARIZONA_HAPTICS=m
+CONFIG_INPUT_ATC260X_ONKEY=m
CONFIG_INPUT_BMA150=m
CONFIG_INPUT_E3X0_BUTTON=m
CONFIG_INPUT_PCSPKR=m
@@ -4088,6 +4125,7 @@ CONFIG_INPUT_ADXL34X_I2C=m
CONFIG_INPUT_ADXL34X_SPI=m
CONFIG_INPUT_IMS_PCU=m
CONFIG_INPUT_IQS269A=m
+CONFIG_INPUT_IQS626A=m
CONFIG_INPUT_CMA3000=m
CONFIG_INPUT_CMA3000_I2C=m
CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
@@ -4213,19 +4251,13 @@ CONFIG_SERIAL_SPRD=m
CONFIG_SERIAL_MCTRL_GPIO=y
CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-CONFIG_CYZ_INTR=y
CONFIG_MOXA_INTELLIO=m
CONFIG_MOXA_SMARTIO=m
CONFIG_SYNCLINK_GT=m
-CONFIG_ISI=m
CONFIG_N_HDLC=m
CONFIG_N_GSM=m
CONFIG_NOZOMI=m
CONFIG_NULL_TTY=m
-CONFIG_TRACE_ROUTER=m
-CONFIG_TRACE_SINK=m
CONFIG_HVC_DRIVER=y
CONFIG_HVC_IRQ=y
CONFIG_HVC_XEN=y
@@ -4268,7 +4300,6 @@ CONFIG_IPWIRELESS=m
CONFIG_MWAVE=m
CONFIG_DEVMEM=y
-# CONFIG_DEVKMEM is not set
CONFIG_NVRAM=y
CONFIG_RAW_DRIVER=m
CONFIG_MAX_RAW_DEVS=8192
@@ -4385,6 +4416,7 @@ CONFIG_I2C_XILINX=m
#
CONFIG_I2C_DIOLAN_U2C=m
CONFIG_I2C_DLN2=m
+CONFIG_I2C_CP2615=m
CONFIG_I2C_PARPORT=m
CONFIG_I2C_ROBOTFUZZ_OSIF=m
CONFIG_I2C_TAOS_EVM=m
@@ -4417,6 +4449,8 @@ CONFIG_SPI_MEM=y
# SPI Master Controller Drivers
#
CONFIG_SPI_ALTERA=m
+CONFIG_SPI_ALTERA_CORE=m
+CONFIG_SPI_ALTERA_DFL=m
CONFIG_SPI_AXI_SPI_ENGINE=m
CONFIG_SPI_BITBANG=m
CONFIG_SPI_BUTTERFLY=m
@@ -4681,6 +4715,7 @@ CONFIG_W1_SLAVE_DS28E17=m
# end of 1-wire Slaves
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_ATC260X=m
CONFIG_POWER_RESET_MT6323=y
CONFIG_POWER_RESET_RESTART=y
CONFIG_POWER_SUPPLY=y
@@ -4748,11 +4783,14 @@ CONFIG_CHARGER_BQ256XX=m
CONFIG_CHARGER_SMB347=m
CONFIG_CHARGER_TPS65090=m
CONFIG_BATTERY_GAUGE_LTC2941=m
+CONFIG_BATTERY_GOLDFISH=m
CONFIG_BATTERY_RT5033=m
CONFIG_CHARGER_RT9455=m
CONFIG_CHARGER_CROS_USBPD=m
CONFIG_CHARGER_BD99954=m
CONFIG_CHARGER_WILCO=m
+CONFIG_BATTERY_SURFACE=m
+CONFIG_CHARGER_SURFACE=m
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -4786,7 +4824,6 @@ CONFIG_SENSORS_AXI_FAN_CONTROL=m
CONFIG_SENSORS_K8TEMP=m
CONFIG_SENSORS_K10TEMP=m
CONFIG_SENSORS_FAM15H_POWER=m
-CONFIG_SENSORS_AMD_ENERGY=m
CONFIG_SENSORS_APPLESMC=m
CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_ASPEED=m
@@ -4877,15 +4914,19 @@ CONFIG_SENSORS_NCT6775=m
CONFIG_SENSORS_NCT7802=m
CONFIG_SENSORS_NCT7904=m
CONFIG_SENSORS_NPCM7XX=m
+CONFIG_SENSORS_NZXT_KRAKEN2=m
CONFIG_SENSORS_PCF8591=m
CONFIG_PMBUS=m
CONFIG_SENSORS_PMBUS=m
CONFIG_SENSORS_ADM1266=m
CONFIG_SENSORS_ADM1275=m
CONFIG_SENSORS_BEL_PFE=m
+CONFIG_SENSORS_BPA_RS600=m
+CONFIG_SENSORS_FSP_3Y=m
CONFIG_SENSORS_IBM_CFFPS=m
CONFIG_SENSORS_INSPUR_IPSPS=m
CONFIG_SENSORS_IR35221=m
+CONFIG_SENSORS_IR36021=m
CONFIG_SENSORS_IR38064=m
CONFIG_SENSORS_IRPS5401=m
CONFIG_SENSORS_ISL68137=m
@@ -4893,6 +4934,7 @@ CONFIG_SENSORS_LM25066=m
CONFIG_SENSORS_LTC2978=m
# CONFIG_SENSORS_LTC2978_REGULATOR is not set
CONFIG_SENSORS_LTC3815=m
+CONFIG_SENSORS_MAX15301=m
CONFIG_SENSORS_MAX16064=m
CONFIG_SENSORS_MAX16601=m
CONFIG_SENSORS_MAX20730=m
@@ -4904,6 +4946,7 @@ CONFIG_SENSORS_MP2975=m
CONFIG_SENSORS_PM6764TR=m
CONFIG_SENSORS_PXE1610=m
CONFIG_SENSORS_Q54SJ108A2=m
+CONFIG_SENSORS_STPDDC60=m
CONFIG_SENSORS_TPS40422=m
CONFIG_SENSORS_TPS53679=m
CONFIG_SENSORS_UCD9000=m
@@ -5006,6 +5049,7 @@ CONFIG_PROC_THERMAL_MMIO_RAPL=m
CONFIG_INTEL_BXT_PMIC_THERMAL=m
CONFIG_INTEL_PCH_THERMAL=m
+CONFIG_INTEL_TCC_COOLING=m
# end of Intel thermal drivers
CONFIG_GENERIC_ADC_THERMAL=m
@@ -5204,9 +5248,6 @@ CONFIG_MFD_SI476X_CORE=m
CONFIG_MFD_SM501=m
CONFIG_MFD_SM501_GPIO=y
CONFIG_MFD_SKY81452=m
-CONFIG_ABX500_CORE=y
-CONFIG_AB3100_CORE=y
-CONFIG_AB3100_OTP=y
CONFIG_MFD_SYSCON=y
CONFIG_MFD_TI_AM335X_TSCADC=m
CONFIG_MFD_LP3943=m
@@ -5249,6 +5290,8 @@ CONFIG_MFD_WM8350=y
CONFIG_MFD_WM8350_I2C=y
CONFIG_MFD_WM8994=m
CONFIG_MFD_WCD934X=m
+CONFIG_MFD_ATC260X=m
+CONFIG_MFD_ATC260X_I2C=m
CONFIG_RAVE_SP_CORE=m
CONFIG_MFD_INTEL_M10_BMC=m
# end of Multifunction device drivers
@@ -5267,6 +5310,7 @@ CONFIG_REGULATOR_AAT2870=m
CONFIG_REGULATOR_ARIZONA_LDO1=m
CONFIG_REGULATOR_ARIZONA_MICSUPP=m
CONFIG_REGULATOR_AS3711=m
+CONFIG_REGULATOR_ATC260X=m
CONFIG_REGULATOR_AXP20X=m
CONFIG_REGULATOR_BCM590XX=m
CONFIG_REGULATOR_BD9571MWV=m
@@ -6162,7 +6206,6 @@ CONFIG_DRM_AMDGPU=m
CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_CIK=y
CONFIG_DRM_AMDGPU_USERPTR=y
-# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
#
# ACP (Audio CoProcessor) Configuration
@@ -6177,6 +6220,7 @@ CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_DCN=y
CONFIG_DRM_AMD_DC_HDCP=y
CONFIG_DRM_AMD_DC_SI=y
+CONFIG_DRM_AMD_SECURE_DISPLAY=y
# end of Display Engine Configuration
CONFIG_HSA_AMD=y
@@ -6195,6 +6239,7 @@ CONFIG_DRM_I915_COMPRESS_ERROR=y
CONFIG_DRM_I915_USERPTR=y
CONFIG_DRM_I915_GVT=y
CONFIG_DRM_I915_GVT_KVMGT=m
+CONFIG_DRM_I915_REQUEST_TIMEOUT=20000
CONFIG_DRM_I915_FENCE_TIMEOUT=10000
CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
@@ -6207,7 +6252,6 @@ CONFIG_DRM_VKMS=m
CONFIG_DRM_VMWGFX=m
CONFIG_DRM_VMWGFX_FBCON=y
CONFIG_DRM_GMA500=m
-CONFIG_DRM_GMA600=y
CONFIG_DRM_UDL=m
CONFIG_DRM_AST=m
CONFIG_DRM_MGAG200=m
@@ -6246,6 +6290,7 @@ CONFIG_TINYDRM_ST7735R=m
CONFIG_DRM_XEN=y
CONFIG_DRM_XEN_FRONTEND=m
CONFIG_DRM_VBOXVIDEO=m
+CONFIG_DRM_GUD=m
# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
@@ -6420,6 +6465,7 @@ CONFIG_SND_DEBUG=y
# CONFIG_SND_JACK_INJECTION_DEBUG is not set
CONFIG_SND_VMASTER=y
CONFIG_SND_DMA_SGBUF=y
+CONFIG_SND_CTL_LED=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_SEQUENCER_OSS=m
@@ -6625,6 +6671,7 @@ CONFIG_SND_DESIGNWARE_PCM=y
# CONFIG_SND_SOC_FSL_ESAI is not set
# CONFIG_SND_SOC_FSL_MICFIL is not set
CONFIG_SND_SOC_FSL_XCVR=m
+CONFIG_SND_SOC_FSL_RPMSG=m
# CONFIG_SND_SOC_IMX_AUDMUX is not set
# end of SoC Audio for Freescale CPUs
@@ -6854,8 +6901,10 @@ CONFIG_SND_SOC_RT286=m
CONFIG_SND_SOC_RT298=m
CONFIG_SND_SOC_RT1011=m
CONFIG_SND_SOC_RT1015=m
+CONFIG_SND_SOC_RT1015P=m
CONFIG_SND_SOC_RT1308=m
CONFIG_SND_SOC_RT1308_SDW=m
+CONFIG_SND_SOC_RT1316_SDW=m
CONFIG_SND_SOC_RT5514=m
CONFIG_SND_SOC_RT5514_SPI=m
CONFIG_SND_SOC_RT5616=m
@@ -6876,8 +6925,10 @@ CONFIG_SND_SOC_RT700=m
CONFIG_SND_SOC_RT700_SDW=m
CONFIG_SND_SOC_RT711=m
CONFIG_SND_SOC_RT711_SDW=m
+CONFIG_SND_SOC_RT711_SDCA_SDW=m
CONFIG_SND_SOC_RT715=m
CONFIG_SND_SOC_RT715_SDW=m
+CONFIG_SND_SOC_RT715_SDCA_SDW=m
CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SI476X=m
CONFIG_SND_SOC_SIGMADSP=m
@@ -6912,6 +6963,8 @@ CONFIG_SND_SOC_TLV320AIC32X4=m
CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_TLV320AIC3X_I2C=m
+CONFIG_SND_SOC_TLV320AIC3X_SPI=m
CONFIG_SND_SOC_TLV320ADCX140=m
CONFIG_SND_SOC_TS3A227E=m
CONFIG_SND_SOC_TSCS42XX=m
@@ -6970,6 +7023,7 @@ CONFIG_SND_X86=y
CONFIG_HDMI_LPE_AUDIO=m
CONFIG_SND_SYNTH_EMUX=m
CONFIG_SND_XEN_FRONTEND=m
+CONFIG_SND_VIRTIO=m
CONFIG_AC97_BUS=m
#
@@ -7012,6 +7066,7 @@ CONFIG_HID_ELAN=m
CONFIG_HID_ELECOM=m
CONFIG_HID_ELO=m
CONFIG_HID_EZKEY=m
+CONFIG_HID_FT260=m
CONFIG_HID_GEMBIRD=m
CONFIG_HID_GFRM=m
CONFIG_HID_GLORIOUS=m
@@ -7069,6 +7124,7 @@ CONFIG_HID_RETRODE=m
CONFIG_HID_ROCCAT=m
CONFIG_HID_SAITEK=m
CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SEMITEK=m
CONFIG_HID_SONY=m
CONFIG_SONY_FF=y
CONFIG_HID_SPEEDLINK=m
@@ -7128,6 +7184,15 @@ CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
#
CONFIG_AMD_SFH_HID=m
# end of AMD SFH HID Support
+
+#
+# Surface System Aggregator Module HID support
+#
+CONFIG_SURFACE_HID=m
+CONFIG_SURFACE_KBD=m
+# end of Surface System Aggregator Module HID support
+
+CONFIG_SURFACE_HID_CORE=m
# end of HID support
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
@@ -7516,8 +7581,8 @@ CONFIG_TYPEC_WCOVE=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_CCG=m
CONFIG_UCSI_ACPI=m
-CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_TPS6598X=m
+CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_STUSB160X=m
#
@@ -7678,11 +7743,6 @@ CONFIG_LEDS_TRIGGER_NETDEV=m
CONFIG_LEDS_TRIGGER_PATTERN=m
CONFIG_LEDS_TRIGGER_AUDIO=m
CONFIG_LEDS_TRIGGER_TTY=m
-
-#
-# LED Blink
-#
-CONFIG_LEDS_BLINK=y
CONFIG_ACCESSIBILITY=y
CONFIG_A11Y_BRAILLE_CONSOLE=y
@@ -7906,6 +7966,7 @@ CONFIG_RTC_DRV_MT6397=m
# HID Sensor RTC drivers
#
CONFIG_RTC_DRV_HID_SENSOR_TIME=m
+CONFIG_RTC_DRV_GOLDFISH=m
CONFIG_RTC_DRV_WILCO_EC=m
CONFIG_DMADEVICES=y
# CONFIG_DMADEVICES_DEBUG is not set
@@ -7920,6 +7981,7 @@ CONFIG_ALTERA_MSGDMA=m
CONFIG_INTEL_IDMA64=m
CONFIG_INTEL_IDXD=m
CONFIG_INTEL_IDXD_SVM=y
+CONFIG_INTEL_IDXD_PERFMON=y
CONFIG_INTEL_IOATDMA=m
CONFIG_PLX_DMA=m
CONFIG_QCOM_HIDMA_MGMT=m
@@ -7985,6 +8047,7 @@ CONFIG_UIO_NETX=m
CONFIG_UIO_PRUSS=m
CONFIG_UIO_MF624=m
CONFIG_UIO_HV_GENERIC=m
+CONFIG_UIO_DFL=m
CONFIG_VFIO_IOMMU_TYPE1=m
CONFIG_VFIO_VIRQFD=m
CONFIG_VFIO=m
@@ -8002,6 +8065,7 @@ CONFIG_VBOXGUEST=m
CONFIG_NITRO_ENCLAVES=m
CONFIG_ACRN_HSM=m
CONFIG_VIRTIO=y
+CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS=y
CONFIG_VIRTIO_PCI_LIB=m
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=m
@@ -8017,9 +8081,11 @@ CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
CONFIG_VDPA=m
CONFIG_VDPA_SIM=m
CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
CONFIG_IFCVF=m
CONFIG_MLX5_VDPA=y
CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
CONFIG_VHOST_IOTLB=m
CONFIG_VHOST_RING=m
CONFIG_VHOST=m
@@ -8075,9 +8141,9 @@ CONFIG_XEN_UNPOPULATED_ALLOC=y
# end of Xen driver support
# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
CONFIG_STAGING=y
CONFIG_PRISM2_USB=m
-# CONFIG_COMEDI is not set
CONFIG_RTL8192U=m
CONFIG_RTLLIB=m
CONFIG_RTLLIB_CRYPTO_CCMP=m
@@ -8121,7 +8187,6 @@ CONFIG_ADT7316_I2C=m
#
# Capacitance to digital converters
#
-CONFIG_AD7150=m
CONFIG_AD7746=m
# end of Capacitance to digital converters
@@ -8186,19 +8251,10 @@ CONFIG_UNISYS_VISORHBA=m
# CONFIG_FB_TFT is not set
CONFIG_MOST_COMPONENTS=m
CONFIG_MOST_NET=m
-CONFIG_MOST_SOUND=m
CONFIG_MOST_VIDEO=m
CONFIG_MOST_I2C=m
CONFIG_KS7010=m
CONFIG_PI433=m
-
-#
-# Gasket devices
-#
-CONFIG_STAGING_GASKET_FRAMEWORK=m
-CONFIG_STAGING_APEX_DRIVER=m
-# end of Gasket devices
-
CONFIG_FIELDBUS_DEV=m
CONFIG_KPC2000=y
CONFIG_KPC2000_CORE=m
@@ -8206,11 +8262,6 @@ CONFIG_KPC2000_SPI=m
CONFIG_KPC2000_I2C=m
CONFIG_KPC2000_DMA=m
CONFIG_QLGE=m
-CONFIG_WIMAX=m
-CONFIG_WIMAX_DEBUG_LEVEL=8
-CONFIG_WIMAX_I2400M=m
-CONFIG_WIMAX_I2400M_USB=m
-CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
CONFIG_WFX=m
CONFIG_X86_PLATFORM_DEVICES=y
CONFIG_ACPI_WMI=m
@@ -8221,10 +8272,12 @@ CONFIG_INTEL_WMI_THUNDERBOLT=m
CONFIG_MXM_WMI=m
CONFIG_PEAQ_WMI=m
CONFIG_XIAOMI_WMI=m
+CONFIG_GIGABYTE_WMI=m
CONFIG_ACERHDF=m
CONFIG_ACER_WIRELESS=m
CONFIG_ACER_WMI=m
CONFIG_AMD_PMC=m
+CONFIG_ADV_SWBUTTON=m
CONFIG_APPLE_GMUX=m
CONFIG_ASUS_LAPTOP=m
CONFIG_ASUS_WIRELESS=m
@@ -8350,8 +8403,11 @@ CONFIG_SURFACE_3_BUTTON=m
CONFIG_SURFACE_3_POWER_OPREGION=m
CONFIG_SURFACE_ACPI_NOTIFY=m
CONFIG_SURFACE_AGGREGATOR_CDEV=m
+CONFIG_SURFACE_AGGREGATOR_REGISTRY=m
+CONFIG_SURFACE_DTX=m
CONFIG_SURFACE_GPE=m
CONFIG_SURFACE_HOTPLUG=m
+CONFIG_SURFACE_PLATFORM_PROFILE=m
CONFIG_SURFACE_PRO3_BUTTON=m
CONFIG_SURFACE_AGGREGATOR=m
CONFIG_SURFACE_AGGREGATOR_BUS=y
@@ -8502,7 +8558,6 @@ CONFIG_EXTCON=y
# Extcon Device Drivers
#
CONFIG_EXTCON_ADC_JACK=m
-CONFIG_EXTCON_ARIZONA=m
CONFIG_EXTCON_AXP288=m
CONFIG_EXTCON_FSA9480=m
CONFIG_EXTCON_GPIO=m
@@ -8553,6 +8608,8 @@ CONFIG_BMA400_SPI=m
CONFIG_BMC150_ACCEL=m
CONFIG_BMC150_ACCEL_I2C=m
CONFIG_BMC150_ACCEL_SPI=m
+CONFIG_BMI088_ACCEL=m
+CONFIG_BMI088_ACCEL_SPI=m
CONFIG_DA280=m
CONFIG_DA311=m
CONFIG_DMARD09=m
@@ -8643,6 +8700,7 @@ CONFIG_TI_ADC128S052=m
CONFIG_TI_ADC161S626=m
CONFIG_TI_ADS1015=m
CONFIG_TI_ADS7950=m
+CONFIG_TI_ADS131E08=m
CONFIG_TI_AM335X_ADC=m
CONFIG_TI_TLC4541=m
CONFIG_TWL4030_MADC=m
@@ -8664,6 +8722,12 @@ CONFIG_HMC425=m
# end of Amplifiers
#
+# Capacitance to digital converters
+#
+CONFIG_AD7150=m
+# end of Capacitance to digital converters
+
+#
# Chemical Sensors
#
CONFIG_ATLAS_PH_SENSOR=m
@@ -8696,6 +8760,11 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m
CONFIG_IIO_MS_SENSORS_I2C=m
#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
# SSP Sensor Common
#
CONFIG_IIO_SSP_SENSORS_COMMONS=m
@@ -9020,6 +9089,7 @@ CONFIG_AS3935=m
#
# Proximity and distance sensors
#
+CONFIG_CROS_EC_MKBP_PROXIMITY=m
CONFIG_ISL29501=m
CONFIG_LIDAR_LITE_V2=m
CONFIG_MB1232=m
@@ -9227,9 +9297,11 @@ CONFIG_SLIMBUS=m
CONFIG_SLIM_QCOM_CTRL=m
CONFIG_INTERCONNECT=y
CONFIG_COUNTER=m
+CONFIG_INTERRUPT_CNT=m
CONFIG_MOST=m
CONFIG_MOST_USB_HDM=m
CONFIG_MOST_CDEV=m
+CONFIG_MOST_SND=m
# end of Device Drivers
#
@@ -9340,6 +9412,8 @@ CONFIG_OVERLAY_FS_METACOPY=y
#
# Caches
#
+CONFIG_NETFS_SUPPORT=m
+CONFIG_NETFS_STATS=y
CONFIG_FSCACHE=m
CONFIG_FSCACHE_STATS=y
# CONFIG_FSCACHE_HISTOGRAM is not set
@@ -9494,7 +9568,6 @@ CONFIG_EROFS_FS_XATTR=y
CONFIG_EROFS_FS_POSIX_ACL=y
CONFIG_EROFS_FS_SECURITY=y
CONFIG_EROFS_FS_ZIP=y
-CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=2
CONFIG_VBOXSF_FS=m
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
@@ -9533,7 +9606,7 @@ CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
-CONFIG_NFS_V4_2_SSC_HELPER=m
+CONFIG_NFS_V4_2_SSC_HELPER=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_BACKCHANNEL=y
@@ -9684,6 +9757,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
+CONFIG_SECURITY_LANDLOCK=y
# CONFIG_INTEGRITY is not set
# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
# CONFIG_DEFAULT_SECURITY_SELINUX is not set
@@ -9691,7 +9765,7 @@ CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
# CONFIG_DEFAULT_SECURITY_APPARMOR is not set
CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_LSM="lockdown,yama"
+CONFIG_LSM="lockdown,landlock,yama"
#
# Kernel hardening options
@@ -9761,6 +9835,7 @@ CONFIG_CRYPTO_RSA=y
CONFIG_CRYPTO_DH=y
CONFIG_CRYPTO_ECC=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -10019,7 +10094,7 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
CONFIG_LZ4_COMPRESS=y
-CONFIG_LZ4HC_COMPRESS=y
+CONFIG_LZ4HC_COMPRESS=m
CONFIG_LZ4_DECOMPRESS=y
CONFIG_ZSTD_COMPRESS=y
CONFIG_ZSTD_DECOMPRESS=y
@@ -10121,6 +10196,7 @@ CONFIG_OBJAGG=m
# end of Library routines
CONFIG_PLDMFW=y
+CONFIG_ASN1_ENCODER=m
#
# Kernel hacking
@@ -10434,6 +10510,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
# CONFIG_TEST_LIST_SORT is not set
# CONFIG_TEST_MIN_HEAP is not set
# CONFIG_TEST_SORT is not set
+# CONFIG_TEST_DIV64 is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_RBTREE_TEST is not set
@@ -10474,6 +10551,7 @@ CONFIG_ASYNC_RAID6_TEST=m
# CONFIG_TEST_HMM is not set
# CONFIG_TEST_FREE_PAGES is not set
# CONFIG_TEST_FPU is not set
+CONFIG_ARCH_USE_MEMTEST=y
# CONFIG_MEMTEST is not set
# CONFIG_HYPERV_TESTING is not set
# end of Kernel Testing and Coverage