From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf <sultan@kerneltoast.com>
Date: Sat, 24 Oct 2020 22:17:49 -0700
Subject: [PATCH] ZEN: INTERACTIVE: mm: Disable proactive compaction by default
On-demand compaction works fine assuming that you don't have a need to
spam the page allocator nonstop for large order page allocations.
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
---
init/Kconfig | 1 +
mm/compaction.c | 4 ++++
2 files changed, 5 insertions(+)
diff --git a/init/Kconfig b/init/Kconfig
index b7c7bdbd96c6d417f7ed5f8aee558044a121e351..ecfe83ad567b94696de585082cdea4168fef2db4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -126,6 +126,7 @@ config ZEN_INTERACTIVE
Background-reclaim hugepages...: no -> yes
MG-LRU minimum cache TTL.......: 0 -> 1000 ms
Compact Unevictable............: 1 -> 0
+ Proactive Compaction...........: 20 -> 0
--- CFS CPU Scheduler ----------------------------------
diff --git a/mm/compaction.c b/mm/compaction.c
index 877b029a87e76aaad1c36669395dd3f359e14f03..8690af0e9e33f4056a6994496ed37aad570210c7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2739,7 +2739,11 @@ static void compact_nodes(void)
* aggressively the kernel should compact memory in the
* background. It takes values in the range [0, 100].
*/
+#ifdef CONFIG_ZEN_INTERACTIVE
+unsigned int __read_mostly sysctl_compaction_proactiveness;
+#else
unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
+#endif
int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)