diff options
author | Björn Bidar | 2022-03-25 02:51:40 +0200 |
---|---|---|
committer | Björn Bidar | 2022-06-11 14:12:36 +0300 |
commit | 034adcf2fd3311bba3f58b8575b0be699ab3bd70 (patch) | |
tree | a8934d717407ce664ecf241b2d06d7543d7cce72 /0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch | |
parent | 05a0260e8dc51ce338d4ba7d1a9ffcd6b73d04b2 (diff) | |
download | aur-034adcf2fd3311bba3f58b8575b0be699ab3bd70.tar.gz |
Update to 5.18.0.pf1
- New upstream release based on 5.18.0
- Update kernel configs from arch and arch32
- Disable projectc patch-set for now in favor of ZEN interactive
patches
- Add more sub-arches
- Remove Silvermont and Ivybridge from optimized builds from
build_pkgs, add:
- generic-v2
- generic-v3
- generic-v4
- rocketlake
- alderlake
Diffstat (limited to '0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch')
-rw-r--r-- | 0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch | 13 |
1 file changed, 5 insertions, 8 deletions
diff --git a/0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch b/0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch index d16660ec6b50..3ca52904c37f 100644 --- a/0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch +++ b/0019-ZEN-mm-Don-t-hog-the-CPU-and-zone-lock-in-rmqueue_bu.patch @@ -21,10 +21,10 @@ Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com> 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index d3ab6e198718..4d930d095ca9 100644 +index 7e43b957aa2dbbdc20586848d51dd03e785a269b..fc402712a004482f5061bad17b27217abd4987aa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -3018,15 +3018,16 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, +@@ -3021,15 +3021,16 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, } /* @@ -45,7 +45,7 @@ index d3ab6e198718..4d930d095ca9 100644 /* * local_lock_irq held so equivalent to spin_lock_irqsave for -@@ -3039,6 +3040,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, +@@ -3042,6 +3043,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, if (unlikely(page == NULL)) break; @@ -61,10 +61,10 @@ index d3ab6e198718..4d930d095ca9 100644 + spin_lock(&zone->lock); + } + - if (unlikely(check_pcp_refill(page))) + if (unlikely(check_pcp_refill(page, order))) continue; -@@ -3065,7 +3078,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, +@@ -3068,7 +3081,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * on i. Do not confuse with 'allocated' which is the number of * pages added to the pcp list. */ @@ -73,6 +73,3 @@ index d3ab6e198718..4d930d095ca9 100644 spin_unlock(&zone->lock); return allocated; } --- -2.35.1 - |