path: root/vmmon.patch
author    Jean-Marc Lenoir  2023-11-09 19:00:09 +0100
committer Jean-Marc Lenoir  2023-11-09 19:00:09 +0100
commit    865444ab4a771a7af14ae4016c92ce8aea0e22bd (patch)
tree      aba701417658c3cb0871fba638a13cdfd89db2ce /vmmon.patch
parent    e87f3d1578f7c0cdef7ad60a5a475d5494ba5e4f (diff)
download  aur-865444ab4a771a7af14ae4016c92ce8aea0e22bd.tar.gz
Attempt to fix a bug with kernel 6.6.1
Diffstat (limited to 'vmmon.patch')
-rw-r--r--  vmmon.patch | 94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/vmmon.patch b/vmmon.patch
index f2e32d6d0270..546792b8feeb 100644
--- a/vmmon.patch
+++ b/vmmon.patch
@@ -12,3 +12,97 @@
# Header directory for the running kernel
ifdef LINUXINCLUDE
+From 4c2a103fd2d71f2084f1fe7ceacb816b9832ffa2 Mon Sep 17 00:00:00 2001
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Sun, 22 Oct 2023 23:24:05 +0200
+Subject: [PATCH] vmmon: use get_user_pages to get page PFN
+
+As a side effect of mainline commit 0d940a9b270b ("mm/pgtable: allow
+pte_offset_map[_lock]() to fail") in 6.5-rc1, __pte_offset_map(), called by
+pte_offset_map(), is no longer exported. VMware developers decided to hack
+around this by replacing pte_offset_map() with pte_offset_kernel(), which
+does not seem to be a good idea and apparently can trigger WARN checks in
+RCU code on some systems, as mentioned in the discussion on issue #223.
+Therefore let's use the same solution as we had for 17.0.2 and older
+versions, as it does not show these problems.
+
+Based on an upstream IRC discussion and the hva_to_pfn_*() family of
+functions in KVM code, what PgtblVa2MPNLocked() does appears to be an
+incomplete, open-coded version of the logic in get_user_pages(), and as
+it is only used to get a PFN from a virtual address, it can easily be
+implemented with the get_user_pages() family.
+
+Without knowing what exactly the PFNs are used for in VMware, it is hard
+to guess the right flags; these seem to work and have been tested by
+multiple users over the last few weeks.
+
+We could likely use get_user_pages() on older kernels as well, and it might
+actually be cleaner and more reliable, as the existing open-coded version
+does not seem to handle some corner cases; but without knowledge of VMware
+internals, it is safer to stick to the existing code where possible.
+---
+ vmmon-only/include/pgtbl.h | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/vmmon-only/include/pgtbl.h b/vmmon-only/include/pgtbl.h
+index 3f43c62..7eaa49a 100644
+--- a/vmmon-only/include/pgtbl.h
++++ b/vmmon-only/include/pgtbl.h
+@@ -25,6 +25,7 @@
+ #include "compat_pgtable.h"
+ #include "compat_spinlock.h"
+ #include "compat_page.h"
++#include "compat_version.h"
+
+
+ /*
+@@ -45,6 +46,7 @@
+ *-----------------------------------------------------------------------------
+ */
+
++#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) // only used by PgtblVa2MPN() below
+ static INLINE MPN
+ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ VA addr) // IN: Address in the virtual address
+@@ -106,6 +108,7 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ }
+ return mpn;
+ }
++#endif
+
+
+ /*
+@@ -125,6 +128,8 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ *-----------------------------------------------------------------------------
+ */
+
++#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0)
++
+ static INLINE MPN
+ PgtblVa2MPN(VA addr) // IN
+ {
+@@ -139,4 +144,24 @@ PgtblVa2MPN(VA addr) // IN
+ return mpn;
+ }
+
++#else /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
++
++static INLINE MPN
++PgtblVa2MPN(VA addr) // IN
++{
++ struct page *page;
++ int npages;
++ MPN mpn;
++
++ npages = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
++ if (npages != 1)
++ return INVALID_MPN;
++ mpn = page_to_pfn(page);
++ put_page(page);
++
++ return mpn;
++}
++
++#endif /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
++
+ #endif /* __PGTBL_H__ */
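
Note on the version gate: COMPAT_LINUX_VERSION_CHECK_LT() comes from vmmon's
compat_version.h, which the patch starts including. Its real definition is not
part of this diff; as a rough assumption, a minimal sketch of such a macro
would be the usual LINUX_VERSION_CODE comparison:

/*
 * Hypothetical sketch only -- the actual macro lives in
 * vmmon-only/include/compat_version.h and is not shown in this diff.
 */
#include <linux/version.h>

#define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) \
        (LINUX_VERSION_CODE < KERNEL_VERSION((a), (b), (c)))

With a gate like this, the old open-coded page-table walk compiles only on
kernels before 6.5, and the get_user_pages()-based replacement takes over on
6.5 and later.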
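
For readability outside diff form, the new code path boils down to the sketch
below. It assumes a kernel >= 5.9, where get_user_pages_unlocked() takes
(start, nr_pages, pages, gup_flags); va_to_pfn and INVALID_PFN are
illustrative stand-ins for vmmon's PgtblVa2MPN() and INVALID_MPN:

#include <linux/mm.h>

#define INVALID_PFN (~0UL)   /* stand-in for vmmon's INVALID_MPN */

static unsigned long va_to_pfn(unsigned long addr)
{
        struct page *page;
        long npages;
        unsigned long pfn;

        /*
         * Fault in and pin the single page backing addr.  FOLL_HWPOISON
         * matches the flag used in the patch: fail cleanly instead of
         * touching a hardware-poisoned page.
         */
        npages = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
        if (npages != 1)
                return INVALID_PFN;

        pfn = page_to_pfn(page);   /* frame number of the pinned page */
        put_page(page);            /* drop the reference GUP took */

        return pfn;
}

Compared with the old PgtblVa2MPNLocked() walk, this delegates locking,
huge-page handling, and fault-in to the core mm code instead of
re-implementing them.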