Diffstat (limited to 'vmmon.patch')
 -rw-r--r--  vmmon.patch | 94 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/vmmon.patch b/vmmon.patch
index bc76665b5773..e83d629b55ed 100644
--- a/vmmon.patch
+++ b/vmmon.patch
@@ -1879,3 +1879,97 @@
break;
}
+From 4c2a103fd2d71f2084f1fe7ceacb816b9832ffa2 Mon Sep 17 00:00:00 2001
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Sun, 22 Oct 2023 23:24:05 +0200
+Subject: [PATCH] vmmon: use get_user_pages to get page PFN
+
+As a side effect of mainline commit 0d940a9b270b ("mm/pgtable: allow
+pte_offset_map[_lock]() to fail") in 6.5-rc1, __pte_offset_map(), called by
+pte_offset_map(), is no longer exported. VMware developers decided to hack
+around this by replacing pte_offset_map() with pte_offset_kernel(), which
+does not seem to be a good idea and apparently may trigger warning checks
+in RCU code on some systems, as mentioned in the discussion on issue #223.
+Therefore, let's use the same solution as for 17.0.2 and older versions,
+as it does not show these problems.
+
+Based on an upstream IRC discussion and the hva_to_pfn_*() family of
+functions in KVM code, what PgtblVa2MPNLocked() does seems to be an
+incomplete, partially open-coded version of get_user_pages(); as it is
+only used to get a PFN from a virtual address, it can easily be
+implemented using the get_user_pages() family.
+
+Without knowing what exactly the PFNs are used for in VMware, it is hard
+to guess the right flags, but these seem to work and have been tested by
+multiple users over the last few weeks.
+
+We could likely use get_user_pages() on older kernels as well, and it
+might actually be cleaner and more reliable, as the existing open-coded
+implementation does not seem to handle some corner cases; but without
+knowledge of VMware internals, it is safer to stick to the existing code
+where possible.
+---
+ vmmon-only/include/pgtbl.h | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/vmmon-only/include/pgtbl.h b/vmmon-only/include/pgtbl.h
+index 3f43c62..7eaa49a 100644
+--- a/vmmon-only/include/pgtbl.h
++++ b/vmmon-only/include/pgtbl.h
+@@ -25,6 +25,7 @@
+ #include "compat_pgtable.h"
+ #include "compat_spinlock.h"
+ #include "compat_page.h"
++#include "compat_version.h"
+
+
+ /*
+@@ -45,6 +46,7 @@
+ *-----------------------------------------------------------------------------
+ */
+
++#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) // only used by PgtblVa2MPN() below
+ static INLINE MPN64
+ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ VA addr) // IN: Address in the virtual address
+@@ -110,6 +112,7 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ }
+ return mpn;
+ }
++#endif
+
+
+ /*
+@@ -129,6 +132,8 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
+ *-----------------------------------------------------------------------------
+ */
+
++#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0)
++
+ static INLINE MPN64
+ PgtblVa2MPN(VA addr) // IN
+ {
+@@ -143,4 +148,24 @@ PgtblVa2MPN(VA addr) // IN
+ return mpn;
+ }
+
++#else /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
++
++static INLINE MPN64
++PgtblVa2MPN(VA addr) // IN
++{
++ struct page *page;
++ int npages;
++ MPN mpn;
++
++ npages = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
++ if (npages != 1)
++ return INVALID_MPN;
++ mpn = page_to_pfn(page);
++ put_page(page);
++
++ return mpn;
++}
++
++#endif /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
++
+ #endif /* __PGTBL_H__ */
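
For reference, the new PgtblVa2MPN() added by this patch boils down to
three steps: fault the page in with get_user_pages_unlocked(), read its
PFN with page_to_pfn(), and drop the reference with put_page(). A minimal
stand-alone sketch of the same pattern, stripped of the vmmon wrappers,
might look like the code below; va_to_pfn() is a hypothetical name, and
the LINUX_VERSION_CODE guard is only an assumption about what vmmon's
COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) macro expands to.

#include <linux/mm.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
/*
 * Resolve a user-space virtual address to its page frame number;
 * returns (unsigned long)-1 on failure (vmmon returns INVALID_MPN).
 */
static unsigned long va_to_pfn(unsigned long addr)
{
   struct page *page;
   long npages;
   unsigned long pfn;

   /*
    * Fault the page in and take a reference; FOLL_HWPOISON makes GUP
    * report hardware-poisoned pages as an error instead of mapping them.
    */
   npages = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
   if (npages != 1)
      return (unsigned long)-1;

   pfn = page_to_pfn(page);
   put_page(page);   /* drop the reference taken by get_user_pages */

   return pfn;
}
#endif

Note that, as in the patch itself, the page reference is dropped before
the PFN is returned, so nothing pins the page afterwards; this matches
the behaviour of the old open-coded lookup, which is presumably why the
commit message hedges about flags and corner cases.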