path: root/vmmon.patch
--- a/vmmon/Makefile
+++ b/vmmon/Makefile
@@ -43,7 +43,11 @@ INCLUDE      += -I$(SRCROOT)/shared
 endif
 
 
+ifdef KVERSION
+VM_UNAME = $(KVERSION)
+else
 VM_UNAME = $(shell uname -r)
+endif
 
 # Header directory for the running kernel
 ifdef LINUXINCLUDE
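
With this change the target kernel can be overridden from the make command
line: if KVERSION is set (for example make KVERSION=<target-kernel-release>;
the exact invocation used by the package build is an assumption here),
VM_UNAME takes that value, otherwise it falls back to the running kernel
reported by uname -r. Overriding VM_UNAME points the build at that kernel's
header directory, which is what makes it possible to build the module for a
kernel other than the one currently booted.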
From 4c2a103fd2d71f2084f1fe7ceacb816b9832ffa2 Mon Sep 17 00:00:00 2001
From: Michal Kubecek <mkubecek@suse.cz>
Date: Sun, 22 Oct 2023 23:24:05 +0200
Subject: [PATCH] vmmon: use get_user_pages to get page PFN

As a side effect of mainline commit 0d940a9b270b ("mm/pgtable: allow
pte_offset_map[_lock]() to fail") in 6.5-rc1, __pte_offset_map(), called by
pte_offset_map(), is no longer exported. VMware developers decided to hack
around this by replacing pte_offset_map() with pte_offset_kernel(), which
does not seem to be a good idea and apparently may trigger warnings in RCU
code on some systems, as mentioned in the discussion on issue #223.
Therefore let's use the same solution as we had for 17.0.2 and older
versions, as it does not show these problems.

Based on an upstream IRC discussion and the hva_to_pfn_*() family of
functions in KVM code, what PgtblVa2MPNLocked() does appears to be an
incomplete, partially open-coded version of get_user_pages(), and since it
is only used to get a PFN from a virtual address, it can easily be
implemented using the get_user_pages() family.

Without knowing what exactly the PFNs are used for in VMware, it is hard to
guess the right flags; these seem to work and have been tested by multiple
users over the last few weeks.

We could likely use get_user_pages() on older kernels as well, and it might
actually be cleaner and more reliable, as the existing open-coded
implementation does not seem to handle some corner cases. But without
knowledge of VMware internals, it is safer to stick to the existing code
where possible.
---
 vmmon-only/include/pgtbl.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/vmmon-only/include/pgtbl.h b/vmmon-only/include/pgtbl.h
index 3f43c62..7eaa49a 100644
--- a/vmmon-only/include/pgtbl.h
+++ b/vmmon-only/include/pgtbl.h
@@ -25,6 +25,7 @@
 #include "compat_pgtable.h"
 #include "compat_spinlock.h"
 #include "compat_page.h"
+#include "compat_version.h"
 
 
 /*
@@ -45,6 +46,7 @@
  *-----------------------------------------------------------------------------
  */
 
+#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) // only used by PgtblVa2MPN() below
 static INLINE MPN
 PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
                   VA addr)              // IN: Address in the virtual address
@@ -106,6 +108,7 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
    }
    return mpn;
 }
+#endif
 
 
 /*
@@ -125,6 +128,8 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
  *-----------------------------------------------------------------------------
  */
 
+#if COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0)
+
 static INLINE MPN
 PgtblVa2MPN(VA addr)  // IN
 {
@@ -139,4 +144,24 @@ PgtblVa2MPN(VA addr)  // IN
    return mpn;
 }
 
+#else /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
+
+static INLINE MPN
+PgtblVa2MPN(VA addr)  // IN
+{
+   struct page *page;
+   int npages;
+   MPN mpn;
+
+   npages = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
+   if (npages != 1)
+	   return INVALID_MPN;
+   mpn = page_to_pfn(page);
+   put_page(page);
+
+   return mpn;
+}
+
+#endif /* COMPAT_LINUX_VERSION_CHECK_LT(6, 5, 0) */
+
 #endif /* __PGTBL_H__ */
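
For context, a minimal caller sketch (not part of the patch; the function
ExampleVa2Mpn(), its error convention and the direct include of pgtbl.h are
assumptions made purely for illustration): it shows how the new
PgtblVa2MPN() is meant to be used on 6.5+ kernels, resolving a user-space
virtual address of the current task to a machine page number and treating
INVALID_MPN as the only failure signal.

/*
 * Hypothetical caller, not part of the patch: resolves a user-space VA of
 * the current process to an MPN via PgtblVa2MPN() and maps the
 * INVALID_MPN sentinel to an ordinary kernel error code.
 */
#include <linux/errno.h>
#include "pgtbl.h"

static int
ExampleVa2Mpn(VA uvAddr,   // IN: user VA in the current process
              MPN *outMpn) // OUT: resulting machine page number
{
   MPN mpn = PgtblVa2MPN(uvAddr);

   if (mpn == INVALID_MPN) {
      return -EFAULT;      /* address not mapped or page not resolvable */
   }
   *outMpn = mpn;          /* the page itself is not kept pinned */
   return 0;
}

Note that the reference taken by get_user_pages_unlocked() is dropped again
with put_page() before PgtblVa2MPN() returns, so the caller receives a PFN
snapshot rather than a pinned page.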