author    Ben Hutchings <ben@decadent.org.uk>    2019-01-02 15:46:56 +0000
committer Ben Hutchings <ben@decadent.org.uk>    2019-01-02 16:50:31 +0000
commit    a007b2a9e853246576aa3a85a7956885c262cbe3 (patch)
tree      ae428f6f64be37eabfe9702b0758c34cebcdc6dc /queue-3.16
parent    ac0ee4c80a709b42433303f31ad4f76455e27e21 (diff)
download  linux-stable-queue-a007b2a9e853246576aa3a85a7956885c262cbe3.tar.gz
Fix THP regression following the L1TF fix
Wenkuan Wang <Wenkuan.Wang@windriver.com> writes:

> For the 3.16.y stable tree: when a page is set to PAGE_NONE, its PFN is
> inverted, so when it is referenced via pmd_page() the PFN must be
> inverted again, under control of protnone_mask(pfn).
>
> https://github.com/linux-test-project/ltp/blob/master/testcases/kernel/mem/thp/thp03.c
>
> The LTP test case thp03 triggers a kernel oops like the one below, and
> it is reproducible every time. [...]
>
> After applying these patches:
>
> thp03    1  TPASS  :  system didn't crash, pass.
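The key to the fix is that the L1TF mitigation stores an inverted PFN in PROT_NONE entries, and XORing with the same all-ones mask is its own inverse. Below is a minimal userspace sketch of the round-trip; the constants and protnone_mask() are simplified stand-ins for the kernel helpers, not the real definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define PTE_PFN_MASK   0x000ffffffffff000ULL  /* PFN field: bits 12..51 */
#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_PROTNONE (1ULL << 8)            /* _PAGE_GLOBAL reused */

/* All-ones when the entry is PROT_NONE (not present), else zero. */
static uint64_t protnone_mask(uint64_t val)
{
	return (val & (_PAGE_PRESENT | _PAGE_PROTNONE)) == _PAGE_PROTNONE
	       ? ~0ULL : 0;
}

/* Mirrors the pmd_pfn() shape used in the patches below. */
static uint64_t pmd_pfn(uint64_t pmdval)
{
	uint64_t pfn = pmdval;

	pfn ^= protnone_mask(pfn);	/* undo the inversion, if any */
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pfn = 0x12345;
	/* Store side: a PROT_NONE entry carries the PFN inverted. */
	uint64_t pmd = (~(pfn << PAGE_SHIFT) & PTE_PFN_MASK) | _PAGE_PROTNONE;

	assert(pmd_pfn(pmd) == pfn);	/* XOR twice is a no-op */
	printf("recovered pfn: 0x%llx\n", (unsigned long long)pmd_pfn(pmd));
	return 0;
}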
Diffstat (limited to 'queue-3.16')
-rw-r--r--  queue-3.16/series                                                            5
-rw-r--r--  queue-3.16/x86-asm-add-pud-pmd-mask-interfaces-to-handle-large-pat-bit.patch  90
-rw-r--r--  queue-3.16/x86-asm-fix-pud-pmd-interfaces-to-handle-large-pat-bit.patch      106
-rw-r--r--  queue-3.16/x86-asm-move-pud_page-macros-to-page_types.h.patch                 56
-rw-r--r--  queue-3.16/x86-mm-fix-regression-with-huge-pages-on-pae.patch                194
-rw-r--r--  queue-3.16/x86-mm-simplify-pd_page-macros.patch                               92
6 files changed, 543 insertions, 0 deletions
diff --git a/queue-3.16/series b/queue-3.16/series
new file mode 100644
index 00000000..e1b4f8bf
--- /dev/null
+++ b/queue-3.16/series
@@ -0,0 +1,5 @@
+x86-asm-add-pud-pmd-mask-interfaces-to-handle-large-pat-bit.patch
+x86-asm-move-pud_page-macros-to-page_types.h.patch
+x86-asm-fix-pud-pmd-interfaces-to-handle-large-pat-bit.patch
+x86-mm-simplify-pd_page-macros.patch
+x86-mm-fix-regression-with-huge-pages-on-pae.patch
diff --git a/queue-3.16/x86-asm-add-pud-pmd-mask-interfaces-to-handle-large-pat-bit.patch b/queue-3.16/x86-asm-add-pud-pmd-mask-interfaces-to-handle-large-pat-bit.patch
new file mode 100644
index 00000000..bc723c96
--- /dev/null
+++ b/queue-3.16/x86-asm-add-pud-pmd-mask-interfaces-to-handle-large-pat-bit.patch
@@ -0,0 +1,90 @@
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Thu, 17 Sep 2015 12:24:16 -0600
+Subject: x86/asm: Add pud/pmd mask interfaces to handle large PAT bit
+
+commit 4be4c1fb9a754b100466ebaec50f825be0b2050b upstream.
+
+The PAT bit gets relocated to bit 12 when PUD and PMD mappings are
+used. This bit 12, however, is not covered by PTE_FLAGS_MASK, which
+is used for masking pfn and flags for all levels.
+
+Add pud/pmd mask interfaces to handle pfn and flags properly by using
+P?D_PAGE_MASK when PUD/PMD mappings are used, i.e. PSE bit is set.
+
+Suggested-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Konrad Wilk <konrad.wilk@oracle.com>
+Cc: Robert Elliot <elliott@hpe.com>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/1442514264-12475-4-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Wenkuan Wang <Wenkuan.Wang@windriver.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/include/asm/pgtable_types.h | 36 ++++++++++++++++++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -257,10 +257,10 @@
+
+ #include <linux/types.h>
+
+-/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
++/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
+ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
+
+-/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
++/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
+ #define PTE_FLAGS_MASK (~PTE_PFN_MASK)
+
+ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
+@@ -329,11 +329,43 @@ static inline pmdval_t native_pmd_val(pm
+ }
+ #endif
+
++static inline pudval_t pud_pfn_mask(pud_t pud)
++{
++ if (native_pud_val(pud) & _PAGE_PSE)
++ return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
++ else
++ return PTE_PFN_MASK;
++}
++
++static inline pudval_t pud_flags_mask(pud_t pud)
++{
++ if (native_pud_val(pud) & _PAGE_PSE)
++ return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
++ else
++ return ~PTE_PFN_MASK;
++}
++
+ static inline pudval_t pud_flags(pud_t pud)
+ {
+ return native_pud_val(pud) & PTE_FLAGS_MASK;
+ }
+
++static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
++{
++ if (native_pmd_val(pmd) & _PAGE_PSE)
++ return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
++ else
++ return PTE_PFN_MASK;
++}
++
++static inline pmdval_t pmd_flags_mask(pmd_t pmd)
++{
++ if (native_pmd_val(pmd) & _PAGE_PSE)
++ return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
++ else
++ return ~PTE_PFN_MASK;
++}
++
+ static inline pmdval_t pmd_flags(pmd_t pmd)
+ {
+ return native_pmd_val(pmd) & PTE_FLAGS_MASK;
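To see why the PSE-dependent mask matters: for a 2MB mapping the PFN field starts at bit 21, so bit 12 holds the relocated PAT bit, yet PTE_PFN_MASK treats it as part of the PFN. A rough userspace model of the helpers above, assuming 2MB pages (PMD_SHIFT = 21) and simplified constants:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21                     /* 2MB mappings */
#define _PAGE_PSE       (1ULL << 7)            /* huge-page bit */
#define _PAGE_PAT_LARGE (1ULL << 12)           /* PAT, relocated for PSE */
#define PTE_PFN_MASK    0x000ffffffffff000ULL  /* PFN field: bits 12..51 */
#define PMD_PAGE_MASK   (~((1ULL << PMD_SHIFT) - 1))

/* Same shape as the helper added above: pick the mask by PSE. */
static uint64_t pmd_pfn_mask(uint64_t pmd)
{
	if (pmd & _PAGE_PSE)
		return PMD_PAGE_MASK & PTE_PFN_MASK;  /* PFN starts at bit 21 */
	return PTE_PFN_MASK;                          /* 4KB: starts at bit 12 */
}

int main(void)
{
	/* A 2MB entry at phys 0x40000000 with the large PAT bit set. */
	uint64_t pmd = 0x40000000ULL | _PAGE_PSE | _PAGE_PAT_LARGE;

	/* PTE_PFN_MASK leaks the PAT bit (bit 12) into the PFN... */
	printf("naive pfn: 0x%llx\n",
	       (unsigned long long)((pmd & PTE_PFN_MASK) >> PAGE_SHIFT));
	/* ...the PSE-aware mask drops it: 0x40000, not 0x40001. */
	printf("fixed pfn: 0x%llx\n",
	       (unsigned long long)((pmd & pmd_pfn_mask(pmd)) >> PAGE_SHIFT));
	return 0;
}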
diff --git a/queue-3.16/x86-asm-fix-pud-pmd-interfaces-to-handle-large-pat-bit.patch b/queue-3.16/x86-asm-fix-pud-pmd-interfaces-to-handle-large-pat-bit.patch
new file mode 100644
index 00000000..47d4a777
--- /dev/null
+++ b/queue-3.16/x86-asm-fix-pud-pmd-interfaces-to-handle-large-pat-bit.patch
@@ -0,0 +1,106 @@
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Thu, 17 Sep 2015 12:24:17 -0600
+Subject: x86/asm: Fix pud/pmd interfaces to handle large PAT bit
+
+commit f70abb0fc3da1b2945c92751ccda2744081bf2b7 upstream.
+
+Now that we have pud/pmd mask interfaces, which handle pfn & flags
+mask properly for the large PAT bit.
+
+Fix pud/pmd pfn & flags interfaces by replacing PTE_PFN_MASK and
+PTE_FLAGS_MASK with the pud/pmd mask interfaces.
+
+Suggested-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Konrad Wilk <konrad.wilk@oracle.com>
+Cc: Robert Elliot <elliott@hpe.com>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/1442514264-12475-5-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Wenkuan Wang <Wenkuan.Wang@windriver.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/include/asm/pgtable.h | 14 ++++++++------
+ arch/x86/include/asm/pgtable_types.h | 4 ++--
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -156,14 +156,14 @@ static inline unsigned long pmd_pfn(pmd_
+ {
+ phys_addr_t pfn = pmd_val(pmd);
+ pfn ^= protnone_mask(pfn);
+- return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
++ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pud_pfn(pud_t pud)
+ {
+ phys_addr_t pfn = pud_val(pud);
+ pfn ^= protnone_mask(pfn);
+- return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
++ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+@@ -584,14 +584,15 @@ static inline int pmd_none(pmd_t pmd)
+
+ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ {
+- return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
++ return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
+ }
+
+ /*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
++#define pmd_page(pmd) \
++ pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
+
+ /*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+@@ -657,14 +658,15 @@ static inline int pud_present(pud_t pud)
+
+ static inline unsigned long pud_page_vaddr(pud_t pud)
+ {
+- return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
++ return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
+ }
+
+ /*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
++#define pud_page(pud) \
++ pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
+
+ /* Find an entry in the second-level page table.. */
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -347,7 +347,7 @@ static inline pudval_t pud_flags_mask(pu
+
+ static inline pudval_t pud_flags(pud_t pud)
+ {
+- return native_pud_val(pud) & PTE_FLAGS_MASK;
++ return native_pud_val(pud) & pud_flags_mask(pud);
+ }
+
+ static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
+@@ -368,7 +368,7 @@ static inline pmdval_t pmd_flags_mask(pm
+
+ static inline pmdval_t pmd_flags(pmd_t pmd)
+ {
+- return native_pmd_val(pmd) & PTE_FLAGS_MASK;
++ return native_pmd_val(pmd) & pmd_flags_mask(pmd);
+ }
+
+ static inline pte_t native_make_pte(pteval_t val)
diff --git a/queue-3.16/x86-asm-move-pud_page-macros-to-page_types.h.patch b/queue-3.16/x86-asm-move-pud_page-macros-to-page_types.h.patch
new file mode 100644
index 00000000..0f269b5b
--- /dev/null
+++ b/queue-3.16/x86-asm-move-pud_page-macros-to-page_types.h.patch
@@ -0,0 +1,56 @@
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Thu, 17 Sep 2015 12:24:15 -0600
+Subject: x86/asm: Move PUD_PAGE macros to page_types.h
+
+commit 832102671855f73962e7a04fdafd48b9385ea5c6 upstream.
+
+PUD_SHIFT is defined according to a given kernel configuration, which
+allows it be commonly used by any x86 kernels. However, PUD_PAGE_SIZE
+and PUD_PAGE_MASK, which are set from PUD_SHIFT, are defined in
+page_64_types.h, which can be used by 64-bit kernel only.
+
+Move PUD_PAGE_SIZE and PUD_PAGE_MASK to page_types.h so that they can
+be used by any x86 kernels as well.
+
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Konrad Wilk <konrad.wilk@oracle.com>
+Cc: Robert Elliot <elliott@hpe.com>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/1442514264-12475-3-git-send-email-toshi.kani@hpe.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Wenkuan Wang <Wenkuan.Wang@windriver.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/include/asm/page_64_types.h | 3 ---
+ arch/x86/include/asm/page_types.h | 3 +++
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -20,9 +20,6 @@
+ #define MCE_STACK 4
+ #define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
+
+-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+-
+ /*
+ * Set __PAGE_OFFSET to the most negative possible address +
+ * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+--- a/arch/x86/include/asm/page_types.h
++++ b/arch/x86/include/asm/page_types.h
+@@ -20,6 +20,9 @@
+ #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+ #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+
++#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
++#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
++
+ #define HPAGE_SHIFT PMD_SHIFT
+ #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+ #define HPAGE_MASK (~(HPAGE_SIZE - 1))
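For reference, with PUD_SHIFT = 30 the moved macros describe 1GB mappings. A quick sketch of what they expand to, assuming a 64-bit unsigned long (values here are illustrative, not taken from a kernel header):

#include <stdio.h>

#define PUD_SHIFT     30
#define PUD_PAGE_SIZE (1UL << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE - 1))

int main(void)
{
	printf("PUD_PAGE_SIZE = %lu MiB\n", PUD_PAGE_SIZE >> 20); /* 1024 */
	printf("PUD_PAGE_MASK = %#lx\n", PUD_PAGE_MASK);
	/* 64-bit: 0xffffffffc0000000; 32-bit: only 0xc0000000 survives */
	return 0;
}

On a 32-bit build the unsigned long mask keeps only the low word, which is exactly the truncation the PAE fix below repairs.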
diff --git a/queue-3.16/x86-mm-fix-regression-with-huge-pages-on-pae.patch b/queue-3.16/x86-mm-fix-regression-with-huge-pages-on-pae.patch
new file mode 100644
index 00000000..84f56e36
--- /dev/null
+++ b/queue-3.16/x86-mm-fix-regression-with-huge-pages-on-pae.patch
@@ -0,0 +1,194 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Mon, 30 Nov 2015 11:10:33 +0100
+Subject: x86/mm: Fix regression with huge pages on PAE
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 70f1528747651b20c7769d3516ade369f9963237 upstream.
+
+Recent PAT patchset has caused issue on 32-bit PAE machines:
+
+ page:eea45000 count:0 mapcount:-128 mapping: (null) index:0x0 flags: 0x40000000()
+ page dumped because: VM_BUG_ON_PAGE(page_mapcount(page) < 0)
+ ------------[ cut here ]------------
+ kernel BUG at /home/build/linux-boris/mm/huge_memory.c:1485!
+ invalid opcode: 0000 [#1] SMP
+ [...]
+ Call Trace:
+ unmap_single_vma
+ ? __wake_up
+ unmap_vmas
+ unmap_region
+ do_munmap
+ vm_munmap
+ SyS_munmap
+ do_fast_syscall_32
+ ? __do_page_fault
+ sysenter_past_esp
+ Code: ...
+ EIP: [<c11bde80>] zap_huge_pmd+0x240/0x260 SS:ESP 0068:f6459d98
+
+The problem is in pmd_pfn_mask() and pmd_flags_mask(). These
+helpers use PMD_PAGE_MASK to calculate resulting mask.
+PMD_PAGE_MASK is 'unsigned long', not 'unsigned long long' as
+phys_addr_t is on 32-bit PAE (ARCH_PHYS_ADDR_T_64BIT). As a
+result, the upper bits of resulting mask get truncated.
+
+pud_pfn_mask() and pud_flags_mask() aren't problematic since we
+don't have PUD page table level on 32-bit systems, but it's
+reasonable to keep them consistent with PMD counterpart.
+
+Introduce PHYSICAL_PMD_PAGE_MASK and PHYSICAL_PUD_PAGE_MASK in
+addition to existing PHYSICAL_PAGE_MASK and reworks helpers to
+use them.
+
+Reported-and-Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+[ Fix -Woverflow warnings from the realmode code. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Toshi Kani <toshi.kani@hpe.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jürgen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: elliott@hpe.com
+Cc: konrad.wilk@oracle.com
+Cc: linux-mm <linux-mm@kvack.org>
+Fixes: f70abb0fc3da ("x86/asm: Fix pud/pmd interfaces to handle large PAT bit")
+Link: http://lkml.kernel.org/r/1448878233-11390-2-git-send-email-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/boot/boot.h | 1 -
+ arch/x86/boot/video-mode.c | 2 ++
+ arch/x86/boot/video.c | 2 ++
+ arch/x86/include/asm/page_types.h | 16 +++++++++-------
+ arch/x86/include/asm/pgtable_types.h | 14 ++++----------
+ arch/x86/include/asm/x86_init.h | 1 -
+ 6 files changed, 17 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -23,7 +23,6 @@
+ #include <stdarg.h>
+ #include <linux/types.h>
+ #include <linux/edd.h>
+-#include <asm/boot.h>
+ #include <asm/setup.h>
+ #include "bitops.h"
+ #include "ctype.h"
+--- a/arch/x86/boot/video-mode.c
++++ b/arch/x86/boot/video-mode.c
+@@ -19,6 +19,8 @@
+ #include "video.h"
+ #include "vesa.h"
+
++#include <uapi/asm/boot.h>
++
+ /*
+ * Common variables
+ */
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -13,6 +13,8 @@
+ * Select video mode
+ */
+
++#include <uapi/asm/boot.h>
++
+ #include "boot.h"
+ #include "video.h"
+ #include "vesa.h"
+--- a/arch/x86/include/asm/page_types.h
++++ b/arch/x86/include/asm/page_types.h
+@@ -9,19 +9,21 @@
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
++#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
++
++#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
++#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
++
+ #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+ #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
++/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
+ virtual addresses are 32-bits but physical addresses are larger
+ (ie, 32-bit PAE). */
+ #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+-
+-#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+-#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+-
+-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
++#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
++#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
+
+ #define HPAGE_SHIFT PMD_SHIFT
+ #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -332,17 +332,14 @@ static inline pmdval_t native_pmd_val(pm
+ static inline pudval_t pud_pfn_mask(pud_t pud)
+ {
+ if (native_pud_val(pud) & _PAGE_PSE)
+- return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
++ return PHYSICAL_PUD_PAGE_MASK;
+ else
+ return PTE_PFN_MASK;
+ }
+
+ static inline pudval_t pud_flags_mask(pud_t pud)
+ {
+- if (native_pud_val(pud) & _PAGE_PSE)
+- return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
+- else
+- return ~PTE_PFN_MASK;
++ return ~pud_pfn_mask(pud);
+ }
+
+ static inline pudval_t pud_flags(pud_t pud)
+@@ -353,17 +350,14 @@ static inline pudval_t pud_flags(pud_t p
+ static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
+ {
+ if (native_pmd_val(pmd) & _PAGE_PSE)
+- return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
++ return PHYSICAL_PMD_PAGE_MASK;
+ else
+ return PTE_PFN_MASK;
+ }
+
+ static inline pmdval_t pmd_flags_mask(pmd_t pmd)
+ {
+- if (native_pmd_val(pmd) & _PAGE_PSE)
+- return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
+- else
+- return ~PTE_PFN_MASK;
++ return ~pmd_pfn_mask(pmd);
+ }
+
+ static inline pmdval_t pmd_flags(pmd_t pmd)
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -1,7 +1,6 @@
+ #ifndef _ASM_X86_PLATFORM_H
+ #define _ASM_X86_PLATFORM_H
+
+-#include <asm/pgtable_types.h>
+ #include <asm/bootparam.h>
+
+ struct mpc_bus;
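The bug and the fix come down to zero- versus sign-extension when a 32-bit mask meets a 64-bit phys_addr_t. A userspace demonstration, where uint32_t stands in for the 32-bit 'unsigned long' PMD_PAGE_MASK and uint64_t for phys_addr_t under PAE (the __PHYSICAL_MASK value is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_SHIFT       21
#define __PHYSICAL_MASK ((1ULL << 52) - 1)	/* illustrative 52-bit phys */

int main(void)
{
	/* 32-bit 'unsigned long' PMD_PAGE_MASK, as on PAE: 0xffe00000 */
	uint32_t pmd_page_mask = ~((UINT32_C(1) << PMD_SHIFT) - 1);

	/* Broken: zero-extension drops physical-address bits 32..51. */
	uint64_t broken = (uint64_t)pmd_page_mask & __PHYSICAL_MASK;

	/* Fixed: sign-extend first, as PHYSICAL_PMD_PAGE_MASK now does. */
	uint64_t fixed = (uint64_t)(int64_t)(int32_t)pmd_page_mask
			 & __PHYSICAL_MASK;

	printf("broken: 0x%016" PRIx64 "\n", broken); /* 0x00000000ffe00000 */
	printf("fixed:  0x%016" PRIx64 "\n", fixed);  /* 0x000fffffffe00000 */
	return 0;
}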
diff --git a/queue-3.16/x86-mm-simplify-pd_page-macros.patch b/queue-3.16/x86-mm-simplify-pd_page-macros.patch
new file mode 100644
index 00000000..39c90d2e
--- /dev/null
+++ b/queue-3.16/x86-mm-simplify-pd_page-macros.patch
@@ -0,0 +1,92 @@
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 17 Jul 2017 16:10:06 -0500
+Subject: x86/mm: Simplify p[g4um]d_page() macros
+
+commit fd7e315988b784509ba3f1b42f539bd0b1fca9bb upstream.
+
+Create a pgd_pfn() macro similar to the p[4um]d_pfn() macros and then
+use the p[g4um]d_pfn() macros in the p[g4um]d_page() macros instead of
+duplicating the code.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Larry Woodman <lwoodman@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Toshimitsu Kani <toshi.kani@hpe.com>
+Cc: kasan-dev@googlegroups.com
+Cc: kvm@vger.kernel.org
+Cc: linux-arch@vger.kernel.org
+Cc: linux-doc@vger.kernel.org
+Cc: linux-efi@vger.kernel.org
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/e61eb533a6d0aac941db2723d8aa63ef6b882dee.1500319216.git.thomas.lendacky@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[Backported to 4.9 stable by AK, suggested by Michael Hocko]
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Wenkuan Wang <Wenkuan.Wang@windriver.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/include/asm/pgtable.h | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -166,6 +166,11 @@ static inline unsigned long pud_pfn(pud_
+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pgd_pfn(pgd_t pgd)
++{
++ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+@@ -591,8 +596,7 @@ static inline unsigned long pmd_page_vad
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pmd_page(pmd) \
+- pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
++#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+
+ /*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+@@ -665,8 +669,7 @@ static inline unsigned long pud_page_vad
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pud_page(pud) \
+- pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
++#define pud_page(pud) pfn_to_page(pud_pfn(pud))
+
+ /* Find an entry in the second-level page table.. */
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+@@ -706,7 +709,7 @@ static inline unsigned long pgd_page_vad
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
++#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
+
+ /* to find an entry in a page-table-directory. */
+ static inline unsigned long pud_index(unsigned long address)
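The pattern of this last cleanup, in miniature: let one pfn accessor own the mask-and-shift logic so every page lookup inherits future mask fixes automatically. The types and the pfn_to_page() stand-in below are simplified, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTE_PFN_MASK 0x000ffffffffff000ULL

/* Stand-in for the real pfn_to_page(); just hands the pfn back. */
#define pfn_to_page(pfn) (pfn)

/* One accessor owns the mask-and-shift logic... */
static uint64_t pgd_pfn(uint64_t pgd)
{
	return (pgd & PTE_PFN_MASK) >> PAGE_SHIFT;
}

/* ...and the page lookup builds on it instead of open-coding it,
 * so any future mask fix lands in exactly one place. */
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

int main(void)
{
	uint64_t pgd = 0x40000ULL | 0x67;	/* pfn 0x40, some flag bits */

	printf("pfn: 0x%llx\n", (unsigned long long)pgd_page(pgd));
	return 0;
}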