author     Ben Hutchings <ben@decadent.org.uk>  2018-09-27 23:24:46 +0100
committer  Ben Hutchings <ben@decadent.org.uk>  2018-09-27 23:24:46 +0100
commit     84e721a6cc8601a459500305fc8fe4c01905696d (patch)
tree       f2c4ce0ef499da0b2c450772f05dfe9783f8a452
parent     62a30ecac7e3a0d0b8b78914586ec858c210b58c (diff)
download   linux-stable-queue-84e721a6cc8601a459500305fc8fe4c01905696d.tar.gz
Remove support for non-linear mappings, in preparation for L1TF mitigation
The upstream mitigation for L1TF only considers linear mappings. Backport the rewrite of remap_file_pages() and removal of support for non-linear mappings from 4.0.
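Background for the queue: before the 4.0 rewrite, remap_file_pages(2) installed "non-linear" file PTEs (marked with _PAGE_FILE / pte_file()) so a single VMA could map file pages out of order; the emulation backported here instead satisfies the call with ordinary linear mappings, after which every per-arch _PAGE_FILE bit and helper can be dropped. The following user-space sketch is only an illustration of why that emulation is possible — it is not part of this queue, and the file name and offsets are made up for the example.

```c
/*
 * Illustrative user-space sketch (not kernel code): what remap_file_pages(2)
 * did inside one non-linear VMA can be expressed with plain linear mappings,
 * which is roughly what the 4.0 emulation does on the caller's behalf.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/example.dat", O_RDWR);	/* hypothetical file */
	if (fd < 0)
		return 1;

	/* Reserve 4 pages of address space backed by the file, linearly. */
	char *base = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return 1;

	/*
	 * Old kernels: remap_file_pages(base + page, page, 0, 3, 0) rewired
	 * the second virtual page to file page 3 within the same VMA, using
	 * pte_file()/_PAGE_FILE to remember the per-page offset.
	 *
	 * Linear-only equivalent: map that range again with MAP_FIXED and the
	 * wanted offset; the kernel simply ends up with another linear VMA.
	 */
	if (mmap(base + page, page, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 3 * page) == MAP_FAILED)
		return 1;

	printf("second page now shows file offset %ld\n", 3 * page);
	munmap(base, 4 * page);
	close(fd);
	return 0;
}
```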
-rw-r--r--  queue-3.16/alpha-drop-_page_file-and-pte_file-related-helpers.patch | 50
-rw-r--r--  queue-3.16/arc-drop-_page_file-and-pte_file-related-helpers.patch | 61
-rw-r--r--  queue-3.16/arm-drop-l_pte_file-and-pte_file-related-helpers.patch | 106
-rw-r--r--  queue-3.16/arm64-drop-pte_file-and-pte_file-related-helpers.patch | 69
-rw-r--r--  queue-3.16/asm-generic-drop-unused-pte_file-helpers.patch | 41
-rw-r--r--  queue-3.16/avr32-drop-_page_file-and-pte_file-related-helpers.patch | 81
-rw-r--r--  queue-3.16/blackfin-drop-pte_file.patch | 32
-rw-r--r--  queue-3.16/c6x-drop-pte_file.patch | 33
-rw-r--r--  queue-3.16/cris-drop-_page_file-and-pte_file-related-helpers.patch | 75
-rw-r--r--  queue-3.16/frv-drop-_page_file-and-pte_file-related-helpers.patch | 75
-rw-r--r--  queue-3.16/hexagon-drop-_page_file-and-pte_file-related-helpers.patch | 110
-rw-r--r--  queue-3.16/ia64-drop-_page_file-and-pte_file-related-helpers.patch | 74
-rw-r--r--  queue-3.16/m32r-drop-_page_file-and-pte_file-related-helpers.patch | 64
-rw-r--r--  queue-3.16/m68k-drop-_page_file-and-pte_file-related-helpers.patch | 159
-rw-r--r--  queue-3.16/metag-drop-_page_file-and-pte_file-related-helpers.patch | 47
-rw-r--r--  queue-3.16/microblaze-drop-_page_file-and-pte_file-related-helpers.patch | 59
-rw-r--r--  queue-3.16/mips-drop-_page_file-and-pte_file-related-helpers.patch | 156
-rw-r--r--  queue-3.16/mm-drop-support-of-non-linear-mapping-from-fault-codepath.patch | 163
-rw-r--r--  queue-3.16/mm-drop-support-of-non-linear-mapping-from-unmap-zap-codepath.patch | 232
-rw-r--r--  queue-3.16/mm-drop-vm_ops-remap_pages-and-generic_file_remap_pages-stub.patch | 231
-rw-r--r--  queue-3.16/mm-fix-regression-in-remap_file_pages-emulation.patch | 124
-rw-r--r--  queue-3.16/mm-remove-rest-usage-of-vm_nonlinear-and-pte_file.patch | 291
-rw-r--r--  queue-3.16/mm-replace-remap_file_pages-syscall-with-emulation.patch | 537
-rw-r--r--  queue-3.16/mm-replace-vma-sharead.linear-with-vma-shared.patch | 93
-rw-r--r--  queue-3.16/mn10300-drop-_page_file-and-pte_file-related-helpers.patch | 64
-rw-r--r--  queue-3.16/openrisc-drop-_page_file-and-pte_file-related-helpers.patch | 64
-rw-r--r--  queue-3.16/parisc-drop-_page_file-and-pte_file-related-helpers.patch | 59
-rw-r--r--  queue-3.16/powerpc-drop-_page_file-and-pte_file-related-helpers.patch | 193
-rw-r--r--  queue-3.16/proc-drop-handling-non-linear-mappings.patch | 83
-rw-r--r--  queue-3.16/rmap-drop-support-of-non-linear-mappings.patch | 517
-rw-r--r--  queue-3.16/s390-drop-pte_file-related-helpers.patch | 78
-rw-r--r--  queue-3.16/score-drop-_page_file-and-pte_file-related-helpers.patch | 73
-rw-r--r--  queue-3.16/series | 39
-rw-r--r--  queue-3.16/sh-drop-_page_file-and-pte_file-related-helpers.patch | 149
-rw-r--r--  queue-3.16/sparc-drop-pte_file-related-helpers.patch | 171
-rw-r--r--  queue-3.16/tile-drop-pte_file-related-helpers.patch | 52
-rw-r--r--  queue-3.16/um-drop-_page_file-and-pte_file-related-helpers.patch | 90
-rw-r--r--  queue-3.16/unicore32-drop-pte_file-related-helpers.patch | 52
-rw-r--r--  queue-3.16/x86-drop-_page_file-and-pte_file-related-helpers.patch | 171
-rw-r--r--  queue-3.16/xtensa-drop-_page_file-and-pte_file-related-helpers.patch | 65
40 files changed, 4883 insertions(+), 0 deletions(-)
diff --git a/queue-3.16/alpha-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/alpha-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..beffbc65
--- /dev/null
+++ b/queue-3.16/alpha-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,50 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:09 -0800
+Subject: alpha: drop _PAGE_FILE and pte_file()-related helpers
+
+commit b816157a5366550c5ee29a6431ba1abb88721266 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Richard Henderson <rth@twiddle.net>
+Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+Cc: Matt Turner <mattst88@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/alpha/include/asm/pgtable.h | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -73,7 +73,6 @@ struct vm_area_struct;
+ /* .. and these are ours ... */
+ #define _PAGE_DIRTY 0x20000
+ #define _PAGE_ACCESSED 0x40000
+-#define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */
+
+ /*
+ * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
+@@ -268,7 +267,6 @@ extern inline void pgd_clear(pgd_t * pgd
+ extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
+ extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ extern inline int pte_special(pte_t pte) { return 0; }
+
+ extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
+@@ -345,11 +343,6 @@ extern inline pte_t mk_swap_pte(unsigned
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 32)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
+-
+-#define PTE_FILE_MAX_BITS 32
+-
+ #ifndef CONFIG_DISCONTIGMEM
+ #define kern_addr_valid(addr) (1)
+ #endif
diff --git a/queue-3.16/arc-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/arc-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..84177d01
--- /dev/null
+++ b/queue-3.16/arc-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,61 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:12 -0800
+Subject: arc: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 18747151308f9e0fb63766057957617ec4afa190 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/arc/include/asm/pgtable.h | 13 +------------
+ 1 file changed, 1 insertion(+), 12 deletions(-)
+
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -61,7 +61,6 @@
+ #define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+ #define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+ #define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */
+-#define _PAGE_FILE (1<<7) /* page cache/ swap (S) */
+ #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+ #define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */
+
+@@ -73,7 +72,6 @@
+ #define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+ #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */
+ #define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */
+-#define _PAGE_FILE (1<<6) /* page cache/ swap (S) */
+ #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+ #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+ #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr
+@@ -269,15 +267,6 @@ static inline void pmd_set(pmd_t *pmdp,
+ pte; \
+ })
+
+-/* TBD: Non linear mapping stuff */
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & _PAGE_FILE;
+-}
+-
+-#define PTE_FILE_MAX_BITS 30
+-#define pgoff_to_pte(x) __pte(x)
+-#define pte_to_pgoff(x) (pte_val(x) >> 2)
+ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+ #define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+@@ -365,7 +354,7 @@ void update_mmu_cache(struct vm_area_str
+
+ /* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+- * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
++ * PAGE_PRESENT is zero in a PTE holding swap "identifier"
+ */
+ #define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
diff --git a/queue-3.16/arm-drop-l_pte_file-and-pte_file-related-helpers.patch b/queue-3.16/arm-drop-l_pte_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..3dcee65a
--- /dev/null
+++ b/queue-3.16/arm-drop-l_pte_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,106 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:17 -0800
+Subject: arm: drop L_PTE_FILE and pte_file()-related helpers
+
+commit b007ea798f5c568d3f464d37288220ef570f062c upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+This patch also adjust __SWP_TYPE_SHIFT, effectively increase size of
+possible swap file to 128G.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Russell King <linux@arm.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/arm/include/asm/pgtable-2level.h | 1 -
+ arch/arm/include/asm/pgtable-3level.h | 1 -
+ arch/arm/include/asm/pgtable-nommu.h | 2 --
+ arch/arm/include/asm/pgtable.h | 20 +++-----------------
+ arch/arm/mm/proc-macros.S | 2 +-
+ 5 files changed, 4 insertions(+), 22 deletions(-)
+
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -118,7 +118,6 @@
+ #define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
+ #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
+-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
+ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
+ #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
+ #define L_PTE_USER (_AT(pteval_t, 1) << 8)
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -77,7 +77,6 @@
+ */
+ #define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
+ #define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
+-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
+ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
+--- a/arch/arm/include/asm/pgtable-nommu.h
++++ b/arch/arm/include/asm/pgtable-nommu.h
+@@ -54,8 +54,6 @@
+
+ typedef pte_t *pte_addr_t;
+
+-static inline int pte_file(pte_t pte) { return 0; }
+-
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -279,12 +279,12 @@ static inline pte_t pte_modify(pte_t pte
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <--------------- offset ----------------------> < type -> 0 0 0
++ * <--------------- offset ------------------------> < type -> 0 0
+ *
+- * This gives us up to 31 swap files and 64GB per swap file. Note that
++ * This gives us up to 31 swap files and 128GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+-#define __SWP_TYPE_SHIFT 3
++#define __SWP_TYPE_SHIFT 2
+ #define __SWP_TYPE_BITS 5
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+@@ -303,20 +303,6 @@ static inline pte_t pte_modify(pte_t pte
+ */
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+-/*
+- * Encode and decode a file entry. File entries are stored in the Linux
+- * page tables as follows:
+- *
+- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <----------------------- offset ------------------------> 1 0 0
+- */
+-#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
+-#define pte_to_pgoff(x) (pte_val(x) >> 3)
+-#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)
+-
+-#define PTE_FILE_MAX_BITS 29
+-
+ /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+ /* FIXME: this is not correct */
+ #define kern_addr_valid(addr) (1)
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -98,7 +98,7 @@
+ #endif
+ #if !defined (CONFIG_ARM_LPAE) && \
+ (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
+- L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
++ L_PTE_PRESENT) > L_PTE_SHARED
+ #error Invalid Linux PTE bit settings
+ #endif
+ #endif /* CONFIG_MMU */
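Side note on the swap-entry changes made by several of the arch patches above and below (not part of the queue itself): once the PTE bit reserved for L_PTE_FILE/_PAGE_FILE is freed, the swap type/offset encoding can shift down by one bit, which is why the ARM patch grows the per-swap-file limit from 64GB to 128GB. The stand-alone C sketch below only illustrates that packing arithmetic; the macro names mirror the kernel's __swp_* helpers, but the values are just the 32-bit ARM example and the program is not kernel code.

```c
/*
 * Stand-alone illustration of the swap-entry packing touched by these
 * patches; mirrors the 32-bit ARM layout (5-bit type) but is not kernel code.
 * Before: offset started at bit 3+5 (bit 2 held L_PTE_FILE).
 * After:  offset starts at bit 2+5, so one extra offset bit -> 2x max offset.
 */
#include <stdio.h>

#define SWP_TYPE_BITS	 5
#define SWP_TYPE_SHIFT	 2			/* was 3 while L_PTE_FILE existed */
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)

static unsigned long swp_entry(unsigned type, unsigned long offset)
{
	return ((unsigned long)type << SWP_TYPE_SHIFT) |
	       (offset << SWP_OFFSET_SHIFT);
}

static unsigned swp_type(unsigned long e)
{
	return (e >> SWP_TYPE_SHIFT) & ((1 << SWP_TYPE_BITS) - 1);
}

static unsigned long swp_offset(unsigned long e)
{
	return e >> SWP_OFFSET_SHIFT;
}

int main(void)
{
	unsigned long e = swp_entry(3, 0x12345);

	/* Round-trips type 3 / offset 0x12345; the low two PTE bits stay
	 * zero, so the entry can never look like a present PTE. */
	printf("type=%u offset=%#lx present-bits=%lu\n",
	       swp_type(e), swp_offset(e), e & 3);

	/* With a 32-bit PTE there are 32 - SWP_OFFSET_SHIFT = 25 offset bits:
	 * 2^25 pages = 128GB at 4K pages (24 bits / 64GB at the old shift). */
	printf("max offset pages = %lu\n",
	       (1UL << (32 - SWP_OFFSET_SHIFT)) - 1);
	return 0;
}
```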
diff --git a/queue-3.16/arm64-drop-pte_file-and-pte_file-related-helpers.patch b/queue-3.16/arm64-drop-pte_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..445513bf
--- /dev/null
+++ b/queue-3.16/arm64-drop-pte_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,69 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:15 -0800
+Subject: arm64: drop PTE_FILE and pte_file()-related helpers
+
+commit 9b3e661e58b90b0c2d5c2168c23408f1e59e9e35 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+This patch also adjust __SWP_TYPE_SHIFT and increase number of bits
+availble for swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/arm64/include/asm/pgtable.h | 22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -25,7 +25,6 @@
+ * Software defined PTE bits definition.
+ */
+ #define PTE_VALID (_AT(pteval_t, 1) << 0)
+-#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+ #define PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+ #define PTE_WRITE (_AT(pteval_t, 1) << 57)
+@@ -402,13 +401,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+ /*
+ * Encode and decode a swap entry:
+ * bits 0-1: present (must be zero)
+- * bit 2: PTE_FILE
+- * bits 3-8: swap type
+- * bits 9-57: swap offset
++ * bits 2-7: swap type
++ * bits 8-57: swap offset
+ */
+-#define __SWP_TYPE_SHIFT 3
++#define __SWP_TYPE_SHIFT 2
+ #define __SWP_TYPE_BITS 6
+-#define __SWP_OFFSET_BITS 49
++#define __SWP_OFFSET_BITS 50
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+ #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
+@@ -426,18 +424,6 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+ */
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+-/*
+- * Encode and decode a file entry:
+- * bits 0-1: present (must be zero)
+- * bit 2: PTE_FILE
+- * bits 3-57: file offset / PAGE_SIZE
+- */
+-#define pte_file(pte) (pte_val(pte) & PTE_FILE)
+-#define pte_to_pgoff(x) (pte_val(x) >> 3)
+-#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+-
+-#define PTE_FILE_MAX_BITS 55
+-
+ extern int kern_addr_valid(unsigned long addr);
+
+ #include <asm-generic/pgtable.h>
diff --git a/queue-3.16/asm-generic-drop-unused-pte_file-helpers.patch b/queue-3.16/asm-generic-drop-unused-pte_file-helpers.patch
new file mode 100644
index 00000000..84f33a7f
--- /dev/null
+++ b/queue-3.16/asm-generic-drop-unused-pte_file-helpers.patch
@@ -0,0 +1,41 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:07 -0800
+Subject: asm-generic: drop unused pte_file* helpers
+
+commit 5064c8e19dc215afae8ffae95570e7f22062d49c upstream.
+
+All users are gone.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ include/asm-generic/pgtable.h | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -445,21 +445,6 @@ static inline pte_t pte_swp_clear_soft_d
+ {
+ return pte;
+ }
+-
+-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+-{
+- return pte;
+-}
+-
+-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+-{
+- return pte;
+-}
+-
+-static inline int pte_file_soft_dirty(pte_t pte)
+-{
+- return 0;
+-}
+ #endif
+
+ #ifndef __HAVE_PFNMAP_TRACKING
diff --git a/queue-3.16/avr32-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/avr32-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..fcc8dbce
--- /dev/null
+++ b/queue-3.16/avr32-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,81 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:20 -0800
+Subject: avr32: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 7a7d2db4b8b3505a3195178619ffcc80985c4be1 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
+Acked-by: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/avr32/include/asm/pgtable.h | 25 -------------------------
+ 1 file changed, 25 deletions(-)
+
+--- a/arch/avr32/include/asm/pgtable.h
++++ b/arch/avr32/include/asm/pgtable.h
+@@ -86,9 +86,6 @@ extern struct page *empty_zero_page;
+ #define _PAGE_BIT_PRESENT 10
+ #define _PAGE_BIT_ACCESSED 11 /* software: page was accessed */
+
+-/* The following flags are only valid when !PRESENT */
+-#define _PAGE_BIT_FILE 0 /* software: pagecache or swap? */
+-
+ #define _PAGE_WT (1 << _PAGE_BIT_WT)
+ #define _PAGE_DIRTY (1 << _PAGE_BIT_DIRTY)
+ #define _PAGE_EXECUTE (1 << _PAGE_BIT_EXECUTE)
+@@ -101,7 +98,6 @@ extern struct page *empty_zero_page;
+ /* Software flags */
+ #define _PAGE_ACCESSED (1 << _PAGE_BIT_ACCESSED)
+ #define _PAGE_PRESENT (1 << _PAGE_BIT_PRESENT)
+-#define _PAGE_FILE (1 << _PAGE_BIT_FILE)
+
+ /*
+ * Page types, i.e. sizes. _PAGE_TYPE_NONE corresponds to what is
+@@ -210,14 +206,6 @@ static inline int pte_special(pte_t pte)
+ return 0;
+ }
+
+-/*
+- * The following only work if pte_present() is not true.
+- */
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & _PAGE_FILE;
+-}
+-
+ /* Mutator functions for PTE bits */
+ static inline pte_t pte_wrprotect(pte_t pte)
+ {
+@@ -329,7 +317,6 @@ extern void update_mmu_cache(struct vm_a
+ * Encode and decode a swap entry
+ *
+ * Constraints:
+- * _PAGE_FILE at bit 0
+ * _PAGE_TYPE_* at bits 2-3 (for emulating _PAGE_PROTNONE)
+ * _PAGE_PRESENT at bit 10
+ *
+@@ -346,18 +333,6 @@ extern void update_mmu_cache(struct vm_a
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/*
+- * Encode and decode a nonlinear file mapping entry. We have to
+- * preserve _PAGE_FILE and _PAGE_PRESENT here. _PAGE_TYPE_* isn't
+- * necessary, since _PAGE_FILE implies !_PAGE_PROTNONE (?)
+- */
+-#define PTE_FILE_MAX_BITS 30
+-#define pte_to_pgoff(pte) (((pte_val(pte) >> 1) & 0x1ff) \
+- | ((pte_val(pte) >> 11) << 9))
+-#define pgoff_to_pte(off) ((pte_t) { ((((off) & 0x1ff) << 1) \
+- | (((off) >> 9) << 11) \
+- | _PAGE_FILE) })
+-
+ typedef pte_t *pte_addr_t;
+
+ #define kern_addr_valid(addr) (1)
diff --git a/queue-3.16/blackfin-drop-pte_file.patch b/queue-3.16/blackfin-drop-pte_file.patch
new file mode 100644
index 00000000..04b5502a
--- /dev/null
+++ b/queue-3.16/blackfin-drop-pte_file.patch
@@ -0,0 +1,32 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:23 -0800
+Subject: blackfin: drop pte_file()
+
+commit 2bc6ff14d46745a7728ed4ed90c5e0edca91f52e upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Steven Miao <realmz6@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/blackfin/include/asm/pgtable.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/blackfin/include/asm/pgtable.h
++++ b/arch/blackfin/include/asm/pgtable.h
+@@ -45,11 +45,6 @@ extern void paging_init(void);
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-static inline int pte_file(pte_t pte)
+-{
+- return 0;
+-}
+-
+ #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+ #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
diff --git a/queue-3.16/c6x-drop-pte_file.patch b/queue-3.16/c6x-drop-pte_file.patch
new file mode 100644
index 00000000..59833a0c
--- /dev/null
+++ b/queue-3.16/c6x-drop-pte_file.patch
@@ -0,0 +1,33 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:25 -0800
+Subject: c6x: drop pte_file()
+
+commit f5b45de9b00eb53d11ada85c61e4ea1c31ab8218 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Mark Salter <msalter@redhat.com>
+Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/c6x/include/asm/pgtable.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/c6x/include/asm/pgtable.h
++++ b/arch/c6x/include/asm/pgtable.h
+@@ -50,11 +50,6 @@ extern void paging_init(void);
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-static inline int pte_file(pte_t pte)
+-{
+- return 0;
+-}
+-
+ #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+ #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
diff --git a/queue-3.16/cris-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/cris-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..a5e157b1
--- /dev/null
+++ b/queue-3.16/cris-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,75 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:28 -0800
+Subject: cris: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 103f3d9a26df944f4c29de190d72dfbf913c71af upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Mikael Starvik <starvik@axis.com>
+Cc: Jesper Nilsson <jesper.nilsson@axis.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/cris/include/arch-v10/arch/mmu.h | 3 ---
+ arch/cris/include/arch-v32/arch/mmu.h | 3 ---
+ arch/cris/include/asm/pgtable.h | 4 ----
+ 3 files changed, 10 deletions(-)
+
+--- a/arch/cris/include/arch-v10/arch/mmu.h
++++ b/arch/cris/include/arch-v10/arch/mmu.h
+@@ -58,7 +58,6 @@ typedef struct
+ /* Bits the HW doesn't care about but the kernel uses them in SW */
+
+ #define _PAGE_PRESENT (1<<4) /* page present in memory */
+-#define _PAGE_FILE (1<<5) /* set: pagecache, unset: swap (when !PRESENT) */
+ #define _PAGE_ACCESSED (1<<5) /* simulated in software using valid bit */
+ #define _PAGE_MODIFIED (1<<6) /* simulated in software using we bit */
+ #define _PAGE_READ (1<<7) /* read-enabled */
+@@ -105,6 +104,4 @@ typedef struct
+ #define __S110 PAGE_SHARED
+ #define __S111 PAGE_SHARED
+
+-#define PTE_FILE_MAX_BITS 26
+-
+ #endif
+--- a/arch/cris/include/arch-v32/arch/mmu.h
++++ b/arch/cris/include/arch-v32/arch/mmu.h
+@@ -53,7 +53,6 @@ typedef struct
+ * software.
+ */
+ #define _PAGE_PRESENT (1 << 5) /* Page is present in memory. */
+-#define _PAGE_FILE (1 << 6) /* 1=pagecache, 0=swap (when !present) */
+ #define _PAGE_ACCESSED (1 << 6) /* Simulated in software using valid bit. */
+ #define _PAGE_MODIFIED (1 << 7) /* Simulated in software using we bit. */
+ #define _PAGE_READ (1 << 8) /* Read enabled. */
+@@ -108,6 +107,4 @@ typedef struct
+ #define __S110 PAGE_SHARED_EXEC
+ #define __S111 PAGE_SHARED_EXEC
+
+-#define PTE_FILE_MAX_BITS 25
+-
+ #endif /* _ASM_CRIS_ARCH_MMU_H */
+--- a/arch/cris/include/asm/pgtable.h
++++ b/arch/cris/include/asm/pgtable.h
+@@ -114,7 +114,6 @@ extern unsigned long empty_zero_page;
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -290,9 +289,6 @@ static inline void update_mmu_cache(stru
+ */
+ #define pgtable_cache_init() do { } while (0)
+
+-#define pte_to_pgoff(x) (pte_val(x) >> 6)
+-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
+-
+ typedef pte_t *pte_addr_t;
+
+ #endif /* __ASSEMBLY__ */
diff --git a/queue-3.16/frv-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/frv-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..3ac3be43
--- /dev/null
+++ b/queue-3.16/frv-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,75 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:31 -0800
+Subject: frv: drop _PAGE_FILE and pte_file()-related helpers
+
+commit ca5bfa7b390017f053d7581bc701518b87bc3d43 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+This patch also increase number of bits availble for swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/frv/include/asm/pgtable.h | 27 +++++----------------------
+ 1 file changed, 5 insertions(+), 22 deletions(-)
+
+--- a/arch/frv/include/asm/pgtable.h
++++ b/arch/frv/include/asm/pgtable.h
+@@ -62,10 +62,6 @@ typedef pte_t *pte_addr_t;
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#ifndef __ASSEMBLY__
+-static inline int pte_file(pte_t pte) { return 0; }
+-#endif
+-
+ #define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
+
+ #define swapper_pg_dir ((pgd_t *) NULL)
+@@ -298,7 +294,6 @@ static inline pmd_t *pmd_offset(pud_t *d
+
+ #define _PAGE_RESERVED_MASK (xAMPRx_RESERVED8 | xAMPRx_RESERVED13)
+
+-#define _PAGE_FILE 0x002 /* set:pagecache unset:swap */
+ #define _PAGE_PROTNONE 0x000 /* If not present */
+
+ #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+@@ -463,27 +458,15 @@ static inline pte_t pte_modify(pte_t pte
+ * Handle swap and file entries
+ * - the PTE is encoded in the following format:
+ * bit 0: Must be 0 (!_PAGE_PRESENT)
+- * bit 1: Type: 0 for swap, 1 for file (_PAGE_FILE)
+- * bits 2-7: Swap type
+- * bits 8-31: Swap offset
+- * bits 2-31: File pgoff
+- */
+-#define __swp_type(x) (((x).val >> 2) & 0x1f)
+-#define __swp_offset(x) ((x).val >> 8)
+-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
++ * bits 1-6: Swap type
++ * bits 7-31: Swap offset
++ */
++#define __swp_type(x) (((x).val >> 1) & 0x1f)
++#define __swp_offset(x) ((x).val >> 7)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
+ #define __pte_to_swp_entry(_pte) ((swp_entry_t) { (_pte).pte })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-static inline int pte_file(pte_t pte)
+-{
+- return pte.pte & _PAGE_FILE;
+-}
+-
+-#define PTE_FILE_MAX_BITS 29
+-
+-#define pte_to_pgoff(PTE) ((PTE).pte >> 2)
+-#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
+-
+ /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+ #define PageSkip(page) (0)
+ #define kern_addr_valid(addr) (1)
diff --git a/queue-3.16/hexagon-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/hexagon-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..96f3790d
--- /dev/null
+++ b/queue-3.16/hexagon-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,110 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:33 -0800
+Subject: hexagon: drop _PAGE_FILE and pte_file()-related helpers
+
+commit d99f95e6522db22192c331c75de182023a49fbcc upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+This patch also increase number of bits availble for swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Richard Kuo <rkuo@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/hexagon/include/asm/pgtable.h | 60 ++++++++----------------------
+ 1 file changed, 16 insertions(+), 44 deletions(-)
+
+--- a/arch/hexagon/include/asm/pgtable.h
++++ b/arch/hexagon/include/asm/pgtable.h
+@@ -62,13 +62,6 @@ extern unsigned long zero_page_mask;
+ #define _PAGE_ACCESSED (1<<2)
+
+ /*
+- * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
+- * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
+- * So we can overload the bit...
+- */
+-#define _PAGE_FILE _PAGE_DIRTY /* set: pagecache, unset = swap */
+-
+-/*
+ * For now, let's say that Valid and Present are the same thing.
+ * Alternatively, we could say that it's the "or" of R, W, and X
+ * permissions.
+@@ -456,57 +449,36 @@ static inline int pte_exec(pte_t pte)
+ #define pgtable_cache_init() do { } while (0)
+
+ /*
+- * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the
+- * PTE is interpreted as swap information. Depending on the _PAGE_FILE
+- * bit, the remaining free bits are eitehr interpreted as a file offset
+- * or a swap type/offset tuple. Rather than have the TLB fill handler
+- * test _PAGE_PRESENT, we're going to reserve the permissions bits
+- * and set them to all zeros for swap entries, which speeds up the
+- * miss handler at the cost of 3 bits of offset. That trade-off can
+- * be revisited if necessary, but Hexagon processor architecture and
+- * target applications suggest a lot of TLB misses and not much swap space.
++ * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
++ * interpreted as swap information. The remaining free bits are interpreted as
++ * swap type/offset tuple. Rather than have the TLB fill handler test
++ * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
++ * all zeros for swap entries, which speeds up the miss handler at the cost of
++ * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon
++ * processor architecture and target applications suggest a lot of TLB misses
++ * and not much swap space.
+ *
+ * Format of swap PTE:
+ * bit 0: Present (zero)
+- * bit 1: _PAGE_FILE (zero)
+- * bits 2-6: swap type (arch independent layer uses 5 bits max)
+- * bits 7-9: bits 2:0 of offset
+- * bits 10-12: effectively _PAGE_PROTNONE (all zero)
+- * bits 13-31: bits 21:3 of swap offset
+- *
+- * Format of file PTE:
+- * bit 0: Present (zero)
+- * bit 1: _PAGE_FILE (zero)
+- * bits 2-9: bits 7:0 of offset
+- * bits 10-12: effectively _PAGE_PROTNONE (all zero)
+- * bits 13-31: bits 26:8 of swap offset
++ * bits 1-5: swap type (arch independent layer uses 5 bits max)
++ * bits 6-9: bits 3:0 of offset
++ * bits 10-12: effectively _PAGE_PROTNONE (all zero)
++ * bits 13-31: bits 22:4 of swap offset
+ *
+ * The split offset makes some of the following macros a little gnarly,
+ * but there's plenty of precedent for this sort of thing.
+ */
+-#define PTE_FILE_MAX_BITS 27
+
+ /* Used for swap PTEs */
+-#define __swp_type(swp_pte) (((swp_pte).val >> 2) & 0x1f)
++#define __swp_type(swp_pte) (((swp_pte).val >> 1) & 0x1f)
+
+ #define __swp_offset(swp_pte) \
+- ((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
++ ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+
+ #define __swp_entry(type, offset) \
+ ((swp_entry_t) { \
+- ((type << 2) | \
+- ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
+-
+-/* Used for file PTEs */
+-#define pte_file(pte) \
+- ((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
+-
+-#define pte_to_pgoff(pte) \
+- (((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
+-
+-#define pgoff_to_pte(off) \
+- ((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2)\
+- | _PAGE_FILE) })
++ ((type << 1) | \
++ ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
+
+ /* Oh boy. There are a lot of possible arch overrides found in this file. */
+ #include <asm-generic/pgtable.h>
diff --git a/queue-3.16/ia64-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/ia64-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..ef458b85
--- /dev/null
+++ b/queue-3.16/ia64-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,74 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:36 -0800
+Subject: ia64: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 636a002b704e0a36cefb5f4cf0293fab858fc46c upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+This patch also increase number of bits availble for swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/ia64/include/asm/pgtable.h | 25 +++++--------------------
+ 1 file changed, 5 insertions(+), 20 deletions(-)
+
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -57,9 +57,6 @@
+ #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
+ #define _PAGE_PROTNONE (__IA64_UL(1) << 63)
+
+-/* Valid only for a PTE with the present bit cleared: */
+-#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
+-
+ #define _PFN_MASK _PAGE_PPN_MASK
+ /* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
+ #define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
+@@ -300,7 +297,6 @@ extern unsigned long VMALLOC_END;
+ #define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
+ #define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
+ #define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
+-#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+ #define pte_special(pte) 0
+
+ /*
+@@ -472,27 +468,16 @@ extern void paging_init (void);
+ *
+ * Format of swap pte:
+ * bit 0 : present bit (must be zero)
+- * bit 1 : _PAGE_FILE (must be zero)
+- * bits 2- 8: swap-type
+- * bits 9-62: swap offset
+- * bit 63 : _PAGE_PROTNONE bit
+- *
+- * Format of file pte:
+- * bit 0 : present bit (must be zero)
+- * bit 1 : _PAGE_FILE (must be one)
+- * bits 2-62: file_offset/PAGE_SIZE
++ * bits 1- 7: swap-type
++ * bits 8-62: swap offset
+ * bit 63 : _PAGE_PROTNONE bit
+ */
+-#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
+-#define __swp_offset(entry) (((entry).val << 1) >> 10)
+-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
++#define __swp_type(entry) (((entry).val >> 1) & 0x7f)
++#define __swp_offset(entry) (((entry).val << 1) >> 9)
++#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#define PTE_FILE_MAX_BITS 61
+-#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
+-
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
diff --git a/queue-3.16/m32r-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/m32r-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..80d0558e
--- /dev/null
+++ b/queue-3.16/m32r-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,64 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:39 -0800
+Subject: m32r: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 406b16e26d0996516c8d1641008a7d326bf282d6 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/m32r/include/asm/pgtable-2level.h | 4 ----
+ arch/m32r/include/asm/pgtable.h | 11 -----------
+ 2 files changed, 15 deletions(-)
+
+--- a/arch/m32r/include/asm/pgtable-2level.h
++++ b/arch/m32r/include/asm/pgtable-2level.h
+@@ -70,9 +70,5 @@ static inline pmd_t *pmd_offset(pgd_t *
+ #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+ #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+-#define PTE_FILE_MAX_BITS 29
+-#define pte_to_pgoff(pte) (((pte_val(pte) >> 2) & 0x7f) | (((pte_val(pte) >> 10)) << 7))
+-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7f) << 2) | (((off) >> 7) << 10) | _PAGE_FILE })
+-
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_M32R_PGTABLE_2LEVEL_H */
+--- a/arch/m32r/include/asm/pgtable.h
++++ b/arch/m32r/include/asm/pgtable.h
+@@ -80,8 +80,6 @@ extern unsigned long empty_zero_page[102
+ */
+
+ #define _PAGE_BIT_DIRTY 0 /* software: page changed */
+-#define _PAGE_BIT_FILE 0 /* when !present: nonlinear file
+- mapping */
+ #define _PAGE_BIT_PRESENT 1 /* Valid: page is valid */
+ #define _PAGE_BIT_GLOBAL 2 /* Global */
+ #define _PAGE_BIT_LARGE 3 /* Large */
+@@ -93,7 +91,6 @@ extern unsigned long empty_zero_page[102
+ #define _PAGE_BIT_PROTNONE 9 /* software: if not present */
+
+ #define _PAGE_DIRTY (1UL << _PAGE_BIT_DIRTY)
+-#define _PAGE_FILE (1UL << _PAGE_BIT_FILE)
+ #define _PAGE_PRESENT (1UL << _PAGE_BIT_PRESENT)
+ #define _PAGE_GLOBAL (1UL << _PAGE_BIT_GLOBAL)
+ #define _PAGE_LARGE (1UL << _PAGE_BIT_LARGE)
+@@ -206,14 +203,6 @@ static inline int pte_write(pte_t pte)
+ return pte_val(pte) & _PAGE_WRITE;
+ }
+
+-/*
+- * The following only works if pte_present() is not true.
+- */
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & _PAGE_FILE;
+-}
+-
+ static inline int pte_special(pte_t pte)
+ {
+ return 0;
diff --git a/queue-3.16/m68k-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/m68k-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..ce1ef43e
--- /dev/null
+++ b/queue-3.16/m68k-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,159 @@
+From: "Kirill A. Shutemov" <kirill@shutemov.name>
+Date: Tue, 10 Feb 2015 14:10:41 -0800
+Subject: m68k: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 1eeda0abf4425c91e7ce3ca32f1908c3a51bf84e upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/m68k/include/asm/mcf_pgtable.h | 23 ++---------------------
+ arch/m68k/include/asm/motorola_pgtable.h | 15 ---------------
+ arch/m68k/include/asm/pgtable_no.h | 2 --
+ arch/m68k/include/asm/sun3_pgtable.h | 15 ---------------
+ 4 files changed, 2 insertions(+), 53 deletions(-)
+
+--- a/arch/m68k/include/asm/mcf_pgtable.h
++++ b/arch/m68k/include/asm/mcf_pgtable.h
+@@ -35,7 +35,6 @@
+ * hitting hardware.
+ */
+ #define CF_PAGE_DIRTY 0x00000001
+-#define CF_PAGE_FILE 0x00000200
+ #define CF_PAGE_ACCESSED 0x00001000
+
+ #define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
+@@ -243,11 +242,6 @@ static inline int pte_young(pte_t pte)
+ return pte_val(pte) & CF_PAGE_ACCESSED;
+ }
+
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & CF_PAGE_FILE;
+-}
+-
+ static inline int pte_special(pte_t pte)
+ {
+ return 0;
+@@ -391,26 +385,13 @@ static inline void cache_page(void *vadd
+ *ptep = pte_mkcache(*ptep);
+ }
+
+-#define PTE_FILE_MAX_BITS 21
+-#define PTE_FILE_SHIFT 11
+-
+-static inline unsigned long pte_to_pgoff(pte_t pte)
+-{
+- return pte_val(pte) >> PTE_FILE_SHIFT;
+-}
+-
+-static inline pte_t pgoff_to_pte(unsigned pgoff)
+-{
+- return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+-}
+-
+ /*
+ * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
+ */
+ #define __swp_type(x) ((x).val & 0xFF)
+-#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
++#define __swp_offset(x) ((x).val >> 11)
+ #define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \
+- (off << PTE_FILE_SHIFT) })
++ (off << 11) })
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) (__pte((x).val))
+
+--- a/arch/m68k/include/asm/motorola_pgtable.h
++++ b/arch/m68k/include/asm/motorola_pgtable.h
+@@ -28,7 +28,6 @@
+ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
+
+ #define _PAGE_PROTNONE 0x004
+-#define _PAGE_FILE 0x008 /* pagecache or swap? */
+
+ #ifndef __ASSEMBLY__
+
+@@ -168,7 +167,6 @@ static inline void pgd_set(pgd_t *pgdp,
+ static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
+@@ -266,19 +264,6 @@ static inline void cache_page(void *vadd
+ }
+ }
+
+-#define PTE_FILE_MAX_BITS 28
+-
+-static inline unsigned long pte_to_pgoff(pte_t pte)
+-{
+- return pte.pte >> 4;
+-}
+-
+-static inline pte_t pgoff_to_pte(unsigned off)
+-{
+- pte_t pte = { (off << 4) + _PAGE_FILE };
+- return pte;
+-}
+-
+ /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+ #define __swp_type(x) (((x).val >> 4) & 0xff)
+ #define __swp_offset(x) ((x).val >> 12)
+--- a/arch/m68k/include/asm/pgtable_no.h
++++ b/arch/m68k/include/asm/pgtable_no.h
+@@ -37,8 +37,6 @@ extern void paging_init(void);
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-static inline int pte_file(pte_t pte) { return 0; }
+-
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+--- a/arch/m68k/include/asm/sun3_pgtable.h
++++ b/arch/m68k/include/asm/sun3_pgtable.h
+@@ -38,8 +38,6 @@
+ #define _PAGE_PRESENT (SUN3_PAGE_VALID)
+ #define _PAGE_ACCESSED (SUN3_PAGE_ACCESSED)
+
+-#define PTE_FILE_MAX_BITS 28
+-
+ /* Compound page protection values. */
+ //todo: work out which ones *should* have SUN3_PAGE_NOCACHE and fix...
+ // is it just PAGE_KERNEL and PAGE_SHARED?
+@@ -168,7 +166,6 @@ static inline void pgd_clear (pgd_t *pgd
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEABLE; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
+@@ -202,18 +199,6 @@ static inline pmd_t *pmd_offset (pgd_t *
+ return (pmd_t *) pgd;
+ }
+
+-static inline unsigned long pte_to_pgoff(pte_t pte)
+-{
+- return pte.pte & SUN3_PAGE_PGNUM_MASK;
+-}
+-
+-static inline pte_t pgoff_to_pte(unsigned off)
+-{
+- pte_t pte = { off + SUN3_PAGE_ACCESSED };
+- return pte;
+-}
+-
+-
+ /* Find an entry in the third-level pagetable. */
+ #define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+ #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
diff --git a/queue-3.16/metag-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/metag-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..ddec10a4
--- /dev/null
+++ b/queue-3.16/metag-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,47 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:45 -0800
+Subject: metag: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 22f9bf3950f20d24198791685f2dccac2c4ef38a upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/metag/include/asm/pgtable.h | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/arch/metag/include/asm/pgtable.h
++++ b/arch/metag/include/asm/pgtable.h
+@@ -47,7 +47,6 @@
+ */
+ #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
+ #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
+-#define _PAGE_FILE _PAGE_ALWAYS_ZERO_3
+
+ /* Pages owned, and protected by, the kernel. */
+ #define _PAGE_KERNEL _PAGE_PRIV
+@@ -219,7 +218,6 @@ extern unsigned long empty_zero_page;
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
+@@ -327,10 +325,6 @@ static inline void update_mmu_cache(stru
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#define PTE_FILE_MAX_BITS 22
+-#define pte_to_pgoff(x) (pte_val(x) >> 10)
+-#define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE)
+-
+ #define kern_addr_valid(addr) (1)
+
+ /*
diff --git a/queue-3.16/microblaze-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/microblaze-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..2b52318b
--- /dev/null
+++ b/queue-3.16/microblaze-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,59 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:47 -0800
+Subject: microblaze: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 937fa39fb22fea1c1d8ca9e5f31c452b91ac7239 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Michal Simek <monstr@monstr.eu>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/microblaze/include/asm/pgtable.h | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/arch/microblaze/include/asm/pgtable.h
++++ b/arch/microblaze/include/asm/pgtable.h
+@@ -40,10 +40,6 @@ extern int mem_init_done;
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#ifndef __ASSEMBLY__
+-static inline int pte_file(pte_t pte) { return 0; }
+-#endif /* __ASSEMBLY__ */
+-
+ #define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
+
+ #define swapper_pg_dir ((pgd_t *) NULL)
+@@ -207,7 +203,6 @@ static inline pte_t pte_mkspecial(pte_t
+
+ /* Definitions for MicroBlaze. */
+ #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+-#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
+ #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+ #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+ #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+@@ -337,7 +332,6 @@ static inline int pte_write(pte_t pte) {
+ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+
+ static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+ static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+@@ -499,11 +493,6 @@ static inline pmd_t *pmd_offset(pgd_t *d
+
+ #define pte_unmap(pte) kunmap_atomic(pte)
+
+-/* Encode and decode a nonlinear file mapping entry */
+-#define PTE_FILE_MAX_BITS 29
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
+-
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+ /*
diff --git a/queue-3.16/mips-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/mips-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..6918c9da
--- /dev/null
+++ b/queue-3.16/mips-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,156 @@
+From: "Kirill A. Shutemov" <kirill@shutemov.name>
+Date: Tue, 10 Feb 2015 14:10:50 -0800
+Subject: mips: drop _PAGE_FILE and pte_file()-related helpers
+
+commit b32da82e28ce90bff4e371fc15d2816fa3175bb0 upstream.
+
+We've replaced remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mapping anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: Deleted definitions are slightly different]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/arch/mips/include/asm/pgtable-32.h
++++ b/arch/mips/include/asm/pgtable-32.h
+@@ -148,20 +148,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot
+ #define __swp_entry(type,offset) \
+ ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+
+-/*
+- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+- */
+-#define PTE_FILE_MAX_BITS 28
+-
+-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
+- (((_pte).pte >> 2 ) & 0x38) | \
+- (((_pte).pte >> 10) << 6 ))
+-
+-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
+- (((off) & 0x38) << 2 ) | \
+- (((off) >> 6 ) << 10) | \
+- _PAGE_FILE })
+-
+ #else
+
+ /* Swap entries must have VALID and GLOBAL bits cleared. */
+@@ -177,31 +163,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot
+ ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+ #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+
+-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+-/*
+- * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
+- */
+-#define PTE_FILE_MAX_BITS 30
+-
+-#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
+-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
+-
+-#else
+-/*
+- * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+- */
+-#define PTE_FILE_MAX_BITS 28
+-
+-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
+- (((_pte).pte >> 2) & 0x8) | \
+- (((_pte).pte >> 8) << 4))
+-
+-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
+- (((off) & 0x8) << 2) | \
+- (((off) >> 4) << 8) | \
+- _PAGE_FILE })
+-#endif
+-
+ #endif
+
+ #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+--- a/arch/mips/include/asm/pgtable-64.h
++++ b/arch/mips/include/asm/pgtable-64.h
+@@ -291,13 +291,4 @@ static inline pte_t mk_swap_pte(unsigned
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/*
+- * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
+- * make things easier, and only use the upper 56 bits for the page offset...
+- */
+-#define PTE_FILE_MAX_BITS 56
+-
+-#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
+-
+ #endif /* _ASM_PGTABLE_64_H */
+--- a/arch/mips/include/asm/pgtable-bits.h
++++ b/arch/mips/include/asm/pgtable-bits.h
+@@ -50,8 +50,6 @@
+
+ /*
+ * The following bits are implemented in software
+- *
+- * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
+ #define _PAGE_PRESENT_SHIFT 6
+ #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+@@ -64,14 +62,10 @@
+ #define _PAGE_MODIFIED_SHIFT 10
+ #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+
+-#define _PAGE_FILE (1 << 10)
+-
+ #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+
+ /*
+ * The following are implemented by software
+- *
+- * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
+ #define _PAGE_PRESENT_SHIFT 0
+ #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+@@ -83,8 +77,6 @@
+ #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+ #define _PAGE_MODIFIED_SHIFT 4
+ #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+-#define _PAGE_FILE_SHIFT 4
+-#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
+
+ /*
+ * And these are the hardware TLB bits
+@@ -114,7 +106,6 @@
+ * The following bits are implemented in software
+ *
+ * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
+- * _PAGE_FILE semantics: set:pagecache unset:swap
+ */
+ #define _PAGE_PRESENT_SHIFT (0)
+ #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+@@ -126,7 +117,6 @@
+ #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+ #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+ #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+-#define _PAGE_FILE (_PAGE_MODIFIED)
+
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /* huge tlb page */
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -253,7 +253,6 @@ extern pgd_t swapper_pg_dir[];
+ static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
+ static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
+ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+ {
+@@ -309,7 +308,6 @@ static inline pte_t pte_mkyoung(pte_t pt
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+ {
diff --git a/queue-3.16/mm-drop-support-of-non-linear-mapping-from-fault-codepath.patch b/queue-3.16/mm-drop-support-of-non-linear-mapping-from-fault-codepath.patch
new file mode 100644
index 00000000..072ab30b
--- /dev/null
+++ b/queue-3.16/mm-drop-support-of-non-linear-mapping-from-fault-codepath.patch
@@ -0,0 +1,163 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:51 -0800
+Subject: mm: drop support of non-linear mapping from fault codepath
+
+commit 9b4bdd2ffab9557ac43af7dff02e7dab1c8c58bd upstream.
+
+We don't create non-linear mappings anymore. Let's drop code which
+handles them on page fault.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Deleted code is slightly different
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ include/linux/mm.h | 16 +++++-------
+ mm/memory.c | 65 +++++++---------------------------------------
+ 2 files changed, 16 insertions(+), 65 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -187,21 +187,19 @@ extern unsigned int kobjsize(const void
+ extern pgprot_t protection_map[16];
+
+ #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
+-#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
+-#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
+-#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
+-#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
+-#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
+-#define FAULT_FLAG_TRIED 0x40 /* second try */
+-#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
++#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
++#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
++#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
++#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
++#define FAULT_FLAG_TRIED 0x20 /* Second try */
++#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
+
+ /*
+ * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
+ *
+- * pgoff should be used in favour of virtual_address, if possible. If pgoff
+- * is used, one may implement ->remap_pages to get nonlinear mapping support.
++ * pgoff should be used in favour of virtual_address, if possible.
+ */
+ struct vm_fault {
+ unsigned int flags; /* FAULT_FLAG_xxx flags */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1912,12 +1912,11 @@ int apply_to_page_range(struct mm_struct
+ EXPORT_SYMBOL_GPL(apply_to_page_range);
+
+ /*
+- * handle_pte_fault chooses page fault handler according to an entry
+- * which was read non-atomically. Before making any commitment, on
+- * those architectures or configurations (e.g. i386 with PAE) which
+- * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
+- * must check under lock before unmapping the pte and proceeding
+- * (but do_wp_page is only called after already making such a check;
++ * handle_pte_fault chooses page fault handler according to an entry which was
++ * read non-atomically. Before making any commitment, on those architectures
++ * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
++ * parts, do_swap_page must check under lock before unmapping the pte and
++ * proceeding (but do_wp_page is only called after already making such a check;
+ * and do_anonymous_page can safely check later on).
+ */
+ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+@@ -2676,8 +2675,6 @@ void do_set_pte(struct vm_area_struct *v
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (write)
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+- else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
+- pte_mksoft_dirty(entry);
+ if (anon) {
+ inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+ page_add_new_anon_rmap(page, vma, address);
+@@ -2818,8 +2815,7 @@ static int do_read_fault(struct mm_struc
+ * if page by the offset is not ready to be mapped (cold cache or
+ * something).
+ */
+- if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+- fault_around_pages() > 1) {
++ if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ do_fault_around(vma, address, pte, pgoff, flags);
+ if (!pte_same(*pte, orig_pte))
+@@ -2949,7 +2945,7 @@ static int do_shared_fault(struct mm_str
+ return ret;
+ }
+
+-static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
++static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ unsigned int flags, pte_t orig_pte)
+ {
+@@ -2969,44 +2965,6 @@ static int do_linear_fault(struct mm_str
+ return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+ }
+
+-/*
+- * Fault of a previously existing named mapping. Repopulate the pte
+- * from the encoded file_pte if possible. This enables swappable
+- * nonlinear vmas.
+- *
+- * We enter with non-exclusive mmap_sem (to exclude vma changes,
+- * but allow concurrent faults), and pte mapped but not yet locked.
+- * We return with mmap_sem still held, but pte unmapped and unlocked.
+- */
+-static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long address, pte_t *page_table, pmd_t *pmd,
+- unsigned int flags, pte_t orig_pte)
+-{
+- pgoff_t pgoff;
+-
+- flags |= FAULT_FLAG_NONLINEAR;
+-
+- if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
+- return 0;
+-
+- if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
+- /*
+- * Page table corrupted: show pte and kill process.
+- */
+- print_bad_pte(vma, address, orig_pte, NULL);
+- return VM_FAULT_SIGBUS;
+- }
+-
+- pgoff = pte_to_pgoff(orig_pte);
+- if (!(flags & FAULT_FLAG_WRITE))
+- return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+- orig_pte);
+- if (!(vma->vm_flags & VM_SHARED))
+- return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
+- orig_pte);
+- return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+-}
+-
+ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, int page_nid,
+ int *flags)
+@@ -3121,15 +3079,12 @@ static int handle_pte_fault(struct mm_st
+ if (!pte_present(entry)) {
+ if (pte_none(entry)) {
+ if (vma->vm_ops)
+- return do_linear_fault(mm, vma, address,
+- pte, pmd, flags, entry);
++ return do_fault(mm, vma, address, pte,
++ pmd, flags, entry);
+
+ return do_anonymous_page(mm, vma, address,
+ pte, pmd, flags);
+ }
+- if (pte_file(entry))
+- return do_nonlinear_fault(mm, vma, address,
+- pte, pmd, flags, entry);
+ return do_swap_page(mm, vma, address,
+ pte, pmd, flags, entry);
+ }
diff --git a/queue-3.16/mm-drop-support-of-non-linear-mapping-from-unmap-zap-codepath.patch b/queue-3.16/mm-drop-support-of-non-linear-mapping-from-unmap-zap-codepath.patch
new file mode 100644
index 00000000..aa225b99
--- /dev/null
+++ b/queue-3.16/mm-drop-support-of-non-linear-mapping-from-unmap-zap-codepath.patch
@@ -0,0 +1,232 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:49 -0800
+Subject: mm: drop support of non-linear mapping from unmap/zap codepath
+
+commit 8a5f14a23177061ec11daeaa3d09d0765d785c47 upstream.
+
+We have had remap_file_pages(2) emulation in the -mm tree for a few
+release cycles and we plan to have it mainline in v3.20. This patchset
+removes the rest of the VM_NONLINEAR infrastructure.
+
+Patches 1-8 take care of the generic code. They are pretty
+straightforward and can be applied independently of the other patches.
+
+The remaining patches remove pte_file()-related stuff from
+architecture-specific code. This usually frees up one bit in the
+non-present pte. I've tried to reuse that bit for the swap offset where
+I was able to figure out how to do so.
+
+For obvious reasons I cannot test all that arch-specific code and would
+like to see acks from maintainers.
+
+In total, remap_file_pages(2) required about 1.4K lines of not-so-trivial
+kernel code. That's too much for functionality nobody uses.
+
+Tested-by: Felipe Balbi <balbi@ti.com>
+
+This patch (of 38):
+
+We don't create non-linear mappings anymore. Let's drop code which
+handles them on unmap/zap.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ include/linux/mm.h | 1 -
+ mm/madvise.c | 9 +----
+ mm/memory.c | 82 ++++++++++++----------------------------------
+ 3 files changed, 22 insertions(+), 70 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1103,7 +1103,6 @@ extern void user_shm_unlock(size_t, stru
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+ struct zap_details {
+- struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
+ struct address_space *check_mapping; /* Check page->mapping if set */
+ pgoff_t first_index; /* Lowest page->index to unmap */
+ pgoff_t last_index; /* Highest page->index to unmap */
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -277,14 +277,7 @@ static long madvise_dontneed(struct vm_a
+ if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+ return -EINVAL;
+
+- if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+- struct zap_details details = {
+- .nonlinear_vma = vma,
+- .last_index = ULONG_MAX,
+- };
+- zap_page_range(vma, start, end - start, &details);
+- } else
+- zap_page_range(vma, start, end - start, NULL);
++ zap_page_range(vma, start, end - start, NULL);
+ return 0;
+ }
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1081,6 +1081,7 @@ static unsigned long zap_pte_range(struc
+ spinlock_t *ptl;
+ pte_t *start_pte;
+ pte_t *pte;
++ swp_entry_t entry;
+
+ again:
+ init_rss_vec(rss);
+@@ -1106,28 +1107,12 @@ again:
+ if (details->check_mapping &&
+ details->check_mapping != page->mapping)
+ continue;
+- /*
+- * Each page->index must be checked when
+- * invalidating or truncating nonlinear.
+- */
+- if (details->nonlinear_vma &&
+- (page->index < details->first_index ||
+- page->index > details->last_index))
+- continue;
+ }
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ if (unlikely(!page))
+ continue;
+- if (unlikely(details) && details->nonlinear_vma
+- && linear_page_index(details->nonlinear_vma,
+- addr) != page->index) {
+- pte_t ptfile = pgoff_to_pte(page->index);
+- if (pte_soft_dirty(ptent))
+- ptfile = pte_file_mksoft_dirty(ptfile);
+- set_pte_at(mm, addr, pte, ptfile);
+- }
+ if (PageAnon(page))
+ rss[MM_ANONPAGES]--;
+ else {
+@@ -1150,33 +1135,25 @@ again:
+ }
+ continue;
+ }
+- /*
+- * If details->check_mapping, we leave swap entries;
+- * if details->nonlinear_vma, we leave file entries.
+- */
++ /* If details->check_mapping, we leave swap entries. */
+ if (unlikely(details))
+ continue;
+- if (pte_file(ptent)) {
+- if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+- print_bad_pte(vma, addr, ptent, NULL);
+- } else {
+- swp_entry_t entry = pte_to_swp_entry(ptent);
+-
+- if (!non_swap_entry(entry))
+- rss[MM_SWAPENTS]--;
+- else if (is_migration_entry(entry)) {
+- struct page *page;
+-
+- page = migration_entry_to_page(entry);
+-
+- if (PageAnon(page))
+- rss[MM_ANONPAGES]--;
+- else
+- rss[MM_FILEPAGES]--;
+- }
+- if (unlikely(!free_swap_and_cache(entry)))
+- print_bad_pte(vma, addr, ptent, NULL);
++
++ entry = pte_to_swp_entry(ptent);
++ if (!non_swap_entry(entry))
++ rss[MM_SWAPENTS]--;
++ else if (is_migration_entry(entry)) {
++ struct page *page;
++
++ page = migration_entry_to_page(entry);
++
++ if (PageAnon(page))
++ rss[MM_ANONPAGES]--;
++ else
++ rss[MM_FILEPAGES]--;
+ }
++ if (unlikely(!free_swap_and_cache(entry)))
++ print_bad_pte(vma, addr, ptent, NULL);
+ pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+@@ -1288,7 +1265,7 @@ static void unmap_page_range(struct mmu_
+ pgd_t *pgd;
+ unsigned long next;
+
+- if (details && !details->check_mapping && !details->nonlinear_vma)
++ if (details && !details->check_mapping)
+ details = NULL;
+
+ BUG_ON(addr >= end);
+@@ -1384,7 +1361,7 @@ void unmap_vmas(struct mmu_gather *tlb,
+ * @vma: vm_area_struct holding the applicable pages
+ * @start: starting address of pages to zap
+ * @size: number of bytes to zap
+- * @details: details of nonlinear truncation or shared cache invalidation
++ * @details: details of shared cache invalidation
+ *
+ * Caller must protect the VMA list
+ */
+@@ -1410,7 +1387,7 @@ void zap_page_range(struct vm_area_struc
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+- * @details: details of nonlinear truncation or shared cache invalidation
++ * @details: details of shared cache invalidation
+ *
+ * The range must fit into one VMA.
+ */
+@@ -2340,25 +2317,11 @@ static inline void unmap_mapping_range_t
+ }
+ }
+
+-static inline void unmap_mapping_range_list(struct list_head *head,
+- struct zap_details *details)
+-{
+- struct vm_area_struct *vma;
+-
+- /*
+- * In nonlinear VMAs there is no correspondence between virtual address
+- * offset and file offset. So we must perform an exhaustive search
+- * across *all* the pages in each nonlinear VMA, not just the pages
+- * whose virtual address lies outside the file truncation point.
+- */
+- list_for_each_entry(vma, head, shared.nonlinear) {
+- details->nonlinear_vma = vma;
+- unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
+- }
+-}
+-
+ /**
+- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
++ * unmap_mapping_range - unmap the portion of all mmaps in the specified
++ * address_space corresponding to the specified page range in the underlying
++ * file.
++ *
+ * @mapping: the address space containing mmaps to be unmapped.
+ * @holebegin: byte in first page to unmap, relative to the start of
+ * the underlying file. This will be rounded down to a PAGE_SIZE
+@@ -2387,7 +2350,6 @@ void unmap_mapping_range(struct address_
+ }
+
+ details.check_mapping = even_cows? NULL: mapping;
+- details.nonlinear_vma = NULL;
+ details.first_index = hba;
+ details.last_index = hba + hlen - 1;
+ if (details.last_index < details.first_index)
+@@ -2397,8 +2359,6 @@ void unmap_mapping_range(struct address_
+ mutex_lock(&mapping->i_mmap_mutex);
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
+ unmap_mapping_range_tree(&mapping->i_mmap, &details);
+- if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+- unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+ EXPORT_SYMBOL(unmap_mapping_range);
diff --git a/queue-3.16/mm-drop-vm_ops-remap_pages-and-generic_file_remap_pages-stub.patch b/queue-3.16/mm-drop-vm_ops-remap_pages-and-generic_file_remap_pages-stub.patch
new file mode 100644
index 00000000..8b67ecfa
--- /dev/null
+++ b/queue-3.16/mm-drop-vm_ops-remap_pages-and-generic_file_remap_pages-stub.patch
@@ -0,0 +1,231 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:54 -0800
+Subject: mm: drop vm_ops->remap_pages and generic_file_remap_pages() stub
+
+commit d83a08db5ba6072caa658745881f4baa9bad6a08 upstream.
+
+Nobody uses it anymore.
+
+[akpm@linux-foundation.org: fix filemap_xip.c]
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Deleted code is slightly different
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ fs/9p/vfs_file.c | 2 --
+ fs/btrfs/file.c | 1 -
+ fs/ceph/addr.c | 1 -
+ fs/cifs/file.c | 1 -
+ fs/ext4/file.c | 1 -
+ fs/f2fs/file.c | 1 -
+ fs/fuse/file.c | 1 -
+ fs/gfs2/file.c | 1 -
+ fs/nfs/file.c | 1 -
+ fs/nilfs2/file.c | 1 -
+ fs/ocfs2/mmap.c | 1 -
+ fs/ubifs/file.c | 1 -
+ fs/xfs/xfs_file.c | 1 -
+ include/linux/fs.h | 6 ------
+ include/linux/mm.h | 3 ---
+ mm/filemap.c | 1 -
+ mm/filemap_xip.c | 1 -
+ mm/shmem.c | 1 -
+ 18 files changed, 26 deletions(-)
+
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -831,7 +831,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = v9fs_vm_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
+@@ -839,7 +838,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = v9fs_vm_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2025,7 +2025,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = btrfs_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -1327,7 +1327,6 @@ out:
+ static struct vm_operations_struct ceph_vmops = {
+ .fault = ceph_filemap_fault,
+ .page_mkwrite = ceph_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3111,7 +3111,6 @@ static struct vm_operations_struct cifs_
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = cifs_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -195,7 +195,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = ext4_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -87,7 +87,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = f2fs_vm_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int get_parent_ino(struct inode *inode, nid_t *pino)
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2138,7 +2138,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = fuse_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -496,7 +496,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = gfs2_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ /**
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -618,7 +618,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = nfs_vm_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+--- a/fs/nilfs2/file.c
++++ b/fs/nilfs2/file.c
+@@ -136,7 +136,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = nilfs_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/ocfs2/mmap.c
++++ b/fs/ocfs2/mmap.c
+@@ -173,7 +173,6 @@ out:
+ static const struct vm_operations_struct ocfs2_file_vm_ops = {
+ .fault = ocfs2_fault,
+ .page_mkwrite = ocfs2_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1556,7 +1556,6 @@ static const struct vm_operations_struct
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = ubifs_vm_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -1481,5 +1481,4 @@ static const struct vm_operations_struct
+ .fault = xfs_filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = xfs_filemap_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2430,12 +2430,6 @@ extern int sb_min_blocksize(struct super
+
+ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+ extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+-static inline int generic_file_remap_pages(struct vm_area_struct *vma,
+- unsigned long addr, unsigned long size, pgoff_t pgoff)
+-{
+- BUG();
+- return 0;
+-}
+ int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
+ extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -268,9 +268,6 @@ struct vm_operations_struct {
+ int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+ const nodemask_t *to, unsigned long flags);
+ #endif
+- /* called by sys_remap_file_pages() to populate non-linear mapping */
+- int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long size, pgoff_t pgoff);
+ };
+
+ struct mmu_gather;
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2078,7 +2078,6 @@ const struct vm_operations_struct generi
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = filemap_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ /* This is used for a general mmap of a disk file */
+--- a/mm/filemap_xip.c
++++ b/mm/filemap_xip.c
+@@ -306,7 +306,6 @@ out:
+ static const struct vm_operations_struct xip_file_vm_ops = {
+ .fault = xip_file_fault,
+ .page_mkwrite = filemap_page_mkwrite,
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2793,7 +2793,6 @@ static const struct vm_operations_struct
+ .set_policy = shmem_set_policy,
+ .get_policy = shmem_get_policy,
+ #endif
+- .remap_pages = generic_file_remap_pages,
+ };
+
+ static struct dentry *shmem_mount(struct file_system_type *fs_type,
diff --git a/queue-3.16/mm-fix-regression-in-remap_file_pages-emulation.patch b/queue-3.16/mm-fix-regression-in-remap_file_pages-emulation.patch
new file mode 100644
index 00000000..9c2491b8
--- /dev/null
+++ b/queue-3.16/mm-fix-regression-in-remap_file_pages-emulation.patch
@@ -0,0 +1,124 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Wed, 17 Feb 2016 13:11:15 -0800
+Subject: mm: fix regression in remap_file_pages() emulation
+
+commit 48f7df329474b49d83d0dffec1b6186647f11976 upstream.
+
+Grazvydas Ignotas has reported a regression in remap_file_pages()
+emulation.
+
+Testcase:
+ #define _GNU_SOURCE
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <sys/mman.h>
+
+ #define SIZE (4096 * 3)
+
+ int main(int argc, char **argv)
+ {
+ unsigned long *p;
+ long i;
+
+ p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ return -1;
+ }
+
+ for (i = 0; i < SIZE / 4096; i++)
+ p[i * 4096 / sizeof(*p)] = i;
+
+ if (remap_file_pages(p, 4096, 0, 1, 0)) {
+ perror("remap_file_pages");
+ return -1;
+ }
+
+ if (remap_file_pages(p, 4096 * 2, 0, 1, 0)) {
+ perror("remap_file_pages");
+ return -1;
+ }
+
+ assert(p[0] == 1);
+
+ munmap(p, SIZE);
+
+ return 0;
+ }
+
+The second remap_file_pages() fails with -EINVAL.
+
+The reason is that the remap_file_pages() emulation assumes that the
+target vma covers the whole area we want to over-map. That assumption is
+broken by the first remap_file_pages() call: it splits the area into two
+vmas.
+
+The solution is to check the next adjacent vmas and see whether they map
+the same file with the same flags.
+
+Fixes: c8d78c1823f4 ("mm: replace remap_file_pages() syscall with emulation")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Grazvydas Ignotas <notasas@gmail.com>
+Tested-by: Grazvydas Ignotas <notasas@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ mm/mmap.c | 34 +++++++++++++++++++++++++++++-----
+ 1 file changed, 29 insertions(+), 5 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2656,12 +2656,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ if (!vma || !(vma->vm_flags & VM_SHARED))
+ goto out;
+
+- if (start < vma->vm_start || start + size > vma->vm_end)
++ if (start < vma->vm_start)
+ goto out;
+
+- if (pgoff == linear_page_index(vma, start)) {
+- ret = 0;
+- goto out;
++ if (start + size > vma->vm_end) {
++ struct vm_area_struct *next;
++
++ for (next = vma->vm_next; next; next = next->vm_next) {
++ /* hole between vmas ? */
++ if (next->vm_start != next->vm_prev->vm_end)
++ goto out;
++
++ if (next->vm_file != vma->vm_file)
++ goto out;
++
++ if (next->vm_flags != vma->vm_flags)
++ goto out;
++
++ if (start + size <= next->vm_end)
++ break;
++ }
++
++ if (!next)
++ goto out;
+ }
+
+ prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
+@@ -2671,9 +2688,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
+ flags &= MAP_NONBLOCK;
+ flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
+ if (vma->vm_flags & VM_LOCKED) {
++ struct vm_area_struct *tmp;
+ flags |= MAP_LOCKED;
++
+ /* drop PG_Mlocked flag for over-mapped range */
+- munlock_vma_pages_range(vma, start, start + size);
++ for (tmp = vma; tmp->vm_start >= start + size;
++ tmp = tmp->vm_next) {
++ munlock_vma_pages_range(tmp,
++ max(tmp->vm_start, start),
++ min(tmp->vm_end, start + size));
++ }
+ }
+
+ file = get_file(vma->vm_file);
diff --git a/queue-3.16/mm-remove-rest-usage-of-vm_nonlinear-and-pte_file.patch b/queue-3.16/mm-remove-rest-usage-of-vm_nonlinear-and-pte_file.patch
new file mode 100644
index 00000000..9031b1bc
--- /dev/null
+++ b/queue-3.16/mm-remove-rest-usage-of-vm_nonlinear-and-pte_file.patch
@@ -0,0 +1,291 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:04 -0800
+Subject: mm: remove rest usage of VM_NONLINEAR and pte_file()
+
+commit 0661a33611fca12570cba48d9344ce68834ee86c upstream.
+
+One bit in ->vm_flags is unused now!
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: Drop changes in mm/debug.c]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/drivers/gpu/drm/drm_vma_manager.c
++++ b/drivers/gpu/drm/drm_vma_manager.c
+@@ -50,8 +50,7 @@
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+- * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+- * own offset managers.
++ * no longer be linear.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -125,7 +125,6 @@ extern unsigned int kobjsize(const void
+ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
+ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+-#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+ #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
+ #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
+
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_ent
+ /* check whether a pte points to a swap entry */
+ static inline int is_swap_pte(pte_t pte)
+ {
+- return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte);
++ return !pte_none(pte) && !pte_present_nonuma(pte);
+ }
+ #endif
+
+@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_ent
+ {
+ swp_entry_t arch_entry;
+
+- BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp
+ swp_entry_t arch_entry;
+
+ arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+- BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
+ return __swp_entry_to_pte(arch_entry);
+ }
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -61,7 +61,7 @@ retry:
+ */
+ if (likely(!(flags & FOLL_MIGRATION)))
+ goto no_page;
+- if (pte_none(pte) || pte_file(pte))
++ if (pte_none(pte))
+ goto no_page;
+ entry = pte_to_swp_entry(pte);
+ if (!is_migration_entry(entry))
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -1749,7 +1749,7 @@ int ksm_madvise(struct vm_area_struct *v
+ */
+ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
+ VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+- VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
++ VM_HUGETLB | VM_MIXEDMAP))
+ return 0; /* just ignore the advice */
+
+ #ifdef VM_SAO
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -155,7 +155,7 @@ static int swapin_walk_pmd_entry(pmd_t *
+ pte = *(orig_pte + ((index - start) / PAGE_SIZE));
+ pte_unmap_unlock(orig_pte, ptl);
+
+- if (pte_present(pte) || pte_none(pte) || pte_file(pte))
++ if (pte_present(pte) || pte_none(pte))
+ continue;
+ entry = pte_to_swp_entry(pte);
+ if (unlikely(non_swap_entry(entry)))
+@@ -298,7 +298,7 @@ static long madvise_remove(struct vm_are
+
+ *prev = NULL; /* tell sys_madvise we drop mmap_sem */
+
+- if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
++ if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB))
+ return -EINVAL;
+
+ f = vma->vm_file;
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -6580,10 +6580,7 @@ static struct page *mc_handle_file_pte(s
+ return NULL;
+
+ mapping = vma->vm_file->f_mapping;
+- if (pte_none(ptent))
+- pgoff = linear_page_index(vma, addr);
+- else /* pte_file(ptent) is true */
+- pgoff = pte_to_pgoff(ptent);
++ pgoff = linear_page_index(vma, addr);
+
+ /* page is moved even if it's not RSS of this task(page-faulted). */
+ #ifdef CONFIG_SWAP
+@@ -6616,7 +6613,7 @@ static enum mc_target_type get_mctgt_typ
+ page = mc_handle_present_pte(vma, addr, ptent);
+ else if (is_swap_pte(ptent))
+ page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+- else if (pte_none(ptent) || pte_file(ptent))
++ else if (pte_none(ptent))
+ page = mc_handle_file_pte(vma, addr, ptent, &ent);
+
+ if (!page && !ent.val)
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -810,42 +810,40 @@ copy_one_pte(struct mm_struct *dst_mm, s
+
+ /* pte contains position in swap or file, so copy. */
+ if (unlikely(!pte_present(pte))) {
+- if (!pte_file(pte)) {
+- swp_entry_t entry = pte_to_swp_entry(pte);
++ swp_entry_t entry = pte_to_swp_entry(pte);
+
+- if (likely(!non_swap_entry(entry))) {
+- if (swap_duplicate(entry) < 0)
+- return entry.val;
+-
+- /* make sure dst_mm is on swapoff's mmlist. */
+- if (unlikely(list_empty(&dst_mm->mmlist))) {
+- spin_lock(&mmlist_lock);
+- if (list_empty(&dst_mm->mmlist))
+- list_add(&dst_mm->mmlist,
+- &src_mm->mmlist);
+- spin_unlock(&mmlist_lock);
+- }
+- rss[MM_SWAPENTS]++;
+- } else if (is_migration_entry(entry)) {
+- page = migration_entry_to_page(entry);
+-
+- if (PageAnon(page))
+- rss[MM_ANONPAGES]++;
+- else
+- rss[MM_FILEPAGES]++;
+-
+- if (is_write_migration_entry(entry) &&
+- is_cow_mapping(vm_flags)) {
+- /*
+- * COW mappings require pages in both
+- * parent and child to be set to read.
+- */
+- make_migration_entry_read(&entry);
+- pte = swp_entry_to_pte(entry);
+- if (pte_swp_soft_dirty(*src_pte))
+- pte = pte_swp_mksoft_dirty(pte);
+- set_pte_at(src_mm, addr, src_pte, pte);
+- }
++ if (likely(!non_swap_entry(entry))) {
++ if (swap_duplicate(entry) < 0)
++ return entry.val;
++
++ /* make sure dst_mm is on swapoff's mmlist. */
++ if (unlikely(list_empty(&dst_mm->mmlist))) {
++ spin_lock(&mmlist_lock);
++ if (list_empty(&dst_mm->mmlist))
++ list_add(&dst_mm->mmlist,
++ &src_mm->mmlist);
++ spin_unlock(&mmlist_lock);
++ }
++ rss[MM_SWAPENTS]++;
++ } else if (is_migration_entry(entry)) {
++ page = migration_entry_to_page(entry);
++
++ if (PageAnon(page))
++ rss[MM_ANONPAGES]++;
++ else
++ rss[MM_FILEPAGES]++;
++
++ if (is_write_migration_entry(entry) &&
++ is_cow_mapping(vm_flags)) {
++ /*
++ * COW mappings require pages in both
++ * parent and child to be set to read.
++ */
++ make_migration_entry_read(&entry);
++ pte = swp_entry_to_pte(entry);
++ if (pte_swp_soft_dirty(*src_pte))
++ pte = pte_swp_mksoft_dirty(pte);
++ set_pte_at(src_mm, addr, src_pte, pte);
+ }
+ }
+ goto out_set_pte;
+@@ -1019,11 +1017,9 @@ int copy_page_range(struct mm_struct *ds
+ * readonly mappings. The tradeoff is that copy_page_range is more
+ * efficient than faulting.
+ */
+- if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
+- VM_PFNMAP | VM_MIXEDMAP))) {
+- if (!vma->anon_vma)
+- return 0;
+- }
++ if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
++ !vma->anon_vma)
++ return 0;
+
+ if (is_vm_hugetlb_page(vma))
+ return copy_hugetlb_page_range(dst_mm, src_mm, vma);
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -124,17 +124,13 @@ static void mincore_pte_range(struct vm_
+ ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ do {
+ pte_t pte = *ptep;
+- pgoff_t pgoff;
+
+ next = addr + PAGE_SIZE;
+ if (pte_none(pte))
+ mincore_unmapped_range(vma, addr, next, vec);
+ else if (pte_present(pte))
+ *vec = 1;
+- else if (pte_file(pte)) {
+- pgoff = pte_to_pgoff(pte);
+- *vec = mincore_page(vma->vm_file->f_mapping, pgoff);
+- } else { /* pte is a swap entry */
++ else { /* pte is a swap entry */
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if (is_migration_entry(entry)) {
+@@ -142,9 +138,8 @@ static void mincore_pte_range(struct vm_
+ *vec = 1;
+ } else {
+ #ifdef CONFIG_SWAP
+- pgoff = entry.val;
+ *vec = mincore_page(swap_address_space(entry),
+- pgoff);
++ entry.val);
+ #else
+ WARN_ON(1);
+ *vec = 1;
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -110,7 +110,7 @@ static unsigned long change_pte_range(st
+ }
+ if (updated)
+ pages++;
+- } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
++ } else if (IS_ENABLED(CONFIG_MIGRATION)) {
+ swp_entry_t entry = pte_to_swp_entry(oldpte);
+
+ if (is_write_migration_entry(entry)) {
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -81,8 +81,6 @@ static pte_t move_soft_dirty_pte(pte_t p
+ pte = pte_mksoft_dirty(pte);
+ else if (is_swap_pte(pte))
+ pte = pte_swp_mksoft_dirty(pte);
+- else if (pte_file(pte))
+- pte = pte_file_mksoft_dirty(pte);
+ #endif
+ return pte;
+ }
+--- a/mm/msync.c
++++ b/mm/msync.c
+@@ -86,10 +86,7 @@ SYSCALL_DEFINE3(msync, unsigned long, st
+ (vma->vm_flags & VM_SHARED)) {
+ get_file(file);
+ up_read(&mm->mmap_sem);
+- if (vma->vm_flags & VM_NONLINEAR)
+- error = vfs_fsync(file, 1);
+- else
+- error = vfs_fsync_range(file, fstart, fend, 1);
++ error = vfs_fsync_range(file, fstart, fend, 1);
+ fput(file);
+ if (error || start >= end)
+ goto out;
diff --git a/queue-3.16/mm-replace-remap_file_pages-syscall-with-emulation.patch b/queue-3.16/mm-replace-remap_file_pages-syscall-with-emulation.patch
new file mode 100644
index 00000000..c7b602a2
--- /dev/null
+++ b/queue-3.16/mm-replace-remap_file_pages-syscall-with-emulation.patch
@@ -0,0 +1,537 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:46 -0800
+Subject: mm: replace remap_file_pages() syscall with emulation
+
+commit c8d78c1823f46519473949d33f0d1d33fe21ea16 upstream.
+
+remap_file_pages(2) was invented to make it possible to efficiently map
+parts of a huge file into a limited 32-bit virtual address space, such
+as in database workloads.
+
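+For illustration, the classic pattern the syscall was designed for looked
+roughly like the sketch below (this sketch is not from the original
+submission; the file name, window size and offsets are made up, a 4K page
+size is assumed, and error handling is kept minimal):
+
+ #define _GNU_SOURCE
+ #include <fcntl.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <sys/mman.h>
+
+ int main(void)
+ {
+ 	size_t win = 256UL << 20;		/* 256MB window */
+ 	size_t pgoff = (512UL << 20) >> 12;	/* file offset 512MB, in 4K pages */
+ 	int fd = open("bigfile", O_RDWR);	/* assumed to be several GB */
+ 	char *p;
+
+ 	if (fd < 0) {
+ 		perror("open");
+ 		return 1;
+ 	}
+ 	p = mmap(NULL, win, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ 	if (p == MAP_FAILED) {
+ 		perror("mmap");
+ 		return 1;
+ 	}
+ 	/* Re-point the second half of the window at a distant part of
+ 	 * the file without creating a new mapping; prot must be 0. */
+ 	if (remap_file_pages(p + win / 2, win / 2, 0, pgoff, 0)) {
+ 		perror("remap_file_pages");
+ 		return 1;
+ 	}
+ 	return 0;
+ }
+
+With the emulation introduced below, a call like this still succeeds, but
+it now ends up as a separate VMA over the remapped half instead of a
+rewritten set of ptes inside one non-linear VMA.
+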
+Nonlinear mappings are a pain to support and it seems there are no
+legitimate use-cases nowadays since 64-bit systems are widely available.
+
+Let's drop it and get rid of all this special-cased code.
+
+The patch replaces the syscall with emulation which creates a new VMA on
+each remap_file_pages(), unless it can be merged with an adjacent one.
+
+I didn't find *any* real code that uses remap_file_pages(2) on which to
+test the emulation's impact. I've checked Debian code search and the
+source of all packages in ALT Linux. No real users: only libc wrappers,
+mentions in strace, gdb, valgrind and that kind of stuff.
+
+There are a few basic tests for the syscall in LTP. They work just fine
+with the emulation.
+
+To test the performance impact, I've written a small test case which
+demonstrates pretty much the worst-case scenario: map a 4G shmfs file,
+write the pgoff of each page to the beginning of that page, remap the
+pages in reverse order, then read every page.
+
+The test creates 1 million VMAs if the emulation is in use, so I had to
+set vm.max_map_count to 1100000 to avoid -ENOMEM.
+
+Before: 23.3 ( +- 4.31% ) seconds
+After: 43.9 ( +- 0.85% ) seconds
+Slowdown: 1.88x
+
+I believe we can live with that.
+
+Test case:
+
+ #define _GNU_SOURCE
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <sys/mman.h>
+
+ #define MB (1024UL * 1024)
+ #define SIZE (4096 * MB)
+
+ int main(int argc, char **argv)
+ {
+ unsigned long *p;
+ long i, pass;
+
+ for (pass = 0; pass < 10; pass++) {
+ p = mmap(NULL, SIZE, PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ return -1;
+ }
+
+ for (i = 0; i < SIZE / 4096; i++)
+ p[i * 4096 / sizeof(*p)] = i;
+
+ for (i = 0; i < SIZE / 4096; i++) {
+ if (remap_file_pages(p + i * 4096 / sizeof(*p), 4096,
+ 0, (SIZE - 4096 * (i + 1)) >> 12, 0)) {
+ perror("remap_file_pages");
+ return -1;
+ }
+ }
+
+ for (i = SIZE / 4096 - 1; i >= 0; i--)
+ assert(p[i * 4096 / sizeof(*p)] == SIZE / 4096 - i - 1);
+
+ munmap(p, SIZE);
+ }
+
+ return 0;
+ }
+
+[akpm@linux-foundation.org: fix spello]
+[sasha.levin@oracle.com: initialize populate before usage]
+[sasha.levin@oracle.com: grab file ref to prevent race while mmaping]
+Signed-off-by: "Kirill A. Shutemov" <kirill@shutemov.name>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Dave Jones <davej@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Armin Rigo <arigo@tunes.org>
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Deleted code is slightly different
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ Documentation/vm/remap_file_pages.txt | 7 +-
+ include/linux/fs.h | 8 +-
+ mm/Makefile | 2 +-
+ mm/fremap.c | 283 --------------------------
+ mm/mmap.c | 69 +++++++
+ mm/nommu.c | 8 -
+ 6 files changed, 79 insertions(+), 298 deletions(-)
+ delete mode 100644 mm/fremap.c
+
+--- a/Documentation/vm/remap_file_pages.txt
++++ b/Documentation/vm/remap_file_pages.txt
+@@ -18,10 +18,9 @@ on 32-bit systems to map files bigger th
+ virtual address space. This use-case is not critical anymore since 64-bit
+ systems are widely available.
+
+-The plan is to deprecate the syscall and replace it with an emulation.
+-The emulation will create new VMAs instead of nonlinear mappings. It's
+-going to work slower for rare users of remap_file_pages() but ABI is
+-preserved.
++The syscall is deprecated and replaced it with an emulation now. The
++emulation creates new VMAs instead of nonlinear mappings. It's going to
++work slower for rare users of remap_file_pages() but ABI is preserved.
+
+ One side effect of emulation (apart from performance) is that user can hit
+ vm.max_map_count limit more easily due to additional VMAs. See comment for
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2430,8 +2430,12 @@ extern int sb_min_blocksize(struct super
+
+ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+ extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+-extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
+- unsigned long size, pgoff_t pgoff);
++static inline int generic_file_remap_pages(struct vm_area_struct *vma,
++ unsigned long addr, unsigned long size, pgoff_t pgoff)
++{
++ BUG();
++ return 0;
++}
+ int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
+ extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ mmu-y := nommu.o
+-mmu-$(CONFIG_MMU) := fremap.o gup.o highmem.o madvise.o memory.o mincore.o \
++mmu-$(CONFIG_MMU) := gup.o highmem.o madvise.o memory.o mincore.o \
+ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
+ vmalloc.o pagewalk.o pgtable-generic.o
+
+--- a/mm/fremap.c
++++ /dev/null
+@@ -1,283 +0,0 @@
+-/*
+- * linux/mm/fremap.c
+- *
+- * Explicit pagetable population and nonlinear (random) mappings support.
+- *
+- * started by Ingo Molnar, Copyright (C) 2002, 2003
+- */
+-#include <linux/export.h>
+-#include <linux/backing-dev.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/file.h>
+-#include <linux/mman.h>
+-#include <linux/pagemap.h>
+-#include <linux/swapops.h>
+-#include <linux/rmap.h>
+-#include <linux/syscalls.h>
+-#include <linux/mmu_notifier.h>
+-
+-#include <asm/mmu_context.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-
+-#include "internal.h"
+-
+-static int mm_counter(struct page *page)
+-{
+- return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
+-}
+-
+-static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long addr, pte_t *ptep)
+-{
+- pte_t pte = *ptep;
+- struct page *page;
+- swp_entry_t entry;
+-
+- if (pte_present(pte)) {
+- flush_cache_page(vma, addr, pte_pfn(pte));
+- pte = ptep_clear_flush(vma, addr, ptep);
+- page = vm_normal_page(vma, addr, pte);
+- if (page) {
+- if (pte_dirty(pte))
+- set_page_dirty(page);
+- update_hiwater_rss(mm);
+- dec_mm_counter(mm, mm_counter(page));
+- page_remove_rmap(page);
+- page_cache_release(page);
+- }
+- } else { /* zap_pte() is not called when pte_none() */
+- if (!pte_file(pte)) {
+- update_hiwater_rss(mm);
+- entry = pte_to_swp_entry(pte);
+- if (non_swap_entry(entry)) {
+- if (is_migration_entry(entry)) {
+- page = migration_entry_to_page(entry);
+- dec_mm_counter(mm, mm_counter(page));
+- }
+- } else {
+- free_swap_and_cache(entry);
+- dec_mm_counter(mm, MM_SWAPENTS);
+- }
+- }
+- pte_clear_not_present_full(mm, addr, ptep, 0);
+- }
+-}
+-
+-/*
+- * Install a file pte to a given virtual memory address, release any
+- * previously existing mapping.
+- */
+-static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long addr, unsigned long pgoff, pgprot_t prot)
+-{
+- int err = -ENOMEM;
+- pte_t *pte, ptfile;
+- spinlock_t *ptl;
+-
+- pte = get_locked_pte(mm, addr, &ptl);
+- if (!pte)
+- goto out;
+-
+- ptfile = pgoff_to_pte(pgoff);
+-
+- if (!pte_none(*pte))
+- zap_pte(mm, vma, addr, pte);
+-
+- set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
+- /*
+- * We don't need to run update_mmu_cache() here because the "file pte"
+- * being installed by install_file_pte() is not a real pte - it's a
+- * non-present entry (like a swap entry), noting what file offset should
+- * be mapped there when there's a fault (in a non-linear vma where
+- * that's not obvious).
+- */
+- pte_unmap_unlock(pte, ptl);
+- err = 0;
+-out:
+- return err;
+-}
+-
+-int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long size, pgoff_t pgoff)
+-{
+- struct mm_struct *mm = vma->vm_mm;
+- int err;
+-
+- do {
+- err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
+- if (err)
+- return err;
+-
+- size -= PAGE_SIZE;
+- addr += PAGE_SIZE;
+- pgoff++;
+- } while (size);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(generic_file_remap_pages);
+-
+-/**
+- * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
+- * @start: start of the remapped virtual memory range
+- * @size: size of the remapped virtual memory range
+- * @prot: new protection bits of the range (see NOTE)
+- * @pgoff: to-be-mapped page of the backing store file
+- * @flags: 0 or MAP_NONBLOCKED - the later will cause no IO.
+- *
+- * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
+- * (shared backing store file).
+- *
+- * This syscall works purely via pagetables, so it's the most efficient
+- * way to map the same (large) file into a given virtual window. Unlike
+- * mmap()/mremap() it does not create any new vmas. The new mappings are
+- * also safe across swapout.
+- *
+- * NOTE: the @prot parameter right now is ignored (but must be zero),
+- * and the vma's default protection is used. Arbitrary protections
+- * might be implemented in the future.
+- */
+-SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+- unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
+-{
+- struct mm_struct *mm = current->mm;
+- struct address_space *mapping;
+- struct vm_area_struct *vma;
+- int err = -EINVAL;
+- int has_write_lock = 0;
+- vm_flags_t vm_flags = 0;
+-
+- pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
+- "See Documentation/vm/remap_file_pages.txt.\n",
+- current->comm, current->pid);
+-
+- if (prot)
+- return err;
+- /*
+- * Sanitize the syscall parameters:
+- */
+- start = start & PAGE_MASK;
+- size = size & PAGE_MASK;
+-
+- /* Does the address range wrap, or is the span zero-sized? */
+- if (start + size <= start)
+- return err;
+-
+- /* Does pgoff wrap? */
+- if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+- return err;
+-
+- /* Can we represent this offset inside this architecture's pte's? */
+-#if PTE_FILE_MAX_BITS < BITS_PER_LONG
+- if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
+- return err;
+-#endif
+-
+- /* We need down_write() to change vma->vm_flags. */
+- down_read(&mm->mmap_sem);
+- retry:
+- vma = find_vma(mm, start);
+-
+- /*
+- * Make sure the vma is shared, that it supports prefaulting,
+- * and that the remapped range is valid and fully within
+- * the single existing vma.
+- */
+- if (!vma || !(vma->vm_flags & VM_SHARED))
+- goto out;
+-
+- if (!vma->vm_ops || !vma->vm_ops->remap_pages)
+- goto out;
+-
+- if (start < vma->vm_start || start + size > vma->vm_end)
+- goto out;
+-
+- /* Must set VM_NONLINEAR before any pages are populated. */
+- if (!(vma->vm_flags & VM_NONLINEAR)) {
+- /*
+- * vm_private_data is used as a swapout cursor
+- * in a VM_NONLINEAR vma.
+- */
+- if (vma->vm_private_data)
+- goto out;
+-
+- /* Don't need a nonlinear mapping, exit success */
+- if (pgoff == linear_page_index(vma, start)) {
+- err = 0;
+- goto out;
+- }
+-
+- if (!has_write_lock) {
+-get_write_lock:
+- up_read(&mm->mmap_sem);
+- down_write(&mm->mmap_sem);
+- has_write_lock = 1;
+- goto retry;
+- }
+- mapping = vma->vm_file->f_mapping;
+- /*
+- * page_mkclean doesn't work on nonlinear vmas, so if
+- * dirty pages need to be accounted, emulate with linear
+- * vmas.
+- */
+- if (mapping_cap_account_dirty(mapping)) {
+- unsigned long addr;
+- struct file *file = get_file(vma->vm_file);
+- /* mmap_region may free vma; grab the info now */
+- vm_flags = vma->vm_flags;
+-
+- addr = mmap_region(file, start, size, vm_flags, pgoff);
+- fput(file);
+- if (IS_ERR_VALUE(addr)) {
+- err = addr;
+- } else {
+- BUG_ON(addr != start);
+- err = 0;
+- }
+- goto out_freed;
+- }
+- mutex_lock(&mapping->i_mmap_mutex);
+- flush_dcache_mmap_lock(mapping);
+- vma->vm_flags |= VM_NONLINEAR;
+- vma_interval_tree_remove(vma, &mapping->i_mmap);
+- vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+- flush_dcache_mmap_unlock(mapping);
+- mutex_unlock(&mapping->i_mmap_mutex);
+- }
+-
+- if (vma->vm_flags & VM_LOCKED) {
+- /*
+- * drop PG_Mlocked flag for over-mapped range
+- */
+- if (!has_write_lock)
+- goto get_write_lock;
+- vm_flags = vma->vm_flags;
+- munlock_vma_pages_range(vma, start, start + size);
+- vma->vm_flags = vm_flags;
+- }
+-
+- mmu_notifier_invalidate_range_start(mm, start, start + size);
+- err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
+- mmu_notifier_invalidate_range_end(mm, start, start + size);
+-
+- /*
+- * We can't clear VM_NONLINEAR because we'd have to do
+- * it after ->populate completes, and that would prevent
+- * downgrading the lock. (Locks can't be upgraded).
+- */
+-
+-out:
+- if (vma)
+- vm_flags = vma->vm_flags;
+-out_freed:
+- if (likely(!has_write_lock))
+- up_read(&mm->mmap_sem);
+- else
+- up_write(&mm->mmap_sem);
+- if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
+- mm_populate(start, size);
+-
+- return err;
+-}
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2620,6 +2620,75 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+ return vm_munmap(addr, len);
+ }
+
++
++/*
++ * Emulation of deprecated remap_file_pages() syscall.
++ */
++SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
++ unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
++{
++
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long populate = 0;
++ unsigned long ret = -EINVAL;
++ struct file *file;
++
++ pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
++ "See Documentation/vm/remap_file_pages.txt.\n",
++ current->comm, current->pid);
++
++ if (prot)
++ return ret;
++ start = start & PAGE_MASK;
++ size = size & PAGE_MASK;
++
++ if (start + size <= start)
++ return ret;
++
++ /* Does pgoff wrap? */
++ if (pgoff + (size >> PAGE_SHIFT) < pgoff)
++ return ret;
++
++ down_write(&mm->mmap_sem);
++ vma = find_vma(mm, start);
++
++ if (!vma || !(vma->vm_flags & VM_SHARED))
++ goto out;
++
++ if (start < vma->vm_start || start + size > vma->vm_end)
++ goto out;
++
++ if (pgoff == linear_page_index(vma, start)) {
++ ret = 0;
++ goto out;
++ }
++
++ prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
++ prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
++ prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
++
++ flags &= MAP_NONBLOCK;
++ flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
++ if (vma->vm_flags & VM_LOCKED) {
++ flags |= MAP_LOCKED;
++ /* drop PG_Mlocked flag for over-mapped range */
++ munlock_vma_pages_range(vma, start, start + size);
++ }
++
++ file = get_file(vma->vm_file);
++ ret = do_mmap_pgoff(vma->vm_file, start, size,
++ prot, flags, pgoff, &populate);
++ fput(file);
++out:
++ up_write(&mm->mmap_sem);
++ if (populate)
++ mm_populate(ret, populate);
++ if (!IS_ERR_VALUE(ret))
++ ret = 0;
++ return ret;
++}
++
+ static inline void verify_mm_writelocked(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_DEBUG_VM
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1999,14 +1999,6 @@ void filemap_map_pages(struct vm_area_st
+ }
+ EXPORT_SYMBOL(filemap_map_pages);
+
+-int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long size, pgoff_t pgoff)
+-{
+- BUG();
+- return 0;
+-}
+-EXPORT_SYMBOL(generic_file_remap_pages);
+-
+ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long addr, void *buf, int len, int write)
+ {
diff --git a/queue-3.16/mm-replace-vma-sharead.linear-with-vma-shared.patch b/queue-3.16/mm-replace-vma-sharead.linear-with-vma-shared.patch
new file mode 100644
index 00000000..ff5407d9
--- /dev/null
+++ b/queue-3.16/mm-replace-vma-sharead.linear-with-vma-shared.patch
@@ -0,0 +1,93 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:02 -0800
+Subject: mm: replace vma->sharead.linear with vma->shared
+
+commit ac51b934f3912582d3c897c6c4d09b32ea57b2c7 upstream.
+
+After removing vma->shared.nonlinear we have only one member left in the
+vma->shared union, which doesn't make much sense.
+
+This patch drops the union and moves struct vma->shared.linear to
+vma->shared.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ include/linux/mm_types.h | 8 +++-----
+ mm/interval_tree.c | 34 +++++++++++++++++-----------------
+ 2 files changed, 20 insertions(+), 22 deletions(-)
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -274,11 +274,9 @@ struct vm_area_struct {
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ */
+- union {
+- struct {
+- struct rb_node rb;
+- unsigned long rb_subtree_last;
+- } linear;
++ struct {
++ struct rb_node rb;
++ unsigned long rb_subtree_last;
+ } shared;
+
+ /*
+--- a/mm/interval_tree.c
++++ b/mm/interval_tree.c
+@@ -21,8 +21,8 @@ static inline unsigned long vma_last_pgo
+ return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
+ }
+
+-INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb,
+- unsigned long, shared.linear.rb_subtree_last,
++INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
++ unsigned long, shared.rb_subtree_last,
+ vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
+
+ /* Insert node immediately after prev in the interval tree */
+@@ -36,26 +36,26 @@ void vma_interval_tree_insert_after(stru
+
+ VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));
+
+- if (!prev->shared.linear.rb.rb_right) {
++ if (!prev->shared.rb.rb_right) {
+ parent = prev;
+- link = &prev->shared.linear.rb.rb_right;
++ link = &prev->shared.rb.rb_right;
+ } else {
+- parent = rb_entry(prev->shared.linear.rb.rb_right,
+- struct vm_area_struct, shared.linear.rb);
+- if (parent->shared.linear.rb_subtree_last < last)
+- parent->shared.linear.rb_subtree_last = last;
+- while (parent->shared.linear.rb.rb_left) {
+- parent = rb_entry(parent->shared.linear.rb.rb_left,
+- struct vm_area_struct, shared.linear.rb);
+- if (parent->shared.linear.rb_subtree_last < last)
+- parent->shared.linear.rb_subtree_last = last;
++ parent = rb_entry(prev->shared.rb.rb_right,
++ struct vm_area_struct, shared.rb);
++ if (parent->shared.rb_subtree_last < last)
++ parent->shared.rb_subtree_last = last;
++ while (parent->shared.rb.rb_left) {
++ parent = rb_entry(parent->shared.rb.rb_left,
++ struct vm_area_struct, shared.rb);
++ if (parent->shared.rb_subtree_last < last)
++ parent->shared.rb_subtree_last = last;
+ }
+- link = &parent->shared.linear.rb.rb_left;
++ link = &parent->shared.rb.rb_left;
+ }
+
+- node->shared.linear.rb_subtree_last = last;
+- rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);
+- rb_insert_augmented(&node->shared.linear.rb, root,
++ node->shared.rb_subtree_last = last;
++ rb_link_node(&node->shared.rb, &parent->shared.rb, link);
++ rb_insert_augmented(&node->shared.rb, root,
+ &vma_interval_tree_augment);
+ }
+
diff --git a/queue-3.16/mn10300-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/mn10300-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..c11efdbb
--- /dev/null
+++ b/queue-3.16/mn10300-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,64 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:53 -0800
+Subject: mn10300: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 6bf63a8ccb1dccd6ab81bc8bc46863493629cdb8 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+This patch also increases the number of bits available for the swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/mn10300/include/asm/pgtable.h | 17 +++--------------
+ 1 file changed, 3 insertions(+), 14 deletions(-)
+
+--- a/arch/mn10300/include/asm/pgtable.h
++++ b/arch/mn10300/include/asm/pgtable.h
+@@ -134,7 +134,6 @@ extern pte_t kernel_vmalloc_ptes[(VMALLO
+ #define _PAGE_NX 0 /* no-execute bit */
+
+ /* If _PAGE_VALID is clear, we use these: */
+-#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
+ #define _PAGE_PROTNONE 0x000 /* If not present */
+
+ #define __PAGE_PROT_UWAUX 0x010
+@@ -241,11 +240,6 @@ static inline int pte_young(pte_t pte) {
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
+ static inline int pte_special(pte_t pte){ return 0; }
+
+-/*
+- * The following only works if pte_present() is not true.
+- */
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+-
+ static inline pte_t pte_rdprotect(pte_t pte)
+ {
+ pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX); return pte;
+@@ -338,16 +332,11 @@ static inline int pte_exec_kernel(pte_t
+ return 1;
+ }
+
+-#define PTE_FILE_MAX_BITS 30
+-
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
+-#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
+-
+ /* Encode and de-code a swap entry */
+-#define __swp_type(x) (((x).val >> 2) & 0x3f)
+-#define __swp_offset(x) ((x).val >> 8)
++#define __swp_type(x) (((x).val >> 1) & 0x3f)
++#define __swp_offset(x) ((x).val >> 7)
+ #define __swp_entry(type, offset) \
+- ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
++ ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) __pte((x).val)
+
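A rough worked example of the extra offset bit mentioned in the changelog (a hedged user-space sketch that only mirrors the new macros; the type and offset values are arbitrary, and a 32-bit pte is assumed): with pte_file() gone, the swap type starts at bit 1 and the offset at bit 7 rather than bit 8, so the offset gains one bit.

#include <assert.h>
#include <stdio.h>

/* User-space mirror of the new mn10300 swap macros (illustrative only). */
typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(x)       (((x).val >> 1) & 0x3f)
#define __swp_offset(x)     ((x).val >> 7)
#define __swp_entry(type, offset) \
    ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })

int main(void)
{
    unsigned long type = 5, offset = 123456;
    swp_entry_t e = __swp_entry(type, offset);

    /* Round trip: type and offset come back unchanged. */
    assert(__swp_type(e) == type);
    assert(__swp_offset(e) == offset);

    /* With a 32-bit pte the offset now has 25 bits; the old shift of 8
     * left only 24, so one bit is gained for the swap offset. */
    printf("type=%lu offset=%lu\n", __swp_type(e), __swp_offset(e));
    return 0;
}
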
diff --git a/queue-3.16/openrisc-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/openrisc-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..bfeda29d
--- /dev/null
+++ b/queue-3.16/openrisc-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,64 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:10:58 -0800
+Subject: openrisc: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 3824e3cf7e865b2ff0b71de23b16e332fe6a853a upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Jonas Bonn <jonas@southpole.se>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/openrisc/include/asm/pgtable.h | 8 --------
+ arch/openrisc/kernel/head.S | 5 -----
+ 2 files changed, 13 deletions(-)
+
+--- a/arch/openrisc/include/asm/pgtable.h
++++ b/arch/openrisc/include/asm/pgtable.h
+@@ -125,7 +125,6 @@ extern void paging_init(void);
+ #define _PAGE_CC 0x001 /* software: pte contains a translation */
+ #define _PAGE_CI 0x002 /* cache inhibit */
+ #define _PAGE_WBC 0x004 /* write back cache */
+-#define _PAGE_FILE 0x004 /* set: pagecache, unset: swap (when !PRESENT) */
+ #define _PAGE_WOM 0x008 /* weakly ordered memory */
+
+ #define _PAGE_A 0x010 /* accessed */
+@@ -240,7 +239,6 @@ static inline int pte_write(pte_t pte) {
+ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+@@ -438,12 +436,6 @@ static inline void update_mmu_cache(stru
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/* Encode and decode a nonlinear file mapping entry */
+-
+-#define PTE_FILE_MAX_BITS 26
+-#define pte_to_pgoff(x) (pte_val(x) >> 6)
+-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
+-
+ #define kern_addr_valid(addr) (1)
+
+ #include <asm-generic/pgtable.h>
+--- a/arch/openrisc/kernel/head.S
++++ b/arch/openrisc/kernel/head.S
+@@ -754,11 +754,6 @@ _dc_enable:
+
+ /* ===============================================[ page table masks ]=== */
+
+-/* bit 4 is used in hardware as write back cache bit. we never use this bit
+- * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
+- * writing into hardware pte's
+- */
+-
+ #define DTLB_UP_CONVERT_MASK 0x3fa
+ #define ITLB_UP_CONVERT_MASK 0x3a
+
diff --git a/queue-3.16/parisc-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/parisc-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..f81f9e73
--- /dev/null
+++ b/queue-3.16/parisc-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,59 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:01 -0800
+Subject: parisc: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 8d55da810f1fabcf1d4c0bbc46205e5f2c0fa84b upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
+Cc: Helge Deller <deller@gmx.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/parisc/include/asm/pgtable.h | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -146,7 +146,6 @@ extern void purge_tlb_entries(struct mm_
+ #define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */
+ #define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */
+ #define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */
+-#define _PAGE_FILE_BIT _PAGE_DIRTY_BIT /* overload this bit */
+ #define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */
+ #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
+ #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
+@@ -167,13 +166,6 @@ extern void purge_tlb_entries(struct mm_
+ /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+ #define PFN_PTE_SHIFT 12
+
+-
+-/* this is how many bits may be used by the file functions */
+-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT)
+-
+-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
+-
+ #define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))
+ #define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))
+ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+@@ -186,7 +178,6 @@ extern void purge_tlb_entries(struct mm_
+ #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
+ #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+ #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
+-#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
+
+ #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+@@ -344,7 +335,6 @@ static inline void pgd_clear(pgd_t * pgd
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
diff --git a/queue-3.16/powerpc-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/powerpc-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..fa6539ed
--- /dev/null
+++ b/queue-3.16/powerpc-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,193 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Mon, 16 Feb 2015 16:00:18 -0800
+Subject: powerpc: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 780fc5642f59b6c6e2b05794de60b2d2ad5f040e upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/powerpc/include/asm/pgtable-ppc32.h | 9 ++-------
+ arch/powerpc/include/asm/pgtable-ppc64.h | 5 +----
+ arch/powerpc/include/asm/pgtable.h | 1 -
+ arch/powerpc/include/asm/pte-40x.h | 1 -
+ arch/powerpc/include/asm/pte-44x.h | 5 -----
+ arch/powerpc/include/asm/pte-8xx.h | 1 -
+ arch/powerpc/include/asm/pte-book3e.h | 1 -
+ arch/powerpc/include/asm/pte-fsl-booke.h | 3 ---
+ arch/powerpc/include/asm/pte-hash32.h | 1 -
+ arch/powerpc/include/asm/pte-hash64.h | 1 -
+ arch/powerpc/mm/pgtable_64.c | 2 +-
+ 11 files changed, 4 insertions(+), 26 deletions(-)
+
+--- a/arch/powerpc/include/asm/pgtable-ppc32.h
++++ b/arch/powerpc/include/asm/pgtable-ppc32.h
+@@ -314,8 +314,8 @@ static inline void __ptep_set_access_fla
+ /*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+- * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
+- *_PAGE_HASHPTE bit (if used). -- paulus
++ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
++ * -- paulus
+ */
+ #define __swp_type(entry) ((entry).val & 0x1f)
+ #define __swp_offset(entry) ((entry).val >> 5)
+@@ -323,11 +323,6 @@ static inline void __ptep_set_access_fla
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
+
+-/* Encode and decode a nonlinear file mapping entry */
+-#define PTE_FILE_MAX_BITS 29
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
+-
+ /*
+ * No page table caches to initialise
+ */
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -352,9 +352,6 @@ static inline void __ptep_set_access_fla
+ #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+ #define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
+-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
+-#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
+-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
+
+ void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+ void pgtable_cache_init(void);
+@@ -389,7 +386,7 @@ void pgtable_cache_init(void);
+ * The last three bits are intentionally left to zero. This memory location
+ * are also used as normal page PTE pointers. So if we have any pointers
+ * left around while we collapse a hugepage, we need to make sure
+- * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them
++ * _PAGE_PRESENT bit of that is zero when we look at them
+ */
+ static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+ {
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -32,7 +32,6 @@ struct mm_struct;
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+ static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+ static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+--- a/arch/powerpc/include/asm/pte-40x.h
++++ b/arch/powerpc/include/asm/pte-40x.h
+@@ -38,7 +38,6 @@
+ */
+
+ #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+-#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
+ #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+ #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+ #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+--- a/arch/powerpc/include/asm/pte-44x.h
++++ b/arch/powerpc/include/asm/pte-44x.h
+@@ -44,9 +44,6 @@
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+- * - FILE *must* be in the bottom three bits because swap cache
+- * entries use the top 29 bits for TLB2.
+- *
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ * because it doesn't support SMP. However, some later 460 variants
+ * have -some- form of SMP support and so I keep the bit there for
+@@ -68,7 +65,6 @@
+ *
+ * There are three protection bits available for SWAP entry:
+ * _PAGE_PRESENT
+- * _PAGE_FILE
+ * _PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+@@ -77,7 +73,6 @@
+
+ #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+ #define _PAGE_RW 0x00000002 /* S: Write permission */
+-#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
+ #define _PAGE_EXEC 0x00000004 /* H: Execute permission */
+ #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+ #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
+--- a/arch/powerpc/include/asm/pte-8xx.h
++++ b/arch/powerpc/include/asm/pte-8xx.h
+@@ -29,7 +29,6 @@
+
+ /* Definitions for 8xx embedded chips. */
+ #define _PAGE_PRESENT 0x0001 /* Page is valid */
+-#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
+ #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
+ #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+ #define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
+--- a/arch/powerpc/include/asm/pte-book3e.h
++++ b/arch/powerpc/include/asm/pte-book3e.h
+@@ -10,7 +10,6 @@
+
+ /* Architected bits */
+ #define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
+-#define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */
+ #define _PAGE_SW1 0x000002
+ #define _PAGE_BAP_SR 0x000004
+ #define _PAGE_BAP_UR 0x000008
+--- a/arch/powerpc/include/asm/pte-fsl-booke.h
++++ b/arch/powerpc/include/asm/pte-fsl-booke.h
+@@ -13,14 +13,11 @@
+ - PRESENT *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+
+- - FILE *must* be in the bottom three bits because swap cache
+- entries use the top 29 bits.
+ */
+
+ /* Definitions for FSL Book-E Cores */
+ #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+ #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+-#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
+ #define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+ #define _PAGE_DIRTY 0x00008 /* S: Page dirty */
+ #define _PAGE_EXEC 0x00010 /* H: SX permission */
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -18,7 +18,6 @@
+
+ #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+ #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+-#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+--- a/arch/powerpc/include/asm/pte-hash64.h
++++ b/arch/powerpc/include/asm/pte-hash64.h
+@@ -16,7 +16,6 @@
+ */
+ #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
+ #define _PAGE_USER 0x0002 /* matches one of the PP bits */
+-#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
+ #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
+ #define _PAGE_GUARDED 0x0008
+ /* We can derive Memory coherence from _PAGE_NO_CACHE */
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -807,7 +807,7 @@ pmd_t pfn_pmd(unsigned long pfn, pgprot_
+ {
+ pmd_t pmd;
+ /*
+- * For a valid pte, we would have _PAGE_PRESENT or _PAGE_FILE always
++ * For a valid pte, we would have _PAGE_PRESENT always
+ * set. We use this to check THP page at pmd level.
+ * leaf pte for huge page, bottom two bits != 00
+ */
diff --git a/queue-3.16/proc-drop-handling-non-linear-mappings.patch b/queue-3.16/proc-drop-handling-non-linear-mappings.patch
new file mode 100644
index 00000000..40f8bf99
--- /dev/null
+++ b/queue-3.16/proc-drop-handling-non-linear-mappings.patch
@@ -0,0 +1,83 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:57 -0800
+Subject: proc: drop handling non-linear mappings
+
+commit 1da4b35b001481df99a6dcab12d5d39a876f7056 upstream.
+
+We still carry code to handle non-linear mappings for
+/proc/PID/{smaps,clear_refs}, but it is unused now. Let's drop it.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Deleted code is slightly different
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -436,7 +436,6 @@ struct mem_size_stats {
+ unsigned long anonymous;
+ unsigned long anonymous_thp;
+ unsigned long swap;
+- unsigned long nonlinear;
+ u64 pss;
+ };
+
+@@ -446,7 +445,6 @@ static void smaps_pte_entry(pte_t ptent,
+ {
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = mss->vma;
+- pgoff_t pgoff = linear_page_index(vma, addr);
+ struct page *page = NULL;
+ int mapcount;
+
+@@ -459,9 +457,6 @@ static void smaps_pte_entry(pte_t ptent,
+ mss->swap += ptent_size;
+ else if (is_migration_entry(swpent))
+ page = migration_entry_to_page(swpent);
+- } else if (pte_file(ptent)) {
+- if (pte_to_pgoff(ptent) != pgoff)
+- mss->nonlinear += ptent_size;
+ }
+
+ if (!page)
+@@ -470,9 +465,6 @@ static void smaps_pte_entry(pte_t ptent,
+ if (PageAnon(page))
+ mss->anonymous += ptent_size;
+
+- if (page->index != pgoff)
+- mss->nonlinear += ptent_size;
+-
+ mss->resident += ptent_size;
+ /* Accumulate the size in pages that have been accessed. */
+ if (pte_young(ptent) || PageReferenced(page))
+@@ -554,7 +546,6 @@ static void show_smap_vma_flags(struct s
+ [ilog2(VM_ACCOUNT)] = "ac",
+ [ilog2(VM_NORESERVE)] = "nr",
+ [ilog2(VM_HUGETLB)] = "ht",
+- [ilog2(VM_NONLINEAR)] = "nl",
+ [ilog2(VM_ARCH_1)] = "ar",
+ [ilog2(VM_DONTDUMP)] = "dd",
+ #ifdef CONFIG_MEM_SOFT_DIRTY
+@@ -628,10 +619,6 @@ static int show_smap(struct seq_file *m,
+ (vma->vm_flags & VM_LOCKED) ?
+ (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
+
+- if (vma->vm_flags & VM_NONLINEAR)
+- seq_printf(m, "Nonlinear: %8lu kB\n",
+- mss.nonlinear >> 10);
+-
+ show_smap_vma_flags(m, vma);
+
+ if (m->count < m->size) /* vma is copied successfully */
+@@ -735,8 +722,6 @@ static inline void clear_soft_dirty(stru
+ ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+ } else if (is_swap_pte(ptent)) {
+ ptent = pte_swp_clear_soft_dirty(ptent);
+- } else if (pte_file(ptent)) {
+- ptent = pte_file_clear_soft_dirty(ptent);
+ }
+
+ set_pte_at(vma->vm_mm, addr, pte, ptent);
diff --git a/queue-3.16/rmap-drop-support-of-non-linear-mappings.patch b/queue-3.16/rmap-drop-support-of-non-linear-mappings.patch
new file mode 100644
index 00000000..31ca48b3
--- /dev/null
+++ b/queue-3.16/rmap-drop-support-of-non-linear-mappings.patch
@@ -0,0 +1,517 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:09:59 -0800
+Subject: rmap: drop support of non-linear mappings
+
+commit 27ba0644ea9dfe6e7693abc85837b60e40583b96 upstream.
+
+We don't create non-linear mappings anymore. Let's drop code which
+handles them in rmap.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16:
+ - Deleted code is slightly different
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/Documentation/cachetlb.txt
++++ b/Documentation/cachetlb.txt
+@@ -317,10 +317,10 @@ maps this page at its virtual address.
+ about doing this.
+
+ The idea is, first at flush_dcache_page() time, if
+- page->mapping->i_mmap is an empty tree and ->i_mmap_nonlinear
+- an empty list, just mark the architecture private page flag bit.
+- Later, in update_mmu_cache(), a check is made of this flag bit,
+- and if set the flush is done and the flag bit is cleared.
++ page->mapping->i_mmap is an empty tree, just mark the architecture
++ private page flag bit. Later, in update_mmu_cache(), a check is
++ made of this flag bit, and if set the flush is done and the flag
++ bit is cleared.
+
+ IMPORTANT NOTE: It is often important, if you defer the flush,
+ that the actual flush occurs on the same CPU
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -352,7 +352,6 @@ void address_space_init_once(struct addr
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ mapping->i_mmap = RB_ROOT;
+- INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+ }
+ EXPORT_SYMBOL(address_space_init_once);
+
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -395,7 +395,6 @@ struct address_space {
+ spinlock_t tree_lock; /* and lock protecting it */
+ unsigned int i_mmap_writable;/* count VM_SHARED mappings */
+ struct rb_root i_mmap; /* tree of private and shared mappings */
+- struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
+ struct mutex i_mmap_mutex; /* protect tree, count, list */
+ /* Protected by tree_lock together with the radix tree */
+ unsigned long nrpages; /* number of total pages */
+@@ -467,8 +466,7 @@ int mapping_tagged(struct address_space
+ */
+ static inline int mapping_mapped(struct address_space *mapping)
+ {
+- return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
+- !list_empty(&mapping->i_mmap_nonlinear);
++ return !RB_EMPTY_ROOT(&mapping->i_mmap);
+ }
+
+ /*
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1728,12 +1728,6 @@ struct vm_area_struct *vma_interval_tree
+ for (vma = vma_interval_tree_iter_first(root, start, last); \
+ vma; vma = vma_interval_tree_iter_next(vma, start, last))
+
+-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
+- struct list_head *list)
+-{
+- list_add_tail(&vma->shared.nonlinear, list);
+-}
+-
+ void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+ struct rb_root *root);
+ void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -272,15 +272,13 @@ struct vm_area_struct {
+
+ /*
+ * For areas with an address space and backing store,
+- * linkage into the address_space->i_mmap interval tree, or
+- * linkage of vma in the address_space->i_mmap_nonlinear list.
++ * linkage into the address_space->i_mmap interval tree.
+ */
+ union {
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } linear;
+- struct list_head nonlinear;
+ } shared;
+
+ /*
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -232,7 +232,6 @@ int page_mapped_in_vma(struct page *page
+ * arg: passed to rmap_one() and invalid_vma()
+ * rmap_one: executed on each vma where page is mapped
+ * done: for checking traversing termination condition
+- * file_nonlinear: for handling file nonlinear mapping
+ * anon_lock: for getting anon_lock by optimized way rather than default
+ * invalid_vma: for skipping uninterested vma
+ */
+@@ -241,7 +240,6 @@ struct rmap_walk_control {
+ int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg);
+ int (*done)(struct page *page);
+- int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
+ struct anon_vma *(*anon_lock)(struct page *page);
+ bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
+ };
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -430,12 +430,8 @@ static int dup_mmap(struct mm_struct *mm
+ mapping->i_mmap_writable++;
+ flush_dcache_mmap_lock(mapping);
+ /* insert tmp into the share list, just after mpnt */
+- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
+- vma_nonlinear_insert(tmp,
+- &mapping->i_mmap_nonlinear);
+- else
+- vma_interval_tree_insert_after(tmp, mpnt,
+- &mapping->i_mmap);
++ vma_interval_tree_insert_after(tmp, mpnt,
++ &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -181,37 +181,6 @@ out:
+ }
+
+ /*
+- * Congratulations to trinity for discovering this bug.
+- * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+- * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+- * replace the specified range by file ptes throughout (maybe populated after).
+- * If page migration finds a page within that range, while it's still located
+- * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+- * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+- * But if the migrating page is in a part of the vma outside the range to be
+- * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+- * deal with it. Fortunately, this part of the vma is of course still linear,
+- * so we just need to use linear location on the nonlinear list.
+- */
+-static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+- struct address_space *mapping, void *arg)
+-{
+- struct vm_area_struct *vma;
+- /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+- pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+- unsigned long addr;
+-
+- list_for_each_entry(vma,
+- &mapping->i_mmap_nonlinear, shared.nonlinear) {
+-
+- addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+- if (addr >= vma->vm_start && addr < vma->vm_end)
+- remove_migration_pte(page, vma, addr, arg);
+- }
+- return SWAP_AGAIN;
+-}
+-
+-/*
+ * Get rid of all migration entries and replace them by
+ * references to the indicated page.
+ */
+@@ -220,7 +189,6 @@ static void remove_migration_ptes(struct
+ struct rmap_walk_control rwc = {
+ .rmap_one = remove_migration_pte,
+ .arg = old,
+- .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
+ };
+
+ rmap_walk(new, &rwc);
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -219,10 +219,7 @@ static void __remove_shared_vm_struct(st
+ mapping->i_mmap_writable--;
+
+ flush_dcache_mmap_lock(mapping);
+- if (unlikely(vma->vm_flags & VM_NONLINEAR))
+- list_del_init(&vma->shared.nonlinear);
+- else
+- vma_interval_tree_remove(vma, &mapping->i_mmap);
++ vma_interval_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ }
+
+@@ -639,10 +636,7 @@ static void __vma_link_file(struct vm_ar
+ mapping->i_mmap_writable++;
+
+ flush_dcache_mmap_lock(mapping);
+- if (unlikely(vma->vm_flags & VM_NONLINEAR))
+- vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+- else
+- vma_interval_tree_insert(vma, &mapping->i_mmap);
++ vma_interval_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
+ }
+ }
+@@ -777,14 +771,11 @@ again: remove_next = 1 + (end > next->
+
+ if (file) {
+ mapping = file->f_mapping;
+- if (!(vma->vm_flags & VM_NONLINEAR)) {
+- root = &mapping->i_mmap;
+- uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+-
+- if (adjust_next)
+- uprobe_munmap(next, next->vm_start,
+- next->vm_end);
+- }
++ root = &mapping->i_mmap;
++ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
++
++ if (adjust_next)
++ uprobe_munmap(next, next->vm_start, next->vm_end);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ if (insert) {
+@@ -3187,8 +3178,7 @@ static void vm_lock_mapping(struct mm_st
+ *
+ * mmap_sem in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+- * altering the vma layout (for example populate_range() with
+- * nonlinear vmas). It's also needed in write mode to avoid new
++ * altering the vma layout. It's also needed in write mode to avoid new
+ * anon_vmas to be associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -597,9 +597,8 @@ unsigned long page_address_in_vma(struct
+ if (!vma->anon_vma || !page__anon_vma ||
+ vma->anon_vma->root != page__anon_vma->root)
+ return -EFAULT;
+- } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+- if (!vma->vm_file ||
+- vma->vm_file->f_mapping != page->mapping)
++ } else if (page->mapping) {
++ if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
+ return -EFAULT;
+ } else
+ return -EFAULT;
+@@ -1286,7 +1285,6 @@ static int try_to_unmap_one(struct page
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pte, swp_pte);
+- BUG_ON(pte_file(*pte));
+ } else if (IS_ENABLED(CONFIG_MIGRATION) &&
+ (flags & TTU_MIGRATION)) {
+ /* Establish migration entry for a file page */
+@@ -1328,207 +1326,6 @@ out_mlock:
+ return ret;
+ }
+
+-/*
+- * objrmap doesn't work for nonlinear VMAs because the assumption that
+- * offset-into-file correlates with offset-into-virtual-addresses does not hold.
+- * Consequently, given a particular page and its ->index, we cannot locate the
+- * ptes which are mapping that page without an exhaustive linear search.
+- *
+- * So what this code does is a mini "virtual scan" of each nonlinear VMA which
+- * maps the file to which the target page belongs. The ->vm_private_data field
+- * holds the current cursor into that scan. Successive searches will circulate
+- * around the vma's virtual address space.
+- *
+- * So as more replacement pressure is applied to the pages in a nonlinear VMA,
+- * more scanning pressure is placed against them as well. Eventually pages
+- * will become fully unmapped and are eligible for eviction.
+- *
+- * For very sparsely populated VMAs this is a little inefficient - chances are
+- * there there won't be many ptes located within the scan cluster. In this case
+- * maybe we could scan further - to the end of the pte page, perhaps.
+- *
+- * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
+- * acquire it without blocking. If vma locked, mlock the pages in the cluster,
+- * rather than unmapping them. If we encounter the "check_page" that vmscan is
+- * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
+- */
+-#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
+-#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
+-
+-static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
+- struct vm_area_struct *vma, struct page *check_page)
+-{
+- struct mm_struct *mm = vma->vm_mm;
+- pmd_t *pmd;
+- pte_t *pte;
+- pte_t pteval;
+- spinlock_t *ptl;
+- struct page *page;
+- unsigned long address;
+- unsigned long mmun_start; /* For mmu_notifiers */
+- unsigned long mmun_end; /* For mmu_notifiers */
+- unsigned long end;
+- int ret = SWAP_AGAIN;
+- int locked_vma = 0;
+-
+- address = (vma->vm_start + cursor) & CLUSTER_MASK;
+- end = address + CLUSTER_SIZE;
+- if (address < vma->vm_start)
+- address = vma->vm_start;
+- if (end > vma->vm_end)
+- end = vma->vm_end;
+-
+- pmd = mm_find_pmd(mm, address);
+- if (!pmd)
+- return ret;
+-
+- mmun_start = address;
+- mmun_end = end;
+- mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+-
+- /*
+- * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+- * keep the sem while scanning the cluster for mlocking pages.
+- */
+- if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+- locked_vma = (vma->vm_flags & VM_LOCKED);
+- if (!locked_vma)
+- up_read(&vma->vm_mm->mmap_sem); /* don't need it */
+- }
+-
+- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+-
+- /* Update high watermark before we lower rss */
+- update_hiwater_rss(mm);
+-
+- for (; address < end; pte++, address += PAGE_SIZE) {
+- if (!pte_present(*pte))
+- continue;
+- page = vm_normal_page(vma, address, *pte);
+- BUG_ON(!page || PageAnon(page));
+-
+- if (locked_vma) {
+- if (page == check_page) {
+- /* we know we have check_page locked */
+- mlock_vma_page(page);
+- ret = SWAP_MLOCK;
+- } else if (trylock_page(page)) {
+- /*
+- * If we can lock the page, perform mlock.
+- * Otherwise leave the page alone, it will be
+- * eventually encountered again later.
+- */
+- mlock_vma_page(page);
+- unlock_page(page);
+- }
+- continue; /* don't unmap */
+- }
+-
+- if (ptep_clear_flush_young_notify(vma, address, pte))
+- continue;
+-
+- /* Nuke the page table entry. */
+- flush_cache_page(vma, address, pte_pfn(*pte));
+- pteval = ptep_clear_flush(vma, address, pte);
+-
+- /* If nonlinear, store the file page offset in the pte. */
+- if (page->index != linear_page_index(vma, address)) {
+- pte_t ptfile = pgoff_to_pte(page->index);
+- if (pte_soft_dirty(pteval))
+- ptfile = pte_file_mksoft_dirty(ptfile);
+- set_pte_at(mm, address, pte, ptfile);
+- }
+-
+- /* Move the dirty bit to the physical page now the pte is gone. */
+- if (pte_dirty(pteval))
+- set_page_dirty(page);
+-
+- page_remove_rmap(page);
+- page_cache_release(page);
+- dec_mm_counter(mm, MM_FILEPAGES);
+- (*mapcount)--;
+- }
+- pte_unmap_unlock(pte - 1, ptl);
+- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+- if (locked_vma)
+- up_read(&vma->vm_mm->mmap_sem);
+- return ret;
+-}
+-
+-static int try_to_unmap_nonlinear(struct page *page,
+- struct address_space *mapping, void *arg)
+-{
+- struct vm_area_struct *vma;
+- int ret = SWAP_AGAIN;
+- unsigned long cursor;
+- unsigned long max_nl_cursor = 0;
+- unsigned long max_nl_size = 0;
+- unsigned int mapcount;
+-
+- list_for_each_entry(vma,
+- &mapping->i_mmap_nonlinear, shared.nonlinear) {
+-
+- cursor = (unsigned long) vma->vm_private_data;
+- if (cursor > max_nl_cursor)
+- max_nl_cursor = cursor;
+- cursor = vma->vm_end - vma->vm_start;
+- if (cursor > max_nl_size)
+- max_nl_size = cursor;
+- }
+-
+- if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
+- return SWAP_FAIL;
+- }
+-
+- /*
+- * We don't try to search for this page in the nonlinear vmas,
+- * and page_referenced wouldn't have found it anyway. Instead
+- * just walk the nonlinear vmas trying to age and unmap some.
+- * The mapcount of the page we came in with is irrelevant,
+- * but even so use it as a guide to how hard we should try?
+- */
+- mapcount = page_mapcount(page);
+- if (!mapcount)
+- return ret;
+-
+- cond_resched();
+-
+- max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
+- if (max_nl_cursor == 0)
+- max_nl_cursor = CLUSTER_SIZE;
+-
+- do {
+- list_for_each_entry(vma,
+- &mapping->i_mmap_nonlinear, shared.nonlinear) {
+-
+- cursor = (unsigned long) vma->vm_private_data;
+- while (cursor < max_nl_cursor &&
+- cursor < vma->vm_end - vma->vm_start) {
+- if (try_to_unmap_cluster(cursor, &mapcount,
+- vma, page) == SWAP_MLOCK)
+- ret = SWAP_MLOCK;
+- cursor += CLUSTER_SIZE;
+- vma->vm_private_data = (void *) cursor;
+- if ((int)mapcount <= 0)
+- return ret;
+- }
+- vma->vm_private_data = (void *) max_nl_cursor;
+- }
+- cond_resched();
+- max_nl_cursor += CLUSTER_SIZE;
+- } while (max_nl_cursor <= max_nl_size);
+-
+- /*
+- * Don't loop forever (perhaps all the remaining pages are
+- * in locked vmas). Reset cursor on all unreserved nonlinear
+- * vmas, now forgetting on which ones it had fallen behind.
+- */
+- list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
+- vma->vm_private_data = NULL;
+-
+- return ret;
+-}
+-
+ bool is_vma_temporary_stack(struct vm_area_struct *vma)
+ {
+ int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+@@ -1574,7 +1371,6 @@ int try_to_unmap(struct page *page, enum
+ .rmap_one = try_to_unmap_one,
+ .arg = (void *)flags,
+ .done = page_not_mapped,
+- .file_nonlinear = try_to_unmap_nonlinear,
+ .anon_lock = page_lock_anon_vma_read,
+ };
+
+@@ -1620,12 +1416,6 @@ int try_to_munlock(struct page *page)
+ .rmap_one = try_to_unmap_one,
+ .arg = (void *)TTU_MUNLOCK,
+ .done = page_not_mapped,
+- /*
+- * We don't bother to try to find the munlocked page in
+- * nonlinears. It's costly. Instead, later, page reclaim logic
+- * may call try_to_unmap() and recover PG_mlocked lazily.
+- */
+- .file_nonlinear = NULL,
+ .anon_lock = page_lock_anon_vma_read,
+
+ };
+@@ -1753,14 +1543,6 @@ static int rmap_walk_file(struct page *p
+ goto done;
+ }
+
+- if (!rwc->file_nonlinear)
+- goto done;
+-
+- if (list_empty(&mapping->i_mmap_nonlinear))
+- goto done;
+-
+- ret = rwc->file_nonlinear(page, mapping, rwc->arg);
+-
+ done:
+ mutex_unlock(&mapping->i_mmap_mutex);
+ return ret;
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -1103,10 +1103,8 @@ void __init swap_setup(void)
+
+ if (bdi_init(swapper_spaces[0].backing_dev_info))
+ panic("Failed to init swap bdi");
+- for (i = 0; i < MAX_SWAPFILES; i++) {
++ for (i = 0; i < MAX_SWAPFILES; i++)
+ spin_lock_init(&swapper_spaces[i].tree_lock);
+- INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
+- }
+ #endif
+
+ /* Use a smaller cluster for small-memory machines */
diff --git a/queue-3.16/s390-drop-pte_file-related-helpers.patch b/queue-3.16/s390-drop-pte_file-related-helpers.patch
new file mode 100644
index 00000000..3d6f343b
--- /dev/null
+++ b/queue-3.16/s390-drop-pte_file-related-helpers.patch
@@ -0,0 +1,78 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:04 -0800
+Subject: s390: drop pte_file()-related helpers
+
+commit 6e76d4b20bf6b514408ab5bd07f4a76723259b64 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/s390/include/asm/pgtable.h | 29 ++++-------------------------
+ 1 file changed, 4 insertions(+), 25 deletions(-)
+
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -237,10 +237,10 @@ extern unsigned long MODULES_END;
+ _PAGE_DIRTY | _PAGE_YOUNG)
+
+ /*
+- * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+- * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+- * is used to distinguish present from not-present ptes. It is changed only
+- * with the page table lock held.
++ * handle_pte_fault uses pte_present and pte_none to find out the pte type
++ * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
++ * distinguish present from not-present ptes. It is changed only with the page
++ * table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
+@@ -267,7 +267,6 @@ extern unsigned long MODULES_END;
+ *
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+- * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
+ */
+
+@@ -644,13 +643,6 @@ static inline int pte_swap(pte_t pte)
+ == (_PAGE_INVALID | _PAGE_TYPE);
+ }
+
+-static inline int pte_file(pte_t pte)
+-{
+- /* Bit pattern: (pte & 0x601) == 0x600 */
+- return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+- == (_PAGE_INVALID | _PAGE_PROTECT);
+-}
+-
+ static inline int pte_special(pte_t pte)
+ {
+ return (pte_val(pte) & _PAGE_SPECIAL);
+@@ -1710,19 +1702,6 @@ static inline pte_t mk_swap_pte(unsigned
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#ifndef CONFIG_64BIT
+-# define PTE_FILE_MAX_BITS 26
+-#else /* CONFIG_64BIT */
+-# define PTE_FILE_MAX_BITS 59
+-#endif /* CONFIG_64BIT */
+-
+-#define pte_to_pgoff(__pte) \
+- ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
+-
+-#define pgoff_to_pte(__off) \
+- ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
+- | _PAGE_INVALID | _PAGE_PROTECT })
+-
+ #endif /* !__ASSEMBLY__ */
+
+ #define kern_addr_valid(addr) (1)
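
As a small aside (an editorial sketch mirroring only the bit patterns quoted in the updated comment above, not the real s390 helpers): the remaining pte_present, pte_none and pte_swap patterns can be checked to be mutually exclusive over the low 12 pte bits, which is what lets handle_pte_fault classify a pte without the page table lock.

#include <stdio.h>

/* Stand-in predicates copied from the bit patterns in the comment. */
#define pte_present(v)  (((v) & 0x001) == 0x001)
#define pte_none(v)     (((v) & 0x603) == 0x400)
#define pte_swap(v)     (((v) & 0x603) == 0x402)

int main(void)
{
    unsigned int v;

    for (v = 0; v < 0x1000; v++) {
        int hits = pte_present(v) + pte_none(v) + pte_swap(v);

        if (hits > 1) {
            printf("overlap at %#x\n", v);
            return 1;
        }
    }
    printf("present/none/swap patterns are mutually exclusive\n");
    return 0;
}
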
diff --git a/queue-3.16/score-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/score-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..64d18fab
--- /dev/null
+++ b/queue-3.16/score-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,73 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:06 -0800
+Subject: score: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 917e401ea75478d4f4575bc8b0ef3d14ecf9ef69 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation.
+Nobody creates non-linear mappings anymore.
+
+This patch also increases the number of bits available for the swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Chen Liqin <liqin.linux@gmail.com>
+Cc: Lennox Wu <lennox.wu@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/score/include/asm/pgtable-bits.h | 1 -
+ arch/score/include/asm/pgtable.h | 18 ++----------------
+ 2 files changed, 2 insertions(+), 17 deletions(-)
+
+--- a/arch/score/include/asm/pgtable-bits.h
++++ b/arch/score/include/asm/pgtable-bits.h
+@@ -6,7 +6,6 @@
+ #define _PAGE_WRITE (1<<7) /* implemented in software */
+ #define _PAGE_PRESENT (1<<9) /* implemented in software */
+ #define _PAGE_MODIFIED (1<<10) /* implemented in software */
+-#define _PAGE_FILE (1<<10)
+
+ #define _PAGE_GLOBAL (1<<0)
+ #define _PAGE_VALID (1<<1)
+--- a/arch/score/include/asm/pgtable.h
++++ b/arch/score/include/asm/pgtable.h
+@@ -90,15 +90,6 @@ static inline void pmd_clear(pmd_t *pmdp
+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+ #define pte_unmap(pte) ((void)(pte))
+
+-/*
+- * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
+- * split up 30 bits of offset into this range:
+- */
+-#define PTE_FILE_MAX_BITS 30
+-#define pte_to_pgoff(_pte) \
+- (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9))
+-#define pgoff_to_pte(off) \
+- ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE})
+ #define __pte_to_swp_entry(pte) \
+ ((swp_entry_t) { pte_val(pte)})
+ #define __swp_entry_to_pte(x) ((pte_t) {(x).val})
+@@ -169,8 +160,8 @@ static inline pgprot_t pgprot_noncached(
+ }
+
+ #define __swp_type(x) ((x).val & 0x1f)
+-#define __swp_offset(x) ((x).val >> 11)
+-#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 11)})
++#define __swp_offset(x) ((x).val >> 10)
++#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)})
+
+ extern unsigned long empty_zero_page;
+ extern unsigned long zero_page_mask;
+@@ -198,11 +189,6 @@ static inline int pte_young(pte_t pte)
+ return pte_val(pte) & _PAGE_ACCESSED;
+ }
+
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & _PAGE_FILE;
+-}
+-
+ #define pte_special(pte) (0)
+
+ static inline pte_t pte_wrprotect(pte_t pte)
diff --git a/queue-3.16/series b/queue-3.16/series
index 7c958ddd..5de06fd9 100644
--- a/queue-3.16/series
+++ b/queue-3.16/series
@@ -49,3 +49,42 @@ kvm-vmx-expose-ssbd-properly-to-guests.patch
kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts.patch
x86-xen-add-call-of-speculative_store_bypass_ht_init-to-pv-paths.patch
x86-cpufeatures-show-kaiser-in-cpuinfo.patch
+mm-replace-remap_file_pages-syscall-with-emulation.patch
+mm-fix-regression-in-remap_file_pages-emulation.patch
+mm-drop-support-of-non-linear-mapping-from-unmap-zap-codepath.patch
+mm-drop-support-of-non-linear-mapping-from-fault-codepath.patch
+mm-drop-vm_ops-remap_pages-and-generic_file_remap_pages-stub.patch
+proc-drop-handling-non-linear-mappings.patch
+rmap-drop-support-of-non-linear-mappings.patch
+mm-replace-vma-sharead.linear-with-vma-shared.patch
+mm-remove-rest-usage-of-vm_nonlinear-and-pte_file.patch
+asm-generic-drop-unused-pte_file-helpers.patch
+alpha-drop-_page_file-and-pte_file-related-helpers.patch
+arc-drop-_page_file-and-pte_file-related-helpers.patch
+arm64-drop-pte_file-and-pte_file-related-helpers.patch
+arm-drop-l_pte_file-and-pte_file-related-helpers.patch
+avr32-drop-_page_file-and-pte_file-related-helpers.patch
+blackfin-drop-pte_file.patch
+c6x-drop-pte_file.patch
+cris-drop-_page_file-and-pte_file-related-helpers.patch
+frv-drop-_page_file-and-pte_file-related-helpers.patch
+hexagon-drop-_page_file-and-pte_file-related-helpers.patch
+ia64-drop-_page_file-and-pte_file-related-helpers.patch
+m32r-drop-_page_file-and-pte_file-related-helpers.patch
+m68k-drop-_page_file-and-pte_file-related-helpers.patch
+metag-drop-_page_file-and-pte_file-related-helpers.patch
+microblaze-drop-_page_file-and-pte_file-related-helpers.patch
+mips-drop-_page_file-and-pte_file-related-helpers.patch
+mn10300-drop-_page_file-and-pte_file-related-helpers.patch
+openrisc-drop-_page_file-and-pte_file-related-helpers.patch
+parisc-drop-_page_file-and-pte_file-related-helpers.patch
+s390-drop-pte_file-related-helpers.patch
+score-drop-_page_file-and-pte_file-related-helpers.patch
+sh-drop-_page_file-and-pte_file-related-helpers.patch
+sparc-drop-pte_file-related-helpers.patch
+tile-drop-pte_file-related-helpers.patch
+um-drop-_page_file-and-pte_file-related-helpers.patch
+unicore32-drop-pte_file-related-helpers.patch
+x86-drop-_page_file-and-pte_file-related-helpers.patch
+xtensa-drop-_page_file-and-pte_file-related-helpers.patch
+powerpc-drop-_page_file-and-pte_file-related-helpers.patch
diff --git a/queue-3.16/sh-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/sh-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..c2a968d5
--- /dev/null
+++ b/queue-3.16/sh-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,149 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:09 -0800
+Subject: sh: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 8b70beac99466b6d164de9fe647b3567e6f17e3a upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/sh/include/asm/pgtable_32.h | 30 ++++--------------------------
+ arch/sh/include/asm/pgtable_64.h | 9 +--------
+ 2 files changed, 5 insertions(+), 34 deletions(-)
+
+--- a/arch/sh/include/asm/pgtable_32.h
++++ b/arch/sh/include/asm/pgtable_32.h
+@@ -26,8 +26,6 @@
+ * and timing control which (together with bit 0) are moved into the
+ * old-style PTEA on the parts that support it.
+ *
+- * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
+- *
+ * SH-X2 MMUs and extended PTEs
+ *
+ * SH-X2 supports an extended mode TLB with split data arrays due to the
+@@ -51,7 +49,6 @@
+ #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
+ #define _PAGE_PROTNONE 0x200 /* software: if not present */
+ #define _PAGE_ACCESSED 0x400 /* software: page referenced */
+-#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
+ #define _PAGE_SPECIAL 0x800 /* software: special page */
+
+ #define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
+@@ -105,14 +102,13 @@ static inline unsigned long copy_ptea_at
+ /* Mask which drops unused bits from the PTEL value */
+ #if defined(CONFIG_CPU_SH3)
+ #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
+- _PAGE_FILE | _PAGE_SZ1 | \
+- _PAGE_HW_SHARED)
++ _PAGE_SZ1 | _PAGE_HW_SHARED)
+ #elif defined(CONFIG_X2TLB)
+ /* Get rid of the legacy PR/SZ bits when using extended mode */
+ #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
+- _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
++ _PAGE_PR_MASK | _PAGE_SZ_MASK)
+ #else
+-#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
++#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED)
+ #endif
+
+ #define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
+@@ -343,7 +339,6 @@ static inline void set_pte(pte_t *ptep,
+ #define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
+ #define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
+ #define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
+-#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
+ #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
+
+ #ifdef CONFIG_X2TLB
+@@ -445,7 +440,6 @@ static inline pte_t pte_modify(pte_t pte
+ * Encode and de-code a swap entry
+ *
+ * Constraints:
+- * _PAGE_FILE at bit 0
+ * _PAGE_PRESENT at bit 8
+ * _PAGE_PROTNONE at bit 9
+ *
+@@ -453,9 +447,7 @@ static inline pte_t pte_modify(pte_t pte
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+- * PAE. This keeps the logic quite simple, and allows for a full 32
+- * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
+- * in the pte_low case.
++ * PAE. This keeps the logic quite simple.
+ *
+ * As is evident by the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+@@ -471,13 +463,6 @@ static inline pte_t pte_modify(pte_t pte
+ #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+ #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
+
+-/*
+- * Encode and decode a nonlinear file mapping entry
+- */
+-#define pte_to_pgoff(pte) ((pte).pte_high)
+-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+-
+-#define PTE_FILE_MAX_BITS 32
+ #else
+ #define __swp_type(x) ((x).val & 0xff)
+ #define __swp_offset(x) ((x).val >> 10)
+@@ -485,13 +470,6 @@ static inline pte_t pte_modify(pte_t pte
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
+-
+-/*
+- * Encode and decode a nonlinear file mapping entry
+- */
+-#define PTE_FILE_MAX_BITS 29
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
+ #endif
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/sh/include/asm/pgtable_64.h
++++ b/arch/sh/include/asm/pgtable_64.h
+@@ -107,7 +107,6 @@ static __inline__ void set_pte(pte_t *pt
+ #define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
+ #define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
+ #define _PAGE_PRESENT 0x004 /* software: page referenced */
+-#define _PAGE_FILE 0x004 /* software: only when !present */
+ #define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
+ #define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
+ #define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
+@@ -129,7 +128,7 @@ static __inline__ void set_pte(pte_t *pt
+ #define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
+ #define _PAGE_SPECIAL _PAGE_EXT(0x002)
+
+-#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
++#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
+ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
+
+ /* Mask which drops software flags */
+@@ -260,7 +259,6 @@ static __inline__ void set_pte(pte_t *pt
+ */
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+ static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
+
+@@ -304,11 +302,6 @@ static inline pte_t pte_modify(pte_t pte
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/* Encode and decode a nonlinear file mapping entry */
+-#define PTE_FILE_MAX_BITS 29
+-#define pte_to_pgoff(pte) (pte_val(pte))
+-#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
+-
+ #endif /* !__ASSEMBLY__ */
+
+ #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
diff --git a/queue-3.16/sparc-drop-pte_file-related-helpers.patch b/queue-3.16/sparc-drop-pte_file-related-helpers.patch
new file mode 100644
index 00000000..1f95758d
--- /dev/null
+++ b/queue-3.16/sparc-drop-pte_file-related-helpers.patch
@@ -0,0 +1,171 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:12 -0800
+Subject: sparc: drop pte_file()-related helpers
+
+commit 6a8c4820895cf1dd2a128aef67ce079ba6eded80 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+This patch also increases the number of bits available for the swap offset.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/sparc/include/asm/pgtable_32.h | 24 -----------------
+ arch/sparc/include/asm/pgtable_64.h | 40 -----------------------------
+ arch/sparc/include/asm/pgtsrmmu.h | 14 ++++------
+ 3 files changed, 5 insertions(+), 73 deletions(-)
+
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -221,14 +221,6 @@ static inline int pte_young(pte_t pte)
+ return pte_val(pte) & SRMMU_REF;
+ }
+
+-/*
+- * The following only work if pte_present() is not true.
+- */
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_val(pte) & SRMMU_FILE;
+-}
+-
+ static inline int pte_special(pte_t pte)
+ {
+ return 0;
+@@ -375,22 +367,6 @@ static inline swp_entry_t __swp_entry(un
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/* file-offset-in-pte helpers */
+-static inline unsigned long pte_to_pgoff(pte_t pte)
+-{
+- return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
+-}
+-
+-static inline pte_t pgoff_to_pte(unsigned long pgoff)
+-{
+- return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
+-}
+-
+-/*
+- * This is made a constant because mm/fremap.c required a constant.
+- */
+-#define PTE_FILE_MAX_BITS 24
+-
+ static inline unsigned long
+ __get_phys (unsigned long addr)
+ {
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -137,7 +137,6 @@ bool kern_addr_valid(unsigned long addr)
+ #define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
+ #define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
+ #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
+-#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
+ #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
+ #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
+ #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
+@@ -167,7 +166,6 @@ bool kern_addr_valid(unsigned long addr)
+ #define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
+ #define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
+ #define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
+-#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
+ #define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
+ #define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
+ #define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
+@@ -332,22 +330,6 @@ static inline pmd_t pmd_modify(pmd_t pmd
+ }
+ #endif
+
+-static inline pte_t pgoff_to_pte(unsigned long off)
+-{
+- off <<= PAGE_SHIFT;
+-
+- __asm__ __volatile__(
+- "\n661: or %0, %2, %0\n"
+- " .section .sun4v_1insn_patch, \"ax\"\n"
+- " .word 661b\n"
+- " or %0, %3, %0\n"
+- " .previous\n"
+- : "=r" (off)
+- : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
+-
+- return __pte(off);
+-}
+-
+ static inline pgprot_t pgprot_noncached(pgprot_t prot)
+ {
+ unsigned long val = pgprot_val(prot);
+@@ -609,22 +591,6 @@ static inline unsigned long pte_exec(pte
+ return (pte_val(pte) & mask);
+ }
+
+-static inline unsigned long pte_file(pte_t pte)
+-{
+- unsigned long val = pte_val(pte);
+-
+- __asm__ __volatile__(
+- "\n661: and %0, %2, %0\n"
+- " .section .sun4v_1insn_patch, \"ax\"\n"
+- " .word 661b\n"
+- " and %0, %3, %0\n"
+- " .previous\n"
+- : "=r" (val)
+- : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
+-
+- return val;
+-}
+-
+ static inline unsigned long pte_present(pte_t pte)
+ {
+ unsigned long val = pte_val(pte);
+@@ -964,12 +930,6 @@ pgtable_t pgtable_trans_huge_withdraw(st
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-/* File offset in PTE support. */
+-unsigned long pte_file(pte_t);
+-#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
+-pte_t pgoff_to_pte(unsigned long);
+-#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
+-
+ int page_in_phys_avail(unsigned long paddr);
+
+ /*
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -80,10 +80,6 @@
+ #define SRMMU_PRIV 0x1c
+ #define SRMMU_PRIV_RDONLY 0x18
+
+-#define SRMMU_FILE 0x40 /* Implemented in software */
+-
+-#define SRMMU_PTE_FILE_SHIFT 8 /* == 32-PTE_FILE_MAX_BITS */
+-
+ #define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
+
+ /* SRMMU swap entry encoding
+@@ -94,13 +90,13 @@
+ * oooooooooooooooooootttttRRRRRRRR
+ * fedcba9876543210fedcba9876543210
+ *
+- * The bottom 8 bits are reserved for protection and status bits, especially
+- * FILE and PRESENT.
++ * The bottom 7 bits are reserved for protection and status bits, especially
++ * PRESENT.
+ */
+ #define SRMMU_SWP_TYPE_MASK 0x1f
+-#define SRMMU_SWP_TYPE_SHIFT SRMMU_PTE_FILE_SHIFT
+-#define SRMMU_SWP_OFF_MASK 0x7ffff
+-#define SRMMU_SWP_OFF_SHIFT (SRMMU_PTE_FILE_SHIFT + 5)
++#define SRMMU_SWP_TYPE_SHIFT 7
++#define SRMMU_SWP_OFF_MASK 0xfffff
++#define SRMMU_SWP_OFF_SHIFT (SRMMU_SWP_TYPE_SHIFT + 5)
+
+ /* Some day I will implement true fine grained access bits for
+ * user pages because the SRMMU gives us the capabilities to
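With SRMMU_FILE (bit 6) gone, only the low seven status bits need to stay reserved, so the swap type field slides down to bit 7 and the offset field widens from 19 to 20 bits. A user-space sketch of the new packing, using the constants from the hunk above (the helper names stand in for the kernel's __swp_* macros):

#include <assert.h>
#include <stdint.h>

#define SRMMU_SWP_TYPE_MASK     0x1f
#define SRMMU_SWP_TYPE_SHIFT    7
#define SRMMU_SWP_OFF_MASK      0xfffff
#define SRMMU_SWP_OFF_SHIFT     (SRMMU_SWP_TYPE_SHIFT + 5)

typedef struct { uint32_t val; } swp_entry_t;

static swp_entry_t swp_entry(uint32_t type, uint32_t offset)
{
        return (swp_entry_t) {
                ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
                ((offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT)
        };
}

static uint32_t swp_type(swp_entry_t e)
{
        return (e.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static uint32_t swp_offset(swp_entry_t e)
{
        return (e.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

int main(void)
{
        swp_entry_t e = swp_entry(3, 0xabcde);  /* 5-bit type, 20-bit offset */

        assert(swp_type(e) == 3 && swp_offset(e) == 0xabcde);
        return 0;
}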
diff --git a/queue-3.16/tile-drop-pte_file-related-helpers.patch b/queue-3.16/tile-drop-pte_file-related-helpers.patch
new file mode 100644
index 00000000..7694e597
--- /dev/null
+++ b/queue-3.16/tile-drop-pte_file-related-helpers.patch
@@ -0,0 +1,52 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:14 -0800
+Subject: tile: drop pte_file()-related helpers
+
+commit eb12f4872a3845a8803f689646dea5b92a30aff7 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Chris Metcalf <cmetcalf@ezchip.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/tile/include/asm/pgtable.h | 11 -----------
+ arch/tile/mm/homecache.c | 4 ----
+ 2 files changed, 15 deletions(-)
+
+--- a/arch/tile/include/asm/pgtable.h
++++ b/arch/tile/include/asm/pgtable.h
+@@ -285,17 +285,6 @@ extern void start_mm_caching(struct mm_s
+ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
+
+ /*
+- * Support non-linear file mappings (see sys_remap_file_pages).
+- * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
+- * file offset in the 32 high bits.
+- */
+-#define _PAGE_FILE HV_PTE_CLIENT1
+-#define PTE_FILE_MAX_BITS 32
+-#define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
+-#define pte_to_pgoff(pte) ((pte).val >> 32)
+-#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
+-
+-/*
+ * Encode and de-code a swap entry (see <linux/swapops.h>).
+ * We put the swap file type+offset in the 32 high bits;
+ * I believe we can just leave the low bits clear.
+--- a/arch/tile/mm/homecache.c
++++ b/arch/tile/mm/homecache.c
+@@ -265,10 +265,6 @@ static int pte_to_home(pte_t pte)
+ /* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
+ pte_t pte_set_home(pte_t pte, int home)
+ {
+- /* Check for non-linear file mapping "PTEs" and pass them through. */
+- if (pte_file(pte))
+- return pte;
+-
+ #if CHIP_HAS_MMIO()
+ /* Check for MMIO mappings and pass them through. */
+ if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
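tile kept the file offset in the high 32 bits of its 64-bit PTE and marked a non-linear entry by setting the CLIENT1 software bit while CLIENT0 (and PRESENT) stayed clear, which is why pte_set_home() had to pass such PTEs through untouched. A standalone sketch of that test; the bit positions of PRESENT/CLIENT0/CLIENT1 below are assumed stand-ins, not the real hypervisor ABI:

#include <assert.h>
#include <stdint.h>

#define PTE_PRESENT     (1ULL << 0)     /* assumed positions */
#define PTE_CLIENT0     (1ULL << 1)
#define PTE_CLIENT1     (1ULL << 2)     /* played the role of _PAGE_FILE */

typedef uint64_t pte_t;

static pte_t pgoff_to_pte(uint64_t off)
{
        return (off << 32) | PTE_CLIENT1;       /* offset in the high word */
}

static int pte_file(pte_t pte)
{
        return (pte & PTE_CLIENT1) && !(pte & PTE_CLIENT0);
}

static uint64_t pte_to_pgoff(pte_t pte)
{
        return pte >> 32;
}

int main(void)
{
        pte_t pte = pgoff_to_pte(0xdeadbeefULL);

        assert(!(pte & PTE_PRESENT) && pte_file(pte));
        assert(pte_to_pgoff(pte) == 0xdeadbeefULL);
        return 0;
}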
diff --git a/queue-3.16/um-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/um-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..2e723281
--- /dev/null
+++ b/queue-3.16/um-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,90 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:17 -0800
+Subject: um: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 3513006a5691ae3629eef9ddef0b71a47c40dfbc upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Jeff Dike <jdike@addtoit.com>
+Cc: Richard Weinberger <richard@nod.at>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/um/include/asm/pgtable-2level.h | 9 ---------
+ arch/um/include/asm/pgtable-3level.h | 20 --------------------
+ arch/um/include/asm/pgtable.h | 9 ---------
+ 3 files changed, 38 deletions(-)
+
+--- a/arch/um/include/asm/pgtable-2level.h
++++ b/arch/um/include/asm/pgtable-2level.h
+@@ -41,13 +41,4 @@ static inline void pgd_mkuptodate(pgd_t
+ #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
+ #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
+
+-/*
+- * Bits 0 through 4 are taken
+- */
+-#define PTE_FILE_MAX_BITS 27
+-
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
+-
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
+-
+ #endif
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -112,25 +112,5 @@ static inline pmd_t pfn_pmd(pfn_t page_n
+ return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+ }
+
+-/*
+- * Bits 0 through 3 are taken in the low part of the pte,
+- * put the 32 bits of offset into the high part.
+- */
+-#define PTE_FILE_MAX_BITS 32
+-
+-#ifdef CONFIG_64BIT
+-
+-#define pte_to_pgoff(p) ((p).pte >> 32)
+-
+-#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
+-
+-#else
+-
+-#define pte_to_pgoff(pte) ((pte).pte_high)
+-
+-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+-
+-#endif
+-
+ #endif
+
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -18,7 +18,6 @@
+ #define _PAGE_ACCESSED 0x080
+ #define _PAGE_DIRTY 0x100
+ /* If _PAGE_PRESENT is clear, we use these: */
+-#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
+ #define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
+ pte_present gives true */
+
+@@ -151,14 +150,6 @@ static inline int pte_write(pte_t pte)
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+ }
+
+-/*
+- * The following only works if pte_present() is not true.
+- */
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_get_bits(pte, _PAGE_FILE);
+-}
+-
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_get_bits(pte, _PAGE_DIRTY);
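In um's three-level, 32-bit configuration the PTE is two 32-bit words, so no bit packing was needed at all: the low word carried just _PAGE_FILE and the whole offset sat in pte_high. A standalone sketch with the _PAGE_FILE value from the hunk above:

#include <assert.h>
#include <stdint.h>

#define _PAGE_FILE      0x008

typedef struct { uint32_t pte_low, pte_high; } pte_t;

static pte_t pgoff_to_pte(uint32_t off)
{
        return (pte_t) { .pte_low = _PAGE_FILE, .pte_high = off };
}

static int pte_file(pte_t pte)
{
        return pte.pte_low & _PAGE_FILE;
}

static uint32_t pte_to_pgoff(pte_t pte)
{
        return pte.pte_high;
}

int main(void)
{
        pte_t pte = pgoff_to_pte(0x00c0ffee);

        assert(pte_file(pte) && pte_to_pgoff(pte) == 0x00c0ffee);
        return 0;
}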
diff --git a/queue-3.16/unicore32-drop-pte_file-related-helpers.patch b/queue-3.16/unicore32-drop-pte_file-related-helpers.patch
new file mode 100644
index 00000000..7c9b4138
--- /dev/null
+++ b/queue-3.16/unicore32-drop-pte_file-related-helpers.patch
@@ -0,0 +1,52 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:20 -0800
+Subject: unicore32: drop pte_file()-related helpers
+
+commit 40171798fe11a6dc1d963058b097b2c4c9d34a9c upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/unicore32/include/asm/pgtable-hwdef.h | 1 -
+ arch/unicore32/include/asm/pgtable.h | 14 --------------
+ 2 files changed, 15 deletions(-)
+
+--- a/arch/unicore32/include/asm/pgtable-hwdef.h
++++ b/arch/unicore32/include/asm/pgtable-hwdef.h
+@@ -44,7 +44,6 @@
+ #define PTE_TYPE_INVALID (3 << 0)
+
+ #define PTE_PRESENT (1 << 2)
+-#define PTE_FILE (1 << 3) /* only when !PRESENT */
+ #define PTE_YOUNG (1 << 3)
+ #define PTE_DIRTY (1 << 4)
+ #define PTE_CACHEABLE (1 << 5)
+--- a/arch/unicore32/include/asm/pgtable.h
++++ b/arch/unicore32/include/asm/pgtable.h
+@@ -283,20 +283,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD
+ #define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+-/*
+- * Encode and decode a file entry. File entries are stored in the Linux
+- * page tables as follows:
+- *
+- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <----------------------- offset ----------------------> 1 0 0 0
+- */
+-#define pte_file(pte) (pte_val(pte) & PTE_FILE)
+-#define pte_to_pgoff(x) (pte_val(x) >> 4)
+-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
+-
+-#define PTE_FILE_MAX_BITS 28
+-
+ /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+ /* FIXME: this is not correct */
+ #define kern_addr_valid(addr) (1)
diff --git a/queue-3.16/x86-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/x86-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..db55e694
--- /dev/null
+++ b/queue-3.16/x86-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,171 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:22 -0800
+Subject: x86: drop _PAGE_FILE and pte_file()-related helpers
+
+commit 0a191362058391878cc2a4d4ccddcd8223eb4f79 upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/x86/include/asm/pgtable-2level.h | 38 +--------------------------
+ arch/x86/include/asm/pgtable-3level.h | 12 ---------
+ arch/x86/include/asm/pgtable.h | 20 --------------
+ arch/x86/include/asm/pgtable_64.h | 6 +----
+ arch/x86/include/asm/pgtable_types.h | 3 ---
+ 5 files changed, 2 insertions(+), 77 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -62,44 +62,8 @@ static inline unsigned long pte_bitop(un
+ return ((value >> rightshift) & mask) << leftshift;
+ }
+
+-/*
+- * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
+- * split up the 29 bits of offset into this range.
+- */
+-#define PTE_FILE_MAX_BITS 29
+-#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
+-#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
+-#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
+-#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+-#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+-
+-#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+-#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+-
+-#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+-#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+-
+-static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+-{
+- return (pgoff_t)
+- (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+- pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+- pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
+-}
+-
+-static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+-{
+- return (pte_t){
+- .pte_low =
+- pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+- pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+- pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
+- _PAGE_FILE,
+- };
+-}
+-
+ /* Encode and de-code a swap entry */
+-#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
++#define SWP_TYPE_BITS 5
+ #define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -176,18 +176,6 @@ static inline pmd_t native_pmdp_get_and_
+ #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+ #endif
+
+-/*
+- * Bits 0, 6 and 7 are taken in the low part of the pte,
+- * put the 32 bits of offset into the high part.
+- *
+- * For soft-dirty tracking 11 bit is taken from
+- * the low part of pte as well.
+- */
+-#define pte_to_pgoff(pte) ((pte).pte_high)
+-#define pgoff_to_pte(off) \
+- ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
+-#define PTE_FILE_MAX_BITS 32
+-
+ /* Encode and de-code a swap entry */
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
+ #define __swp_type(x) (((x).val) & 0x1f)
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -115,11 +115,6 @@ static inline int pte_write(pte_t pte)
+ return pte_flags(pte) & _PAGE_RW;
+ }
+
+-static inline int pte_file(pte_t pte)
+-{
+- return pte_flags(pte) & _PAGE_FILE;
+-}
+-
+ static inline int pte_huge(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_PSE;
+@@ -329,21 +324,6 @@ static inline pmd_t pmd_mksoft_dirty(pmd
+ return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
+ }
+
+-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+-{
+- return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+-}
+-
+-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+-{
+- return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+-}
+-
+-static inline int pte_file_soft_dirty(pte_t pte)
+-{
+- return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+-}
+-
+ #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+ /*
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -155,10 +155,6 @@ static inline int pgd_large(pgd_t pgd) {
+ /* PUD - Level3 access */
+
+ /* PMD - Level 2 access */
+-#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+-#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
+- _PAGE_FILE })
+-#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
+
+ /* PTE - Level 1 access. */
+
+@@ -167,7 +163,7 @@ static inline int pgd_large(pgd_t pgd) {
+ #define pte_unmap(pte) ((void)(pte))/* NOP */
+
+ /* Encode and de-code a swap entry */
+-#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
++#define SWP_TYPE_BITS 5
+ #ifdef CONFIG_NUMA_BALANCING
+ /* Automatic NUMA balancing needs to be distinguishable from swap entries */
+ #define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 2)
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -39,8 +39,6 @@
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+ /* - if the user mapped it with PROT_NONE; pte_present gives true */
+ #define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
+-/* - set: nonlinear file mapping, saved PTE; unset:swap */
+-#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
+
+ #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+ #define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+@@ -115,7 +113,6 @@
+ #define _PAGE_NX (_AT(pteval_t, 0))
+ #endif
+
+-#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+ #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
+ #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
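On 32-bit non-PAE x86 a not-present PTE still had to keep _PAGE_BIT_PRESENT (bit 0), _PAGE_BIT_FILE (bit 6, aliasing the dirty bit) and _PAGE_BIT_PROTNONE (bit 8, aliasing the global bit) out of the offset, so the 29 offset bits were scattered over the three remaining ranges with pte_bitop(). A user-space round-trip sketch with those bit numbers written out (the values follow the standard x86 layout: present=0, dirty=6, global=8):

#include <assert.h>
#include <stdint.h>

#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_FILE          6       /* aliases _PAGE_BIT_DIRTY */
#define _PAGE_BIT_PROTNONE      8       /* aliases _PAGE_BIT_GLOBAL */
#define _PAGE_FILE              (1U << _PAGE_BIT_FILE)

#define PTE_FILE_SHIFT1         (_PAGE_BIT_PRESENT + 1)                 /* 1 */
#define PTE_FILE_SHIFT2         (_PAGE_BIT_FILE + 1)                    /* 7 */
#define PTE_FILE_SHIFT3         (_PAGE_BIT_PROTNONE + 1)                /* 9 */
#define PTE_FILE_BITS1          (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) /* 5 */
#define PTE_FILE_BITS2          (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) /* 1 */
#define PTE_FILE_MASK1          ((1U << PTE_FILE_BITS1) - 1)
#define PTE_FILE_MASK2          ((1U << PTE_FILE_BITS2) - 1)
#define PTE_FILE_LSHIFT2        (PTE_FILE_BITS1)
#define PTE_FILE_LSHIFT3        (PTE_FILE_BITS1 + PTE_FILE_BITS2)

static uint32_t pte_bitop(uint32_t value, unsigned rightshift,
                          uint32_t mask, unsigned leftshift)
{
        return ((value >> rightshift) & mask) << leftshift;
}

static uint32_t pgoff_to_pte_low(uint32_t off)
{
        return pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
               pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
               pte_bitop(off, PTE_FILE_LSHIFT3, -1U, PTE_FILE_SHIFT3) +
               _PAGE_FILE;
}

static uint32_t pte_low_to_pgoff(uint32_t pte_low)
{
        return pte_bitop(pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
               pte_bitop(pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
               pte_bitop(pte_low, PTE_FILE_SHIFT3, -1U, PTE_FILE_LSHIFT3);
}

int main(void)
{
        uint32_t off = (1U << 29) - 123;        /* any 29-bit offset */
        uint32_t pte = pgoff_to_pte_low(off);

        assert(pte & _PAGE_FILE);
        assert(!(pte & (1U << _PAGE_BIT_PRESENT)));
        assert(pte_low_to_pgoff(pte) == off);
        return 0;
}

The freed dirty-bit alias is also why SWP_TYPE_BITS can no longer be derived from _PAGE_BIT_FILE and becomes a plain constant of 5 in the hunks above.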
diff --git a/queue-3.16/xtensa-drop-_page_file-and-pte_file-related-helpers.patch b/queue-3.16/xtensa-drop-_page_file-and-pte_file-related-helpers.patch
new file mode 100644
index 00000000..219e7675
--- /dev/null
+++ b/queue-3.16/xtensa-drop-_page_file-and-pte_file-related-helpers.patch
@@ -0,0 +1,65 @@
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 10 Feb 2015 14:11:25 -0800
+Subject: xtensa: drop _PAGE_FILE and pte_file()-related helpers
+
+commit d9ecee281b8f89da6d3203be62802eda991e37cc upstream.
+
+We've replaced the remap_file_pages(2) implementation with emulation. Nobody
+creates non-linear mappings anymore.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ arch/xtensa/include/asm/pgtable.h | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/arch/xtensa/include/asm/pgtable.h
++++ b/arch/xtensa/include/asm/pgtable.h
+@@ -89,8 +89,6 @@
+ * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
+ * +-----------------------------------------+
+ * swap | index | type | 01 | 11 | 00 |
+- * +- - - - - - - - - - - - - - - - - - - - -+
+- * file | file offset | 01 | 11 | 10 |
+ * +-----------------------------------------+
+ *
+ * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
+@@ -111,7 +109,6 @@
+ * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
+ * (note that the index is always non-zero)
+ * type swap type (5 bits -> 32 types)
+- * file offset 26-bit offset into the file, in increments of PAGE_SIZE
+ *
+ * Notes:
+ * - (PROT_NONE) is a special case of 'present' but causes an exception for
+@@ -144,7 +141,6 @@
+ #define _PAGE_HW_VALID 0x00
+ #define _PAGE_NONE 0x0f
+ #endif
+-#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */
+
+ #define _PAGE_USER (1<<4) /* user access (ring=1) */
+
+@@ -260,7 +256,6 @@ static inline void pgtable_cache_init(vo
+ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
+ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+ static inline int pte_special(pte_t pte) { return 0; }
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -388,11 +383,6 @@ ptep_set_wrprotect(struct mm_struct *mm,
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+-#define PTE_FILE_MAX_BITS 26
+-#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
+-#define pgoff_to_pte(off) \
+- ((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
+-
+ #endif /* !defined (__ASSEMBLY__) */
+
+
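xtensa used a similarly simple scheme: the low six PTE bits carry the ring/cache-attribute/type pattern that marks a not-present file entry, and the 26-bit offset sits in bits 6-31. A standalone sketch; _PAGE_FILE and _PAGE_USER are the values from the hunk, while _PAGE_CA_INVALID is a placeholder guess at the cache-attribute field:

#include <assert.h>
#include <stdint.h>

#define _PAGE_FILE              (1U << 1)
#define _PAGE_USER              (1U << 4)
#define _PAGE_CA_INVALID        (3U << 2)       /* assumed placeholder */

static uint32_t pgoff_to_pte(uint32_t off)
{
        return (off << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER;
}

static int pte_file(uint32_t pte)
{
        return pte & _PAGE_FILE;
}

static uint32_t pte_to_pgoff(uint32_t pte)
{
        return pte >> 6;
}

int main(void)
{
        uint32_t off = (1U << 26) - 1;  /* PTE_FILE_MAX_BITS == 26 */
        uint32_t pte = pgoff_to_pte(off);

        assert(pte_file(pte) && pte_to_pgoff(pte) == off);
        return 0;
}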