From d53d7e29a67aec795c9f767829a0c427f0b7d11b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 10 Mar 2010 23:07:50 +0100
Subject: [PATCH] x86: Fix 32bit HIGHMEM=n compile

commit 70331516e2d98b6f114fa15c9a9590275f666447 in tip.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 arch/x86/Kconfig         |    2 +-
 arch/x86/mm/highmem_32.c |   42 -------------------------------------
 arch/x86/mm/iomap_32.c   |   51 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 43 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f9c2be1..5446fe6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2116,7 +2116,7 @@ endmenu
 
 config HAVE_ATOMIC_IOMAP
 	def_bool y
-	depends on X86_32
+	depends on X86_32 && !PREEMPT_RT
 
 source "net/Kconfig"
 
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 7f2ac8a..83b4efc 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -65,52 +65,11 @@ void *__kmap_atomic_direct(struct page *page, enum km_type type)
 	return __kmap_atomic_prot(page, type, kmap_prot);
 }
 
-void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
-{
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	preempt_disable();
-	pagefault_disable();
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
 void *__kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }
 
-void __kunmap_atomic(void *kvaddr, enum km_type type)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it.  Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-	else {
-#ifdef CONFIG_DEBUG_HIGHMEM
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
-	}
-
-	pagefault_enable();
-	preempt_enable();
-}
-
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
@@ -139,7 +98,6 @@ EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kunmap_virt);
 EXPORT_SYMBOL(__kmap_atomic);
-EXPORT_SYMBOL(__kunmap_atomic);
 EXPORT_SYMBOL(__kmap_atomic_prot);
 EXPORT_SYMBOL(__kmap_atomic_to_page);
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 38a1a68..c889bad 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -55,6 +55,56 @@ iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
+void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	preempt_disable();
+	pagefault_disable();
+
+	debug_kmap_atomic(type);
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void *)vaddr;
+}
+
+void __kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	/*
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
+	 */
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+	}
+
+	pagefault_enable();
+	preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+
+#ifndef CONFIG_PREEMPT_RT
+
+# ifndef CONFIG_HIGHMEM
+#  define kmap_atomic_prot_pfn(pfn, type, prot) \
+	__kmap_atomic_prot_pfn(pfn, type, prot)
+#  define kunmap_atomic(kvaddr, type)	__kunmap_atomic(kvaddr, type)
+# endif
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
@@ -80,3 +130,4 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 	kunmap_atomic(kvaddr, type);
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
+#endif
-- 
1.7.0.4