author    David S. Miller <davem@nuts.davemloft.net>  2004-08-07 03:54:42 -0700
committer David S. Miller <davem@nuts.davemloft.net>  2004-08-07 03:54:42 -0700
commit    fefd14da08f4db1374a8e3c9563ca82948d61851 (patch)
tree      43509cee039a55476c9f56ad945490788e656fd1 /include
parent    915a29ec1c5e34283a6231af1036114e4d612cb0 (diff)
parent    2b03a2af725991bc3548ef10f5a723f6a0b780fc (diff)
download  history-fefd14da08f4db1374a8e3c9563ca82948d61851.tar.gz
Merge nuts.davemloft.net:/disk1/BK/sparcwork-2.4
into nuts.davemloft.net:/disk1/BK/sparc-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sparc64/cacheflush.h   |    3
-rw-r--r--  include/asm-sparc64/mmu_context.h  |    2
-rw-r--r--  include/asm-sparc64/pgalloc.h      |   37
-rw-r--r--  include/asm-sparc64/pgtable.h      |   32
-rw-r--r--  include/asm-sparc64/system.h       |    1
-rw-r--r--  include/asm-sparc64/tlb.h          |  136
-rw-r--r--  include/asm-sparc64/tlbflush.h     |   85
7 files changed, 182 insertions, 114 deletions
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index fc193168153622..b04024ff51d24f 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -9,7 +9,8 @@
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
-extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
+#define flush_cache_range(vma, start, end) \
+ flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
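
The net effect of the cacheflush.h change is that a ranged cache flush no longer goes through an out-of-line function: like flush_cache_page(), it reduces to a register-window spill performed only when the flushed MM is the one currently running. Expanding the new macro through flush_cache_mm() gives, purely as an illustration (this expansion is not itself part of the patch):

        /* Illustrative expansion only: flush_cache_range() now ignores
         * its start/end arguments and just spills register windows
         * when the flushed MM is current.
         */
        #define flush_cache_range(vma, start, end) \
                do { if ((vma)->vm_mm == current->mm) flushw_user(); } while (0)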
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 1081acb1c35299..89490da2c9b7c9 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -142,8 +142,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
spin_unlock(&mm->page_table_lock);
}
-extern void __flush_tlb_mm(unsigned long, unsigned long);
-
#define deactivate_mm(tsk,mm) do { } while (0)
/* Activate a new MM instance for the current task. */
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index afbf339cd2c8ca..fa5c704dedaf25 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -188,14 +188,29 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = __pte_alloc_one_kernel(mm, address);
+ if (pte) {
+ struct page *page = virt_to_page(pte);
+ page->mapping = (void *) mm;
+ page->index = address & PMD_MASK;
+ }
+ return pte;
+}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- pte_t *pte = pte_alloc_one_kernel(mm, addr);
- if (pte)
- return virt_to_page(pte);
+ pte_t *pte = __pte_alloc_one_kernel(mm, addr);
+ if (pte) {
+ struct page *page = virt_to_page(pte);
+ page->mapping = (void *) mm;
+ page->index = addr & PMD_MASK;
+ return page;
+ }
return NULL;
}
@@ -230,8 +245,18 @@ static __inline__ void free_pte_slow(pte_t *pte)
free_page((unsigned long)pte);
}
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(page_address(pte))
+static inline void pte_free_kernel(pte_t *pte)
+{
+ virt_to_page(pte)->mapping = NULL;
+ free_pte_fast(pte);
+}
+
+static inline void pte_free(struct page *ptepage)
+{
+ ptepage->mapping = NULL;
+ free_pte_fast(page_address(ptepage));
+}
+
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
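
The pgalloc.h change makes both allocation paths record the owning mm in page->mapping and the PMD-aligned user address in page->index of every pte page, and the free paths clear ->mapping again. That bookkeeping is enough to map a pte pointer back to the user virtual address it covers; a minimal sketch, using a hypothetical helper name (pte_to_vaddr() is not part of this patch):

        /* Hypothetical helper: recover the user virtual address mapped
         * by *ptep from the bookkeeping left by pte_alloc_one{,_kernel}().
         */
        static inline unsigned long pte_to_vaddr(pte_t *ptep)
        {
                struct page *ptepage = virt_to_page(ptep);
                unsigned long off = (unsigned long) ptep & ~PAGE_MASK;

                /* ->index holds the PMD_MASK-aligned base; each pte_t
                 * in the page maps one PAGE_SIZE step above it.
                 */
                return ptepage->index + (off / sizeof(pte_t)) * PAGE_SIZE;
        }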
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index cdb65452cadc44..cb0b46923d60da 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -67,12 +67,6 @@
#include <linux/sched.h>
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
@@ -80,9 +74,12 @@
* is different so we can optimize correctly for 32-bit tasks.
*/
#define REAL_PTRS_PER_PMD (1UL << PMD_BITS)
-#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \
- (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : \
- (REAL_PTRS_PER_PMD)))
+
+/* This is gross, but unless we do this gcc retests the
+ * thread flag every iteration in pmd traversal loops.
+ */
+extern unsigned long __ptrs_per_pmd(void) __attribute_const__;
+#define PTRS_PER_PMD __ptrs_per_pmd()
/*
* We cannot use the top address range because VPTE table lives there. This
@@ -273,7 +270,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (0)
#define pmd_present(pmd) (pmd_val(pmd) != 0U)
@@ -287,7 +283,7 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
* Undefined behaviour if not..
*/
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte) pte_read(pte)
+#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
@@ -329,6 +325,20 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+/* Actual page table PTE updates. */
+extern void tlb_batch_add(pte_t *ptep, pte_t orig);
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ pte_t orig = *ptep;
+
+ *ptep = pte;
+ if (pte_present(orig))
+ tlb_batch_add(ptep, orig);
+}
+
+#define pte_clear(ptep) set_pte((ptep), __pte(0UL))
+
extern pgd_t swapper_pg_dir[1];
/* These do nothing with the way I have things setup. */
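
With the pgtable.h change, every overwrite of a present PTE, including pte_clear(), now hands the old value to tlb_batch_add(). That function is implemented out of line in arch/sparc64/mm/ and is not part of this header diff; a plausible sketch, assuming it queues addresses into the per-cpu mmu_gather declared in asm-sparc64/tlb.h below:

        /* Sketch under stated assumptions, not the committed code:
         * look the address up via the pte page bookkeeping, queue it,
         * and drain the batch when it fills.
         */
        void tlb_batch_add(pte_t *ptep, pte_t orig)
        {
                struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());
                struct page *ptepage = virt_to_page(ptep);
                unsigned long off = (unsigned long) ptep & ~PAGE_MASK;
                unsigned long vaddr;

                /* pte_alloc_one() stored the PMD-aligned base in ->index. */
                vaddr = ptepage->index + (off / sizeof(pte_t)) * PAGE_SIZE;

                mp->vaddrs[mp->tlb_nr++] = vaddr;
                if (mp->tlb_nr >= TLB_BATCH_NR)
                        flush_tlb_pending();
        }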
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index ca08f8cad847b5..d6d22f9c2a9cbb 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -175,6 +175,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
} \
+ flush_tlb_pending(); \
save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 8b507a36cf957e..5854a89aecf4a5 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -1,27 +1,129 @@
#ifndef _SPARC64_TLB_H
#define _SPARC64_TLB_H
-#define tlb_flush(tlb) \
-do { if ((tlb)->fullmm) \
- flush_tlb_mm((tlb)->mm);\
-} while (0)
+#include <linux/config.h>
+#include <linux/swap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
-#define tlb_start_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
+#define TLB_BATCH_NR 192
-#define tlb_end_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much..
+ */
+#ifdef CONFIG_SMP
+ #define FREE_PTE_NR 506
+ #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
+#else
+ #define FREE_PTE_NR 1
+ #define tlb_fast_mode(bp) 1
+#endif
-#define __tlb_remove_tlb_entry(tlb, ptep, address) \
- do { } while (0)
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int pages_nr;
+ unsigned int need_flush;
+ unsigned int tlb_frozen;
+ unsigned int tlb_nr;
+ unsigned long freed;
+ unsigned long vaddrs[TLB_BATCH_NR];
+ struct page *pages[FREE_PTE_NR];
+};
-#include <asm-generic/tlb.h>
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#ifdef CONFIG_SMP
+extern void smp_flush_tlb_pending(struct mm_struct *,
+ unsigned long, unsigned long *);
+#endif
+
+extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
+extern void flush_tlb_pending(void);
+
+static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());
+
+ BUG_ON(mp->tlb_nr);
+
+ mp->mm = mm;
+ mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
+ mp->tlb_frozen = full_mm_flush;
+ mp->freed = 0;
+
+ return mp;
+}
+
+
+static inline void tlb_flush_mmu(struct mmu_gather *mp)
+{
+ if (mp->need_flush) {
+ mp->need_flush = 0;
+ if (!tlb_fast_mode(mp)) {
+ free_pages_and_swap_cache(mp->pages, mp->pages_nr);
+ mp->pages_nr = 0;
+ }
+ }
+
+}
+
+#ifdef CONFIG_SMP
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
+#else
+#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm), SECONDARY_CONTEXT)
+#endif
+
+static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
+{
+ unsigned long freed = mp->freed;
+ struct mm_struct *mm = mp->mm;
+ unsigned long rss = mm->rss;
+
+ if (rss < freed)
+ freed = rss;
+ mm->rss = rss - freed;
+
+ tlb_flush_mmu(mp);
+
+ if (mp->tlb_frozen) {
+ unsigned long context = mm->context;
+
+ if (CTX_VALID(context))
+ do_flush_tlb_mm(mm);
+ mp->tlb_frozen = 0;
+ } else
+ flush_tlb_pending();
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+}
+
+static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
+{
+ return mp->tlb_frozen;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
+{
+ mp->need_flush = 1;
+ if (tlb_fast_mode(mp)) {
+ free_page_and_swap_cache(page);
+ return;
+ }
+ mp->pages[mp->pages_nr++] = page;
+ if (mp->pages_nr >= FREE_PTE_NR)
+ tlb_flush_mmu(mp);
+}
+
+#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
+#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
+#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
+
+#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
#endif /* _SPARC64_TLB_H */
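
flush_tlb_pending() is declared here but implemented out of line. Given the primitives declared above, a plausible sketch is that it drains the per-cpu address batch against the batch's mm, via a cross-call on SMP or __flush_tlb_pending() locally (an assumption about the arch/sparc64/mm/ code, not text from this patch):

        /* Sketch only; assumes all queued vaddrs belong to mp->mm. */
        void flush_tlb_pending(void)
        {
                struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());

                if (mp->tlb_nr) {
                        if (CTX_VALID(mp->mm->context)) {
        #ifdef CONFIG_SMP
                                smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
                                                      &mp->vaddrs[0]);
        #else
                                __flush_tlb_pending(CTX_HWBITS(mp->mm->context),
                                                    mp->tlb_nr, &mp->vaddrs[0]);
        #endif
                        }
                        mp->tlb_nr = 0;
                }
        }

This ordering also explains the system.h hunk above: switch_to() must drain the batch before the secondary context register changes, since the queued addresses are only meaningful against the outgoing mm's context.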
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index 8c70fdd037b60c..3ef9909ac3ac28 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -7,11 +7,14 @@
/* TLB flush operations. */
+extern void flush_tlb_pending(void);
+
+#define flush_tlb_range(vma,start,end) \
+ do { (void)(start); flush_tlb_pending(); } while (0)
+#define flush_tlb_page(vma,addr) flush_tlb_pending()
+#define flush_tlb_mm(mm) flush_tlb_pending()
+
extern void __flush_tlb_all(void);
-extern void __flush_tlb_mm(unsigned long context, unsigned long r);
-extern void __flush_tlb_range(unsigned long context, unsigned long start,
- unsigned long r, unsigned long end,
- unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -22,89 +25,17 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb_kernel_range(start,end) \
__flush_tlb_kernel_range(start,end)
-#define flush_tlb_mm(__mm) \
-do { if (CTX_VALID((__mm)->context)) \
- __flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
-} while (0)
-
-#define flush_tlb_range(__vma, start, end) \
-do { if (CTX_VALID((__vma)->vm_mm->context)) { \
- unsigned long __start = (start)&PAGE_MASK; \
- unsigned long __end = PAGE_ALIGN(end); \
- __flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
- SECONDARY_CONTEXT, __end, PAGE_SIZE, \
- (__end - __start)); \
- } \
-} while (0)
-
-#define flush_tlb_vpte_range(__mm, start, end) \
-do { if (CTX_VALID((__mm)->context)) { \
- unsigned long __start = (start)&PAGE_MASK; \
- unsigned long __end = PAGE_ALIGN(end); \
- __flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
- SECONDARY_CONTEXT, __end, PAGE_SIZE, \
- (__end - __start)); \
- } \
-} while (0)
-
-#define flush_tlb_page(vma, page) \
-do { struct mm_struct *__mm = (vma)->vm_mm; \
- if (CTX_VALID(__mm->context)) \
- __flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
- SECONDARY_CONTEXT); \
-} while (0)
-
-#define flush_tlb_vpte_page(mm, addr) \
-do { struct mm_struct *__mm = (mm); \
- if (CTX_VALID(__mm->context)) \
- __flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
- SECONDARY_CONTEXT); \
-} while (0)
-
#else /* CONFIG_SMP */
extern void smp_flush_tlb_all(void);
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
-extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_tlb_all() smp_flush_tlb_all()
-#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
-#define flush_tlb_range(vma, start, end) \
- smp_flush_tlb_range((vma)->vm_mm, start, end)
-#define flush_tlb_vpte_range(mm, start, end) \
- smp_flush_tlb_range(mm, start, end)
#define flush_tlb_kernel_range(start, end) \
smp_flush_tlb_kernel_range(start, end)
-#define flush_tlb_page(vma, page) \
- smp_flush_tlb_page((vma)->vm_mm, page)
-#define flush_tlb_vpte_page(mm, page) \
- smp_flush_tlb_page((mm), page)
#endif /* ! CONFIG_SMP */
-static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
- /* Note the signed type. */
- long s = start, e = end, vpte_base;
- /* Nobody should call us with start below VM hole and end above.
- See if it is really true. */
- BUG_ON(s > e);
-#if 0
- /* Currently free_pgtables guarantees this. */
- s &= PMD_MASK;
- e = (e + PMD_SIZE - 1) & PMD_MASK;
-#endif
- vpte_base = (tlb_type == spitfire ?
- VPTE_BASE_SPITFIRE :
- VPTE_BASE_CHEETAH);
-
- flush_tlb_vpte_range(mm,
- vpte_base + (s >> (PAGE_SHIFT - 3)),
- vpte_base + (e >> (PAGE_SHIFT - 3)));
-}
+extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long);
#endif /* _SPARC64_TLBFLUSH_H */
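
The inline flush_tlb_pgtables() removed above becomes an out-of-line function. A sketch under the assumption that it keeps the signed VPTE arithmetic of the old inline body but feeds the resulting alias range into the same deferred batch (the real body lives in arch/sparc64/mm/ and may differ):

        /* Sketch only: queue the VPTE aliases of the page tables being
         * torn down.  free_pgtables() hands us PMD-aligned bounds (see
         * the removed "#if 0" note above), so the shifted addresses
         * below are page aligned.
         */
        void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
                                unsigned long end)
        {
                struct mmu_gather *mp = &per_cpu(mmu_gathers, smp_processor_id());
                long s = start, e = end, vpte_base; /* signed: VPTE base is negative */

                BUG_ON(s > e);

                vpte_base = (tlb_type == spitfire ?
                             VPTE_BASE_SPITFIRE :
                             VPTE_BASE_CHEETAH);

                for (s = vpte_base + (s >> (PAGE_SHIFT - 3)),
                     e = vpte_base + (e >> (PAGE_SHIFT - 3));
                     s < e; s += PAGE_SIZE) {
                        mp->vaddrs[mp->tlb_nr++] = s;
                        if (mp->tlb_nr >= TLB_BATCH_NR)
                                flush_tlb_pending();
                }
        }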