diff options
author | Linus Torvalds <torvalds@home.transmeta.com> | 2002-05-20 07:34:26 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2002-05-20 07:34:26 -0700 |
commit | e6d19c6ab5f0f54d15277be9933183050d01ce2c (patch) | |
tree | 02e94118567d03e749fcc9473761f7a137a0a214 | |
parent | 00f423615228d4b8abb54ec1028ff042be6afe88 (diff) | |
download | history-e6d19c6ab5f0f54d15277be9933183050d01ce2c.tar.gz |
Make generic TLB shootdown friendlier to non-x86 architectures (tag: v2.5.17)
-rw-r--r-- | include/asm-generic/tlb.h | 24 | ||||
-rw-r--r-- | include/asm-i386/tlb.h | 19 | ||||
-rw-r--r-- | mm/memory.c | 2 |
3 files changed, 35 insertions, 10 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 778990a36b41f8..6e1aabd52becc9 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -16,8 +16,17 @@ #include <linux/config.h> #include <asm/tlbflush.h> -/* aim for something that fits in the L1 cache */ -#define FREE_PTE_NR 508 +/* + * For UP we don't need to worry about TLB flush + * and page free order so much.. + */ +#ifdef CONFIG_SMP + #define FREE_PTE_NR 507 + #define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL) +#else + #define FREE_PTE_NR 1 + #define tlb_fast_mode(tlb) 1 +#endif /* mmu_gather_t is an opaque type used by the mm code for passing around any * data needed by arch specific code for tlb_remove_page. This structure can @@ -34,10 +43,6 @@ typedef struct free_pte_ctx { /* Users of the generic TLB shootdown code must declare this storage space. */ extern mmu_gather_t mmu_gathers[NR_CPUS]; -/* Do me later */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) - /* tlb_gather_mmu * Return a pointer to an initialized mmu_gather_t. */ @@ -57,9 +62,9 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigne { unsigned long nr; - flush_tlb_mm(tlb->mm); + tlb_flush(tlb); nr = tlb->nr; - if (nr != ~0UL) { + if (!tlb_fast_mode(tlb)) { unsigned long i; tlb->nr = 0; for (i=0; i < nr; i++) @@ -91,8 +96,7 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsign */ static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page) { - /* Handle the common case fast, first. */\ - if (tlb->nr == ~0UL) { + if (tlb_fast_mode(tlb)) { free_page_and_swap_cache(page); return; } diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h index 69c0faa9319458..844c3d4c9aaaff 100644 --- a/include/asm-i386/tlb.h +++ b/include/asm-i386/tlb.h @@ -1 +1,20 @@ +#ifndef _I386_TLB_H +#define _I386_TLB_H + +/* + * x86 doesn't need any special per-pte or + * per-vma handling.. 
+ */ +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) + +/* + * .. because we flush the whole mm when it + * fills up. + */ +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + #include <asm-generic/tlb.h> + +#endif diff --git a/mm/memory.c b/mm/memory.c index 8de16cbed3d59d..c43c303c4b72b6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -348,11 +348,13 @@ static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, pte_clear(ptep); pfn = pte_pfn(pte); + tlb_remove_tlb_entry(tlb, pte, address+offset); if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); if (!PageReserved(page)) { if (pte_dirty(pte)) set_page_dirty(page); + tlb->freed++; tlb_remove_page(tlb, page); } } |