From: Linus Torvalds
Date: Mon, 20 May 2002 14:34:26 +0000 (-0700)
Subject: Make generic TLB shootdown friendlier to non-x86 architectures
X-Git-Tag: v2.5.17
X-Git-Url: http://git.neil.brown.name/?a=commitdiff_plain;h=e6d19c6ab5f0f54d15277be9933183050d01ce2c;p=history.git

Make generic TLB shootdown friendlier to non-x86 architectures
---

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 778990a36b41..6e1aabd52bec 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -16,8 +16,17 @@
 #include <linux/config.h>
 #include <asm/tlbflush.h>
 
-/* aim for something that fits in the L1 cache */
-#define FREE_PTE_NR	508
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much..
+ */
+#ifdef CONFIG_SMP
+  #define FREE_PTE_NR	507
+  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
+#else
+  #define FREE_PTE_NR	1
+  #define tlb_fast_mode(tlb) 1
+#endif
 
 /* mmu_gather_t is an opaque type used by the mm code for passing around any
  * data needed by arch specific code for tlb_remove_page. This structure can
@@ -34,10 +43,6 @@ typedef struct free_pte_ctx {
 /* Users of the generic TLB shootdown code must declare this storage space. */
 extern mmu_gather_t	mmu_gathers[NR_CPUS];
 
-/* Do me later */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
 /* tlb_gather_mmu
  *	Return a pointer to an initialized mmu_gather_t.
  */
@@ -57,9 +62,9 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
 	unsigned long nr;
 
-	flush_tlb_mm(tlb->mm);
+	tlb_flush(tlb);
 	nr = tlb->nr;
-	if (nr != ~0UL) {
+	if (!tlb_fast_mode(tlb)) {
 		unsigned long i;
 		tlb->nr = 0;
 		for (i=0; i < nr; i++)
@@ -91,8 +96,7 @@ static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
  */
 static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
 {
-	/* Handle the common case fast, first. */\
-	if (tlb->nr == ~0UL) {
+	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
 		return;
 	}
diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h
index 69c0faa93194..844c3d4c9aaa 100644
--- a/include/asm-i386/tlb.h
+++ b/include/asm-i386/tlb.h
@@ -1 +1,20 @@
+#ifndef _I386_TLB_H
+#define _I386_TLB_H
+
+/*
+ * x86 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
 #include <asm-generic/tlb.h>
+
+#endif
diff --git a/mm/memory.c b/mm/memory.c
index 8de16cbed3d5..c43c303c4b72 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -348,11 +348,13 @@ static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
 			pte_clear(ptep);
 			pfn = pte_pfn(pte);
+			tlb_remove_tlb_entry(tlb, pte, address+offset);
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
 				if (!PageReserved(page)) {
 					if (pte_dirty(pte))
 						set_page_dirty(page);
+					tlb->freed++;
 					tlb_remove_page(tlb, page);
 				}
 			}