From c60ed93240d2057b6c24093775f8748ce6aa1371 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 23 Nov 2007 15:16:12 -0500 Subject: [PATCH] Import 2.1.115pre2 --- arch/alpha/mm/init.c | 15 ++++++++++++ arch/arm/kernel/process.c | 1 + arch/arm/mm/init.c | 16 +++++++++++++ arch/i386/config.in | 4 ---- arch/i386/mm/init.c | 15 ++++++++++++ arch/m68k/mm/init.c | 14 +++++++++++ arch/m68k/mm/memory.c | 5 ++-- arch/mips/mm/init.c | 16 +++++++++++++ arch/ppc/mm/init.c | 16 +++++++++++++ arch/sparc/kernel/process.c | 6 ++--- arch/sparc/mm/srmmu.c | 20 ++++++++-------- arch/sparc/mm/sun4c.c | 17 ++++++++++++++ arch/sparc64/kernel/process.c | 41 +------------------------------- arch/sparc64/mm/init.c | 44 +++++++++++++++++++++++++++++++++++ include/asm-alpha/pgtable.h | 2 ++ include/asm-arm/pgtable.h | 2 ++ include/asm-i386/pgtable.h | 2 ++ include/asm-i386/spinlock.h | 11 +++++++-- include/asm-m68k/pgtable.h | 8 ++++--- include/asm-mips/pgtable.h | 2 ++ include/asm-ppc/pgtable.h | 2 ++ include/asm-sparc/pgtable.h | 2 ++ include/asm-sparc64/pgtable.h | 2 ++ include/linux/mm.h | 4 +++- mm/memory.c | 31 +++++++++++------------- 25 files changed, 214 insertions(+), 84 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 7e0a2f070c04..3e1437cc12dc 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -88,6 +88,21 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) return (pte_t *) pmd_page(*pmd) + offset; } +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} /* * BAD_PAGE is the page that is used for page faults when linux diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index a6f1edf8bc7e..79ac806e3ced 100644 --- 
a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -71,6 +71,7 @@ asmlinkage int sys_idle(void) current->priority = -100; for (;;) { + check_pgt_cache(); #if 0 //def ARCH_IDLE_OK if (!hlt_counter && !need_resched) proc_idle (); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index b5851a854eb4..a674b578d790 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -34,6 +34,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern char _etext, _stext, _edata, __bss_start, _end; extern char __init_begin, __init_end; +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a diff --git a/arch/i386/config.in b/arch/i386/config.in index 6af77b9ea9ab..17530afac8d3 100644 --- a/arch/i386/config.in +++ b/arch/i386/config.in @@ -146,10 +146,6 @@ mainmenu_option next_comment comment 'Kernel hacking' #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC -bool 'Kernel profiling support' CONFIG_PROFILE -if [ "$CONFIG_PROFILE" = "y" ]; then - int ' Profile shift count' CONFIG_PROFILE_SHIFT 2 -fi bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ endmenu diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index a93383ac6a34..7ee11e1f7fa2 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -88,6 +88,21 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) return (pte_t *) (pmd_page(*pmd) + offset); } +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + 
free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} /* * BAD_PAGE is the page that is used for page faults when linux diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 6666da67f199..7ea05b3dd47e 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -31,6 +31,20 @@ extern void die_if_kernel(char *,struct pt_regs *,long); extern void init_kpointer_table(void); extern void show_net_buffers(void); +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pmd_quicklist) + freed += free_pmd_slow(get_pmd_fast()); + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index 8aab92265a60..1f4d3a3fe63e 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c @@ -161,7 +161,7 @@ pmd_t *get_pointer_table (void) return pmdp; } -void free_pointer_table (pmd_t *ptable) +int free_pointer_table (pmd_t *ptable) { struct ptable_desc *dp; unsigned long page = (unsigned long)ptable & PAGE_MASK; @@ -189,7 +189,7 @@ void free_pointer_table (pmd_t *ptable) cache_page (dp->page); free_page (dp->page); kfree (dp); - return; + return 1; } else { /* * move this descriptor to the front of the list, since @@ -205,6 +205,7 @@ void free_pointer_table (pmd_t *ptable) ptable_list.next->prev = dp; ptable_list.next = dp; restore_flags(flags); + return 0; } } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 1085c65f490a..31ce86fc02e4 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -47,6 +47,22 @@ asmlinkage int sys_cacheflush(void *addr, int bytes, int cache) return 0; } +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + 
free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c index ca9ef97eaa56..20ef1d384f8c 100644 --- a/arch/ppc/mm/init.c +++ b/arch/ppc/mm/init.c @@ -148,6 +148,22 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset) return (pte_t *) pmd_page(*pmd) + offset; } +int do_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 68bbe773a892..3b850596e163 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c @@ -40,7 +40,6 @@ #include extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); -extern void srmmu_check_pgt_cache(void); struct task_struct *current_set[NR_CPUS] = {&init_task, }; @@ -92,9 +91,8 @@ asmlinkage int sys_idle(void) } } restore_flags(flags); - check_pgt_cache(); - } else - srmmu_check_pgt_cache(); + } + check_pgt_cache(); schedule(); } ret = 0; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 2002a979fdbb..ac0b1e862de9 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -2722,16 +2722,12 @@ __initfunc(static void get_srmmu_type(void)) srmmu_is_bad(); } -/* Low and high watermarks for page table cache. 
- The system should try to have pgt_water[0] <= cache elements <= pgt_water[1] - */ -extern int pgt_cache_water[2]; - -void srmmu_check_pgt_cache(void) +static int srmmu_check_pgt_cache(int low, int high) { struct page *page, *page2; + int freed = 0; - if (pgtable_cache_size > pgt_cache_water[0]) { + if (pgtable_cache_size > high) { spin_lock(&pte_spinlock); for (page2 = NULL, page = (struct page *)pte_quicklist; page;) { if ((unsigned int)page->pprev_hash == 0xffff) { @@ -2743,11 +2739,12 @@ void srmmu_check_pgt_cache(void) page->pprev_hash = NULL; pgtable_cache_size -= 16; free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + freed++; if (page2) page = page2->next_hash; else page = (struct page *)pte_quicklist; - if (pgtable_cache_size <= pgt_cache_water[1]) + if (pgtable_cache_size <= low) break; continue; } @@ -2756,7 +2753,7 @@ void srmmu_check_pgt_cache(void) } spin_unlock(&pte_spinlock); } - if (pgd_cache_size > pgt_cache_water[0] / 4) { + if (pgd_cache_size > high / 4) { spin_lock(&pgd_spinlock); for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) { if ((unsigned int)page->pprev_hash == 0xf) { @@ -2768,11 +2765,12 @@ void srmmu_check_pgt_cache(void) page->pprev_hash = NULL; pgd_cache_size -= 4; free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + freed++; if (page2) page = page2->next_hash; else page = (struct page *)pgd_quicklist; - if (pgd_cache_size <= pgt_cache_water[1] / 4) + if (pgd_cache_size <= low / 4) break; continue; } @@ -2781,6 +2779,7 @@ void srmmu_check_pgt_cache(void) } spin_unlock(&pgd_spinlock); } + return freed; } extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, @@ -2853,6 +2852,7 @@ __initfunc(void ld_mmu_srmmu(void)) BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0)); BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM); 
BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM); diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index d0443e19c083..9f00068b0143 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -2536,6 +2536,22 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void) return (pgd_t *)ret; } +static int sun4c_check_pgt_cache(int low, int high) +{ + int freed = 0; + if(pgtable_cache_size > high) { + do { + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; + if(pmd_quicklist) + free_pmd_slow(get_pmd_fast()), freed++; + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } + return freed; +} + static void sun4c_set_pgdir(unsigned long address, pgd_t entry) { /* Nothing to do */ @@ -2803,6 +2819,7 @@ __initfunc(void ld_mmu_sun4c(void)) BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP); diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 5f9f81caa285..4136ba3bedac 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -43,45 +43,6 @@ #ifndef __SMP__ -extern int pgt_cache_water[2]; - -static inline void ultra_check_pgt_cache(void) -{ - struct page *page, *page2; - - if(pgtable_cache_size > pgt_cache_water[0]) { - do { - if(pmd_quicklist) - free_pmd_slow(get_pmd_fast()); - if(pte_quicklist) - free_pte_slow(get_pte_fast()); - } while(pgtable_cache_size > pgt_cache_water[1]); - } - if (pgd_cache_size > pgt_cache_water[0] / 4) { - for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) { - if ((unsigned long)page->pprev_hash == 3) { - if (page2) - page2->next_hash = page->next_hash; - else - (struct page *)pgd_quicklist = page->next_hash; - 
page->next_hash = NULL; - page->pprev_hash = NULL; - pgd_cache_size -= 2; - free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); - if (page2) - page = page2->next_hash; - else - page = (struct page *)pgd_quicklist; - if (pgd_cache_size <= pgt_cache_water[1] / 4) - break; - continue; - } - page2 = page; - page = page->next_hash; - } - } -} - /* * the idle loop on a Sparc... ;) */ @@ -94,7 +55,7 @@ asmlinkage int sys_idle(void) current->priority = -100; current->counter = -100; for (;;) { - ultra_check_pgt_cache(); + check_pgt_cache(); run_task_queue(&tq_scheduler); schedule(); } diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7813886e6cde..2d667073ab61 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -54,6 +54,50 @@ static __inline__ void __init_pmd(pmd_t *pmdp) __bfill64((void *)pmdp, &two_null_pte_table); } +int do_check_pgt_cache(int low, int high) +{ + struct page *page, *page2; + int freed = 0; + + if(pgtable_cache_size > high) { + do { +#ifdef __SMP__ + if(pgd_quicklist) + free_pgd_slow(get_pgd_fast()), freed++; +#endif + if(pte_quicklist) + free_pte_slow(get_pte_fast()), freed++; + } while(pgtable_cache_size > low); + } +#ifndef __SMP__ + if (pgd_cache_size > high / 4) { + for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) { + if ((unsigned long)page->pprev_hash == 3) { + if (page2) + page2->next_hash = page->next_hash; + else + (struct page *)pgd_quicklist = page->next_hash; + page->next_hash = NULL; + page->pprev_hash = NULL; + pgd_cache_size -= 2; + free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + freed++; + if (page2) + page = page2->next_hash; + else + page = (struct page *)pgd_quicklist; + if (pgd_cache_size <= low / 4) + break; + continue; + } + page2 = page; + page = page->next_hash; + } + } +#endif + return freed; +} + /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. 
Older versions of linux just did a diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h index 08cd606dc300..ba521665789e 100644 --- a/include/asm-alpha/pgtable.h +++ b/include/asm-alpha/pgtable.h @@ -559,6 +559,8 @@ extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address) #define pte_alloc_kernel pte_alloc #define pmd_alloc_kernel pmd_alloc +extern int do_check_pgt_cache(int, int); + extern inline void set_pgdir(unsigned long address, pgd_t entry) { struct task_struct * p; diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index 4f9e5ba4aa0f..c447459a0409 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -7,4 +7,6 @@ #define module_map vmalloc #define module_unmap vfree +extern int do_check_pgt_cache(int, int); + #endif /* _ASMARM_PGTABLE_H */ diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 77a50e114e0c..7b1a5f1d9119 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -547,6 +547,8 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address) #define pmd_free_kernel pmd_free #define pmd_alloc_kernel pmd_alloc +extern int do_check_pgt_cache(int, int); + extern inline void set_pgdir(unsigned long address, pgd_t entry) { struct task_struct * p; diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 890ac50ae4c3..b07d61a68ebf 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -9,9 +9,16 @@ /* * Your basic spinlocks, allowing only a single CPU anywhere + * + * Gcc-2.7.x has a nasty bug with empty initializers. 
*/ -typedef struct { int gcc_is_buggy; } spinlock_t; -#define SPIN_LOCK_UNLOCKED { 0 } +#if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8) + typedef struct { } spinlock_t; + #define SPIN_LOCK_UNLOCKED { } +#else + typedef struct { int gcc_is_buggy; } spinlock_t; + #define SPIN_LOCK_UNLOCKED { 0 } +#endif #define spin_lock_init(lock) do { } while(0) #define spin_lock(lock) do { } while(0) diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h index a0bef56e92fd..c05a7c6008cc 100644 --- a/include/asm-m68k/pgtable.h +++ b/include/asm-m68k/pgtable.h @@ -621,7 +621,7 @@ extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset); extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset); extern pmd_t *get_pointer_table(void); -extern void free_pointer_table(pmd_t *); +extern int free_pointer_table(pmd_t *); extern pmd_t *get_kpointer_table(void); extern void free_kpointer_table(pmd_t *); @@ -671,9 +671,9 @@ extern __inline__ void free_pmd_fast(pmd_t *pmd) quicklists.pgtable_cache_sz++; } -extern __inline__ void free_pmd_slow(pmd_t *pmd) +extern __inline__ int free_pmd_slow(pmd_t *pmd) { - free_pointer_table(pmd); + return free_pointer_table(pmd); } /* The pgd cache is folded into the pmd cache, so these are dummy routines. 
*/ @@ -789,6 +789,8 @@ extern inline pgd_t * pgd_alloc(void) return pgd; } +extern int do_check_pgt_cache(int, int); + extern inline void set_pgdir(unsigned long address, pgd_t entry) { } diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index dbecc0da9cb3..3d9fc6a71abc 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -489,6 +489,8 @@ extern inline pgd_t *pgd_alloc(void) return (pgd_t *) page; } +extern int do_check_pgt_cache(int, int); + extern pgd_t swapper_pg_dir[1024]; extern void (*update_mmu_cache)(struct vm_area_struct *vma, diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 4464ae586948..dcc818c5fb1d 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -513,6 +513,8 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address) #define pmd_alloc_kernel pmd_alloc #define pte_alloc_kernel pte_alloc +extern int do_check_pgt_cache(int, int); + extern inline void set_pgdir(unsigned long address, pgd_t entry) { struct task_struct * p; diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index 11f37e980856..b83420985b69 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -354,6 +354,7 @@ BTFIXUPDEF_CALL(pte_t *, get_pte_fast, void) BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void) BTFIXUPDEF_CALL(void, free_pte_slow, pte_t *) BTFIXUPDEF_CALL(void, free_pgd_slow, pgd_t *) +BTFIXUPDEF_CALL(int, do_check_pgt_cache, int, int) #define get_pte_fast() BTFIXUP_CALL(get_pte_fast)() extern __inline__ pmd_t *get_pmd_fast(void) @@ -366,6 +367,7 @@ extern __inline__ void free_pmd_slow(pmd_t *pmd) { } #define free_pgd_slow(pgd) BTFIXUP_CALL(free_pgd_slow)(pgd) +#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high) /* * Allocate and free page tables. 
The xxx_kernel() versions are diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index fc993c0f0dd6..3ed564ff244b 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -541,6 +541,8 @@ extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address) #define pte_alloc_kernel(pmd, addr) pte_alloc(pmd, addr) #define pmd_alloc_kernel(pgd, addr) pmd_alloc(pgd, addr) +extern int do_check_pgt_cache(int, int); + extern inline void set_pgdir(unsigned long address, pgd_t entry) { /* Nothing to do on sparc64 :) */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 8ff946430ce0..e93dd799b019 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -277,9 +277,11 @@ extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t p extern void vmtruncate(struct inode * inode, unsigned long offset); extern void handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access); -extern void check_pgt_cache(void); extern void make_pages_present(unsigned long addr, unsigned long end); +extern int pgt_cache_water[2]; +extern int check_pgt_cache(void); + extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem); extern void mem_init(unsigned long start_mem, unsigned long end_mem); extern void show_mem(void); diff --git a/mm/memory.c b/mm/memory.c index 324e5d987c19..ed25e9e312a1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -56,6 +56,11 @@ unsigned long max_mapnr = 0; unsigned long num_physpages = 0; void * high_memory = NULL; +/* Low and high watermarks for page table cache. 
+ The system should try to have pgt_water[0] <= cache elements <= pgt_water[1] + */ +int pgt_cache_water[2] = { 25, 50 }; + /* * We special-case the C-O-W ZERO_PAGE, because it's such * a common occurrence (no need to read the page to know * that it's zero - better for the cache and memory subsystem) @@ -136,7 +141,8 @@ void clear_page_tables(struct task_struct * tsk) free_one_pgd(page_dir + i); /* keep the page table cache within bounds */ - check_pgt_cache(); + do_check_pgt_cache(pgt_cache_water[0], + pgt_cache_water[1]); return; out_bad: @@ -165,7 +171,8 @@ void free_page_tables(struct mm_struct * mm) pgd_free(page_dir); /* keep the page table cache within bounds */ - check_pgt_cache(); + do_check_pgt_cache(pgt_cache_water[0], + pgt_cache_water[1]); out: return; @@ -948,21 +955,9 @@ void make_pages_present(unsigned long addr, unsigned long end) } } -/* Low and high watermarks for page table cache. - The system should try to have pgt_water[0] <= cache elements <= pgt_water[1] - */ -int pgt_cache_water[2] = { 25, 50 }; - -void check_pgt_cache(void) +/* Returns the number of pages freed */ +int check_pgt_cache(void) { - if (pgtable_cache_size > pgt_cache_water[1]) { - do { - if (pgd_quicklist) - free_pgd_slow(get_pgd_fast()); - if (pmd_quicklist) - free_pmd_slow(get_pmd_fast()); - if (pte_quicklist) - free_pte_slow(get_pte_fast()); - } while (pgtable_cache_size > pgt_cache_water[0]); - } + return do_check_pgt_cache(pgt_cache_water[0], + pgt_cache_water[1]); } -- 2.39.5