git.neil.brown.name Git - history.git/commitdiff
Import 2.4.0-test6pre8
authorLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:37:40 +0000 (15:37 -0500)
committerLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:37:40 +0000 (15:37 -0500)
104 files changed:
arch/alpha/mm/init.c
arch/arm/mm/init.c
arch/arm/mm/mm-armv.c
arch/arm/mm/small_page.c
arch/i386/mm/init.c
arch/i386/mm/ioremap.c
arch/ia64/ia32/binfmt_elf32.c
arch/ia64/mm/init.c
arch/m68k/atari/stram.c
arch/m68k/mm/init.c
arch/m68k/mm/memory.c
arch/m68k/mm/motorola.c
arch/mips/arc/memory.c
arch/mips/dec/prom/memory.c
arch/mips/mm/init.c
arch/mips/mm/umap.c
arch/mips64/arc/memory.c
arch/mips64/mm/init.c
arch/mips64/mm/umap.c
arch/ppc/mm/init.c
arch/s390/mm/init.c
arch/sh/mm/init.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4m_smp.c
arch/sparc/mm/generic.c
arch/sparc/mm/init.c
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/sun4c.c
arch/sparc64/mm/fault.c
arch/sparc64/mm/generic.c
arch/sparc64/mm/init.c
drivers/block/lvm-snap.c
drivers/char/agp/agpgart_be.c
drivers/char/bttv-driver.c
drivers/char/buz.c
drivers/char/cpia.c
drivers/char/drm/i810_dma.c
drivers/char/drm/memory.c
drivers/char/drm/mga_dma.c
drivers/char/drm/vm.c
drivers/char/ftape/lowlevel/ftape-buffer.c
drivers/char/planb.c
drivers/char/zr36120_mem.c
drivers/ieee1394/video1394.c
drivers/sound/cmpci.c
drivers/sound/dmabuf.c
drivers/sound/emu10k1/audio.c
drivers/sound/es1370.c
drivers/sound/es1371.c
drivers/sound/esssolo1.c
drivers/sound/i810_audio.c
drivers/sound/maestro.c
drivers/sound/msnd_pinnacle.c
drivers/sound/sonicvibes.c
drivers/sound/sscape.c
drivers/sound/trident.c
drivers/usb/audio.c
drivers/usb/ibmcam.c
drivers/usb/ov511.c
drivers/video/acornfb.c
drivers/video/sa1100fb.c
fs/proc/array.c
include/asm-alpha/pgalloc.h
include/asm-alpha/pgtable.h
include/asm-alpha/processor.h
include/asm-arm/pgtable.h
include/asm-i386/pgtable.h
include/asm-i386/processor.h
include/asm-ia64/pgtable.h
include/asm-ia64/processor.h
include/asm-m68k/pgtable.h
include/asm-m68k/processor.h
include/asm-mips/pgtable.h
include/asm-mips/processor.h
include/asm-mips64/pgtable.h
include/asm-mips64/processor.h
include/asm-ppc/pgtable.h
include/asm-ppc/processor.h
include/asm-s390/pgtable.h
include/asm-s390/processor.h
include/asm-sh/pgtable.h
include/asm-sh/processor.h
include/asm-sparc/pgtable.h
include/asm-sparc64/pgalloc.h
include/asm-sparc64/pgtable.h
include/asm-sparc64/processor.h
include/linux/highmem.h
include/linux/mm.h
include/linux/pagemap.h
include/linux/wrapper.h
kernel/ptrace.c
mm/bootmem.c
mm/highmem.c
mm/memory.c
mm/page_alloc.c
mm/page_io.c
mm/slab.c
mm/swap_state.c
mm/swapfile.c
mm/vmalloc.c
mm/vmscan.c
net/packet/af_packet.c

index 84b608f4518e4d26752afd5b413fa0b71ada03f4..2ab7941b29b4bdaaf93f27435d235d88b2600b3a 100644 (file)
@@ -141,7 +141,7 @@ pte_t
 __bad_page(void)
 {
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
-       return pte_mkdirty(mk_pte(mem_map + MAP_NR(EMPTY_PGE), PAGE_SHARED));
+       return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
 }
 
 void
@@ -325,8 +325,8 @@ free_initmem (void)
 
        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map+MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
@@ -339,8 +339,8 @@ void
 free_initrd_mem(unsigned long start, unsigned long end)
 {
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
index 589653111074d9c82cf53b980082426152bde918..72eaa2e4752104d631f39630426192e01e204511 100644 (file)
@@ -534,8 +534,8 @@ void __init paging_init(struct meminfo *mi)
        memzero(zero_page, PAGE_SIZE);
        memzero(bad_page, PAGE_SIZE);
 
-       empty_zero_page = mem_map + MAP_NR(zero_page);
-       empty_bad_page  = mem_map + MAP_NR(bad_page);
+       empty_zero_page = virt_to_page(zero_page);
+       empty_bad_page  = virt_to_page(bad_page);
        empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET;
 }
 
@@ -598,7 +598,7 @@ void __init mem_init(void)
 static inline void free_area(unsigned long addr, unsigned long end, char *s)
 {
        unsigned int size = (end - addr) >> 10;
-       struct page *page = mem_map + MAP_NR(addr);
+       struct page *page = virt_to_page(addr);
 
        for (; addr < end; addr += PAGE_SIZE, page ++) {
                ClearPageReserved(page);
@@ -632,8 +632,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
        if (!keep_initrd) {
                for (addr = start; addr < end; addr += PAGE_SIZE) {
-                       ClearPageReserved(mem_map + MAP_NR(addr));
-                       set_page_count(mem_map+MAP_NR(addr), 1);
+                       ClearPageReserved(virt_to_page(addr));
+                       set_page_count(virt_to_page(addr), 1);
                        free_page(addr);
                        totalram_pages++;
                }
index 1edbc35fd938924a48687da67ecd16eac24c00b1..e656a25d2accc90e8fbcaae622f0f79d3f88454c 100644 (file)
@@ -417,8 +417,8 @@ static inline void free_memmap(unsigned long start, unsigned long end)
        start = __phys_to_virt(start);
        end   = __phys_to_virt(end);
 
-       pg    = PAGE_ALIGN((unsigned long)(mem_map + MAP_NR(start)));
-       pgend = ((unsigned long)(mem_map + MAP_NR(end))) & PAGE_MASK;
+       pg    = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
+       pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;
 
        start = __virt_to_phys(pg);
        end   = __virt_to_phys(pgend);
index 40c91ba323966212626fc5f56986dba5ff973290..27fb0f663ea5edc932a299a9bc3661360e921c9f 100644 (file)
@@ -142,12 +142,10 @@ no_page:
 static void __free_small_page(unsigned long spage, struct order *order)
 {
        unsigned long flags;
-       unsigned long nr;
        struct page *page;
 
-       nr = MAP_NR(spage);
-       if (nr < max_mapnr) {
-               page = mem_map + nr;
+       page = virt_to_page(spage);
+       if (VALID_PAGE(page)) {
 
                /*
                 * The container-page must be marked Reserved
index af72d158103a548e07bc344d927c250bbbe7321d..9ba2baa3178728e2650d2c9d26025d47b3b8c4b0 100644 (file)
@@ -566,8 +566,6 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
        highmem_start_page = mem_map + highstart_pfn;
-       /* cache the highmem_mapnr */
-       highmem_mapnr = highstart_pfn;
        max_mapnr = num_physpages = highend_pfn;
 #else
        max_mapnr = num_physpages = max_low_pfn;
@@ -642,8 +640,8 @@ void free_initmem(void)
 
        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map+MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
@@ -656,8 +654,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
index cb44276729cded976f4112d45e9efe6d43e0f229..ba5c2e7ba03d81e4b0ac2795acafc1b1c2b86264 100644 (file)
@@ -121,15 +121,14 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
-               int i;
+               struct page *page;
 
                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);
           
-               for(i = MAP_NR(t_addr); i < MAP_NR(t_end); i++) {
-                       if(!PageReserved(mem_map + i))
+               for(page = virt_to_page(t_addr); page < virt_to_page(t_end); page++)
+                       if(!PageReserved(page))
                                return NULL;
-               }
        }
 
        /*
index 6c63e33a7d77eb0ac05dd3efd75434fa1aff1bf7..770ef63633908f21c5f1b9ec3d3b3f5983bf747f 100644 (file)
@@ -81,9 +81,9 @@ void ia64_elf32_init(struct pt_regs *regs)
 {
        int nr;
 
-       put_shared_page(current, mem_map + MAP_NR(ia32_gdt_table), IA32_PAGE_OFFSET);
+       put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_PAGE_OFFSET);
        if (PAGE_SHIFT <= IA32_PAGE_SHIFT)
-               put_shared_page(current, mem_map + MAP_NR(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
+               put_shared_page(current, virt_to_page(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
 
        nr = smp_processor_id();
        
index d911c91bf17888b7fe72127155f46a95d80ebe8a..8ddda7e1192febf11c7bf38971a3a99b9752c9a2 100644 (file)
@@ -173,8 +173,8 @@ free_initmem (void)
 
        addr = (unsigned long) &__init_begin;
        for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
-               clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
-               set_page_count(&mem_map[MAP_NR(addr)], 1);
+               clear_bit(PG_reserved, &virt_to_page(addr)->flags);
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                ++totalram_pages;
        }
@@ -188,8 +188,8 @@ free_initrd_mem(unsigned long start, unsigned long end)
        if (start < end)
                printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
-               clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags);
-               set_page_count(&mem_map[MAP_NR(start)], 1);
+               clear_bit(PG_reserved, &virt_to_page(start)->flags);
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                ++totalram_pages;
        }
@@ -372,7 +372,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
        unsigned long *count = arg;
        struct page *pg;
 
-       for (pg = mem_map + MAP_NR(start); pg < mem_map + MAP_NR(end); ++pg)
+       for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
                if (PageReserved(pg))
                        ++num_reserved;
        *count += num_reserved;
@@ -409,7 +409,7 @@ mem_init (void)
               datasize >> 10, initsize >> 10);
 
        /* install the gate page in the global page table: */
-       put_gate_page(mem_map + MAP_NR(__start_gate_section), GATE_ADDR);
+       put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
 
 #ifndef CONFIG_IA64_SOFTSDV_HACKS
        /*
index 2b804b7670a30741df873081d58a9d7dd978582e..d0acdaaaea3277bacc9563beea2cdaa7e2e60f37 100644 (file)
@@ -305,7 +305,7 @@ void __init atari_stram_reserve_pages(unsigned long start_mem)
 
        /* always reserve first page of ST-RAM, the first 2 kB are
         * supervisor-only! */
-       set_bit( PG_reserved, &mem_map[MAP_NR(stram_start)].flags );
+       set_bit( PG_reserved, &virt_to_page(stram_start)->flags );
 
 #ifdef CONFIG_STRAM_SWAP
        if (!max_swap_size) {
@@ -699,7 +699,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
                if (pte_page(pte) != page)
                        return;
                if (0 /* isswap */)
-                       mem_map[MAP_NR(pte_page(pte))].offset = page;
+                       virt_to_page(pte_page(pte))->offset = page;
                else
                        /* We will be removing the swap cache in a moment, so... */
                        set_pte(dir, pte_mkdirty(pte));
@@ -716,7 +716,7 @@ static inline void unswap_pte(struct vm_area_struct * vma, unsigned long
                DPRINTK( "unswap_pte: replacing entry %08lx by new page %08lx",
                                 entry, page );
                set_pte(dir, pte_mkdirty(__mk_pte(page,vma->vm_page_prot)));
-               atomic_inc(&mem_map[MAP_NR(page)].count);
+               atomic_inc(&virt_to_page(page)->count);
                ++vma->vm_mm->rss;
        }
        swap_free(entry);
@@ -1291,7 +1291,7 @@ static int get_gfp_order( unsigned long size )
 /* reserve a range of pages in mem_map[] */
 static void reserve_region( unsigned long addr, unsigned long end )
 {
-       mem_map_t *mapp = &mem_map[MAP_NR(addr)];
+       mem_map_t *mapp = virt_to_page(addr);
 
        for( ; addr < end; addr += PAGE_SIZE, ++mapp )
                set_bit( PG_reserved, &mapp->flags );
index 4e6b282659b58a728e75471a54733224247ca4dc..612d51edaef5067857e7b530e6064935f552fd49 100644 (file)
@@ -153,10 +153,10 @@ void __init mem_init(void)
 #if 0
 #ifndef CONFIG_SUN3
                if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
-                       clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+                       clear_bit(PG_DMA, &virt_to_page(tmp)->flags);
 #endif
 #endif
-               if (PageReserved(mem_map+MAP_NR(tmp))) {
+               if (PageReserved(virt_to_page(tmp))) {
                        if (tmp >= (unsigned long)&_text
                            && tmp < (unsigned long)&_etext)
                                codepages++;
@@ -168,7 +168,7 @@ void __init mem_init(void)
                        continue;
                }
 #if 0
-               set_page_count(mem_map+MAP_NR(tmp), 1);
+               set_page_count(virt_to_page(tmp), 1);
 #ifdef CONFIG_BLK_DEV_INITRD
                if (!initrd_start ||
                    (tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
@@ -202,8 +202,8 @@ void __init mem_init(void)
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
index a0a5336ed3d749adc730bf3b5f96a1c08dd70f9a..a3c4f38c5aeffb416071477c3b2ac2b610c3a1f6 100644 (file)
@@ -93,7 +93,7 @@ pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
 typedef struct list_head ptable_desc;
 static LIST_HEAD(ptable_list);
 
-#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)])
+#define PD_PTABLE(page) ((ptable_desc *)virt_to_page(page))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, list))
 #define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
 
index 146dc7279b2c34c8dda751bd0fdf6607bb54f045..61cd918e7c3aa5ddafb34723831e303a56e0c176 100644 (file)
@@ -293,8 +293,8 @@ void free_initmem(void)
 
        addr = (unsigned long)&__init_begin;
        for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
-               mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
-               set_page_count(mem_map+MAP_NR(addr), 1);
+               virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
        }
 }
index 49de2ffa94fd1e3e7933006ee6d45ff700f32955..7988cfc39a31693936f19f17a5f321ae9c55a8b1 100644 (file)
@@ -228,8 +228,8 @@ prom_free_prom_memory (void)
 
                addr = PAGE_OFFSET + p->base;
                while (addr < p->base + p->size) {
-                       ClearPageReserved(mem_map + MAP_NR(addr));
-                       set_page_count(mem_map + MAP_NR(addr), 1);
+                       ClearPageReserved(virt_to_page(addr));
+                       set_page_count(virt_to_page(addr), 1);
                        free_page(addr);
                        addr += PAGE_SIZE;
                        freed += PAGE_SIZE;
index 1025104ce12327784554448188e6c02b3328f213..e54e78a2562a0037e5b0606289f2515d7b4f8d28 100644 (file)
@@ -149,8 +149,8 @@ void prom_free_prom_memory (void)
 
        addr = PAGE_SIZE;
        while (addr < end) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map + MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                addr += PAGE_SIZE;
        }
index b1fde51089e996e4840f3e47b159aee39d227750..a6f448e05c881eb49cdedd2c17de9534935fd5eb 100644 (file)
@@ -120,7 +120,8 @@ unsigned long empty_zero_page, zero_page_mask;
 
 static inline unsigned long setup_zero_pages(void)
 {
-       unsigned long order, size, pg;
+       unsigned long order, size;
+       struct page *page;
 
        switch (mips_cputype) {
        case CPU_R4000SC:
@@ -137,11 +138,11 @@ static inline unsigned long setup_zero_pages(void)
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");
 
-       pg = MAP_NR(empty_zero_page);
-       while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
-               set_bit(PG_reserved, &mem_map[pg].flags);
-               set_page_count(mem_map + pg, 0);
-               pg++;
+       page = virt_to_page(empty_zero_page);
+       while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+               set_bit(PG_reserved, &page->flags);
+               set_page_count(page, 0);
+               page++;
        }
 
        size = PAGE_SIZE << order;
@@ -309,8 +310,8 @@ void __init mem_init(void)
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
@@ -329,8 +330,8 @@ void free_initmem(void)
     
        addr = (unsigned long) &__init_begin;
        while (addr < (unsigned long) &__init_end) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map + MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
                addr += PAGE_SIZE;
index 003866ac439f4080a88fa0199c291ce9ef6c75aa..465bec37b44801975ddc460fb6d08e0deb8caa58 100644 (file)
@@ -115,10 +115,10 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
        if (pte_present(page)) {
-               unsigned long nr = pte_pagenr(page);
-               if (nr >= max_mapnr || PageReserved(mem_map+nr))
+               struct page *ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return;
-               __free_page(pte_page(page));
+               __free_page(ptpage);
                if (current->mm->rss <= 0)
                        return;
                current->mm->rss--;
index 1e88dd6c7d3b617c0c4fe8e4e10b9be1c5869f99..d2ef958696303f6e0b65383d9b230658329c5536 100644 (file)
@@ -233,8 +233,8 @@ prom_free_prom_memory (void)
                addr = PAGE_OFFSET + (unsigned long) (long) p->base;
                end = addr + (unsigned long) (long) p->size;
                while (addr < end) {
-                       ClearPageReserved(mem_map + MAP_NR(addr));
-                       set_page_count(mem_map + MAP_NR(addr), 1);
+                       ClearPageReserved(virt_to_page(addr));
+                       set_page_count(virt_to_page(addr), 1);
                        free_page(addr);
                        addr += PAGE_SIZE;
                        freed += PAGE_SIZE;
index 223ca3e5e625e7a933384b181672c9d817e50793..f1d95c702e181a0481448623cb6e586667de508a 100644 (file)
@@ -215,7 +215,8 @@ unsigned long empty_zero_page, zero_page_mask;
 
 unsigned long setup_zero_pages(void)
 {
-       unsigned long order, size, pg;
+       unsigned long order, size;
+       struct page *page;
 
        switch (mips_cputype) {
        case CPU_R4000SC:
@@ -232,11 +233,11 @@ unsigned long setup_zero_pages(void)
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");
 
-       pg = MAP_NR(empty_zero_page);
-       while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
-               set_bit(PG_reserved, &mem_map[pg].flags);
-               set_page_count(mem_map + pg, 0);
-               pg++;
+       page = virt_to_page(empty_zero_page);
+       while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+               set_bit(PG_reserved, &page->flags);
+               set_page_count(page, 0);
+               page++;
        }
 
        size = PAGE_SIZE << order;
@@ -374,8 +375,8 @@ void __init mem_init(void)
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
@@ -396,8 +397,8 @@ free_initmem(void)
        addr = (unsigned long)(&__init_begin);
        while (addr < (unsigned long)&__init_end) {
                page = PAGE_OFFSET | CPHYSADDR(addr);
-               ClearPageReserved(mem_map + MAP_NR(page));
-               set_page_count(mem_map + MAP_NR(page), 1);
+               ClearPageReserved(virt_to_page(page));
+               set_page_count(virt_to_page(page), 1);
                free_page(page);
                totalram_pages++;
                addr += PAGE_SIZE;
index b8ae542ccad5799a5943a61d7affc2de7086cdae..9b0e7db1e015d299a1a10637e31a5f7249ad40f6 100644 (file)
@@ -109,10 +109,10 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
        if (pte_present(page)) {
-               unsigned long nr = pte_pagenr(page);
-               if (nr >= max_mapnr || PageReserved(mem_map+nr))
+               struct page *ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return;
-               __free_page(pte_page(page));
+               __free_page(ptpage);
                if (current->mm->rss <= 0)
                        return;
                current->mm->rss--;
index 16c3bc694c63753c1c13ae627b9ef733f5b1557f..67257c50bf238f8274d88823162c1d428cc41aa4 100644 (file)
@@ -797,8 +797,8 @@ void __init free_initmem(void)
 #define FREESEC(START,END,CNT) do { \
        a = (unsigned long)(&START); \
        for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
-               clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
-               set_page_count(mem_map+MAP_NR(a), 1); \
+               clear_bit(PG_reserved, &virt_to_page(a)->flags); \
+               set_page_count(virt_to_page(a), 1); \
                free_page(a); \
                CNT++; \
        } \
@@ -865,8 +865,8 @@ void __init free_initmem(void)
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
        for (; start < end; start += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(start));
-               set_page_count(mem_map+MAP_NR(start), 1);
+               ClearPageReserved(virt_to_page(start));
+               set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
@@ -1187,7 +1187,7 @@ void __init mem_init(void)
           make sure the ramdisk pages aren't reserved. */
        if (initrd_start) {
                for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
-                       clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
+                       clear_bit(PG_reserved, &virt_to_page(addr)->flags);
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
@@ -1196,17 +1196,17 @@ void __init mem_init(void)
        if ( rtas_data )
                for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
                     addr += PAGE_SIZE)
-                       SetPageReserved(mem_map + MAP_NR(addr));
+                       SetPageReserved(virt_to_page(addr));
 #endif /* defined(CONFIG_ALL_PPC) */
        if ( sysmap_size )
                for (addr = (unsigned long)sysmap;
                     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
                     addr += PAGE_SIZE)
-                       SetPageReserved(mem_map + MAP_NR(addr));
+                       SetPageReserved(virt_to_page(addr));
        
        for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
             addr += PAGE_SIZE) {
-               if (!PageReserved(mem_map + MAP_NR(addr)))
+               if (!PageReserved(virt_to_page(addr)))
                        continue;
                if (addr < (ulong) etext)
                        codepages++;
index 4e30c015a94f370d6d6d90a73fb5ce8eca5d2b01..a45d02ed8c332d853451182b48d91c36882be86d 100644 (file)
@@ -351,8 +351,8 @@ void free_initmem(void)
 
         addr = (unsigned long)(&__init_begin);
         for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map+MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
         }
@@ -366,8 +366,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
         if (start < end)
                 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
         for (; start < end; start += PAGE_SIZE) {
-                ClearPageReserved(mem_map + MAP_NR(start));
-                set_page_count(mem_map+MAP_NR(start), 1);
+                ClearPageReserved(virt_to_page(start));
+                set_page_count(virt_to_page(start), 1);
                 free_page(start);
                 totalram_pages++;
         }
index f798ef968a5bb78afd7e3fd917a13bd2c6d9ff0b..5e0632a86c847f801ccb4489f6893b3c0190e2d7 100644 (file)
@@ -270,8 +270,8 @@ void free_initmem(void)
        
        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(addr));
-               set_page_count(mem_map+MAP_NR(addr), 1);
+               ClearPageReserved(virt_to_page(addr));
+               set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
@@ -283,8 +283,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 {
        unsigned long p;
        for (p = start; p < end; p += PAGE_SIZE) {
-               ClearPageReserved(mem_map + MAP_NR(p));
-               set_page_count(mem_map+MAP_NR(p), 1);
+               ClearPageReserved(virt_to_page(p));
+               set_page_count(virt_to_page(p), 1);
                free_page(p);
                totalram_pages++;
        }
index 1294ad31ab841ca323035908f07bcccaa70d1490..2803926296e3c120912bbfe7be109acd4a524d62 100644 (file)
@@ -298,20 +298,20 @@ void __init smp4d_boot_cpus(void)
        }
 
        /* Free unneeded trap tables */
-       ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
-       set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+       ClearPageReserved(virt_to_page(trapbase_cpu1));
+       set_page_count(virt_to_page(trapbase_cpu1), 1);
        free_page((unsigned long)trapbase_cpu1);
        totalram_pages++;
        num_physpages++;
 
-       ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
-       set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+       ClearPageReserved(virt_to_page(trapbase_cpu2));
+       set_page_count(virt_to_page(trapbase_cpu2), 1);
        free_page((unsigned long)trapbase_cpu2);
        totalram_pages++;
        num_physpages++;
 
-       ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
-       set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+       ClearPageReserved(virt_to_page(trapbase_cpu3));
+       set_page_count(virt_to_page(trapbase_cpu3), 1);
        free_page((unsigned long)trapbase_cpu3);
        totalram_pages++;
        num_physpages++;
index 60c37373ebdb19a1980250785bfa9e58ea28a854..d6c126d00c1a897ebae19bdb82dc2782c344e82f 100644 (file)
@@ -279,22 +279,22 @@ void __init smp4m_boot_cpus(void)
        
        /* Free unneeded trap tables */
        if (!(cpu_present_map & (1 << 1))) {
-               ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
-               set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+               ClearPageReserved(virt_to_page(trapbase_cpu1));
+               set_page_count(virt_to_page(trapbase_cpu1), 1);
                free_page((unsigned long)trapbase_cpu1);
                totalram_pages++;
                num_physpages++;
        }
        if (!(cpu_present_map & (1 << 2))) {
-               ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
-               set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+               ClearPageReserved(virt_to_page(trapbase_cpu2));
+               set_page_count(virt_to_page(trapbase_cpu2), 1);
                free_page((unsigned long)trapbase_cpu2);
                totalram_pages++;
                num_physpages++;
        }
        if (!(cpu_present_map & (1 << 3))) {
-               ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
-               set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+               ClearPageReserved(virt_to_page(trapbase_cpu3));
+               set_page_count(virt_to_page(trapbase_cpu3), 1);
                free_page((unsigned long)trapbase_cpu3);
                totalram_pages++;
                num_physpages++;
index 9e599fd9dac25c7dc3fb79e84480e1c1469d7610..ad74d0ed2a5f1603fa94ac12ff63ebc7b1dafe27 100644 (file)
@@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page)
        if (pte_none(page))
                return;
        if (pte_present(page)) {
-               unsigned long nr = pte_pagenr(page);
-               if (nr >= max_mapnr || PageReserved(mem_map+nr))
+               struct page *ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return;
                /* 
                 * free_page() used to be able to clear swap cache
                 * entries.  We may now have to do it manually.  
                 */
-               free_page_and_swap_cache(mem_map+nr);
+               free_page_and_swap_cache(ptpage);
                return;
        }
        swap_free(pte_to_swp_entry(page));
index ced31b91f3d7a9deefe4c537a06e6b53157b45c3..3d42e0c630e7d750648cdbb15f6a3c018db85360 100644 (file)
@@ -369,8 +369,8 @@ void __init free_mem_map_range(struct page *first, struct page *last)
        prom_printf("[%p,%p] ", first, last);
 #endif
        while (first < last) {
-               ClearPageReserved(mem_map + MAP_NR(first));
-               set_page_count(mem_map + MAP_NR(first), 1);
+               ClearPageReserved(virt_to_page(first));
+               set_page_count(virt_to_page(first), 1);
                free_page((unsigned long)first);
                totalram_pages++;
                num_physpages++;
@@ -542,7 +542,7 @@ void free_initmem (void)
                struct page *p;
 
                page = addr + phys_base;
-               p = mem_map + MAP_NR(page);
+               p = virt_to_page(page);
 
                ClearPageReserved(p);
                set_page_count(p, 1);
@@ -559,7 +559,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
-               struct page *p = mem_map + MAP_NR(start);
+               struct page *p = virt_to_page(start);
 
                ClearPageReserved(p);
                set_page_count(p, 1);
index a370ea2d46fbab0ac716c3e86ba4d55a3fe5e532..f395f06f540c49e655eeac26babba4b11ebf3e7e 100644 (file)
@@ -190,7 +190,7 @@ static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset(pmdp, addr);
 
-                       set_pte(ptep, pte_val(mk_pte(mem_map + MAP_NR(page), dvma_prot)));
+                       set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot)));
                        
                        i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
index 5fde9a52a46d74e2c2c24a077688e2348920f4ae..e48128479ca78f33f9dca671e14bdbfcd97a97d6 100644 (file)
@@ -247,7 +247,7 @@ static void iommu_map_dma_area(unsigned long va, __u32 addr, int len)
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset(pmdp, addr);
 
-                       set_pte(ptep, mk_pte(mem_map + MAP_NR(page), dvma_prot));
+                       set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                        if (ipte_cache != 0) {
                                iopte_val(*iopte++) = MKIOPTE(__pa(page));
                        } else {
index c7f839f28f13bd4f7c45e17e0a389937ad2eb108..7e76841de03495edc81d4189c0b8a523229d0a00 100644 (file)
@@ -612,7 +612,7 @@ static void srmmu_free_task_struct(struct task_struct *tsk)
 
 static void srmmu_get_task_struct(struct task_struct *tsk)
 {
-       atomic_inc(&mem_map[MAP_NR(tsk)].count);
+       atomic_inc(&virt_to_page(tsk)->count);
 }
 
 /* tsunami.S */
@@ -2153,7 +2153,7 @@ void __init ld_mmu_srmmu(void)
        BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
        BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
 
-       BTFIXUPSET_CALL(pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(sparc_pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
 
index 55551c561fa2995598eeb0a9557b15ec08324ec6..717269b6807ee8710ccf6cc924d3a94980496cdc 100644 (file)
@@ -1320,7 +1320,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
        unsigned long page;
 
        page = ((unsigned long)bufptr) & PAGE_MASK;
-       if (MAP_NR(page) > max_mapnr) {
+       if (!VALID_PAGE(virt_to_page(page))) {
                sun4c_flush_page(page);
                return (__u32)bufptr; /* already locked */
        }
@@ -2095,7 +2095,7 @@ static int sun4c_pmd_none(pmd_t pmd)              { return !pmd_val(pmd); }
 static int sun4c_pmd_bad(pmd_t pmd)
 {
        return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
-               (MAP_NR(pmd_val(pmd)) > max_mapnr));
+               (!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
 }
 
 static int sun4c_pmd_present(pmd_t pmd)
@@ -2650,7 +2650,7 @@ void __init ld_mmu_sun4c(void)
 
        BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
 
-       BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(sparc_pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
 #if PAGE_SHIFT <= 12   
        BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
 #else
index 63da67c8e80997fec7794c20e8a7738b47196835..cd4b3365c18173236540777f2e8b7d6ab6a4d31a 100644 (file)
@@ -110,7 +110,7 @@ static unsigned int get_user_insn(unsigned long tpc)
        if(!pte_present(pte))
                goto out;
 
-       pa  = phys_base + (pte_pagenr(pte) << PAGE_SHIFT);
+       pa  = phys_base + (sparc64_pte_pagenr(pte) << PAGE_SHIFT);
        pa += (tpc & ~PAGE_MASK);
 
        /* Use phys bypass so we don't pollute dtlb/dcache. */
index 9a1ab1de33350cda8f0b8bba101e774d8a066615..c469c60626553c7a654fdf3ad9560e7ca65b3879 100644 (file)
@@ -18,14 +18,14 @@ static inline void forget_pte(pte_t page)
        if (pte_none(page))
                return;
        if (pte_present(page)) {
-               unsigned long nr = pte_pagenr(page);
-               if (nr >= max_mapnr || PageReserved(mem_map+nr))
+               struct page *ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return;
                /* 
                 * free_page() used to be able to clear swap cache
                 * entries.  We may now have to do it manually.  
                 */
-               free_page_and_swap_cache(mem_map+nr);
+               free_page_and_swap_cache(ptpage);
                return;
        }
        swap_free(pte_to_swp_entry(page));
index d3c9e5036f10e2517dd5548660b35dfe3590ec44..63994c51d388ac389b319d99a8e7232fe4ec26b5 100644 (file)
@@ -1242,7 +1242,7 @@ void free_initmem (void)
                page = (addr +
                        ((unsigned long) __va(phys_base)) -
                        ((unsigned long) &empty_zero_page));
-               p = mem_map + MAP_NR(page);
+               p = virt_to_page(page);
 
                ClearPageReserved(p);
                set_page_count(p, 1);
@@ -1257,7 +1257,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
        if (start < end)
                printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
-               struct page *p = mem_map + MAP_NR(start);
+               struct page *p = virt_to_page(start);
 
                ClearPageReserved(p);
                set_page_count(p, 1);
index 20e1c78ccc320b593810420af04e1175ad1f7517..938ffc26eb92b9a6d96ce62035b8fbe771ab43e8 100644 (file)
@@ -326,7 +326,7 @@ static int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors)
                        if (!addr)
                                goto out;
                        iobuf->pagelist[i] = addr;
-                       page = mem_map + MAP_NR(addr);
+                       page = virt_to_page(addr);
                }
 #endif
 
index 573419a28b1e991d7a4894bf82a0cee6500820c8..a31a6acf7d954ca43c534536b2e4d6f040e7e1c4 100644 (file)
@@ -141,8 +141,8 @@ static unsigned long agp_alloc_page(void)
        if (pt == NULL) {
                return 0;
        }
-       atomic_inc(&mem_map[MAP_NR(pt)].count);
-       set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+       atomic_inc(&virt_to_page(pt)->count);
+       set_bit(PG_locked, &virt_to_page(pt)->flags);
        atomic_inc(&agp_bridge.current_memory_agp);
        return (unsigned long) pt;
 }
@@ -154,9 +154,9 @@ static void agp_destroy_page(unsigned long page)
        if (pt == NULL) {
                return;
        }
-       atomic_dec(&mem_map[MAP_NR(pt)].count);
-       clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
-       wake_up(&mem_map[MAP_NR(pt)].wait);
+       atomic_dec(&virt_to_page(pt)->count);
+       clear_bit(PG_locked, &virt_to_page(pt)->flags);
+       wake_up(&virt_to_page(pt)->wait);
        free_page((unsigned long) pt);
        atomic_dec(&agp_bridge.current_memory_agp);
 }
@@ -541,6 +541,7 @@ static int agp_generic_create_gatt_table(void)
        int num_entries;
        int i;
        void *temp;
+       struct page *page;
 
        /* The generic routines can't handle 2 level gatt's */
        if (agp_bridge.size_type == LVL2_APER_SIZE) {
@@ -622,9 +623,8 @@ static int agp_generic_create_gatt_table(void)
        }
        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
-       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-               set_bit(PG_reserved, &mem_map[i].flags);
-       }
+       for (page = virt_to_page(table); page < get_mem_map(table_end); page++)
+               set_bit(PG_reserved, &page->flags);
 
        agp_bridge.gatt_table_real = (unsigned long *) table;
        CACHE_FLUSH();
@@ -633,9 +633,8 @@ static int agp_generic_create_gatt_table(void)
        CACHE_FLUSH();
 
        if (agp_bridge.gatt_table == NULL) {
-               for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-                       clear_bit(PG_reserved, &mem_map[i].flags);
-               }
+               for (page = virt_to_page(table); page < get_mem_map(table_end); page++)
+                       clear_bit(PG_reserved, &page->flags);
 
                free_pages((unsigned long) table, page_order);
 
@@ -653,10 +652,10 @@ static int agp_generic_create_gatt_table(void)
 
 static int agp_generic_free_gatt_table(void)
 {
-       int i;
        int page_order;
        char *table, *table_end;
        void *temp;
+       struct page *page;
 
        temp = agp_bridge.current_size;
 
@@ -691,9 +690,8 @@ static int agp_generic_free_gatt_table(void)
        table = (char *) agp_bridge.gatt_table_real;
        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
-       for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
-               clear_bit(PG_reserved, &mem_map[i].flags);
-       }
+       for (page = virt_to_page(table); page < get_mem_map(table_end); page++)
+               clear_bit(PG_reserved, &page->flags);
 
        free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
        return 0;
@@ -1500,13 +1498,13 @@ static int amd_create_page_map(amd_page_map *page_map)
        if (page_map->real == NULL) {
                return -ENOMEM;
        }
-       set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags);
+       set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
        CACHE_FLUSH();
        page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), 
                                            PAGE_SIZE);
        if (page_map->remapped == NULL) {
                clear_bit(PG_reserved, 
-                         &mem_map[MAP_NR(page_map->real)].flags);
+                         &virt_to_page(page_map->real)->flags);
                free_page((unsigned long) page_map->real);
                page_map->real = NULL;
                return -ENOMEM;
@@ -1524,7 +1522,7 @@ static void amd_free_page_map(amd_page_map *page_map)
 {
        iounmap(page_map->remapped);
        clear_bit(PG_reserved, 
-                 &mem_map[MAP_NR(page_map->real)].flags);
+                 &virt_to_page(page_map->real)->flags);
        free_page((unsigned long) page_map->real);
 }
 
index 74d4188a2860d058059ca6e6923480af808a059b..6c88a757e702d8c98fbf95a0d8b56e0b0e4e2036 100644 (file)
@@ -191,7 +191,7 @@ static void * rvmalloc(signed long size)
                while (size > 0) 
                 {
                        page = kvirt_to_pa(adr);
-                       mem_map_reserve(MAP_NR(__va(page)));
+                       mem_map_reserve(virt_to_page(__va(page)));
                        adr+=PAGE_SIZE;
                        size-=PAGE_SIZE;
                }
@@ -209,7 +209,7 @@ static void rvfree(void * mem, signed long size)
                while (size > 0) 
                 {
                        page = kvirt_to_pa(adr);
-                       mem_map_unreserve(MAP_NR(__va(page)));
+                       mem_map_unreserve(virt_to_page(__va(page)));
                        adr+=PAGE_SIZE;
                        size-=PAGE_SIZE;
                }
index 299a4f8ca1dc54f0f197e3979abe59e1e2d800fd..ca3cb4f47449e5922895a98ef119f1aea2410919 100644 (file)
@@ -199,7 +199,7 @@ static int v4l_fbuffer_alloc(struct zoran *zr)
                        zr->v4l_gbuf[i].fbuffer_phys = virt_to_phys(mem);
                        zr->v4l_gbuf[i].fbuffer_bus = virt_to_bus(mem);
                        for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
-                               mem_map_reserve(MAP_NR(mem + off));
+                               mem_map_reserve(virt_to_page(mem + off));
                        DEBUG(printk(BUZ_INFO ": V4L frame %d mem 0x%x (bus: 0x%x=%d)\n", i, mem, virt_to_bus(mem), virt_to_bus(mem)));
                } else {
                        return -ENOBUFS;
@@ -221,7 +221,7 @@ static void v4l_fbuffer_free(struct zoran *zr)
 
                mem = zr->v4l_gbuf[i].fbuffer;
                for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
-                       mem_map_unreserve(MAP_NR(mem + off));
+                       mem_map_unreserve(virt_to_page(mem + off));
                kfree((void *) zr->v4l_gbuf[i].fbuffer);
                zr->v4l_gbuf[i].fbuffer = NULL;
        }
@@ -286,7 +286,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr)
                        zr->jpg_gbuf[i].frag_tab[0] = virt_to_bus((void *) mem);
                        zr->jpg_gbuf[i].frag_tab[1] = ((zr->jpg_bufsize / 4) << 1) | 1;
                        for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
-                               mem_map_reserve(MAP_NR(mem + off));
+                               mem_map_reserve(virt_to_page(mem + off));
                } else {
                        /* jpg_bufsize is alreay page aligned */
                        for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
@@ -297,7 +297,7 @@ static int jpg_fbuffer_alloc(struct zoran *zr)
                                }
                                zr->jpg_gbuf[i].frag_tab[2 * j] = virt_to_bus((void *) mem);
                                zr->jpg_gbuf[i].frag_tab[2 * j + 1] = (PAGE_SIZE / 4) << 1;
-                               mem_map_reserve(MAP_NR(mem));
+                               mem_map_reserve(virt_to_page(mem));
                        }
 
                        zr->jpg_gbuf[i].frag_tab[2 * j - 1] |= 1;
@@ -329,7 +329,7 @@ static void jpg_fbuffer_free(struct zoran *zr)
                        if (zr->jpg_gbuf[i].frag_tab[0]) {
                                mem = (unsigned char *) bus_to_virt(zr->jpg_gbuf[i].frag_tab[0]);
                                for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
-                                       mem_map_unreserve(MAP_NR(mem + off));
+                                       mem_map_unreserve(virt_to_page(mem + off));
                                kfree((void *) mem);
                                zr->jpg_gbuf[i].frag_tab[0] = 0;
                                zr->jpg_gbuf[i].frag_tab[1] = 0;
@@ -338,7 +338,7 @@ static void jpg_fbuffer_free(struct zoran *zr)
                        for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
                                if (!zr->jpg_gbuf[i].frag_tab[2 * j])
                                        break;
-                               mem_map_unreserve(MAP_NR(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
+                               mem_map_unreserve(virt_to_page(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
                                free_page((unsigned long) bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j]));
                                zr->jpg_gbuf[i].frag_tab[2 * j] = 0;
                                zr->jpg_gbuf[i].frag_tab[2 * j + 1] = 0;
index c9ec988d1c3d364c3d5427e45ca90b333c23fdd5..d7d007f01aa48da4dce6eee6f85dbc4c7b5e6c03 100644 (file)
@@ -239,7 +239,7 @@ static void *rvmalloc(unsigned long size)
        adr = (unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_reserve(MAP_NR(__va(page)));
+               mem_map_reserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
@@ -263,7 +263,7 @@ static void rvfree(void *mem, unsigned long size)
        adr = (unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_unreserve(MAP_NR(__va(page)));
+               mem_map_unreserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
index f3d9db43ee0edc0be5b9cf9ae54f17bc9388578d..19b7bd9289ef2fc382806f3d10c04d18512f6530 100644 (file)
@@ -282,8 +282,8 @@ static unsigned long i810_alloc_page(drm_device_t *dev)
        if(address == 0UL) 
                return 0;
        
-       atomic_inc(&mem_map[MAP_NR((void *) address)].count);
-       set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+       atomic_inc(&virt_to_page(address)->count);
+       set_bit(PG_locked, &virt_to_page(address)->flags);
    
        return address;
 }
@@ -293,9 +293,9 @@ static void i810_free_page(drm_device_t *dev, unsigned long page)
        if(page == 0UL) 
                return;
        
-       atomic_dec(&mem_map[MAP_NR((void *) page)].count);
-       clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
-       wake_up(&mem_map[MAP_NR((void *) page)].wait);
+       atomic_dec(&virt_to_page(page)->count);
+       clear_bit(PG_locked, &virt_to_page(page)->flags);
+       wake_up(&virt_to_page(page)->wait);
        free_page(page);
        return;
 }
index 5023de808920eb6f72adfe56949a0b52bd10990a..43e46f1d63fd388d459b5a350cb2ef394c8f68ea 100644 (file)
@@ -246,7 +246,7 @@ unsigned long drm_alloc_pages(int order, int area)
        for (addr = address, sz = bytes;
             sz > 0;
             addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-               mem_map_reserve(MAP_NR(addr));
+               mem_map_reserve(virt_to_page(addr));
        }
        
        return address;
@@ -267,7 +267,7 @@ void drm_free_pages(unsigned long address, int order, int area)
                for (addr = address, sz = bytes;
                     sz > 0;
                     addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-                       mem_map_unreserve(MAP_NR(addr));
+                       mem_map_unreserve(virt_to_page(addr));
                }
                free_pages(address, order);
        }
index 28e8811c86949a3df5098e67ef2c1ed333b4a873..d02c3b58163e68620b1ae17f2dc6f49b33e5b30c 100644 (file)
@@ -57,8 +57,8 @@ static unsigned long mga_alloc_page(drm_device_t *dev)
        if(address == 0UL) {
                return 0;
        }
-       atomic_inc(&mem_map[MAP_NR((void *) address)].count);
-       set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+       atomic_inc(&virt_to_page(address)->count);
+       set_bit(PG_locked, &virt_to_page(address)->flags);
    
        return address;
 }
@@ -70,9 +70,9 @@ static void mga_free_page(drm_device_t *dev, unsigned long page)
        if(page == 0UL) {
                return;
        }
-       atomic_dec(&mem_map[MAP_NR((void *) page)].count);
-       clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
-       wake_up(&mem_map[MAP_NR((void *) page)].wait);
+       atomic_dec(&virt_to_page(page)->count);
+       clear_bit(PG_locked, &virt_to_page(page)->flags);
+       wake_up(&virt_to_page(page)->wait);
        free_page(page);
        return;
 }
index 5ee9e3242f8e04fa8e92debaab11ccc67073d463..1386bd172eb0ca8e7f8489e70fcff5785bd1d0f1 100644 (file)
@@ -89,13 +89,13 @@ struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
        offset   = address - vma->vm_start;
        page     = offset >> PAGE_SHIFT;
        physical = (unsigned long)dev->lock.hw_lock + offset;
-       atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+       atomic_inc(&virt_to_page(physical)->count);     /* Dec. by kernel */
 
        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
 #if LINUX_VERSION_CODE < 0x020317
        return physical;
 #else
-       return mem_map + MAP_NR(physical);
+       return (virt_to_page(physical));
 #endif
 }
 
@@ -124,13 +124,13 @@ struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page     = offset >> PAGE_SHIFT;
        physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
-       atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+       atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
 
        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
 #if LINUX_VERSION_CODE < 0x020317
        return physical;
 #else
-       return mem_map + MAP_NR(physical);
+       return (virt_to_page(physical));
 #endif
 }
 
index d7d31dbbbcd99c2112f1ec6032a9379cb08a99c7..bf3addcf4ef5b67780bd8113536301abb0fc1cf1 100644 (file)
@@ -48,11 +48,10 @@ static inline void *dmaalloc(size_t size)
        }
        addr = __get_dma_pages(GFP_KERNEL, get_order(size));
        if (addr) {
-               int i;
+               struct page *page;
 
-               for (i = MAP_NR(addr); i < MAP_NR(addr+size); i++) {
-                       mem_map_reserve(i);
-               }
+               for (page = virt_to_page(addr); page < get_mem_map(addr+size); page++)
+                       mem_map_reserve(page);
        }
        return (void *)addr;
 }
@@ -60,12 +59,11 @@ static inline void *dmaalloc(size_t size)
 static inline void dmafree(void *addr, size_t size)
 {
        if (size > 0) {
-               int i;
+               struct page *page;
 
-               for (i = MAP_NR((unsigned long)addr);
-                    i < MAP_NR((unsigned long)addr+size); i++) {
-                       mem_map_unreserve (i);
-               }
+               for (page = virt_to_page((unsigned long)addr);
+                    page < virt_to_page((unsigned long)addr+size); page++)
+                       mem_map_unreserve(page);
                free_pages((unsigned long) addr, get_order(size));
        }
 }
index fe5f905e9ab9990dd22d2e8937be7e6e80de0393..94707619d3950ae93bc95bf3f375d5fcb3398034 100644 (file)
@@ -136,13 +136,12 @@ static int grabbuf_alloc(struct planb *pb)
                                                                |GFP_DMA, 0);
                if (!pb->rawbuf[i])
                        break;
-               set_bit(PG_reserved, &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+               mem_map_reserve(virt_to_page(pb->rawbuf[i]));
        }
        if (i-- < npage) {
                printk(KERN_DEBUG "PlanB: init_grab: grab buffer not allocated\n");
                for (; i > 0; i--) {
-                       clear_bit(PG_reserved,
-                               &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+                       mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
                        free_pages((unsigned long)pb->rawbuf[i], 0);
                }
                kfree(pb->rawbuf);
@@ -435,8 +434,7 @@ static void planb_prepare_close(struct planb *pb)
        }
        if(pb->rawbuf) {
                for (i = 0; i < pb->rawbuf_size; i++) {
-                       clear_bit(PG_reserved,
-                               &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+                       mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
                        free_pages((unsigned long)pb->rawbuf[i], 0);
                }
                kfree(pb->rawbuf);
index 082cfee061e663aa27b7ba1142c08598a14cec8d..b4c6078d377750165b3c4bc5cb9958ac107dd5c1 100644 (file)
@@ -50,7 +50,7 @@ void* bmalloc(unsigned long size)
        if (mem) {
                unsigned long adr = (unsigned long)mem;
                while (size > 0) {
-                       mem_map_reserve(MAP_NR(phys_to_virt(adr)));
+                       mem_map_reserve(virt_to_page(phys_to_virt(adr)));
                        adr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
@@ -64,7 +64,7 @@ void bfree(void* mem, unsigned long size)
                unsigned long adr = (unsigned long)mem;
                unsigned long siz = size;
                while (siz > 0) {
-                       mem_map_unreserve(MAP_NR(phys_to_virt(adr)));
+                       mem_map_unreserve(virt_to_page(phys_to_virt(adr)));
                        adr += PAGE_SIZE;
                        siz -= PAGE_SIZE;
                }
index b42ce0b460c3cf035219a556f25033c42abd5e46..f43d8280316499eb583eaeef322481ecab46cafb 100644 (file)
@@ -221,7 +221,7 @@ static void * rvmalloc(unsigned long size)
                while (size > 0) 
                 {
                        page = kvirt_to_pa(adr);
-                       mem_map_reserve(MAP_NR(__va(page)));
+                       mem_map_reserve(virt_to_page(__va(page)));
                        adr+=PAGE_SIZE;
                        size-=PAGE_SIZE;
                }
@@ -239,7 +239,7 @@ static void rvfree(void * mem, unsigned long size)
                while (size > 0) 
                 {
                        page = kvirt_to_pa(adr);
-                       mem_map_unreserve(MAP_NR(__va(page)));
+                       mem_map_unreserve(virt_to_page(__va(page)));
                        adr+=PAGE_SIZE;
                        size-=PAGE_SIZE;
                }
index e977210443f1f46ff68e0cfbe3463a10375bbaad..6fcb896ac4b09818b706d73323eb86e8d1c25c1e 100644 (file)
 #include <linux/malloc.h>
 #include <linux/soundcard.h>
 #include <linux/pci.h>
+#include <linux/wrapper.h>
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <linux/init.h>
@@ -591,13 +592,13 @@ static void start_adc(struct cm_state *s)
 
 static void dealloc_dmabuf(struct dmabuf *db)
 {
-       unsigned long map, mapend;
+       struct page *pstart, *pend;
 
        if (db->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);    
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+                       mem_map_unreserve(pstart);
                free_pages((unsigned long)db->rawbuf, db->buforder);
        }
        db->rawbuf = NULL;
@@ -614,7 +615,7 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec)
        int order;
        unsigned bytepersec;
        unsigned bufs;
-       unsigned long map, mapend;
+       struct page *pstart, *pend;
        unsigned char fmt;
        unsigned long flags;
 
@@ -646,9 +647,9 @@ static int prog_dmabuf(struct cm_state *s, unsigned rec)
                        printk(KERN_DEBUG "cmpci: DMA buffer beyond 16MB: busaddr 0x%lx  size %ld\n", 
                               virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
                /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       set_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+                       mem_map_reserve(pstart);
        }
        bytepersec = rate << sample_shift[fmt];
        bufs = PAGE_SIZE << db->buforder;
index 07d416a27ac1406041ba87093c4d4ccdf41b6309..c11c1fe73b5786f0fbd800ce38e0b665d4e64a49 100644 (file)
@@ -56,8 +56,9 @@ static long dmabuf_timeout(struct dma_buffparms *dmap)
 static int sound_alloc_dmap(struct dma_buffparms *dmap)
 {
        char *start_addr, *end_addr;
-       int i, dma_pagesize;
+       int dma_pagesize;
        int sz, size;
+       struct page *page;
 
        dmap->mapping_flags &= ~DMA_MAP_MAPPED;
 
@@ -113,14 +114,15 @@ static int sound_alloc_dmap(struct dma_buffparms *dmap)
        dmap->raw_buf = start_addr;
        dmap->raw_buf_phys = virt_to_bus(start_addr);
 
-       for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
-               set_bit(PG_reserved, &mem_map[i].flags);;
+       for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++)
+               mem_map_reserve(page);
        return 0;
 }
 
 static void sound_free_dmap(struct dma_buffparms *dmap)
 {
-       int sz, size, i;
+       int sz, size;
+       struct page *page;
        unsigned long start_addr, end_addr;
 
        if (dmap->raw_buf == NULL)
@@ -132,8 +134,8 @@ static void sound_free_dmap(struct dma_buffparms *dmap)
        start_addr = (unsigned long) dmap->raw_buf;
        end_addr = start_addr + dmap->buffsize;
 
-       for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
-               clear_bit(PG_reserved, &mem_map[i].flags);;
+       for (page = virt_to_page(start_addr); page <= get_mem_map(end_addr); page++)
+               mem_map_unreserve(page);
 
        free_pages((unsigned long) dmap->raw_buf, sz);
        dmap->raw_buf = NULL;
index 9e2ee24d1556187b8f01462922cabd1ca2d95c74..4d81c1a464536564716382cb523cc119a947d84b 100644 (file)
@@ -38,6 +38,7 @@
 #include "audio.h"
 #include <linux/sched.h>
 #include <linux/smp_lock.h>
+#include <linux/wrapper.h>
 
 static void calculate_ofrag(struct woinst *);
 static void calculate_ifrag(struct wiinst *);
@@ -918,7 +919,7 @@ static int emu10k1_audio_mmap(struct file *file, struct vm_area_struct *vma)
 
                        /* Now mark the pages as reserved, otherwise remap_page_range doesn't do what we want */
                        for (i = 0; i < wave_out->wavexferbuf->numpages; i++)
-                               set_bit(PG_reserved, &mem_map[MAP_NR(wave_out->pagetable[i])].flags);
+                               mem_map_reserve(virt_to_page(wave_out->pagetable[i]));
                }
 
                size = vma->vm_end - vma->vm_start;
@@ -1137,7 +1138,7 @@ static int emu10k1_audio_release(struct inode *inode, struct file *file)
 
                                /* Undo marking the pages as reserved */
                                for (i = 0; i < woinst->wave_out->wavexferbuf->numpages; i++)
-                                       set_bit(PG_reserved, &mem_map[MAP_NR(woinst->wave_out->pagetable[i])].flags);
+                                       mem_map_reserve(virt_to_page(woinst->wave_out->pagetable[i]));
                        }
 
                        woinst->mapped = 0;
index e6c5945cb3e0e792a9daa07727728ba750820860..a5bb668f2c90e9576d1f72bcdb78bda0cb6dbf41 100644 (file)
 #include <linux/soundcard.h>
 #include <linux/pci.h>
 #include <linux/smp_lock.h>
+#include <linux/wrapper.h>
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <linux/init.h>
@@ -540,13 +541,13 @@ static void start_adc(struct es1370_state *s)
 
 extern inline void dealloc_dmabuf(struct es1370_state *s, struct dmabuf *db)
 {
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (db->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);    
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
        }
        db->rawbuf = NULL;
@@ -558,7 +559,7 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate,
        int order;
        unsigned bytepersec;
        unsigned bufs;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
        if (!db->rawbuf) {
@@ -570,9 +571,9 @@ static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate,
                        return -ENOMEM;
                db->buforder = order;
                /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       set_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_reserve(page);
        }
        fmt &= ES1370_FMT_MASK;
        bytepersec = rate << sample_shift[fmt];
index 5a2cc95740b0b94eb910668bd496cd38afc95e73..df362e8bb7573d108a77356037749809da59fb58 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/smp_lock.h>
 #include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/uaccess.h>
@@ -872,13 +873,13 @@ static void start_adc(struct es1371_state *s)
 
 extern inline void dealloc_dmabuf(struct es1371_state *s, struct dmabuf *db)
 {
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (db->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
        }
        db->rawbuf = NULL;
@@ -890,7 +891,7 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate,
        int order;
        unsigned bytepersec;
        unsigned bufs;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
        if (!db->rawbuf) {
@@ -902,9 +903,9 @@ static int prog_dmabuf(struct es1371_state *s, struct dmabuf *db, unsigned rate,
                        return -ENOMEM;
                db->buforder = order;
                /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       set_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_reserve(page);
        }
        fmt &= ES1371_FMT_MASK;
        bytepersec = rate << sample_shift[fmt];
index 6a1623cf073b2c126d822374e9cda1255a0ca87a..4c3bdaff83e38abaf994485ee93822c95055ab67 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/poll.h>
 #include <linux/spinlock.h>
 #include <linux/smp_lock.h>
+#include <linux/wrapper.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 
@@ -403,13 +404,13 @@ static void start_adc(struct solo1_state *s)
 
 extern inline void dealloc_dmabuf(struct solo1_state *s, struct dmabuf *db)
 {
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (db->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);    
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
        }
        db->rawbuf = NULL;
@@ -421,7 +422,7 @@ static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db)
        int order;
        unsigned bytespersec;
        unsigned bufs, sample_shift = 0;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
        if (!db->rawbuf) {
@@ -433,9 +434,9 @@ static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db)
                        return -ENOMEM;
                db->buforder = order;
                /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       set_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_reserve(page);
        }
        if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
                sample_shift++;
index 792f670af2b68f66b58875376abbfbabc57d8dc9..27a1c243b3007a7e112033f9a9e568949355e1c6 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/spinlock.h>
 #include <linux/smp_lock.h>
 #include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 
@@ -628,7 +629,7 @@ static int alloc_dmabuf(struct i810_state *state)
        struct dmabuf *dmabuf = &state->dmabuf;
        void *rawbuf;
        int order;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        /* alloc as big a chunk as we can, FIXME: is this necessary ?? */
        for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
@@ -649,9 +650,9 @@ static int alloc_dmabuf(struct i810_state *state)
        dmabuf->buforder = order;
        
        /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-       mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
-       for (map = MAP_NR(rawbuf); map <= mapend; map++)
-               set_bit(PG_reserved, &mem_map[map].flags);
+       pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+       for (page = virt_to_page(rawbuf); page <= pend; page++)
+               mem_map_reserve(page);
 
        return 0;
 }
@@ -660,13 +661,13 @@ static int alloc_dmabuf(struct i810_state *state)
 static void dealloc_dmabuf(struct i810_state *state)
 {
        struct dmabuf *dmabuf = &state->dmabuf;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (dmabuf->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
-               for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+               for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
                                    dmabuf->rawbuf, dmabuf->dma_handle);
        }
index 429f0055f80fd5a8f52791c2443723af0c4afac4..6c664aea42a98d17a26f17be6f60a005dcbfccc7 100644 (file)
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/smp_lock.h>
+#include <linux/wrapper.h>
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
 
@@ -2819,7 +2820,7 @@ allocate_buffers(struct ess_state *s)
 {
        void *rawbuf=NULL;
        int order,i;
-       unsigned long mapend,map;
+       struct page *page, *pend;
 
        /* alloc as big a chunk as we can */
        for (order = (dsps_order + (16-PAGE_SHIFT) + 1); order >= (dsps_order + 2 + 1); order--)
@@ -2865,17 +2866,16 @@ allocate_buffers(struct ess_state *s)
        }
 
        /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-       mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
-       for (map = MAP_NR(rawbuf); map <= mapend; map++) {
-               set_bit(PG_reserved, &mem_map[map].flags);
-       }
+       pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+       for (page = virt_to_page(rawbuf); page <= pend; page++)
+               mem_map_reserve(page);
 
        return 0;
 } 
 static void
 free_buffers(struct ess_state *s)
 {
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        s->dma_dac.rawbuf = s->dma_adc.rawbuf = NULL;
        s->dma_dac.mapped = s->dma_adc.mapped = 0;
@@ -2884,9 +2884,9 @@ free_buffers(struct ess_state *s)
        M_printk("maestro: freeing %p\n",s->card->dmapages);
        /* undo marking the pages as reserved */
 
-       mapend = MAP_NR(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
-       for (map = MAP_NR(s->card->dmapages); map <= mapend; map++)
-               clear_bit(PG_reserved, &mem_map[map].flags);    
+       pend = virt_to_page(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
+       for (page = virt_to_page(s->card->dmapages); page <= pend; page++)
+               mem_map_unreserve(page);
 
        free_pages((unsigned long)s->card->dmapages,s->card->dmaorder);
        s->card->dmapages = NULL;
index 91c7d23066f1915813267b8033f1e03091ba9686..59d805c7be4d115b57567562898e6db83181c433 100644 (file)
@@ -33,6 +33,7 @@
  *
  ********************************************************************/
 
+#include <linux/kernel.h>
 #include <linux/config.h>
 #include <linux/version.h>
 #include <linux/module.h>
index 6b51468b3ea9c275be62bd6014e2ee44e68ae7bd..526ba929a57107325b15fc6107b0563bc339c137 100644 (file)
 #include <linux/poll.h>
 #include <linux/spinlock.h>
 #include <linux/smp_lock.h>
+#include <linux/wrapper.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 
@@ -692,13 +693,13 @@ static void start_adc(struct sv_state *s)
 
 static void dealloc_dmabuf(struct sv_state *s, struct dmabuf *db)
 {
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (db->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);    
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
        }
        db->rawbuf = NULL;
@@ -715,7 +716,7 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec)
        int order;
        unsigned bytepersec;
        unsigned bufs;
-       unsigned long map, mapend;
+       struct page *page, *pend;
        unsigned char fmt;
        unsigned long flags;
 
@@ -747,9 +748,9 @@ static int prog_dmabuf(struct sv_state *s, unsigned rec)
                        printk(KERN_DEBUG "sv: DMA buffer beyond 16MB: busaddr 0x%lx  size %ld\n", 
                               virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
                /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-               mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
-               for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
-                       set_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+               for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+                       mem_map_reserve(page);
        }
        bytepersec = rate << sample_shift[fmt];
        bufs = PAGE_SIZE << db->buforder;
index b9ac2879e342e0a6bf1cfebd1fba1b30ae8893b0..e953aac6f14d1a659c1cfda6097ed12c64de0caf 100644 (file)
@@ -810,8 +810,9 @@ static      void sscape_write_host_ctrl2(sscape_info *devc, int a, int b)
 static int sscape_alloc_dma(sscape_info *devc)
 {
        char *start_addr, *end_addr;
-       int i, dma_pagesize;
+       int dma_pagesize;
        int sz, size;
+       struct page *page;
 
        if (devc->raw_buf != NULL) return 0;    /* Already done */
        dma_pagesize = (devc->dma < 4) ? (64 * 1024) : (128 * 1024);
@@ -848,23 +849,24 @@ static int sscape_alloc_dma(sscape_info *devc)
        devc->raw_buf = start_addr;
        devc->raw_buf_phys = virt_to_bus(start_addr);
 
-       for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
-               set_bit(PG_reserved, &mem_map[i].flags);;
+               for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+               mem_map_reserve(page);
        return 1;
 }
 
 static void sscape_free_dma(sscape_info *devc)
 {
-       int sz, size, i;
+       int sz, size;
        unsigned long start_addr, end_addr;
+       struct page *page;
 
        if (devc->raw_buf == NULL) return;
        for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1);
        start_addr = (unsigned long) devc->raw_buf;
        end_addr = start_addr + devc->buffsize;
 
-       for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
-               clear_bit(PG_reserved, &mem_map[i].flags);;
+               for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+               mem_map_unreserve(page);
 
        free_pages((unsigned long) devc->raw_buf, sz);
        devc->raw_buf = NULL;
index f19cb17400aa90d68a8fac4087e392455da94427..27a29e42d4ed97b17af263d9da1dc3023b3a3524 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/smp_lock.h>
 #include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <linux/bitops.h>
@@ -925,7 +926,7 @@ static int alloc_dmabuf(struct trident_state *state)
        struct dmabuf *dmabuf = &state->dmabuf;
        void *rawbuf;
        int order;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        /* alloc as big a chunk as we can, FIXME: is this necessary ?? */
        for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
@@ -946,9 +947,9 @@ static int alloc_dmabuf(struct trident_state *state)
        dmabuf->buforder = order;
        
        /* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
-       mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
-       for (map = MAP_NR(rawbuf); map <= mapend; map++)
-               set_bit(PG_reserved, &mem_map[map].flags);
+       pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+       for (page = virt_to_page(rawbuf); page <= pend; page++)
+               mem_map_reserve(page);
 
        return 0;
 }
@@ -957,13 +958,13 @@ static int alloc_dmabuf(struct trident_state *state)
 static void dealloc_dmabuf(struct trident_state *state)
 {
        struct dmabuf *dmabuf = &state->dmabuf;
-       unsigned long map, mapend;
+       struct page *page, *pend;
 
        if (dmabuf->rawbuf) {
                /* undo marking the pages as reserved */
-               mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
-               for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
-                       clear_bit(PG_reserved, &mem_map[map].flags);
+               pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+               for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+                       mem_map_unreserve(page);
                pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
                                    dmabuf->rawbuf, dmabuf->dma_handle);
        }
index 5d6def5627fe3a457f27e1c0d831b036b3ba890f..3efd73183690f0858ef39c4bd26cb58626d593d4 100644 (file)
@@ -422,7 +422,7 @@ static void dmabuf_release(struct dmabuf *db)
        for(nr = 0; nr < NRSGBUF; nr++) {
                if (!(p = db->sgbuf[nr]))
                        continue;
-               mem_map_unreserve(MAP_NR(p));
+               mem_map_unreserve(virt_to_page(p));
                free_page((unsigned long)p);
                db->sgbuf[nr] = NULL;
        }
@@ -464,7 +464,7 @@ static int dmabuf_init(struct dmabuf *db)
                        if (!p)
                                return -ENOMEM;
                        db->sgbuf[nr] = p;
-                       mem_map_reserve(MAP_NR(p));
+                       mem_map_reserve(virt_to_page(p));
                }
                memset(db->sgbuf[nr], AFMT_ISUNSIGNED(db->format) ? 0x80 : 0, PAGE_SIZE);
                if ((nr << PAGE_SHIFT) >= db->dmasize)
index 86360f68796aa179f3f0d98680ae6c2824fcf83f..806c9834ddc20c45d3f827ac8054867bdb153264 100644 (file)
@@ -268,7 +268,7 @@ static void *rvmalloc(unsigned long size)
        adr = (unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_reserve(MAP_NR(__va(page)));
+               mem_map_reserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
@@ -292,7 +292,7 @@ static void rvfree(void *mem, unsigned long size)
        adr=(unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_unreserve(MAP_NR(__va(page)));
+               mem_map_unreserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
index 055099a0c4b51b1e13f4e7a6006359caae89a886..8f32026f25f78bd1475ca833e0f8488f5f086ac6 100644 (file)
@@ -248,7 +248,7 @@ static void *rvmalloc(unsigned long size)
        adr = (unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_reserve(MAP_NR(__va(page)));
+               mem_map_reserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
@@ -272,7 +272,7 @@ static void rvfree(void *mem, unsigned long size)
        adr=(unsigned long) mem;
        while (size > 0) {
                page = kvirt_to_pa(adr);
-               mem_map_unreserve(MAP_NR(__va(page)));
+               mem_map_unreserve(virt_to_page(__va(page)));
                adr += PAGE_SIZE;
                if (size > PAGE_SIZE)
                        size -= PAGE_SIZE;
index f504cddaf6a4a32bf4cf0cda47db3a8610a9e13c..e5e0e4aadb6c7ea7ed9be28ae4bcfb5eb1888882 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/malloc.h>
 #include <linux/init.h>
 #include <linux/fb.h>
+#include <linux/wrapper.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
@@ -1532,8 +1533,8 @@ free_unused_pages(unsigned int virtual_start, unsigned int virtual_end)
                 * set count to 1, and free
                 * the page.
                 */
-               clear_bit(PG_reserved, &mem_map[MAP_NR(virtual_start)].flags);
-               atomic_set(&mem_map[MAP_NR(virtual_start)].count, 1);
+               mem_map_unreserve(virt_to_page(virtual_start));
+               atomic_set(&virt_to_page(virtual_start)->count, 1);
                free_page(virtual_start);
 
                virtual_start += PAGE_SIZE;
@@ -1628,7 +1629,7 @@ acornfb_init(void)
                for (page = current_par.screen_base; 
                     page < PAGE_ALIGN(current_par.screen_base + size);
                     page += PAGE_SIZE)
-                       mem_map[MAP_NR(page)].flags |= (1 << PG_reserved);
+                       mem_map_reserve(virt_to_page(page));
                /* Hand back any excess pages that we allocated. */
                for (page = current_par.screen_base + size; page < top; page += PAGE_SIZE)
                        free_page(page);
index 7a91d0b01c4bf90384717955b0ddf283404287b0..df86a674e966f93c0b455686a5b16b4d094198b5 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/fb.h>
 #include <linux/delay.h>
+#include <linux/wrapper.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
@@ -730,8 +731,8 @@ __init sa1100fb_map_video_memory(void)
        u_int  required_pages;
        u_int  extra_pages;
        u_int  order;
-        u_int  i;
         char   *allocated_region;
+       struct page *page;
 
        if (VideoMemRegion != NULL)
                return -EINVAL;
@@ -757,9 +758,9 @@ __init sa1100fb_map_video_memory(void)
 
         /* Set reserved flag for fb memory to allow it to be remapped into */
         /* user space by the common fbmem driver using remap_page_range(). */
-        for(i = MAP_NR(VideoMemRegion);                                  
-            i < MAP_NR(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); i++) 
-          set_bit(PG_reserved, &mem_map[i].flags);
+       for(page = virt_to_page(VideoMemRegion); 
+           page < virt_to_page(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); page++)
+         mem_map_reserve(page);
 
        /* Remap the fb memory to a non-buffered, non-cached region */
        VideoMemRegion = (u_char *)__ioremap((u_long)VideoMemRegion_phys,
index 22ceb63bb8e93913ba07ef3eca5059e5565d3420..5d764084c20ca512f73541acc59045004ec57d57 100644 (file)
@@ -421,6 +421,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
                end = PMD_SIZE;
        do {
                pte_t page = *pte;
+               struct page *ptpage;
 
                address += PAGE_SIZE;
                pte++;
@@ -432,8 +433,9 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
                ++*pages;
                if (pte_dirty(page))
                        ++*dirty;
-               if ((pte_pagenr(page) >= max_mapnr) || 
-                                       PageReserved(pte_pagenr(page) + mem_map))
+               ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || 
+                                       PageReserved(ptpage))
                        continue;
                if (page_count(pte_page(page)) > 1)
                        ++*shared;
index 45e5b3b598af966c77dac18e3af379e44b61ddc4..dc23ef74d7812e5f5cd71f13a4c942a748fc62f0 100644 (file)
@@ -245,7 +245,7 @@ extern __inline__ pgd_t *get_pgd_slow(void)
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
                pgd_val(ret[PTRS_PER_PGD])
-                 = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+                 = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
        }
        return ret;
 }
index 2614e818982e33c715049f70672b9c992693e455..d68e1833206609555bf0c26e9cf99132ca872dfd 100644 (file)
@@ -141,7 +141,7 @@ extern unsigned long __zero_page(void);
 
 #define BAD_PAGETABLE  __bad_pagetable()
 #define BAD_PAGE       __bad_page()
-#define ZERO_PAGE(vaddr)       (mem_map + MAP_NR(ZERO_PGE))
+#define ZERO_PAGE(vaddr)       (virt_to_page(ZERO_PGE))
 
 /* number of bits that fit into a memory pointer */
 #define BITS_PER_PTR                   (8*sizeof(unsigned long))
@@ -209,8 +209,7 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
 
-#define pte_pagenr(x)  ((unsigned long)((pte_val(x) >> 32)))
-#define pte_page(x)    (mem_map+pte_pagenr(x))
+#define pte_page(x)    (mem_map+(unsigned long)((pte_val(x) >> 32)))
 
 extern inline unsigned long pmd_page(pmd_t pmd)
 { return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
index 6992e14456af61001f14068f429a86a26e47d579..81427025c48a14f02d04b6211f7e4f2d3a0d6fab 100644 (file)
@@ -145,7 +145,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
         ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)     free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index b18e572d0723ed0fe983c72abfef3346e4685feb..850ea4139c42bd4bb90d9a0d476df121014083b1 100644 (file)
@@ -80,7 +80,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
 #define pte_clear(ptep)                set_pte((ptep), __pte(0))
 
 #ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(pte)                ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
+#define pte_page(x)            (mem_map + (unsigned long)(((pte_val(x) - PHYS_OFFSET) >> PAGE_SHIFT)))
 #else
 /*
  * I'm not happy with this - we needlessly convert a physical address
@@ -88,7 +88,7 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
  * which, if __va and __pa are expensive causes twice the expense for
  * zero gain. --rmk
  */
-#define pte_pagenr(pte)                MAP_NR(__va(pte_val(pte)))
+#define pte_page(x)            (mem_map + MAP_NR(__va(pte_val(x))))
 #endif
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
@@ -99,7 +99,6 @@ extern void __handle_bad_pmd_kernel(pmd_t *pmd);
  */
 #define page_address(page)     ((page)->virtual)
 #define pages_to_mb(x)         ((x) >> (20 - PAGE_SHIFT))
-#define pte_page(x)            (mem_map + pte_pagenr(x))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index 33d5a73e570ed7b0781bbf8d449a3493a982aec2..94025647e943d92867a7e8d0f1083961a9eb968a 100644 (file)
@@ -89,7 +89,7 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 #endif /* !__ASSEMBLY__ */
 
index e9faddd62bb60ce77530b2a5288cff15930eac0e..769269192a515b44c3c84499dec7715d2300632c 100644 (file)
@@ -422,7 +422,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define THREAD_SIZE (2*PAGE_SIZE)
 #define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p) free_pages((unsigned long) (p), 1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 0efc7f155577281ae5d0694addfd7b73d341ff08..9963ebb731ad1d08f3e755627f715c8cb146fb3c 100644 (file)
  */
 #define page_address(page)     ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))
 
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE.
- */
-#define pte_pagenr(x)          ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT))
-
 /*
  * Now for some cache flushing routines.  This is the kind of stuff
  * that can be very expensive, so try to avoid them whenever possible.
@@ -250,7 +244,7 @@ extern pmd_t *ia64_bad_pagetable (void);
 #define pte_present(pte)               (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
 #define pte_clear(pte)                 (pte_val(*(pte)) = 0UL)
 /* pte_page() returns the "struct page *" corresponding to the PTE: */
-#define pte_page(pte)                  (mem_map + pte_pagenr(pte))
+#define pte_page(pte)                  (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))
 
 #define pmd_set(pmdp, ptep)            (pmd_val(*(pmdp)) = __pa(ptep))
 #define pmd_none(pmd)                  (!pmd_val(pmd))
@@ -418,7 +412,7 @@ do {                                                                                                \
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 # endif /* !__ASSEMBLY__ */
 
index fa3721bdeae419481fe37479c347f44d146a5b75..5024801ae90dd3786f476d7ff0e46866f47492a6 100644 (file)
@@ -685,7 +685,7 @@ thread_saved_pc (struct thread_struct *t)
 #define alloc_task_struct() \
         ((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
 #define free_task_struct(p)     free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
-#define get_task_struct(tsk)   atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)   atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 3755cb2f4f0e39210d26b31fc24264216ed29ce3..bedaecc4e020fd0573525f35d3ae4ca46365b9e1 100644 (file)
@@ -172,7 +172,7 @@ extern pte_t * __bad_pagetable(void);
 
 #define BAD_PAGETABLE __bad_pagetable()
 #define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr)       (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr)       (virt_to_page(empty_zero_page))
 
 /* number of bits that fit into a memory pointer */
 #define BITS_PER_PTR                   (8*sizeof(unsigned long))
@@ -228,7 +228,6 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 #define pte_none(pte)          (!pte_val(pte))
 #define pte_present(pte)       (pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER))
 #define pte_clear(ptep)                ({ pte_val(*(ptep)) = 0; })
-#define pte_pagenr(pte)                ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
 #define pmd_bad(pmd)           ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
@@ -248,7 +247,7 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 /* Permanent address of a page. */
 #define page_address(page)     ((page)->virtual)
 #define __page_address(page)   (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define pte_page(pte)          (mem_map+pte_pagenr(pte))
+#define pte_page(pte)          (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
 
 #define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
index 4444e4f43ef7500ec56aa01f3fd221a34898c129..79d81adc2e1e22b9903bc306c9e5dd2bc7f2f38d 100644 (file)
@@ -141,7 +141,7 @@ unsigned long get_wchan(struct task_struct *p);
     ({                 \
        unsigned long eip = 0;   \
        if ((tsk)->thread.esp0 > PAGE_SIZE && \
-           MAP_NR((tsk)->thread.esp0) < max_mapnr) \
+           (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
              eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
        eip; })
 #define        KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
@@ -152,7 +152,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)    free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 242cda457bd88dfc8e360a55bc23eac05809246e..f11b734bc73e10922ff91511f39212e8a901a0ac 100644 (file)
@@ -216,7 +216,7 @@ extern unsigned long zero_page_mask;
 #define BAD_PAGETABLE __bad_pagetable()
 #define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) \
-       (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+       (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
 /* number of bits that fit into a memory pointer */
 #define BITS_PER_PTR                   (8*sizeof(unsigned long))
@@ -241,11 +241,6 @@ extern pmd_t invalid_pte_table[PAGE_SIZE/sizeof(pmd_t)];
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-extern inline unsigned long pte_page(pte_t pte)
-{
-       return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
 extern inline unsigned long pmd_page(pmd_t pmd)
 {
        return pmd_val(pmd);
@@ -312,8 +307,7 @@ extern inline void pgd_clear(pgd_t *pgdp)   { }
  * is simple.
  */
 #define page_address(page)     ((page)->virtual)
-#define pte_pagenr(x)          ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
-#define pte_page(x)            (mem_map+pte_pagenr(x))
+#define pte_page(x)            (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 
 /*
  * The following only work if pte_present() is true.
index d6fb7526dffa2c19ddd211fa616605e2b187ad68..022bb12159910f9ae0781fa3cb5351e108ed7f86 100644 (file)
@@ -234,7 +234,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)    free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 9b3ae3d4525937862a51b5645ebc7572050990ed..b959ab03a9c3bf3005eea0107a63b2a6747e4aaa 100644 (file)
@@ -241,7 +241,7 @@ extern unsigned long zero_page_mask;
 #define BAD_PMDTABLE __bad_pmd_table()
 #define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) \
-       (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+       (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
 /* number of bits that fit into a memory pointer */
 #define BITS_PER_PTR                   (8*sizeof(unsigned long))
@@ -267,11 +267,6 @@ extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-extern inline unsigned long pte_page(pte_t pte)
-{
-       return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
 extern inline unsigned long pmd_page(pmd_t pmd)
 {
        return pmd_val(pmd);
@@ -359,13 +354,13 @@ extern inline void pgd_clear(pgd_t *pgdp)
  */
 #define page_address(page)     ((page)->virtual)
 #ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(x)          ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
+#define pte_page(x)            (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 #else
-#define pte_pagenr(x) \
+#define mips64_pte_pagenr(x) \
        (PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
        PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
+#define pte_page(x)            (mem_map+mips64_pte_pagenr(x))
 #endif
-#define pte_page(x)            (mem_map+pte_pagenr(x))
 
 /*
  * The following only work if pte_present() is true.
index 3e8cab4b531cc9918b74fd0241a8239e5ad9045f..618e8ad2527414a93347d726ac5f934d97953ed2 100644 (file)
@@ -290,7 +290,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL, 2))
 #define free_task_struct(p)    free_pages((unsigned long)(p), 2)
-#define get_task_struct(tsk)   atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)   atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 703dc409b7c8d8df95d5d0c229f2ed4a417953db..3484048282ad7c3b3738a90a1ebea1b0afe3ed7a 100644 (file)
@@ -274,7 +274,7 @@ extern unsigned long ioremap_bot, ioremap_base;
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 /*
  * BAD_PAGETABLE is used when we need a bogus page-table, while
@@ -303,7 +303,6 @@ extern pte_t * __bad_pagetable(void);
 #define pte_none(pte)          (!pte_val(pte))
 #define pte_present(pte)       (pte_val(pte) & _PAGE_PRESENT)
 #define pte_clear(ptep)                do { pte_val(*(ptep)) = 0; } while (0)
-#define pte_pagenr(x)          ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
 #define        pmd_bad(pmd)            ((pmd_val(pmd) & ~PAGE_MASK) != 0)
@@ -315,7 +314,7 @@ extern pte_t * __bad_pagetable(void);
  */
 #define page_address(page)  ((page)->virtual)
 #define pages_to_mb(x)         ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x)            (mem_map+pte_pagenr(x))
+#define pte_page(x)            (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 
 #ifndef __ASSEMBLY__
 /*
index a0a96d9372f98eaa9e72eeb38a7982994ac97460..4fd684705f3712bec94ca00a1258ade9b8bac6b2 100644 (file)
@@ -687,7 +687,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)    free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 /* in process.c - for early bootup debug -- Cort */
 int ll_printk(const char *, ...);
index fd4792961aa504cdbf30b9a4bcccf98b607f321a..d6509ea2f528009831423c67b45a3a2bd79c0198 100644 (file)
@@ -41,7 +41,7 @@ extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 #endif /* !__ASSEMBLY__ */
 
 /* Certain architectures need to do special things when PTEs
@@ -272,7 +272,6 @@ extern inline int pte_none(pte_t pte)           { return ((pte_val(pte) & (_PAGE
 extern inline int pte_present(pte_t pte)        { return pte_val(pte) & _PAGE_PRESENT; }
 extern inline void pte_clear(pte_t *ptep)       { pte_val(*ptep) = _PAGE_INVALID; }
 #define PTE_INIT(x) pte_clear(x)
-extern inline int pte_pagenr(pte_t pte)                { return ((unsigned long)((pte_val(pte) >> PAGE_SHIFT))); }
 
 extern inline int pmd_none(pmd_t pmd)           { return pmd_val(pmd) & _PAGE_TABLE_INV; }
 extern inline int pmd_bad(pmd_t pmd)            { return (pmd_val(pmd) == 0); }
@@ -337,7 +336,7 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; }
 
 #define page_address(page)  ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(pte) >> PAGE_SHIFT)))
 
 #define pmd_page(pmd) \
 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
index d55567d943419a7acd7f92554aed2359729b45fb..5cb89c8a3db95d94c11c6486227d6a6115db84da 100644 (file)
@@ -149,7 +149,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define alloc_task_struct() \
         ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)     free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task       (init_task_union.task)
 #define init_stack      (init_task_union.stack)
index d7da3b727fb39d052f58bfb4dd710a3644cc25b7..867cf06acbf5abbb8e7ef88851b84dbbfa5d212c 100644 (file)
@@ -62,7 +62,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 #endif /* !__ASSEMBLY__ */
 
@@ -156,7 +156,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
 #define pte_none(x)    (!pte_val(x))
 #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 #define pte_clear(xp)  do { set_pte(xp, __pte(0)); } while (0)
-#define pte_pagenr(x)  ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
 
 #define pmd_none(x)    (!pmd_val(x))
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
@@ -169,7 +168,7 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
  */
 #define page_address(page)  ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
 
 /*
  * The following only work if pte_present() is true.
index cf59dbb881a3638d633f03e2747afd71f519ec39..77cbe9c1ee884fbce9926d5b8d074378e927233e 100644 (file)
@@ -217,7 +217,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define THREAD_SIZE (2*PAGE_SIZE)
 extern struct task_struct * alloc_task_struct(void);
 extern void free_task_struct(struct task_struct *);
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 5e1dc8b63666f5c393bebef6d1e67f99db439e89..5fc50713292b772bf83dc0eb3218bb637f02b0f9 100644 (file)
@@ -204,11 +204,11 @@ extern unsigned long empty_zero_page;
 
 #define SIZEOF_PTR_LOG2   2
 
-BTFIXUPDEF_CALL_CONST(unsigned long, pte_pagenr, pte_t)
+BTFIXUPDEF_CALL_CONST(unsigned long, sparc_pte_pagenr, pte_t)
 BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
 BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
 
-#define pte_pagenr(pte) BTFIXUP_CALL(pte_pagenr)(pte)
+#define sparc_pte_pagenr(pte) BTFIXUP_CALL(sparc_pte_pagenr)(pte)
 #define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
 #define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)
 
@@ -308,7 +308,7 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
 
 /* Permanent address of a page. */
 #define page_address(page)  ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc_pte_pagenr(x))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index 41cd7a7dd34e7f9376a2d0433cbf7348aee3633d..b81b9cee496af1dcc94274bce546c67beed8cd1f 100644 (file)
@@ -125,7 +125,7 @@ extern struct pgtable_cache_struct {
 
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
-       struct page *page = mem_map + MAP_NR(pgd);
+       struct page *page = virt_to_page(pgd);
 
        if (!page->pprev_hash) {
                (unsigned long *)page->next_hash = pgd_quicklist;
index 6a03c66d8e7244d64e3405cc2b76815d2e8adf3a..3318578f36dab2388ee2bcf38f8449505f1b4097 100644 (file)
@@ -177,7 +177,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
        (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
 #define pgd_set(pgdp, pmdp)    \
        (pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define pte_pagenr(pte)   (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
+#define sparc64_pte_pagenr(pte)   (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
 #define pmd_page(pmd)                  ((unsigned long) __va((pmd_val(pmd)<<11UL)))
 #define pgd_page(pgd)                  ((unsigned long) __va((pgd_val(pgd)<<11UL)))
 #define pte_none(pte)                  (!pte_val(pte))
@@ -209,7 +209,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define __page_address(page)   ((page)->virtual)
 #define page_address(page)     ({ __page_address(page); })
 
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc64_pte_pagenr(x))
 
 /* Be very careful when you change these three, they are delicate. */
 #define pte_mkyoung(pte)       (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
index b7b124a56fdcd54ccdb64e86640abf15a06601ee..37433eace43fe29dbd2e41b9cbbe239ce2609189 100644 (file)
@@ -259,7 +259,7 @@ __out:      __ret; \
 /* Allocation and freeing of task_struct and kernel stack. */
 #define alloc_task_struct()   ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
 #define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
-#define get_task_struct(tsk)      atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
 
 #define init_task      (init_task_union.task)
 #define init_stack     (init_task_union.stack)
index 9a2f2cbe66005f11e205bf29eafc3f935898cffc..bde78147519f9901240c2e861ac2b4b4f411d439 100644 (file)
@@ -11,7 +11,6 @@ extern struct page *highmem_start_page;
 #include <asm/highmem.h>
 
 /* declarations for linux/mm/highmem.c */
-extern unsigned long highmem_mapnr;
 FASTCALL(unsigned int nr_free_highpages(void));
 
 extern struct page * prepare_highmem_swapout(struct page *);
index a248e939f749f758c43d18d3730f6b0ee7feae2b..94e30f7c7a6c8e6e5b7ddbf56e57190d3eb9b8d0 100644 (file)
@@ -338,6 +338,9 @@ extern unsigned long FASTCALL(get_zeroed_page(int gfp_mask));
 #define __get_dma_pages(gfp_mask, order) \
                __get_free_pages((gfp_mask) | GFP_DMA,(order))
 
+#define virt_to_page(kaddr)    (mem_map + MAP_NR(kaddr))
+#define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
+
 /*
  * The old interface name will be removed in 2.5:
  */
index a1176b978c70a39f4d410f07fc8b937adf942638..a3b8973787b8e1c089f40c897695961f58144563 100644 (file)
@@ -36,7 +36,7 @@
 /*
  * From a kernel address, get the "struct page *"
  */
-#define page_cache_entry(x)    (mem_map + MAP_NR(x))
+#define page_cache_entry(x)    virt_to_page(x)
 
 extern unsigned int page_hash_bits;
 #define PAGE_HASH_BITS (page_hash_bits)
index 36d80ef1eba49228698f6e84367011e31a528caf..edf8ef1535bada9e40760a1a064155f59607b9c0 100644 (file)
@@ -29,8 +29,8 @@
 #define vma_get_end(v) v->vm_end
 #define vma_get_page_prot(v) v->vm_page_prot
 
-#define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count))
-#define mem_map_dec_count(p) atomic_dec(&(mem_map[p].count))
+#define mem_map_reserve(p) set_bit(PG_reserved, &p->flags)
+#define mem_map_unreserve(p) clear_bit(PG_reserved, &p->flags)
+#define mem_map_inc_count(p) atomic_inc(&(p->count))
+#define mem_map_dec_count(p) atomic_dec(&(p->count))
 #endif
index e009bca35e45f26fc431c58e7fdfb06007ce471c..9ab02a4cca2bc6a1eebbc83e2eadcaf7906831cc 100644 (file)
@@ -24,7 +24,6 @@ static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, u
        pgd_t * pgdir;
        pmd_t * pgmiddle;
        pte_t * pgtable;
-       unsigned long mapnr;
        unsigned long maddr; 
        struct page *page;
 
@@ -42,11 +41,10 @@ repeat:
        pgtable = pte_offset(pgmiddle, addr);
        if (!pte_present(*pgtable))
                goto fault_in_page;
-       mapnr = pte_pagenr(*pgtable);
        if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
                goto fault_in_page;
-       page = mem_map + mapnr;
-       if ((mapnr >= max_mapnr) || PageReserved(page))
+       page = pte_page(*pgtable);
+       if ((!VALID_PAGE(page)) || PageReserved(page))
                return 0;
        flush_cache_page(vma, addr);
 
index 0e11fe9ed716f651d9b475fc6d6a0011be0cb9b9..fbcb2bb061a1e6e399345390ec6c59b649a5d5c2 100644 (file)
@@ -246,7 +246,7 @@ static unsigned long __init free_all_bootmem_core(int nid, bootmem_data_t *bdata
         * Now free the allocator bitmap itself, it's not
         * needed anymore:
         */
-       page = mem_map + MAP_NR(bdata->node_bootmem_map);
+       page = virt_to_page(bdata->node_bootmem_map);
        count = 0;
        for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
                count++;
index 411f20c52993599065c589d6c59945edb49ff448..6208e347dfc5bb410b2eaca03e0ac561d0f4a858 100644 (file)
@@ -22,8 +22,6 @@
 #include <linux/swap.h>
 #include <linux/slab.h>
 
-unsigned long highmem_mapnr;
-
 /*
  * Take one locked page, return another low-memory locked page.
  */
@@ -61,7 +59,7 @@ struct page * prepare_highmem_swapout(struct page * page)
         * we stored its data into the new regular_page.
         */
        page_cache_release(page);
-       new_page = mem_map + MAP_NR(regular_page);
+       new_page = virt_to_page(regular_page);
        LockPage(new_page);
        return new_page;
 }
index 7b54fc58570fe7712a7573227abc0586482ea4c5..07708bc2f4ab2199c62bf6096950704d1b2ebec0 100644 (file)
@@ -210,7 +210,7 @@ skip_copy_pte_range:                address = (address + PMD_SIZE) & PMD_MASK;
                        
                        do {
                                pte_t pte = *src_pte;
-                               unsigned long page_nr;
+                               struct page *ptepage;
                                
                                /* copy_one_pte */
 
@@ -221,9 +221,9 @@ skip_copy_pte_range:                address = (address + PMD_SIZE) & PMD_MASK;
                                        set_pte(dst_pte, pte);
                                        goto cont_copy_pte_range;
                                }
-                               page_nr = pte_pagenr(pte);
-                               if (page_nr >= max_mapnr || 
-                                   PageReserved(mem_map+page_nr)) {
+                               ptepage = pte_page(pte);
+                               if ((!VALID_PAGE(ptepage)) || 
+                                   PageReserved(ptepage)) {
                                        set_pte(dst_pte, pte);
                                        goto cont_copy_pte_range;
                                }
@@ -236,7 +236,7 @@ skip_copy_pte_range:                address = (address + PMD_SIZE) & PMD_MASK;
                                if (vma->vm_flags & VM_SHARED)
                                        pte = pte_mkclean(pte);
                                set_pte(dst_pte, pte_mkold(pte));
-                               get_page(mem_map + page_nr);
+                               get_page(ptepage);
                        
 cont_copy_pte_range:           address += PAGE_SIZE;
                                if (address >= end)
@@ -262,14 +262,14 @@ nomem:
 static inline int free_pte(pte_t page)
 {
        if (pte_present(page)) {
-               unsigned long nr = pte_pagenr(page);
-               if (nr >= max_mapnr || PageReserved(mem_map+nr))
+               struct page *ptpage = pte_page(page);
+               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
                        return 0;
                /* 
                 * free_page() used to be able to clear swap cache
                 * entries.  We may now have to do it manually.  
                 */
-               free_page_and_swap_cache(mem_map+nr);
+               free_page_and_swap_cache(ptpage);
                return 1;
        }
        swap_free(pte_to_swp_entry(page));
@@ -409,7 +409,7 @@ static struct page * follow_page(unsigned long address)
 
 static inline struct page * get_page_map(struct page *page)
 {
-       if (page > (mem_map + max_mapnr))
+       if (!VALID_PAGE(page))
                return 0;
        return page;
 }
@@ -711,12 +711,12 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
-               unsigned long mapnr;
+               struct page *page;
                pte_t oldpage = *pte;
                pte_clear(pte);
 
-               mapnr = MAP_NR(__va(phys_addr));
-               if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
+               page = virt_to_page(__va(phys_addr));
+               if ((!VALID_PAGE(page)) || PageReserved(page))
                        set_pte(pte, mk_pte_phys(phys_addr, prot));
                forget_pte(oldpage);
                address += PAGE_SIZE;
@@ -818,13 +818,11 @@ static inline void break_cow(struct vm_area_struct * vma, struct page *   old_page
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
        unsigned long address, pte_t *page_table, pte_t pte)
 {
-       unsigned long map_nr;
        struct page *old_page, *new_page;
 
-       map_nr = pte_pagenr(pte);
-       if (map_nr >= max_mapnr)
+       old_page = pte_page(pte);
+       if (!VALID_PAGE(old_page))
                goto bad_wp_page;
-       old_page = mem_map + map_nr;
        
        /*
         * We can avoid the copy if:
@@ -883,7 +881,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 
 bad_wp_page:
        spin_unlock(&mm->page_table_lock);
-       printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
+       printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
        return -1;
 }
 
@@ -920,7 +918,7 @@ static void partial_clear(struct vm_area_struct *vma, unsigned long address)
                return;
        flush_cache_page(vma, address);
        page = pte_page(pte);
-       if ((page-mem_map >= max_mapnr) || PageReserved(page))
+       if ((!VALID_PAGE(page)) || PageReserved(page))
                return;
        offset = address & ~PAGE_MASK;
        memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
index 420f91f92420e8abf338600672d69ba437e933d4..8b74a73db388db5e6f085056010ed7c9d9822f10 100644 (file)
@@ -86,7 +86,7 @@ static void __free_pages_ok (struct page *page, unsigned long order)
                BUG();
        if (page->mapping)
                BUG();
-       if (page-mem_map >= max_mapnr)
+       if (!VALID_PAGE(page))
                BUG();
        if (PageSwapCache(page))
                BUG();
@@ -350,14 +350,14 @@ void __free_pages(struct page *page, unsigned long order)
 
 void free_pages(unsigned long addr, unsigned long order)
 {
-       unsigned long map_nr;
+       struct page *fpage;
 
 #ifdef CONFIG_DISCONTIGMEM
        if (addr == 0) return;
 #endif
-       map_nr = MAP_NR(addr);
-       if (map_nr < max_mapnr)
-               __free_pages(mem_map + map_nr, order);
+       fpage = virt_to_page(addr);
+       if (VALID_PAGE(fpage))
+               __free_pages(fpage, order);
 }
 
 /*
index b2b6359d0a54d6e0b1013a43579cc3305c431ebe..25ed62221b55c45561eaa3141626ca53843a17d7 100644 (file)
@@ -126,7 +126,7 @@ void rw_swap_page(int rw, struct page *page, int wait)
  */
 void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
 {
-       struct page *page = mem_map + MAP_NR(buf);
+       struct page *page = virt_to_page(buf);
        
        if (!PageLocked(page))
                PAGE_BUG(page);
index 49c1a4879cd2019745c930ab33b57029036a7969..81543069898e6522f171860a3bf02a402530ee56 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -496,7 +496,7 @@ static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
 static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
 {
        unsigned long i = (1<<cachep->gfporder);
-       struct page *page = mem_map + MAP_NR(addr);
+       struct page *page = virt_to_page(addr);
 
        /* free_pages() does not clear the type bit - we do that.
         * The pages have been unlinked from their cache-slab,
@@ -1115,7 +1115,7 @@ static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
 
        /* Nasty!!!!!! I hope this is OK. */
        i = 1 << cachep->gfporder;
-       page = mem_map + MAP_NR(objp);
+       page = virt_to_page(objp);
        do {
                SET_PAGE_CACHE(page, cachep);
                SET_PAGE_SLAB(page, slabp);
@@ -1321,9 +1321,9 @@ alloc_new_slab_nolock:
  */
 
 #if DEBUG
-# define CHECK_NR(nr)                                          \
+# define CHECK_NR(pg)                                          \
        do {                                                    \
-               if (nr >= max_mapnr) {                          \
+               if (!VALID_PAGE(pg)) {                          \
                        printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
                                (unsigned long)objp);           \
                        BUG();                                  \
@@ -1331,6 +1331,7 @@ alloc_new_slab_nolock:
        } while (0)
 # define CHECK_PAGE(page)                                      \
        do {                                                    \
+               CHECK_NR(page);                                 \
                if (!PageSlab(page)) {                          \
                        printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
                                (unsigned long)objp);           \
@@ -1339,23 +1340,21 @@ alloc_new_slab_nolock:
        } while (0)
 
 #else
-# define CHECK_NR(nr)  do { } while (0)
-# define CHECK_PAGE(nr)        do { } while (0)
+# define CHECK_PAGE(pg)        do { } while (0)
 #endif
 
 static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 {
        slab_t* slabp;
 
-       CHECK_NR(MAP_NR(objp));
-       CHECK_PAGE(mem_map + MAP_NR(objp));
+       CHECK_PAGE(virt_to_page(objp));
        /* reduces memory footprint
         *
        if (OPTIMIZE(cachep))
                slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
         else
         */
-       slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp));
+       slabp = GET_PAGE_SLAB(virt_to_page(objp));
 
 #if DEBUG
        if (cachep->flags & SLAB_DEBUG_INITIAL)
@@ -1452,8 +1451,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
 #ifdef CONFIG_SMP
        cpucache_t *cc = cc_data(cachep);
 
-       CHECK_NR(MAP_NR(objp));
-       CHECK_PAGE(mem_map + MAP_NR(objp));
+       CHECK_PAGE(virt_to_page(objp));
        if (cc) {
                int batchcount;
                if (cc->avail < cc->limit) {
@@ -1536,9 +1534,8 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
        unsigned long flags;
 #if DEBUG
-       CHECK_NR(MAP_NR(objp));
-       CHECK_PAGE(mem_map + MAP_NR(objp));
-       if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp)))
+       CHECK_PAGE(virt_to_page(objp));
+       if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
                BUG();
 #endif
 
@@ -1562,9 +1559,8 @@ void kfree (const void *objp)
        if (!objp)
                return;
        local_irq_save(flags);
-       CHECK_NR(MAP_NR(objp));
-       CHECK_PAGE(mem_map + MAP_NR(objp));
-       c = GET_PAGE_CACHE(mem_map + MAP_NR(objp));
+       CHECK_PAGE(virt_to_page(objp));
+       c = GET_PAGE_CACHE(virt_to_page(objp));
        __kmem_cache_free(c, (void*)objp);
        local_irq_restore(flags);
 }
index 72f3eaca49b326e9ce5ae9dfadf87c75aa613dc1..50616035438844e2cf4919e5b346ef6d38efede5 100644 (file)
@@ -220,7 +220,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
        new_page_addr = __get_free_page(GFP_USER);
        if (!new_page_addr)
                goto out_free_swap;     /* Out of memory */
-       new_page = mem_map + MAP_NR(new_page_addr);
+       new_page = virt_to_page(new_page_addr);
 
        /*
         * Check the swap cache again, in case we stalled above.
index a84e73f2fe72bad88217d93186de51239250b4fa..fa4cb133e99e1da5372298ac4a75247dc64599ef 100644 (file)
@@ -645,7 +645,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
                goto bad_swap;
        }
 
-       lock_page(mem_map + MAP_NR(swap_header));
+       lock_page(virt_to_page(swap_header));
        rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1);
 
        if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
index 57f3ca56cc7ec2d72fdc13fb8e6fc20b572fb1db..817a3966b4ae153fdf74e41cba19ee4274eb0069 100644 (file)
@@ -41,10 +41,9 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
-                       unsigned long map_nr = pte_pagenr(page);
-                       if ((map_nr < max_mapnr) && 
-                                               (!PageReserved(mem_map + map_nr)))
-                               __free_page(mem_map + map_nr);
+                       struct page *ptpage = pte_page(page);
+                       if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+                               __free_page(ptpage);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
index 4dda15dd1be2a652444c6ea4131acd7e7237b4c6..95098e4d1be732c8010e5a24b82f999ace9a6177 100644 (file)
@@ -45,7 +45,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
        if (!pte_present(pte))
                goto out_failed;
        page = pte_page(pte);
-       if ((page-mem_map >= max_mapnr) || PageReserved(page))
+       if ((!VALID_PAGE(page)) || PageReserved(page))
                goto out_failed;
 
        if (mm->swap_cnt)
index b0ea8b73059708a2545e107f7f8b7acd825d6694..862cc7027ea9065c1a669ac6cb2bb1b7c741f6b9 100644 (file)
@@ -1564,11 +1564,11 @@ static void free_pg_vec(unsigned long *pg_vec, unsigned order, unsigned len)
 
        for (i=0; i<len; i++) {
                if (pg_vec[i]) {
-                       unsigned long map, mapend;
+                       struct page *page, *pend;
 
-                       mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
-                       for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
-                               clear_bit(PG_reserved, &mem_map[map].flags);
+                       pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+                       for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+                               mem_map_unreserve(page);
                        free_pages(pg_vec[i], order);
                }
        }
@@ -1616,14 +1616,14 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
                memset(pg_vec, 0, req->tp_block_nr*sizeof(unsigned long*));
 
                for (i=0; i<req->tp_block_nr; i++) {
-                       unsigned long map, mapend;
+                       struct page *page, *pend;
                        pg_vec[i] = __get_free_pages(GFP_KERNEL, order);
                        if (!pg_vec[i])
                                goto out_free_pgvec;
 
-                       mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
-                       for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
-                               set_bit(PG_reserved, &mem_map[map].flags);
+                       pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+                       for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+                               mem_map_reserve(page);
                }
                /* Page vector is allocated */