__bad_page(void)
{
memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
- return pte_mkdirty(mk_pte(mem_map + MAP_NR(EMPTY_PGE), PAGE_SHARED));
+ return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}
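The whole patch applies one idiom: every open-coded "mem_map + MAP_NR(vaddr)" becomes virt_to_page(vaddr). For reference, on a flat-mem_map configuration the wrapper is the same arithmetic behind a name; this sketch is the i386 form from the 2.4 tree, and other architectures substitute their own definition:

	/* flat-memory form: the page frame number indexes mem_map directly */
	#define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))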
void
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
memzero(zero_page, PAGE_SIZE);
memzero(bad_page, PAGE_SIZE);
- empty_zero_page = mem_map + MAP_NR(zero_page);
- empty_bad_page = mem_map + MAP_NR(bad_page);
+ empty_zero_page = virt_to_page(zero_page);
+ empty_bad_page = virt_to_page(bad_page);
empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET;
}
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
unsigned int size = (end - addr) >> 10;
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
for (; addr < end; addr += PAGE_SIZE, page ++) {
ClearPageReserved(page);
if (!keep_initrd) {
for (addr = start; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
start = __phys_to_virt(start);
end = __phys_to_virt(end);
- pg = PAGE_ALIGN((unsigned long)(mem_map + MAP_NR(start)));
- pgend = ((unsigned long)(mem_map + MAP_NR(end))) & PAGE_MASK;
+ pg = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
+ pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;
start = __virt_to_phys(pg);
end = __virt_to_phys(pgend);
static void __free_small_page(unsigned long spage, struct order *order)
{
unsigned long flags;
- unsigned long nr;
struct page *page;
- nr = MAP_NR(spage);
- if (nr < max_mapnr) {
- page = mem_map + nr;
+ page = virt_to_page(spage);
+ if (VALID_PAGE(page)) {
/*
* The container-page must be marked Reserved
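Where the old code range-checked a map index ("nr < max_mapnr"), the new code checks the struct page pointer with VALID_PAGE(). On flat-memory configurations this is the same test expressed on pointers; a sketch of the 2.4-era i386 definition (discontiguous-memory ports define it differently):

	/* valid iff the pointer falls inside the single mem_map array */
	#define VALID_PAGE(page)	((page - mem_map) < max_mapnr)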
#ifdef CONFIG_HIGHMEM
highmem_start_page = mem_map + highstart_pfn;
- /* cache the highmem_mapnr */
- highmem_mapnr = highstart_pfn;
max_mapnr = num_physpages = highend_pfn;
#else
max_mapnr = num_physpages = max_low_pfn;
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
*/
if (phys_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
- int i;
+ struct page *page;
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
- for(i = MAP_NR(t_addr); i < MAP_NR(t_end); i++) {
- if(!PageReserved(mem_map + i))
+ for(page = virt_to_page(t_addr); page < virt_to_page(t_end); page++)
+ if(!PageReserved(page))
return NULL;
- }
}
/*
{
int nr;
- put_shared_page(current, mem_map + MAP_NR(ia32_gdt_table), IA32_PAGE_OFFSET);
+ put_shared_page(current, virt_to_page(ia32_gdt_table), IA32_PAGE_OFFSET);
if (PAGE_SHIFT <= IA32_PAGE_SHIFT)
- put_shared_page(current, mem_map + MAP_NR(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
+ put_shared_page(current, virt_to_page(ia32_tss), IA32_PAGE_OFFSET + PAGE_SIZE);
nr = smp_processor_id();
addr = (unsigned long) &__init_begin;
for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
- set_page_count(&mem_map[MAP_NR(addr)], 1);
+ clear_bit(PG_reserved, &virt_to_page(addr)->flags);
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
++totalram_pages;
}
if (start < end)
printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags);
- set_page_count(&mem_map[MAP_NR(start)], 1);
+ clear_bit(PG_reserved, &virt_to_page(start)->flags);
+ set_page_count(virt_to_page(start), 1);
free_page(start);
++totalram_pages;
}
unsigned long *count = arg;
struct page *pg;
- for (pg = mem_map + MAP_NR(start); pg < mem_map + MAP_NR(end); ++pg)
+ for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
if (PageReserved(pg))
++num_reserved;
*count += num_reserved;
datasize >> 10, initsize >> 10);
/* install the gate page in the global page table: */
- put_gate_page(mem_map + MAP_NR(__start_gate_section), GATE_ADDR);
+ put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
#ifndef CONFIG_IA64_SOFTSDV_HACKS
/*
/* always reserve first page of ST-RAM, the first 2 kB are
* supervisor-only! */
- set_bit( PG_reserved, &mem_map[MAP_NR(stram_start)].flags );
+ set_bit( PG_reserved, &virt_to_page(stram_start)->flags );
#ifdef CONFIG_STRAM_SWAP
if (!max_swap_size) {
if (pte_page(pte) != page)
return;
if (0 /* isswap */)
- mem_map[MAP_NR(pte_page(pte))].offset = page;
+ pte_page(pte)->offset = page;
else
/* We will be removing the swap cache in a moment, so... */
set_pte(dir, pte_mkdirty(pte));
DPRINTK( "unswap_pte: replacing entry %08lx by new page %08lx",
entry, page );
set_pte(dir, pte_mkdirty(__mk_pte(page,vma->vm_page_prot)));
- atomic_inc(&mem_map[MAP_NR(page)].count);
+ atomic_inc(&virt_to_page(page)->count);
++vma->vm_mm->rss;
}
swap_free(entry);
/* reserve a range of pages in mem_map[] */
static void reserve_region( unsigned long addr, unsigned long end )
{
- mem_map_t *mapp = &mem_map[MAP_NR(addr)];
+ mem_map_t *mapp = virt_to_page(addr);
for( ; addr < end; addr += PAGE_SIZE, ++mapp )
set_bit( PG_reserved, &mapp->flags );
#if 0
#ifndef CONFIG_SUN3
if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
- clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+ clear_bit(PG_DMA, &virt_to_page(tmp)->flags);
#endif
#endif
- if (PageReserved(mem_map+MAP_NR(tmp))) {
+ if (PageReserved(virt_to_page(tmp))) {
if (tmp >= (unsigned long)&_text
&& tmp < (unsigned long)&_etext)
codepages++;
continue;
}
#if 0
- set_page_count(mem_map+MAP_NR(tmp), 1);
+ set_page_count(virt_to_page(tmp), 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);
-#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)])
+#define PD_PTABLE(page) ((ptable_desc *)virt_to_page(page))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, list))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
addr = (unsigned long)&__init_begin;
for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
- set_page_count(mem_map+MAP_NR(addr), 1);
+ virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
}
}
addr = PAGE_OFFSET + p->base;
while (addr < p->base + p->size) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
freed += PAGE_SIZE;
addr = PAGE_SIZE;
while (addr < end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
}
static inline unsigned long setup_zero_pages(void)
{
- unsigned long order, size, pg;
+ unsigned long order, size;
+ struct page *page;
switch (mips_cputype) {
case CPU_R4000SC:
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
- pg = MAP_NR(empty_zero_page);
- while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
- set_bit(PG_reserved, &mem_map[pg].flags);
- set_page_count(mem_map + pg, 0);
- pg++;
+ page = virt_to_page(empty_zero_page);
+ while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+ set_bit(PG_reserved, &page->flags);
+ set_page_count(page, 0);
+ page++;
}
size = PAGE_SIZE << order;
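The rewritten loop bound works because virt_to_page() is linear in its argument while mem_map is contiguous: stepping the address by the buffer size steps the page pointer by the page count. A sketch of the equivalence, assuming the flat definition above:

	/* both sides name the first struct page past the reserved run */
	virt_to_page(empty_zero_page + (PAGE_SIZE << order))
		== virt_to_page(empty_zero_page) + (1 << order)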
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
addr = (unsigned long) &__init_begin;
while (addr < (unsigned long) &__init_end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
addr += PAGE_SIZE;
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
- __free_page(pte_page(page));
+ __free_page(ptpage);
if (current->mm->rss <= 0)
return;
current->mm->rss--;
addr = PAGE_OFFSET + (unsigned long) (long) p->base;
end = addr + (unsigned long) (long) p->size;
while (addr < end) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map + MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
addr += PAGE_SIZE;
freed += PAGE_SIZE;
unsigned long setup_zero_pages(void)
{
- unsigned long order, size, pg;
+ unsigned long order, size;
+ struct page *page;
switch (mips_cputype) {
case CPU_R4000SC:
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
- pg = MAP_NR(empty_zero_page);
- while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
- set_bit(PG_reserved, &mem_map[pg].flags);
- set_page_count(mem_map + pg, 0);
- pg++;
+ page = virt_to_page(empty_zero_page);
+ while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+ set_bit(PG_reserved, &page->flags);
+ set_page_count(page, 0);
+ page++;
}
size = PAGE_SIZE << order;
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
addr = (unsigned long)(&__init_begin);
while (addr < (unsigned long)&__init_end) {
page = PAGE_OFFSET | CPHYSADDR(addr);
- ClearPageReserved(mem_map + MAP_NR(page));
- set_page_count(mem_map + MAP_NR(page), 1);
+ ClearPageReserved(virt_to_page(page));
+ set_page_count(virt_to_page(page), 1);
free_page(page);
totalram_pages++;
addr += PAGE_SIZE;
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
- __free_page(pte_page(page));
+ __free_page(ptpage);
if (current->mm->rss <= 0)
return;
current->mm->rss--;
#define FREESEC(START,END,CNT) do { \
a = (unsigned long)(&START); \
for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
- set_page_count(mem_map+MAP_NR(a), 1); \
+ clear_bit(PG_reserved, &virt_to_page(a)->flags); \
+ set_page_count(virt_to_page(a), 1); \
free_page(a); \
CNT++; \
} \
void free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
make sure the ramdisk pages aren't reserved. */
if (initrd_start) {
for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
- clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
+ clear_bit(PG_reserved, &virt_to_page(addr)->flags);
}
#endif /* CONFIG_BLK_DEV_INITRD */
if ( rtas_data )
for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
addr += PAGE_SIZE)
- SetPageReserved(mem_map + MAP_NR(addr));
+ SetPageReserved(virt_to_page(addr));
#endif /* defined(CONFIG_ALL_PPC) */
if ( sysmap_size )
for (addr = (unsigned long)sysmap;
addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
addr += PAGE_SIZE)
- SetPageReserved(mem_map + MAP_NR(addr));
+ SetPageReserved(virt_to_page(addr));
for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
addr += PAGE_SIZE) {
- if (!PageReserved(mem_map + MAP_NR(addr)))
+ if (!PageReserved(virt_to_page(addr)))
continue;
if (addr < (ulong) etext)
codepages++;
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(start));
- set_page_count(mem_map+MAP_NR(start), 1);
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(addr));
- set_page_count(mem_map+MAP_NR(addr), 1);
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
{
unsigned long p;
for (p = start; p < end; p += PAGE_SIZE) {
- ClearPageReserved(mem_map + MAP_NR(p));
- set_page_count(mem_map+MAP_NR(p), 1);
+ ClearPageReserved(virt_to_page(p));
+ set_page_count(virt_to_page(p), 1);
free_page(p);
totalram_pages++;
}
}
/* Free unneeded trap tables */
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
- set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu1));
+ set_page_count(virt_to_page(trapbase_cpu1), 1);
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
- set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu2));
+ set_page_count(virt_to_page(trapbase_cpu2), 1);
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
- set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu3));
+ set_page_count(virt_to_page(trapbase_cpu3), 1);
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
/* Free unneeded trap tables */
if (!(cpu_present_map & (1 << 1))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu1));
- set_page_count(mem_map + MAP_NR(trapbase_cpu1), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu1));
+ set_page_count(virt_to_page(trapbase_cpu1), 1);
free_page((unsigned long)trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!(cpu_present_map & (1 << 2))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu2));
- set_page_count(mem_map + MAP_NR(trapbase_cpu2), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu2));
+ set_page_count(virt_to_page(trapbase_cpu2), 1);
free_page((unsigned long)trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!(cpu_present_map & (1 << 3))) {
- ClearPageReserved(mem_map + MAP_NR(trapbase_cpu3));
- set_page_count(mem_map + MAP_NR(trapbase_cpu3), 1);
+ ClearPageReserved(virt_to_page(trapbase_cpu3));
+ set_page_count(virt_to_page(trapbase_cpu3), 1);
free_page((unsigned long)trapbase_cpu3);
totalram_pages++;
num_physpages++;
if (pte_none(page))
return;
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return;
}
swap_free(pte_to_swp_entry(page));
prom_printf("[%p,%p] ", first, last);
#endif
while (first < last) {
- ClearPageReserved(mem_map + MAP_NR(first));
- set_page_count(mem_map + MAP_NR(first), 1);
+ ClearPageReserved(virt_to_page(first));
+ set_page_count(virt_to_page(first), 1);
free_page((unsigned long)first);
totalram_pages++;
num_physpages++;
struct page *p;
page = addr + phys_base;
- p = mem_map + MAP_NR(page);
+ p = virt_to_page(page);
ClearPageReserved(p);
set_page_count(p, 1);
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- struct page *p = mem_map + MAP_NR(start);
+ struct page *p = virt_to_page(start);
ClearPageReserved(p);
set_page_count(p, 1);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr);
- set_pte(ptep, pte_val(mk_pte(mem_map + MAP_NR(page), dvma_prot)));
+ set_pte(ptep, pte_val(mk_pte(virt_to_page(page), dvma_prot)));
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr);
- set_pte(ptep, mk_pte(mem_map + MAP_NR(page), dvma_prot));
+ set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
if (ipte_cache != 0) {
iopte_val(*iopte++) = MKIOPTE(__pa(page));
} else {
static void srmmu_get_task_struct(struct task_struct *tsk)
{
- atomic_inc(&mem_map[MAP_NR(tsk)].count);
+ atomic_inc(&virt_to_page(tsk)->count);
}
/* tsunami.S */
BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_pte_pagenr, srmmu_pte_pagenr, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
- if (MAP_NR(page) > max_mapnr) {
+ if (!VALID_PAGE(virt_to_page(page))) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
static int sun4c_pmd_bad(pmd_t pmd)
{
return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
- (MAP_NR(pmd_val(pmd)) > max_mapnr));
+ (!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
}
static int sun4c_pmd_present(pmd_t pmd)
BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
- BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
#if PAGE_SHIFT <= 12
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
if(!pte_present(pte))
goto out;
- pa = phys_base + (pte_pagenr(pte) << PAGE_SHIFT);
+ pa = phys_base + (sparc64_pte_pagenr(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* Use phys bypass so we don't pollute dtlb/dcache. */
if (pte_none(page))
return;
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return;
}
swap_free(pte_to_swp_entry(page));
page = (addr +
((unsigned long) __va(phys_base)) -
((unsigned long) &empty_zero_page));
- p = mem_map + MAP_NR(page);
+ p = virt_to_page(page);
ClearPageReserved(p);
set_page_count(p, 1);
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- struct page *p = mem_map + MAP_NR(start);
+ struct page *p = virt_to_page(start);
ClearPageReserved(p);
set_page_count(p, 1);
if (!addr)
goto out;
iobuf->pagelist[i] = addr;
- page = mem_map + MAP_NR(addr);
+ page = virt_to_page(addr);
}
#endif
if (pt == NULL) {
return 0;
}
- atomic_inc(&mem_map[MAP_NR(pt)].count);
- set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ atomic_inc(&virt_to_page(pt)->count);
+ set_bit(PG_locked, &virt_to_page(pt)->flags);
atomic_inc(&agp_bridge.current_memory_agp);
return (unsigned long) pt;
}
if (pt == NULL) {
return;
}
- atomic_dec(&mem_map[MAP_NR(pt)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
- wake_up(&mem_map[MAP_NR(pt)].wait);
+ atomic_dec(&virt_to_page(pt)->count);
+ clear_bit(PG_locked, &virt_to_page(pt)->flags);
+ wake_up(&virt_to_page(pt)->wait);
free_page((unsigned long) pt);
atomic_dec(&agp_bridge.current_memory_agp);
}
int num_entries;
int i;
void *temp;
+ struct page *page;
/* The generic routines can't handle 2 level gatt's */
if (agp_bridge.size_type == LVL2_APER_SIZE) {
}
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- set_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ set_bit(PG_reserved, &page->flags);
agp_bridge.gatt_table_real = (unsigned long *) table;
CACHE_FLUSH();
CACHE_FLUSH();
if (agp_bridge.gatt_table == NULL) {
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ clear_bit(PG_reserved, &page->flags);
free_pages((unsigned long) table, page_order);
static int agp_generic_free_gatt_table(void)
{
- int i;
int page_order;
char *table, *table_end;
void *temp;
+ struct page *page;
temp = agp_bridge.current_size;
table = (char *) agp_bridge.gatt_table_real;
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
- for (i = MAP_NR(table); i < MAP_NR(table_end); i++) {
- clear_bit(PG_reserved, &mem_map[i].flags);
- }
+ for (page = virt_to_page(table); page < virt_to_page(table_end); page++)
+ clear_bit(PG_reserved, &page->flags);
free_pages((unsigned long) agp_bridge.gatt_table_real, page_order);
return 0;
if (page_map->real == NULL) {
return -ENOMEM;
}
- set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags);
+ set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
CACHE_FLUSH();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
clear_bit(PG_reserved,
- &mem_map[MAP_NR(page_map->real)].flags);
+ &virt_to_page(page_map->real)->flags);
free_page((unsigned long) page_map->real);
page_map->real = NULL;
return -ENOMEM;
{
iounmap(page_map->remapped);
clear_bit(PG_reserved,
- &mem_map[MAP_NR(page_map->real)].flags);
+ &virt_to_page(page_map->real)->flags);
free_page((unsigned long) page_map->real);
}
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
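mem_map_reserve() and mem_map_unreserve() used to take a map index; they now take a struct page *, which is why callers pass virt_to_page(...) and why several files below gain #include <linux/wrapper.h>. The 2.4-era wrappers are roughly:

	/* include/linux/wrapper.h, sketched from the 2.4 tree */
	#define mem_map_reserve(p)	set_bit(PG_reserved, &((p)->flags))
	#define mem_map_unreserve(p)	clear_bit(PG_reserved, &((p)->flags))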
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
zr->v4l_gbuf[i].fbuffer_phys = virt_to_phys(mem);
zr->v4l_gbuf[i].fbuffer_bus = virt_to_bus(mem);
for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
- mem_map_reserve(MAP_NR(mem + off));
+ mem_map_reserve(virt_to_page(mem + off));
DEBUG(printk(BUZ_INFO ": V4L frame %d mem 0x%x (bus: 0x%x=%d)\n", i, mem, virt_to_bus(mem), virt_to_bus(mem)));
} else {
return -ENOBUFS;
mem = zr->v4l_gbuf[i].fbuffer;
for (off = 0; off < v4l_bufsize; off += PAGE_SIZE)
- mem_map_unreserve(MAP_NR(mem + off));
+ mem_map_unreserve(virt_to_page(mem + off));
kfree((void *) zr->v4l_gbuf[i].fbuffer);
zr->v4l_gbuf[i].fbuffer = NULL;
}
zr->jpg_gbuf[i].frag_tab[0] = virt_to_bus((void *) mem);
zr->jpg_gbuf[i].frag_tab[1] = ((zr->jpg_bufsize / 4) << 1) | 1;
for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
- mem_map_reserve(MAP_NR(mem + off));
+ mem_map_reserve(virt_to_page(mem + off));
} else {
/* jpg_bufsize is already page aligned */
for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
}
zr->jpg_gbuf[i].frag_tab[2 * j] = virt_to_bus((void *) mem);
zr->jpg_gbuf[i].frag_tab[2 * j + 1] = (PAGE_SIZE / 4) << 1;
- mem_map_reserve(MAP_NR(mem));
+ mem_map_reserve(virt_to_page(mem));
}
zr->jpg_gbuf[i].frag_tab[2 * j - 1] |= 1;
if (zr->jpg_gbuf[i].frag_tab[0]) {
mem = (unsigned char *) bus_to_virt(zr->jpg_gbuf[i].frag_tab[0]);
for (off = 0; off < zr->jpg_bufsize; off += PAGE_SIZE)
- mem_map_unreserve(MAP_NR(mem + off));
+ mem_map_unreserve(virt_to_page(mem + off));
kfree((void *) mem);
zr->jpg_gbuf[i].frag_tab[0] = 0;
zr->jpg_gbuf[i].frag_tab[1] = 0;
for (j = 0; j < zr->jpg_bufsize / PAGE_SIZE; j++) {
if (!zr->jpg_gbuf[i].frag_tab[2 * j])
break;
- mem_map_unreserve(MAP_NR(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
+ mem_map_unreserve(virt_to_page(bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j])));
free_page((unsigned long) bus_to_virt(zr->jpg_gbuf[i].frag_tab[2 * j]));
zr->jpg_gbuf[i].frag_tab[2 * j] = 0;
zr->jpg_gbuf[i].frag_tab[2 * j + 1] = 0;
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
if(address == 0UL)
return 0;
- atomic_inc(&mem_map[MAP_NR((void *) address)].count);
- set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
return address;
}
if(page == 0UL)
return;
- atomic_dec(&mem_map[MAP_NR((void *) page)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
- wake_up(&mem_map[MAP_NR((void *) page)].wait);
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
free_page(page);
return;
}
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- mem_map_reserve(MAP_NR(addr));
+ mem_map_reserve(virt_to_page(addr));
}
return address;
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- mem_map_unreserve(MAP_NR(addr));
+ mem_map_unreserve(virt_to_page(addr));
}
free_pages(address, order);
}
if(address == 0UL) {
return 0;
}
- atomic_inc(&mem_map[MAP_NR((void *) address)].count);
- set_bit(PG_locked, &mem_map[MAP_NR((void *) address)].flags);
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
return address;
}
if(page == 0UL) {
return;
}
- atomic_dec(&mem_map[MAP_NR((void *) page)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR((void *) page)].flags);
- wake_up(&mem_map[MAP_NR((void *) page)].wait);
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
free_page(page);
return;
}
offset = address - vma->vm_start;
page = offset >> PAGE_SHIFT;
physical = (unsigned long)dev->lock.hw_lock + offset;
- atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+ atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
- return mem_map + MAP_NR(physical);
+ return (virt_to_page(physical));
#endif
}
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page = offset >> PAGE_SHIFT;
physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
- atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */
+ atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
return physical;
#else
- return mem_map + MAP_NR(physical);
+ return (virt_to_page(physical));
#endif
}
}
addr = __get_dma_pages(GFP_KERNEL, get_order(size));
if (addr) {
- int i;
+ struct page *page;
- for (i = MAP_NR(addr); i < MAP_NR(addr+size); i++) {
- mem_map_reserve(i);
- }
+ for (page = virt_to_page(addr); page < virt_to_page(addr+size); page++)
+ mem_map_reserve(page);
}
return (void *)addr;
}
static inline void dmafree(void *addr, size_t size)
{
if (size > 0) {
- int i;
+ struct page *page;
- for (i = MAP_NR((unsigned long)addr);
- i < MAP_NR((unsigned long)addr+size); i++) {
- mem_map_unreserve (i);
- }
+ for (page = virt_to_page((unsigned long)addr);
+ page < virt_to_page((unsigned long)addr+size); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) addr, get_order(size));
}
}
|GFP_DMA, 0);
if (!pb->rawbuf[i])
break;
- set_bit(PG_reserved, &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_reserve(virt_to_page(pb->rawbuf[i]));
}
if (i-- < npage) {
printk(KERN_DEBUG "PlanB: init_grab: grab buffer not allocated\n");
for (; i > 0; i--) {
- clear_bit(PG_reserved,
- &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
free_pages((unsigned long)pb->rawbuf[i], 0);
}
kfree(pb->rawbuf);
}
if(pb->rawbuf) {
for (i = 0; i < pb->rawbuf_size; i++) {
- clear_bit(PG_reserved,
- &mem_map[MAP_NR(pb->rawbuf[i])].flags);
+ mem_map_unreserve(virt_to_page(pb->rawbuf[i]));
free_pages((unsigned long)pb->rawbuf[i], 0);
}
kfree(pb->rawbuf);
if (mem) {
unsigned long adr = (unsigned long)mem;
while (size > 0) {
- mem_map_reserve(MAP_NR(phys_to_virt(adr)));
+ mem_map_reserve(virt_to_page(phys_to_virt(adr)));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
unsigned long adr = (unsigned long)mem;
unsigned long siz = size;
while (siz > 0) {
- mem_map_unreserve(MAP_NR(phys_to_virt(adr)));
+ mem_map_unreserve(virt_to_page(phys_to_virt(adr)));
adr += PAGE_SIZE;
siz -= PAGE_SIZE;
}
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
while (size > 0)
{
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
#include <linux/malloc.h>
#include <linux/soundcard.h>
#include <linux/pci.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
static void dealloc_dmabuf(struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *pstart, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+ mem_map_unreserve(pstart);
free_pages((unsigned long)db->rawbuf, db->buforder);
}
db->rawbuf = NULL;
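In the sound-driver hunks the end pointer is derived from the buffer's last byte rather than from the first byte past it, so the loops compare with <= and cover exactly the allocated pages; dropping the "- 1" would walk one struct page too far. The pattern, using this function's names:

	/* last valid byte -> last valid page, hence the inclusive test */
	pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
	for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
		mem_map_unreserve(pstart);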
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *pstart, *pend;
unsigned char fmt;
unsigned long flags;
printk(KERN_DEBUG "cmpci: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n",
virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
+ mem_map_reserve(pstart);
}
bytepersec = rate << sample_shift[fmt];
bufs = PAGE_SIZE << db->buforder;
static int sound_alloc_dmap(struct dma_buffparms *dmap)
{
char *start_addr, *end_addr;
- int i, dma_pagesize;
+ int dma_pagesize;
int sz, size;
+ struct page *page;
dmap->mapping_flags &= ~DMA_MAP_MAPPED;
dmap->raw_buf = start_addr;
dmap->raw_buf_phys = virt_to_bus(start_addr);
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- set_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_reserve(page);
return 0;
}
static void sound_free_dmap(struct dma_buffparms *dmap)
{
- int sz, size, i;
+ int sz, size;
+ struct page *page;
unsigned long start_addr, end_addr;
if (dmap->raw_buf == NULL)
start_addr = (unsigned long) dmap->raw_buf;
end_addr = start_addr + dmap->buffsize;
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- clear_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) dmap->raw_buf, sz);
dmap->raw_buf = NULL;
#include "audio.h"
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
static void calculate_ofrag(struct woinst *);
static void calculate_ifrag(struct wiinst *);
/* Now mark the pages as reserved, otherwise remap_page_range doesn't do what we want */
for (i = 0; i < wave_out->wavexferbuf->numpages; i++)
- set_bit(PG_reserved, &mem_map[MAP_NR(wave_out->pagetable[i])].flags);
+ mem_map_reserve(virt_to_page(wave_out->pagetable[i]));
}
size = vma->vm_end - vma->vm_start;
/* Undo marking the pages as reserved */
for (i = 0; i < woinst->wave_out->wavexferbuf->numpages; i++)
- set_bit(PG_reserved, &mem_map[MAP_NR(woinst->wave_out->pagetable[i])].flags);
+ mem_map_reserve(virt_to_page(woinst->wave_out->pagetable[i]));
}
woinst->mapped = 0;
#include <linux/soundcard.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
extern inline void dealloc_dmabuf(struct es1370_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
fmt &= ES1370_FMT_MASK;
bytepersec = rate << sample_shift[fmt];
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
extern inline void dealloc_dmabuf(struct es1371_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
fmt &= ES1371_FMT_MASK;
bytepersec = rate << sample_shift[fmt];
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
extern inline void dealloc_dmabuf(struct solo1_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
int order;
unsigned bytespersec;
unsigned bufs, sample_shift = 0;
- unsigned long map, mapend;
+ struct page *page, *pend;
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
return -ENOMEM;
db->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
sample_shift++;
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
struct dmabuf *dmabuf = &state->dmabuf;
void *rawbuf;
int order;
- unsigned long map, mapend;
+ struct page *page, *pend;
/* alloc as big a chunk as we can, FIXME: is this necessary ?? */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
dmabuf->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
static void dealloc_dmabuf(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long map, mapend;
+ struct page *page, *pend;
if (dmabuf->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
- for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+ for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
dmabuf->rawbuf, dmabuf->dma_handle);
}
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
{
void *rawbuf=NULL;
int order,i;
- unsigned long mapend,map;
+ struct page *page, *pend;
/* alloc as big a chunk as we can */
for (order = (dsps_order + (16-PAGE_SHIFT) + 1); order >= (dsps_order + 2 + 1); order--)
}
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++) {
- set_bit(PG_reserved, &mem_map[map].flags);
- }
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
static void
free_buffers(struct ess_state *s)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
s->dma_dac.rawbuf = s->dma_adc.rawbuf = NULL;
s->dma_dac.mapped = s->dma_adc.mapped = 0;
M_printk("maestro: freeing %p\n",s->card->dmapages);
/* undo marking the pages as reserved */
- mapend = MAP_NR(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
- for (map = MAP_NR(s->card->dmapages); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
+ for (page = virt_to_page(s->card->dmapages); page <= pend; page++)
+ mem_map_unreserve(page);
free_pages((unsigned long)s->card->dmapages,s->card->dmaorder);
s->card->dmapages = NULL;
*
********************************************************************/
+#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
static void dealloc_dmabuf(struct sv_state *s, struct dmabuf *db)
{
- unsigned long map, mapend;
+ struct page *page, *pend;
if (db->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
}
db->rawbuf = NULL;
int order;
unsigned bytepersec;
unsigned bufs;
- unsigned long map, mapend;
+ struct page *page, *pend;
unsigned char fmt;
unsigned long flags;
printk(KERN_DEBUG "sv: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n",
virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
- for (map = MAP_NR(db->rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
+ for (page = virt_to_page(db->rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
}
bytepersec = rate << sample_shift[fmt];
bufs = PAGE_SIZE << db->buforder;
static int sscape_alloc_dma(sscape_info *devc)
{
char *start_addr, *end_addr;
- int i, dma_pagesize;
+ int dma_pagesize;
int sz, size;
+ struct page *page;
if (devc->raw_buf != NULL) return 0; /* Already done */
dma_pagesize = (devc->dma < 4) ? (64 * 1024) : (128 * 1024);
devc->raw_buf = start_addr;
devc->raw_buf_phys = virt_to_bus(start_addr);
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- set_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_reserve(page);
return 1;
}
static void sscape_free_dma(sscape_info *devc)
{
- int sz, size, i;
+ int sz, size;
unsigned long start_addr, end_addr;
+ struct page *page;
if (devc->raw_buf == NULL) return;
for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1);
start_addr = (unsigned long) devc->raw_buf;
end_addr = start_addr + devc->buffsize;
- for (i = MAP_NR(start_addr); i <= MAP_NR(end_addr); i++)
- clear_bit(PG_reserved, &mem_map[i].flags);;
+ for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
+ mem_map_unreserve(page);
free_pages((unsigned long) devc->raw_buf, sz);
devc->raw_buf = NULL;
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ac97_codec.h>
+#include <linux/wrapper.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <linux/bitops.h>
struct dmabuf *dmabuf = &state->dmabuf;
void *rawbuf;
int order;
- unsigned long map, mapend;
+ struct page *page, *pend;
/* alloc as big a chunk as we can, FIXME: is this necessary ?? */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
dmabuf->buforder = order;
/* now mark the pages as reserved; otherwise remap_page_range doesn't do what we want */
- mapend = MAP_NR(rawbuf + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(rawbuf); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(rawbuf); page <= pend; page++)
+ mem_map_reserve(page);
return 0;
}
static void dealloc_dmabuf(struct trident_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long map, mapend;
+ struct page *page, *pend;
if (dmabuf->rawbuf) {
/* undo marking the pages as reserved */
- mapend = MAP_NR(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
- for (map = MAP_NR(dmabuf->rawbuf); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
+ for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
+ mem_map_unreserve(page);
pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
dmabuf->rawbuf, dmabuf->dma_handle);
}
for(nr = 0; nr < NRSGBUF; nr++) {
if (!(p = db->sgbuf[nr]))
continue;
- mem_map_unreserve(MAP_NR(p));
+ mem_map_unreserve(virt_to_page(p));
free_page((unsigned long)p);
db->sgbuf[nr] = NULL;
}
if (!p)
return -ENOMEM;
db->sgbuf[nr] = p;
- mem_map_reserve(MAP_NR(p));
+ mem_map_reserve(virt_to_page(p));
}
memset(db->sgbuf[nr], AFMT_ISUNSIGNED(db->format) ? 0x80 : 0, PAGE_SIZE);
if ((nr << PAGE_SHIFT) >= db->dmasize)
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
adr = (unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_reserve(MAP_NR(__va(page)));
+ mem_map_reserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
adr=(unsigned long) mem;
while (size > 0) {
page = kvirt_to_pa(adr);
- mem_map_unreserve(MAP_NR(__va(page)));
+ mem_map_unreserve(virt_to_page(__va(page)));
adr += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/fb.h>
+#include <linux/wrapper.h>
#include <asm/hardware.h>
#include <asm/io.h>
* set count to 1, and free
* the page.
*/
- clear_bit(PG_reserved, &mem_map[MAP_NR(virtual_start)].flags);
- atomic_set(&mem_map[MAP_NR(virtual_start)].count, 1);
+ mem_map_unreserve(virt_to_page(virtual_start));
+ atomic_set(&virt_to_page(virtual_start)->count, 1);
free_page(virtual_start);
virtual_start += PAGE_SIZE;
for (page = current_par.screen_base;
page < PAGE_ALIGN(current_par.screen_base + size);
page += PAGE_SIZE)
- mem_map[MAP_NR(page)].flags |= (1 << PG_reserved);
+ mem_map_reserve(virt_to_page(page));
/* Hand back any excess pages that we allocated. */
for (page = current_par.screen_base + size; page < top; page += PAGE_SIZE)
free_page(page);
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/delay.h>
+#include <linux/wrapper.h>
#include <asm/hardware.h>
#include <asm/io.h>
u_int required_pages;
u_int extra_pages;
u_int order;
- u_int i;
char *allocated_region;
+ struct page *page;
if (VideoMemRegion != NULL)
return -EINVAL;
/* Set reserved flag for fb memory to allow it to be remapped into */
/* user space by the common fbmem driver using remap_page_range(). */
- for(i = MAP_NR(VideoMemRegion);
- i < MAP_NR(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); i++)
- set_bit(PG_reserved, &mem_map[i].flags);
+ for(page = virt_to_page(VideoMemRegion);
+ page < virt_to_page(VideoMemRegion + ALLOCATED_FB_MEM_SIZE); page++)
+ mem_map_reserve(page);
/* Remap the fb memory to a non-buffered, non-cached region */
VideoMemRegion = (u_char *)__ioremap((u_long)VideoMemRegion_phys,
end = PMD_SIZE;
do {
pte_t page = *pte;
+ struct page *ptpage;
address += PAGE_SIZE;
pte++;
++*pages;
if (pte_dirty(page))
++*dirty;
- if ((pte_pagenr(page) >= max_mapnr) ||
- PageReserved(pte_pagenr(page) + mem_map))
+ ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) ||
+ PageReserved(ptpage))
continue;
if (page_count(pte_page(page)) > 1)
++*shared;
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
pgd_val(ret[PTRS_PER_PGD])
- = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
}
return ret;
}
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(ZERO_PGE))
+#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> 32)))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> 32)))
extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define pte_clear(ptep) set_pte((ptep), __pte(0))
#ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(pte) ((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
+#define pte_page(pte) (mem_map + (unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
#else
/*
* I'm not happy with this - we needlessly convert a physical address
* which, if __va and __pa are expensive causes twice the expense for
* zero gain. --rmk
*/
-#define pte_pagenr(pte) MAP_NR(__va(pte_val(pte)))
+#define pte_page(pte) (mem_map + MAP_NR(__va(pte_val(pte))))
#endif
#define pmd_none(pmd) (!pmd_val(pmd))
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
-#define pte_page(x) (mem_map + pte_pagenr(x))
/*
* Conversion functions: convert a page and protection to a page entry,
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
*/
#define page_address(page) ((void *) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)))
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE.
- */
-#define pte_pagenr(x) ((unsigned long) ((pte_val(x) & _PFN_MASK) >> PAGE_SHIFT))
-
/*
* Now for some cache flushing routines. This is the kind of stuff
* that can be very expensive, so try to avoid them whenever possible.
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
-#define pte_page(pte) (mem_map + pte_pagenr(pte))
+#define pte_page(pte) (mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = __pa(ptep))
#define pmd_none(pmd) (!pmd_val(pmd))
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
# endif /* !__ASSEMBLY__ */
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p) free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER))
#define pte_clear(ptep) ({ pte_val(*(ptep)) = 0; })
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
/* Permanent address of a page. */
#define page_address(page) ((page)->virtual)
#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define pte_page(pte) (mem_map+pte_pagenr(pte))
+#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
({ \
unsigned long eip = 0; \
if ((tsk)->thread.esp0 > PAGE_SIZE && \
- MAP_NR((tsk)->thread.esp0) < max_mapnr) \
+ (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
- (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline unsigned long pte_page(pte_t pte)
-{
- return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
extern inline unsigned long pmd_page(pmd_t pmd)
{
return pmd_val(pmd);
* is simple.
*/
#define page_address(page) ((page)->virtual)
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
/*
* The following only work if pte_present() is true.
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define BAD_PMDTABLE __bad_pmd_table()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) \
- (mem_map + MAP_NR(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline unsigned long pte_page(pte_t pte)
-{
- return PAGE_OFFSET + (pte_val(pte) & PAGE_MASK);
-}
-
extern inline unsigned long pmd_page(pmd_t pmd)
{
return pmd_val(pmd);
*/
#define page_address(page) ((page)->virtual)
#ifndef CONFIG_DISCONTIGMEM
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
-#define pte_pagenr(x) \
+#define mips64_pte_pagenr(x) \
(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
+#define pte_page(x) (mem_map+mips64_pte_pagenr(x))
#endif
-#define pte_page(x) (mem_map+pte_pagenr(x))
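/*
 * Under CONFIG_DISCONTIGMEM mem_map is not one flat array, so the page
 * index cannot be pte_val(x) >> PAGE_SHIFT; it is the starting index of
 * the node owning the physical address plus the page's offset within
 * that node (the PLAT_NODE_DATA_STARTNR/LOCALNR hooks above).  The
 * helper therefore survives under an arch-private name,
 * mips64_pte_pagenr(), instead of being folded away like the
 * flat-memory variants.
 */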
/*
* The following only work if pte_present() is true.
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL, 2))
#define free_task_struct(p) free_pages((unsigned long)(p), 2)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* BAD_PAGETABLE is used when we need a bogus page-table, while
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep) do { pte_val(*(ptep)) = 0; } while (0)
-#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) != 0)
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#ifndef __ASSEMBLY__
/*
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
/* in process.c - for early bootup debug -- Cort */
int ll_printk(const char *, ...);
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/* Certain architectures need to do special things when PTEs
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; }
#define PTE_INIT(x) pte_clear(x)
-extern inline int pte_pagenr(pte_t pte) { return ((unsigned long)((pte_val(pte) >> PAGE_SHIFT))); }
extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) == 0); }
{ pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; }
#define page_address(page) ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
-#define pte_pagenr(x) ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
*/
#define page_address(page) ((page)->virtual)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+(unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
/*
* The following only work if pte_present() is true.
#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct * alloc_task_struct(void);
extern void free_task_struct(struct task_struct *);
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define SIZEOF_PTR_LOG2 2
-BTFIXUPDEF_CALL_CONST(unsigned long, pte_pagenr, pte_t)
+BTFIXUPDEF_CALL_CONST(unsigned long, sparc_pte_pagenr, pte_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
-#define pte_pagenr(pte) BTFIXUP_CALL(pte_pagenr)(pte)
+#define sparc_pte_pagenr(pte) BTFIXUP_CALL(sparc_pte_pagenr)(pte)
#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)
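/*
 * sparc resolves these accessors through BTFIXUP (boot-time instruction
 * patching selects the per-MMU implementation), so the helper cannot
 * simply be folded into pte_page() here; renaming it sparc_pte_pagenr()
 * keeps it out of the generic namespace now that pte_pagenr() is gone
 * everywhere else.
 */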
/* Permanent address of a page. */
#define page_address(page) ((page)->virtual)
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc_pte_pagenr(x))
/*
* Conversion functions: convert a page and protection to a page entry,
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- struct page *page = mem_map + MAP_NR(pgd);
+ struct page *page = virt_to_page(pgd);
if (!page->pprev_hash) {
(unsigned long *)page->next_hash = pgd_quicklist;
(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp) \
(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
+#define sparc64_pte_pagenr(pte) (((unsigned long) ((pte_val(pte)&~PAGE_OFFSET)-phys_base)>>PAGE_SHIFT))
#define pmd_page(pmd) ((unsigned long) __va((pmd_val(pmd)<<11UL)))
#define pgd_page(pgd) ((unsigned long) __va((pgd_val(pgd)<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define __page_address(page) ((page)->virtual)
#define page_address(page) ({ __page_address(page); })
-#define pte_page(x) (mem_map+pte_pagenr(x))
+#define pte_page(x) (mem_map+sparc64_pte_pagenr(x))
/* Be very careful when you change these three, they are delicate. */
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
/* Allocation and freeing of task_struct and kernel stack. */
#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
-#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
+#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
-extern unsigned long highmem_mapnr;
FASTCALL(unsigned int nr_free_highpages(void));
extern struct page * prepare_highmem_swapout(struct page *);
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA,(order))
+#define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
+#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
+
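/*
 * Everything above reduces to this pair: virt_to_page() turns a kernel
 * virtual address into its struct page, and VALID_PAGE() replaces the
 * old "MAP_NR(addr) < max_mapnr" range check.  A minimal sketch of the
 * caller-side pattern (free_any_page() is a hypothetical illustration,
 * not part of the patch):
 */
static inline void free_any_page(unsigned long addr)
{
	struct page *page = virt_to_page(addr);	/* was mem_map + MAP_NR(addr) */

	if (!VALID_PAGE(page))			/* was MAP_NR(addr) >= max_mapnr */
		return;
	__free_page(page);
}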
/*
* The old interface name will be removed in 2.5:
*/
/*
* From a kernel address, get the "struct page *"
*/
-#define page_cache_entry(x) (mem_map + MAP_NR(x))
+#define page_cache_entry(x) virt_to_page(x)
extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define vma_get_end(v) v->vm_end
#define vma_get_page_prot(v) v->vm_page_prot
-#define mem_map_reserve(p) set_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_unreserve(p) clear_bit(PG_reserved, &mem_map[p].flags)
-#define mem_map_inc_count(p) atomic_inc(&(mem_map[p].count))
-#define mem_map_dec_count(p) atomic_dec(&(mem_map[p].count))
+#define mem_map_reserve(p) set_bit(PG_reserved, &((p)->flags))
+#define mem_map_unreserve(p) clear_bit(PG_reserved, &((p)->flags))
+#define mem_map_inc_count(p) atomic_inc(&((p)->count))
+#define mem_map_dec_count(p) atomic_dec(&((p)->count))
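/*
 * These wrappers now take the struct page itself rather than a map
 * number, so driver code reserving a buffer converts as follows (buf
 * is assumed to be a kernel-virtual buffer address):
 *
 *	old:	mem_map_reserve(MAP_NR(buf));
 *	new:	mem_map_reserve(virt_to_page(buf));
 */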
#endif
pgd_t * pgdir;
pmd_t * pgmiddle;
pte_t * pgtable;
- unsigned long mapnr;
unsigned long maddr;
struct page *page;
pgtable = pte_offset(pgmiddle, addr);
if (!pte_present(*pgtable))
goto fault_in_page;
- mapnr = pte_pagenr(*pgtable);
if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
goto fault_in_page;
- page = mem_map + mapnr;
- if ((mapnr >= max_mapnr) || PageReserved(page))
+ page = pte_page(*pgtable);
+ if ((!VALID_PAGE(page)) || PageReserved(page))
return 0;
flush_cache_page(vma, addr);
* Now free the allocator bitmap itself, it's not
* needed anymore:
*/
- page = mem_map + MAP_NR(bdata->node_bootmem_map);
+ page = virt_to_page(bdata->node_bootmem_map);
count = 0;
for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
count++;
#include <linux/swap.h>
#include <linux/slab.h>
-unsigned long highmem_mapnr;
-
/*
* Take one locked page, return another low-memory locked page.
*/
* we stored its data into the new regular_page.
*/
page_cache_release(page);
- new_page = mem_map + MAP_NR(regular_page);
+ new_page = virt_to_page(regular_page);
LockPage(new_page);
return new_page;
}
do {
pte_t pte = *src_pte;
- unsigned long page_nr;
+ struct page *ptepage;
/* copy_one_pte */
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
- page_nr = pte_pagenr(pte);
- if (page_nr >= max_mapnr ||
- PageReserved(mem_map+page_nr)) {
+ ptepage = pte_page(pte);
+ if ((!VALID_PAGE(ptepage)) ||
+ PageReserved(ptepage)) {
set_pte(dst_pte, pte);
goto cont_copy_pte_range;
}
if (vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
set_pte(dst_pte, pte_mkold(pte));
- get_page(mem_map + page_nr);
+ get_page(ptepage);
cont_copy_pte_range: address += PAGE_SIZE;
if (address >= end)
static inline int free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long nr = pte_pagenr(page);
- if (nr >= max_mapnr || PageReserved(mem_map+nr))
+ struct page *ptpage = pte_page(page);
+ if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
return 0;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(mem_map+nr);
+ free_page_and_swap_cache(ptpage);
return 1;
}
swap_free(pte_to_swp_entry(page));
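/*
 * get_page_map() below is the kiobuf-side filter: with the pointer
 * interface the whole bounds check collapses into one VALID_PAGE()
 * test, returning NULL for addresses that have no struct page behind
 * them.
 */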
static inline struct page * get_page_map(struct page *page)
{
- if (page > (mem_map + max_mapnr))
+ if (!VALID_PAGE(page))
return 0;
return page;
}
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
- unsigned long mapnr;
+ struct page *page;
pte_t oldpage = *pte;
pte_clear(pte);
- mapnr = MAP_NR(__va(phys_addr));
- if (mapnr >= max_mapnr || PageReserved(mem_map+mapnr))
+ page = virt_to_page(__va(phys_addr));
+ if ((!VALID_PAGE(page)) || PageReserved(page))
set_pte(pte, mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pte_t pte)
{
- unsigned long map_nr;
struct page *old_page, *new_page;
- map_nr = pte_pagenr(pte);
- if (map_nr >= max_mapnr)
+ old_page = pte_page(pte);
+ if (!VALID_PAGE(old_page))
goto bad_wp_page;
- old_page = mem_map + map_nr;
/*
* We can avoid the copy if:
bad_wp_page:
spin_unlock(&mm->page_table_lock);
- printk("do_wp_page: bogus page at address %08lx (nr %ld)\n",address,map_nr);
+ printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
}
return;
flush_cache_page(vma, address);
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
return;
offset = address & ~PAGE_MASK;
memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
BUG();
if (page->mapping)
BUG();
- if (page-mem_map >= max_mapnr)
+ if (!VALID_PAGE(page))
BUG();
if (PageSwapCache(page))
BUG();
void free_pages(unsigned long addr, unsigned long order)
{
- unsigned long map_nr;
+ struct page *fpage;
#ifdef CONFIG_DISCONTIGMEM
if (addr == 0) return;
#endif
- map_nr = MAP_NR(addr);
- if (map_nr < max_mapnr)
- __free_pages(mem_map + map_nr, order);
+ fpage = virt_to_page(addr);
+ if (VALID_PAGE(fpage))
+ __free_pages(fpage, order);
}
/*
*/
void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
{
- struct page *page = mem_map + MAP_NR(buf);
+ struct page *page = virt_to_page(buf);
if (!PageLocked(page))
PAGE_BUG(page);
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
/* free_pages() does not clear the type bit - we do that.
* The pages have been unlinked from their cache-slab,
/* Nasty!!!!!! I hope this is OK. */
i = 1 << cachep->gfporder;
- page = mem_map + MAP_NR(objp);
+ page = virt_to_page(objp);
do {
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
*/
#if DEBUG
-# define CHECK_NR(nr) \
+# define CHECK_NR(pg) \
do { \
- if (nr >= max_mapnr) { \
+ if (!VALID_PAGE(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} while (0)
# define CHECK_PAGE(page) \
do { \
+ CHECK_NR(page); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
} while (0)
#else
-# define CHECK_NR(nr) do { } while (0)
-# define CHECK_PAGE(nr) do { } while (0)
+# define CHECK_PAGE(pg) do { } while (0)
#endif
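/*
 * With the range check folded into CHECK_PAGE(), the paired check at
 * every slab call site collapses to a single line, e.g.:
 *
 *	old:	CHECK_NR(MAP_NR(objp));
 *		CHECK_PAGE(mem_map + MAP_NR(objp));
 *	new:	CHECK_PAGE(virt_to_page(objp));
 */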
static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
else
*/
- slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp));
+ slabp = GET_PAGE_SLAB(virt_to_page(objp));
#if DEBUG
if (cachep->flags & SLAB_DEBUG_INITIAL)
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
{
unsigned long flags;
#if DEBUG
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp)))
+ CHECK_PAGE(virt_to_page(objp));
+ if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
if (!objp)
return;
local_irq_save(flags);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- c = GET_PAGE_CACHE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
+ c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
}
new_page_addr = __get_free_page(GFP_USER);
if (!new_page_addr)
goto out_free_swap; /* Out of memory */
- new_page = mem_map + MAP_NR(new_page_addr);
+ new_page = virt_to_page(new_page_addr);
/*
* Check the swap cache again, in case we stalled above.
goto bad_swap;
}
- lock_page(mem_map + MAP_NR(swap_header));
+ lock_page(virt_to_page(swap_header));
rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1);
if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
if (pte_none(page))
continue;
if (pte_present(page)) {
- unsigned long map_nr = pte_pagenr(page);
- if ((map_nr < max_mapnr) &&
- (!PageReserved(mem_map + map_nr)))
- __free_page(mem_map + map_nr);
+ struct page *ptpage = pte_page(page);
+ if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+ __free_page(ptpage);
continue;
}
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
if (!pte_present(pte))
goto out_failed;
page = pte_page(pte);
- if ((page-mem_map >= max_mapnr) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page))
goto out_failed;
if (mm->swap_cnt)
for (i=0; i<len; i++) {
if (pg_vec[i]) {
- unsigned long map, mapend;
+ struct page *page, *pend;
- mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
- clear_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+ mem_map_unreserve(page);
free_pages(pg_vec[i], order);
}
}
memset(pg_vec, 0, req->tp_block_nr*sizeof(unsigned long*));
for (i=0; i<req->tp_block_nr; i++) {
- unsigned long map, mapend;
+ struct page *page, *pend;
pg_vec[i] = __get_free_pages(GFP_KERNEL, order);
if (!pg_vec[i])
goto out_free_pgvec;
- mapend = MAP_NR(pg_vec[i] + (PAGE_SIZE << order) - 1);
- for (map = MAP_NR(pg_vec[i]); map <= mapend; map++)
- set_bit(PG_reserved, &mem_map[map].flags);
+ pend = virt_to_page(pg_vec[i] + (PAGE_SIZE << order) - 1);
+ for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
+ mem_map_reserve(page);
}
/* Page vector is allocated */