# Select the processor-specific files
p-$(CONFIG_CPU_26) += proc-arm2,3.o
+
+# ARMv3
p-$(CONFIG_CPU_ARM610) += proc-arm6,7.o tlb-v3.o copypage-v3.o
p-$(CONFIG_CPU_ARM710) += proc-arm6,7.o tlb-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4.o abort-ev5ej.o
-p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4.o copypage-v4mc.o abort-ev4.o minicache.o
+
+# ARMv4
+p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o
+p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
+
+# ARMv5
+p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5ej.o
p-$(CONFIG_CPU_XSCALE) += proc-xscale.o tlb-v4wb.o copypage-v5te.o abort-ev4t.o minicache.o
obj-y += $(sort $(p-y))
goto no_page;
*dma_handle = page_to_bus(page);
- ret = __ioremap(page_to_phys(page), size, 0);
+ ret = __ioremap(page_to_pfn(page) << PAGE_SHIFT, size, 0,
+ PAGE_SIZE << order);
if (!ret)
goto no_remap;
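
(For reference, a minimal usage sketch of the allocator being changed above; it assumes the 2.5-era ARM prototype void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle), and the device programming step is hypothetical:)

    #include <linux/mm.h>       /* GFP_KERNEL, PAGE_SIZE */
    #include <linux/errno.h>    /* dma_addr_t comes via <asm/types.h> in this era */

    /* Allocate one page the CPU sees uncached at 'ring' and the
     * device addresses via the bus address 'ring_dma'. */
    static void *ring;
    static dma_addr_t ring_dma;

    static int example_setup(void)
    {
            ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);
            if (!ring)
                    return -ENOMEM;
            /* hand ring_dma to the (hypothetical) device here */
            return 0;
    }

    static void example_teardown(void)
    {
            consistent_free(ring, PAGE_SIZE, ring_dma);
    }
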
+++ /dev/null
-/*
- * linux/arch/arm/lib/copypage.S
- *
- * Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <asm/constants.h>
-
- .text
- .align 5
-/*
- * ARMv4 optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address. Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue. The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_copy_user_page)
- stmfd sp!, {r4, lr} @ 2
- mov r2, #PAGE_SZ/64 @ 1
- ldmia r1!, {r3, r4, ip, lr} @ 4
-1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
- stmia r0!, {r3, r4, ip, lr} @ 4
- ldmia r1!, {r3, r4, ip, lr} @ 4+1
- stmia r0!, {r3, r4, ip, lr} @ 4
- ldmia r1!, {r3, r4, ip, lr} @ 4
- mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
- stmia r0!, {r3, r4, ip, lr} @ 4
- ldmia r1!, {r3, r4, ip, lr} @ 4
- subs r2, r2, #1 @ 1
- stmia r0!, {r3, r4, ip, lr} @ 4
- ldmneia r1!, {r3, r4, ip, lr} @ 4
- bne 1b @ 1
- mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
- ldmfd sp!, {r4, pc} @ 3
-
- .align 5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_clear_user_page)
- str lr, [sp, #-4]!
- mov r1, #PAGE_SZ/64 @ 1
- mov r2, #0 @ 1
- mov r3, #0 @ 1
- mov ip, #0 @ 1
- mov lr, #0 @ 1
-1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
- stmia r0!, {r2, r3, ip, lr} @ 4
- stmia r0!, {r2, r3, ip, lr} @ 4
- mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
- stmia r0!, {r2, r3, ip, lr} @ 4
- stmia r0!, {r2, r3, ip, lr} @ 4
- subs r1, r1, #1 @ 1
- bne 1b @ 1
- mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
- ldr pc, [sp], #4
-
- .section ".text.init", #alloc, #execinstr
-
-ENTRY(v4_user_fns)
- .long v4_clear_user_page
- .long v4_copy_user_page
-
--- /dev/null
+/*
+ * linux/arch/arm/lib/copypage-v4wb.S
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ASM optimised string functions
+ */
+#include <linux/linkage.h>
+#include <asm/constants.h>
+
+ .text
+ .align 5
+/*
+ * ARMv4 optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address. Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue. The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction. If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+ENTRY(v4wb_copy_user_page)
+ stmfd sp!, {r4, lr} @ 2
+ mov r2, #PAGE_SZ/64 @ 1
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4+1
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+ mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+ subs r2, r2, #1 @ 1
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmneia r1!, {r3, r4, ip, lr} @ 4
+ bne 1b @ 1
+ mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
+ ldmfd sp!, {r4, pc} @ 3
+
+ .align 5
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+ENTRY(v4wb_clear_user_page)
+ str lr, [sp, #-4]!
+ mov r1, #PAGE_SZ/64 @ 1
+ mov r2, #0 @ 1
+ mov r3, #0 @ 1
+ mov ip, #0 @ 1
+ mov lr, #0 @ 1
+1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ subs r1, r1, #1 @ 1
+ bne 1b @ 1
+ mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
+ ldr pc, [sp], #4
+
+ .section ".text.init", #alloc, #execinstr
+
+ENTRY(v4wb_user_fns)
+ .long v4wb_clear_user_page
+ .long v4wb_copy_user_page
+
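
(The strategy in the comment block above, rendered as rough C; illustrative only, not part of the patch. The cp15 operations and the 32-byte line granularity mirror the assembler, with memcpy standing in for the ldm/stm pairs:)

    #include <linux/string.h>   /* memcpy */
    #include <asm/page.h>       /* PAGE_SIZE */

    /* C rendering of v4wb_copy_user_page: invalidate each destination
     * D-cache line just before overwriting it, so the read-allocate
     * cache never holds a stale alias of the destination page. */
    static void v4wb_copy_sketch(unsigned long *to, unsigned long *from)
    {
            int i;

            for (i = 0; i < PAGE_SIZE / 32; i++) {
                    /* invalidate the 32-byte D line about to be written */
                    asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (to));
                    memcpy(to, from, 32);
                    to += 8;
                    from += 8;
            }
            /* drain the write buffer so the copy reaches memory */
            asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
    }
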
--- /dev/null
+/*
+ * linux/arch/arm/lib/copypage-v4wt.S
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ASM optimised string functions
+ *
+ * This is for CPUs with a writethrough cache, where 'flush ID cache' is
+ * the only supported cache operation.
+ */
+#include <linux/linkage.h>
+#include <asm/constants.h>
+
+ .text
+ .align 5
+/*
+ * ARMv4 optimised copy_user_page
+ *
+ * Since we have writethrough caches, we don't have to worry about
+ * dirty data in the cache. However, we do have to ensure that
+ * subsequent reads are up to date.
+ */
+ENTRY(v4wt_copy_user_page)
+ stmfd sp!, {r4, lr} @ 2
+ mov r2, #PAGE_SZ/64 @ 1
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+1: stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4+1
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmia r1!, {r3, r4, ip, lr} @ 4
+ subs r2, r2, #1 @ 1
+ stmia r0!, {r3, r4, ip, lr} @ 4
+ ldmneia r1!, {r3, r4, ip, lr} @ 4
+ bne 1b @ 1
+ mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
+ ldmfd sp!, {r4, pc} @ 3
+
+ .align 5
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+ENTRY(v4wt_clear_user_page)
+ str lr, [sp, #-4]!
+ mov r1, #PAGE_SZ/64 @ 1
+ mov r2, #0 @ 1
+ mov r3, #0 @ 1
+ mov ip, #0 @ 1
+ mov lr, #0 @ 1
+1: stmia r0!, {r2, r3, ip, lr} @ 4
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ stmia r0!, {r2, r3, ip, lr} @ 4
+ subs r1, r1, #1 @ 1
+ bne 1b @ 1
+ mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
+ ldr pc, [sp], #4
+
+ .section ".text.init", #alloc, #execinstr
+
+ENTRY(v4wt_user_fns)
+ .long v4wt_clear_user_page
+ .long v4wt_copy_user_page
+
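
(Each of these two-word tables, v4wb_user_fns and v4wt_user_fns, is a function-pointer pair selected at boot via the proc_info records below. In C terms it corresponds to something like the sketch here; the struct and member names are assumed from the kernel's user-page glue, with the entries in the table order above, clear first, then copy:)

    struct cpu_user_fns {
            void (*cpu_clear_user_page)(void *kaddr, unsigned long uaddr);
            void (*cpu_copy_user_page)(void *to, void *from,
                                       unsigned long uaddr);
    };
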
.long cpu_arm1020_info
.long arm1020_processor_functions
.long v4wbi_tlb_fns
- .long v4_user_fns
+ .long v4wb_user_fns
.size __arm1020_proc_info, . - __arm1020_proc_info
.long cpu_arm720_info @ info
.long arm720_processor_functions
.long v4_tlb_fns
- .long v4_user_fns
+ .long v4wt_user_fns
.size __arm720_proc_info, . - __arm720_proc_info
.long cpu_arm920_info
.long arm920_processor_functions
.long v4wbi_tlb_fns
- .long v4_user_fns
+ .long v4wb_user_fns
.size __arm920_proc_info, . - __arm920_proc_info
.long cpu_arm922_info
.long arm922_processor_functions
.long v4wbi_tlb_fns
- .long v4_user_fns
+ .long v4wb_user_fns
.size __arm922_proc_info, . - __arm922_proc_info
.long cpu_arm926_info
.long arm926_processor_functions
.long v4wbi_tlb_fns
- .long v4_user_fns
+ .long v4wb_user_fns
.size __arm926_proc_info, . - __arm926_proc_info
.section ".proc.info", #alloc, #execinstr
+#ifdef CONFIG_CPU_SA110
.type __sa110_proc_info,#object
__sa110_proc_info:
.long 0x4401a100
.long cpu_sa110_info
.long sa110_processor_functions
.long v4wb_tlb_fns
- .long v4_user_fns
+ .long v4wb_user_fns
.size __sa110_proc_info, . - __sa110_proc_info
+#endif
+#ifdef CONFIG_CPU_SA1100
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long v4wb_tlb_fns
.long v4_mc_user_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
+#endif
eors r3, ip, r3 @ == mm ?
movne pc, lr @ no, we don't do anything
- vma_vm_flags ip, r2
+.v4_flush_kern_tlb_range:
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
-1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
+1: mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- tst ip, #VM_EXEC
- mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mov pc, lr
/*
teq r2, r3 @ equal
movne pc, lr @ no
- vma_vm_flags r2, r1
- mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
- tst r2, #VM_EXEC
- mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
+.v4_flush_kern_tlb_page:
+ mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
mov pc, lr
/*
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
- .align 5
-ENTRY(v4_flush_kern_tlb_range)
- bic r0, r0, #0x0ff
- bic r0, r0, #0xf00
-1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
- add r0, r0, #PAGE_SZ
- cmp r0, r1
- blo 1b
- mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB
- mov pc, lr
+.globl v4_flush_kern_tlb_range
+.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
/*
*
* - kaddr - Kernel virtual memory address
*/
-ENTRY(v4_flush_kern_tlb_page)
- mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
- mov pc, lr
+.globl v4_flush_kern_tlb_page
+.equ v4_flush_kern_tlb_page, .v4_flush_kern_tlb_page
.section ".text.init", #alloc, #execinstr
(((unsigned long)(addr) - PAGE_OFFSET) >> NODE_MAX_MEM_SHIFT)
/*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
*/
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) \
+ (((pfn) - PHYS_PFN_OFFSET) >> (NODE_MAX_MEM_SHIFT - PAGE_SHIFT))
/*
* Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
#define ADDR_TO_MAPBASE(kaddr) \
NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
+
/*
* Given a kaddr, LOCAL_MAP_NR finds the owning node of the memory
* and returns the index corresponding to the appropriate page in the
* node's mem_map.
*/
-#define LOCAL_MAP_NR(kaddr) \
- (((unsigned long)(kaddr)-LOCAL_BASE_ADDR((kaddr))) >> PAGE_SHIFT)
-
-/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
- (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
- */
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
- ( (node < NR_NODES) && \
- ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+ (((unsigned long)(addr) & (NODE_MAX_MEM_SIZE - 1)) >> PAGE_SHIFT)
/*
* The PS7211 allows up to 256MB max per DRAM bank, but the EDB7211
#define NODE_MAX_MEM_SHIFT 24
#define NODE_MAX_MEM_SIZE (1<<NODE_MAX_MEM_SHIFT)
-/*
- * Given a mem_map_t, LOCAL_MAP_BASE finds the owning node for the
- * physical page and returns the kaddr for the mem_map of that node.
- */
-#define LOCAL_MAP_BASE(page) \
- NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(page)))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)(kaddr) & ~(NODE_MAX_MEM_SIZE-1))
-
-/*
- * With discontigmem, the conceptual mem_map array starts from PAGE_OFFSET.
- * Given a kaddr, MAP_NR returns the appropriate global mem_map index so
- * it matches the corresponding node's local mem_map.
- */
-#define MAP_NR(kaddr) (LOCAL_MAP_NR((kaddr)) + \
- (((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
- sizeof(mem_map_t)))
-
#else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(pfn) (0)
#endif /* CONFIG_DISCONTIGMEM */
#endif /* CONFIG_ARCH_EDB7211 */
-#ifndef PHYS_TO_NID
-#define PHYS_TO_NID(addr) (0)
-#endif
-
#endif
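
(The shift in PFN_TO_NID is just the node geometry: each node spans 2^NODE_MAX_MEM_SHIFT bytes, i.e. 2^(NODE_MAX_MEM_SHIFT - PAGE_SHIFT) pages, so a RAM-relative pfn divided by the pages-per-node gives the node id. Worked through with 4K pages and a hypothetical RAM base of 0xc0000000:)

    /* NODE_MAX_MEM_SHIFT = 24 -> 16MB nodes = 4096 pages per node.
     * PHYS_PFN_OFFSET = 0xc0000000 >> 12 = 0xc0000 (assumed base).
     *
     * pfn 0xc1800 (physical 0xc1800000, 24MB into RAM):
     *      PFN_TO_NID(0xc1800) = (0xc1800 - 0xc0000) >> (24 - 12)
     *                          = 0x1800 >> 12
     *                          = 1            -> second node
     */
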
/*
* Given a kernel address, find the home node of the underlying memory.
*/
-#define KVADDR_TO_NID(addr) \
- (((unsigned long)(addr) - 0xc0000000) >> 27)
+#define KVADDR_TO_NID(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> 27)
/*
- * Given a physical address, convert it to a node id.
+ * Given a page frame number, convert it to a node id.
*/
-#define PHYS_TO_NID(addr) KVADDR_TO_NID(__phys_to_virt(addr))
+#define PFN_TO_NID(pfn) (((pfn) - PHYS_PFN_OFFSET) >> (27 - PAGE_SHIFT))
/*
* Given a kaddr, ADDR_TO_MAPBASE finds the owning node of the memory
- * and returns the mem_map of that node.
+ * and returns the mem_map of that node.
*/
-#define ADDR_TO_MAPBASE(kaddr) \
- NODE_MEM_MAP(KVADDR_TO_NID((unsigned long)(kaddr)))
+#define ADDR_TO_MAPBASE(kaddr) NODE_MEM_MAP(KVADDR_TO_NID(kaddr))
/*
- * Given a kaddr, LOCAL_MEM_MAP finds the owning node of the memory
- * and returns the index corresponding to the appropriate page in the
- * node's mem_map.
+ * Given a page frame number, PFN_TO_MAPBASE finds the owning node of
+ * the memory and returns the mem_map of that node.
*/
-#define LOCAL_MAP_NR(kvaddr) \
- (((unsigned long)(kvaddr) & 0x07ffffff) >> PAGE_SHIFT)
+#define PFN_TO_MAPBASE(pfn) NODE_MEM_MAP(PFN_TO_NID(pfn))
/*
- * Given a kaddr, virt_to_page returns a pointer to the corresponding
- * mem_map entry.
- */
-#define virt_to_page(kaddr) \
- (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
-
-/*
- * VALID_PAGE returns a non-zero value if given page pointer is valid.
- * This assumes all node's mem_maps are stored within the node they refer to.
+ * Given a kaddr, LOCAL_MAP_NR finds the owning node of the memory
+ * and returns the index corresponding to the appropriate page in the
+ * node's mem_map.
*/
-#define VALID_PAGE(page) \
-({ unsigned int node = KVADDR_TO_NID(page); \
- ( (node < NR_NODES) && \
- ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size) ); \
-})
+#define LOCAL_MAP_NR(addr) \
+ (((unsigned long)(addr) & 0x07ffffff) >> PAGE_SHIFT)
#else
-#define PHYS_TO_NID(addr) (0)
+#define PFN_TO_NID(pfn) (0)
#endif
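
(These macros are exactly the pieces the discontigmem virt_to_page in asm/memory.h further down is built from; spelled out as C for the SA1100 layout above, with 128MB nodes. NODE_MEM_MAP is taken as defined elsewhere in these headers:)

    /* virt_to_page(kaddr) == ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr) */
    static struct page *virt_to_page_sketch(unsigned long kaddr)
    {
            unsigned int nid = (kaddr - PAGE_OFFSET) >> 27;          /* KVADDR_TO_NID */
            unsigned long idx = (kaddr & 0x07ffffff) >> PAGE_SHIFT;  /* LOCAL_MAP_NR */

            return NODE_MEM_MAP(nid) + idx;
    }
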
*
* We have the following to choose from:
* v3 - ARMv3
- * v4 - ARMv4 without minicache
+ * v4wt - ARMv4 with writethrough cache, without minicache
+ * v4wb - ARMv4 with writeback cache, without minicache
* v4_mc - ARMv4 with minicache
* v5te_mc - ARMv5TE with minicache
*/
# endif
#endif
-#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM920T) || \
- defined(CONFIG_CPU_ARM922T) || defined(CONFIG_CPU_ARM926T) || \
- defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_ARM1020)
+#if defined(CONFIG_CPU_ARM720T)
+# ifdef _USER
+# define MULTI_USER 1
+# else
+# define _USER v4wt
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+ defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_SA110) || \
+ defined(CONFIG_CPU_ARM1020)
# ifdef _USER
# define MULTI_USER 1
# else
-# define _USER v4
+# define _USER v4wb
# endif
#endif
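
(The _USER token feeds the usual glue: with exactly one class configured the calls bind directly to, say, v4wb_clear_user_page; with several configured they go through a boot-time vtable. Roughly, assuming the cpu_user_fns layout sketched earlier and the kernel's existing __glue() pattern from MULTI_CPU/MULTI_TLB:)

    #ifdef MULTI_USER
    extern struct cpu_user_fns cpu_user;        /* set from proc_info at boot */
    #define __cpu_clear_user_page   cpu_user.cpu_clear_user_page
    #define __cpu_copy_user_page    cpu_user.cpu_copy_user_page
    #else
    #define __cpu_clear_user_page   __glue(_USER,_clear_user_page)
    #define __cpu_copy_user_page    __glue(_USER,_copy_user_page)
    #endif
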
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_DISCONTIGMEM
-#define page_to_phys(page) \
- ((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
- + page_zone(page)->zone_start_paddr)
-#else
-#define page_to_phys(page) \
- (PHYS_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#endif
-
-/*
- * We should really eliminate virt_to_bus() here - it's depreciated.
- */
-#define page_to_bus(page) \
- (virt_to_bus(page_address(page)))
-
/*
* can the hardware map this into one segment or not, given no other
* constraints.
/*
* linux/include/asm-arm/memory.h
*
- * Copyright (C) 2000 Russell King
+ * Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note: this file should not be included by non-asm/.h files
- *
- * Modifications:
*/
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#include <linux/config.h>
#include <asm/arch/memory.h>
-static inline unsigned long virt_to_phys(volatile void *x)
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * These are *only* valid on the kernel direct mapped RAM memory.
+ */
+static inline unsigned long virt_to_phys(void *x)
{
return __virt_to_phys((unsigned long)(x));
}
return (void *)(__phys_to_virt((unsigned long)(x)));
}
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+
/*
* Virtual <-> DMA view memory address translations
+ * Again, these are *only* valid on the kernel direct mapped RAM
+ * memory. Use of these is *deprecated*.
*/
#define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
#define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, first convert it to a PFN and validate that
+ * with pfn_valid(); pfn_valid() must return false for any
+ * physical address not corresponding to a system RAM
+ * address.
+ *
+ * page_to_pfn(page) convert a struct page * to a PFN number
+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
+ * pfn_valid(pfn) indicates whether a PFN number is valid
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#ifndef CONFIG_DISCONTIGMEM
+
+#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET)
+#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET)
+#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
+
+#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#define virt_addr_valid(kaddr) ((kaddr) >= PAGE_OFFSET && (kaddr) < (unsigned long)high_memory)
+
+#else
+/*
+ * This is more complex. We have a set of mem_map arrays spread
+ * around in memory.
+ */
+#define page_to_pfn(page) \
+ (((page) - page_zone(page)->zone_mem_map) \
+ + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+
+#define pfn_to_page(pfn) \
+ (PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
+
+#define pfn_valid(pfn) (PFN_TO_NID(pfn) < NR_NODES)
+
+#define virt_to_page(kaddr) \
+ (ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
+
+#define virt_addr_valid(kaddr) (KVADDR_TO_NID(kaddr) < NR_NODES)
+
+/*
+ * Common discontigmem stuff.
+ * PHYS_TO_NID is used by the ARM kernel/setup.c
+ */
+#define PHYS_TO_NID(addr) PFN_TO_NID((addr) >> PAGE_SHIFT)
+
+#endif
+
+/*
+ * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
+ */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * We should really eliminate virt_to_bus() here - it's deprecated.
+ */
+#define page_to_bus(page) (virt_to_bus(page_address(page)))
+
#endif
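
(A quick sanity sketch of the invariants the new interface is meant to provide; illustrative, not part of the patch:)

    #include <linux/mm.h>       /* virt_to_page, pfn_valid, BUG_ON */

    /* For any direct-mapped kernel address, the conversions round-trip. */
    static void check_page(void *va)
    {
            struct page *page;
            unsigned long pfn;

            if (!virt_addr_valid((unsigned long)va))
                    return;                 /* not direct-mapped RAM */

            page = virt_to_page(va);
            pfn = page_to_pfn(page);

            BUG_ON(!pfn_valid(pfn));
            BUG_ON(pfn_to_page(pfn) != page);
            BUG_ON(page_to_phys(page) != __pa(va));
    }
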
return order;
}
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/arch/memory.h>
-
-#define __pa(x) __virt_to_phys((unsigned long)(x))
-#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#include <asm/memory.h>
-#ifndef CONFIG_DISCONTIGMEM
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT) - \
- (PHYS_OFFSET >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
-#endif
+#endif /* !__ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define _ASMARM_PGTABLE_H
#include <linux/config.h>
-#include <asm/arch/memory.h>
+#include <asm/memory.h>
#include <asm/arch/vmalloc.h>
/*
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(ptep) set_pte((ptep), __pte(0))
-
-#ifndef CONFIG_DISCONTIGMEM
-#define pte_page(x) (mem_map + (pte_val((x)) >> PAGE_SHIFT) - \
- (PHYS_OFFSET >> PAGE_SHIFT))
-#else
-/*
- * I'm not happy with this - we needlessly convert a physical address
- * to a virtual one, and then immediately back to a physical address,
- * which, if __va and __pa are expensive causes twice the expense for
- * zero gain. --rmk
- */
-#define pte_page(x) (virt_to_page(__va(pte_val((x)))))
-#endif
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
- return __pte(physpage | pgprot_val(pgprot));
-}
-
-#define mk_pte(page,pgprot) mk_pte_phys(__pa(page_address(page)), pgprot)
+#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
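
(With pfn_pte()/pte_pfn() in place, code that used the removed mk_pte_phys() converts mechanically; a sketch for a page-aligned physical address:)

    /* The old mk_pte_phys(phys, prot) idiom becomes: */
    static inline pte_t mk_pte_phys_sketch(unsigned long phys, pgprot_t prot)
    {
            return pfn_pte(phys >> PAGE_SHIFT, prot);
    }
    /* and the page it maps comes back via pte_page(pte),
     * i.e. pfn_to_page(pte_pfn(pte)). */
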
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level