ENTRY(__trap_init)
stmfd sp!, {r4 - r6, lr}
+ mov r0, #0xff000000
+ orr r0, r0, #0x00ff0000 @ high vectors position
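+ @ 0xffff0000 cannot be encoded as a single ARM immediate, hence mov + orr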
adr r1, .LCvectors @ set up the vectors
ldmia r1, {r1, r2, r3, r4, r5, r6, ip, lr}
stmia r0, {r1, r2, r3, r4, r5, r6, ip, lr}
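@ all eight exception vectors (offsets 0x00-0x1c) are now in place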
#include <asm/system.h>
#include <asm/uaccess.h>
-#define FIQ_VECTOR (vectors_base() + 0x1c)
-
static unsigned long no_fiq_insn;
-static inline void unprotect_page_0(void)
-{
- modify_domain(DOMAIN_USER, DOMAIN_MANAGER);
-}
-
-static inline void protect_page_0(void)
-{
- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
-}
-
/* Default reacquire function
* - we always relinquish FIQ control
* - we always reacquire FIQ control
*/
static int fiq_def_op(void *ref, int relinquish)
{
- if (!relinquish) {
- unprotect_page_0();
- *(unsigned long *)FIQ_VECTOR = no_fiq_insn;
- protect_page_0();
- flush_icache_range(FIQ_VECTOR, FIQ_VECTOR + 4);
- }
+ if (!relinquish)
+ set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
return 0;
}
void set_fiq_handler(void *start, unsigned int length)
{
- unprotect_page_0();
-
- memcpy((void *)FIQ_VECTOR, start, length);
-
- protect_page_0();
- flush_icache_range(FIQ_VECTOR, FIQ_VECTOR + length);
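+ /* the FIQ vector is the last of the eight vectors, at offset 0x1c */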
+ memcpy((void *)0xffff001c, start, length);
+ flush_icache_range(0xffff001c, 0xffff001c + length);
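+ /* if the CPU still fetches vectors at 0, flush that alias of the same page too */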
+ if (!vectors_high())
+ flush_icache_range(0x1c, 0x1c + length);
}
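/*
 * Usage sketch (illustrative only: the "my_*" names are hypothetical;
 * claim_fiq(), set_fiq_handler() and struct fiq_handler are the real
 * interface from asm/fiq.h).  A fiq_op returning zero agrees both to
 * give up and to take back the FIQ:
 *
 *	static int my_fiq_op(void *dev_id, int relinquish)
 *	{
 *		return 0;
 *	}
 *
 *	static struct fiq_handler my_fh = {
 *		.name	= "mydev",
 *		.fiq_op	= my_fiq_op,
 *	};
 *
 *	if (!claim_fiq(&my_fh))
 *		set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
 */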
void __init init_FIQ(void)
{
- no_fiq_insn = *(unsigned long *)FIQ_VECTOR;
- set_fs(get_fs());
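+ /* save the default FIQ instruction so fiq_def_op() can restore it */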
+ no_fiq_insn = *(unsigned long *)0xffff001c;
}
return error;
}
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define MIN_MAP_ADDR (vectors_high() ? 0 : PAGE_SIZE)
+
/* common code for old and new mmaps */
inline long do_mmap2(
unsigned long addr, unsigned long len,
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- /*
- * If we are doing a fixed mapping, and address < PAGE_SIZE,
- * then deny it.
- */
- if (flags & MAP_FIXED && addr < PAGE_SIZE && vectors_base() == 0)
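+ /* refuse fixed mappings that would overlay the low vectors page */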
+ if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
goto out;
error = -EBADF;
{
unsigned long ret = -EINVAL;
- /*
- * If we are doing a fixed mapping, and address < PAGE_SIZE,
- * then deny it.
- */
- if (flags & MREMAP_FIXED && new_addr < PAGE_SIZE &&
- vectors_base() == 0)
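+ /* same rule for a fixed mremap destination */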
+ if (flags & MREMAP_FIXED && new_addr < MIN_MAP_ADDR)
goto out;
down_write(&current->mm->mmap_sem);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
{
- unsigned int vectors = vectors_base();
-
console_verbose();
printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
handler[reason], processor_modes[proc_mode]);
- /*
- * Dump out the vectors and stub routines. Maybe a better solution
- * would be to dump them out only if we detect that they are corrupted.
- */
- dump_mem(KERN_CRIT "Vectors: ", vectors, vectors + 0x40);
- dump_mem(KERN_CRIT "Stubs: ", vectors + 0x200, vectors + 0x4b8);
-
die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
void __init trap_init(void)
{
- extern void __trap_init(unsigned long);
- unsigned long base = vectors_base();
-
- __trap_init(base);
- flush_icache_range(base, base + PAGE_SIZE);
- if (base != 0)
- printk(KERN_DEBUG "Relocating machine vectors to 0x%08lx\n",
- base);
+ extern void __trap_init(void);
+
+ __trap_init();
+ flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
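/* switch the user domain to client mode so the vectors page permissions are enforced */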
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
init_pgd = pgd_offset_k(0);
- if (vectors_base() == 0) {
+ if (!vectors_high()) {
/*
* This lock is here just to satisfy pmd_alloc and pte_lock
*/
.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
- [MT_VECTORS] = {
+ [MT_LOW_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER,
},
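+ /* as MT_LOW_VECTORS, plus L_PTE_USER: user space may read the vectors page */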
+ [MT_HIGH_VECTORS] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_USER | L_PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_USER,
+ },
[MT_MEMORY] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
}
if (cpu_arch <= CPU_ARCH_ARMv5) {
- mem_types[MT_DEVICE].prot_l1 |= PMD_BIT4;
- mem_types[MT_DEVICE].prot_sect |= PMD_BIT4;
- mem_types[MT_CACHECLEAN].prot_sect |= PMD_BIT4;
- mem_types[MT_MINICLEAN].prot_sect |= PMD_BIT4;
- mem_types[MT_VECTORS].prot_l1 |= PMD_BIT4;
- mem_types[MT_MEMORY].prot_sect |= PMD_BIT4;
- mem_types[MT_ROM].prot_sect |= PMD_BIT4;
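+ /* ARMv5 and earlier require bit 4 set in first-level descriptors */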
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+ if (mem_types[i].prot_l1)
+ mem_types[i].prot_l1 |= PMD_BIT4;
+ if (mem_types[i].prot_sect)
+ mem_types[i].prot_sect |= PMD_BIT4;
+ }
}
/*
cp = &cache_policies[cachepolicy];
if (cpu_arch >= CPU_ARCH_ARMv5) {
- mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+ mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
} else {
- mem_types[MT_VECTORS].prot_pte |= cp->pte;
+ mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
}
- mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_ROM].prot_sect |= cp->pmd;
ecc_mask ? "en" : "dis", cp->policy);
}
+#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
+
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
} while (address != 0);
/*
- * Create a mapping for the machine vectors at virtual address 0
- * or 0xffff0000. We should always try the high mapping.
+ * Create a mapping for the machine vectors at the high-vectors
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
*/
init_maps->physical = virt_to_phys(init_maps);
- init_maps->virtual = vectors_base();
+ init_maps->virtual = 0xffff0000;
init_maps->length = PAGE_SIZE;
- init_maps->type = MT_VECTORS;
-
+ init_maps->type = MT_HIGH_VECTORS;
create_mapping(init_maps);
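+ /* CPUs running with low vectors fetch from virtual address 0, so map the page there too */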
+ if (!vectors_high()) {
+ init_maps->virtual = 0;
+ init_maps->type = MT_LOW_VECTORS;
+ create_mapping(init_maps);
+ }
+
flush_cache_all();
flush_tlb_all();
}
#define MT_DEVICE 0
#define MT_CACHECLEAN 1
#define MT_MINICLEAN 2
-#define MT_VECTORS 3
-#define MT_MEMORY 4
-#define MT_ROM 5
+#define MT_LOW_VECTORS 3
+#define MT_HIGH_VECTORS 4
+#define MT_MEMORY 5
+#define MT_ROM 6
extern void create_memmap_holes(struct meminfo *);
extern void memtable_init(struct meminfo *);
extern unsigned int user_debug;
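/*
 * CR_V is the CP15 c1 control register V bit: when set, the CPU takes
 * its exception vectors at 0xffff0000 instead of 0.  Pre-ARMv4 CPUs
 * have no V bit, so they can only use low vectors.
 */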
#if __LINUX_ARM_ARCH__ >= 4
-#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
+#define vectors_high() (cr_alignment & CR_V)
#else
-#define vectors_base() (0)
+#define vectors_high() (0)
#endif
#define mb() __asm__ __volatile__ ("" : : : "memory")