git.neil.brown.name Git - history.git/commitdiff
2.5.8 ARM updates:
author Russell King <rmk@flint.arm.linux.org.uk>
Wed, 17 Apr 2002 20:47:54 +0000 (21:47 +0100)
committer Russell King <rmk@flint.arm.linux.org.uk>
Wed, 17 Apr 2002 20:47:54 +0000 (21:47 +0100)
 - preempt updates
 - build fixes
 - new tlb flush macro
 - add asm/cacheflush.h and asm/tlbflush.h

23 files changed:
arch/arm/kernel/dma.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/irq.c
arch/arm/kernel/process.c
arch/arm/kernel/semaphore.c
arch/arm/kernel/signal.c
arch/arm/kernel/time.c
arch/arm/mm/minicache.c
arch/arm/mm/proc-xscale.S
arch/arm/mm/tlb-v3.S
arch/arm/mm/tlb-v4.S
arch/arm/mm/tlb-v4wb.S
arch/arm/nwfpe/fpmodule.c
include/asm-arm/cacheflush.h [new file with mode: 0644]
include/asm-arm/io.h
include/asm-arm/pgalloc.h
include/asm-arm/proc-armo/cache.h
include/asm-arm/proc-armo/tlbflush.h [new file with mode: 0644]
include/asm-arm/proc-armv/cache.h
include/asm-arm/proc-armv/tlbflush.h [new file with mode: 0644]
include/asm-arm/thread_info.h
include/asm-arm/tlbflush.h [new file with mode: 0644]

index c6ea827fb70e305e7344d5efa5688ac9387e894c..c5a4c6a09dab89f4334bbe23825f93a583f68881 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/mman.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/errno.h>
 
 #include <asm/dma.h>
 
index 0b799eb31163c525079e6327ddc5307b80351a0e..ba2ed50151260e4723386240c703661b0a429513 100644 (file)
@@ -756,7 +756,6 @@ __irq_svc:  sub     sp, sp, #S_FRAME_SIZE
 #ifdef CONFIG_PREEMPT
                ldr     r0, [r8, #TI_FLAGS]             @ get flags
                tst     r0, #_TIF_NEED_RESCHED
-               ldrne   r6, .LCirq_stat
                blne    svc_preempt
 preempt_return:
                ldr     r0, [r8, #TI_PREEMPT]           @ read preempt value
@@ -770,20 +769,20 @@ preempt_return:
 
 #ifdef CONFIG_PREEMPT
 svc_preempt:   teq     r9, #0                          @ was preempt count = 0
+               ldreq   r6, .LCirq_stat
                movne   pc, lr                          @ no
                ldr     r0, [r6, #4]                    @ local_irq_count
                ldr     r1, [r6, #8]                    @ local_bh_count
                adds    r0, r0, r1
                movne   pc, lr
-               ldr     r1, [r8, #TI_TASK]
-               set_cpsr_c r2, #MODE_SVC                @ enable IRQs
-               str     r0, [r1, #0]                    @ current->state = TASK_RUNNING
-1:             bl      SYMBOL_NAME(schedule)
+               mov     r7, #PREEMPT_ACTIVE
+               str     r7, [r8, #TI_PREEMPT]           @ set PREEMPT_ACTIVE
+1:             set_cpsr_c r2, #MODE_SVC                @ enable IRQs
+               bl      SYMBOL_NAME(schedule)
                set_cpsr_c r0, #PSR_I_BIT | MODE_SVC    @ disable IRQs
-               ldr     r0, [r8, #TI_FLAGS]
+               ldr     r0, [r8, #TI_FLAGS]             @ get new tasks TI_FLAGS
                tst     r0, #_TIF_NEED_RESCHED
-               beq     preempt_return
-               set_cpsr_c r0, #MODE_SVC                @ enable IRQs
+               beq     preempt_return                  @ go again
                b       1b
 #endif
 
index 8d1566adbcbf32639c61cba1d81ce9fa0af641df..546d7554b342490f1cb80dcaf22ca9dd47aba168 100644 (file)
@@ -76,6 +76,9 @@ __do_notify_resume:
  * This is how we return from a fork.
  */
 ENTRY(ret_from_fork)
+#ifdef CONFIG_PREEMPT
+       bl      schedule_tail
+#endif
        get_thread_info tsk
        ldr     ip, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
index e98c895953ad1aab254f15bf67fb7393cb6b6e18..345b69e84e927e6e76649cbbe1297851da5632a9 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
+#include <linux/errno.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
index 1118e5301200d42a9e8c962c971382bb77f0a886..d073d21127fc45ee18fa7e1702b0d41e35649ca5 100644 (file)
@@ -73,7 +73,7 @@ void (*pm_power_off)(void);
  * This is our default idle handler.  We need to disable
  * interrupts here to ensure we don't miss a wakeup call.
  */
-static void default_idle(void)
+void default_idle(void)
 {
        __cli();
        if (!need_resched() && !hlt_counter)
index 19aa6e9222af8599035cfe7f67a8820d4398e61f..2ac3faa7b3642294372b80560396274bfb1c9b15 100644 (file)
@@ -13,6 +13,7 @@
  */
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/errno.h>
 
 #include <asm/semaphore.h>
 
index 08e9740fd56c5defc054745bc2e77591ed19f034..b90df71485d062a9e898809a16208930ff5d83eb 100644 (file)
@@ -628,14 +628,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 
                        case SIGSTOP: {
                                struct signal_struct *sig;
+                               current->state = TASK_STOPPED;
                                current->exit_code = signr;
                                sig = current->parent->sig;
-                               preempt_disable();
-                               current->state = TASK_STOPPED;
                                if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                                        notify_parent(current, SIGCHLD);
                                schedule();
-                               preempt_enable();
                                continue;
                        }
 
index 39b50b5a701872d52b438223b0f35d8ccb67912b..7c7e03c5b6e94d385073c4c276be7a28a77b8921 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/timex.h>
+#include <linux/errno.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
index 7f20b9d0ea85e215afa01b3691ea0b26564e9804..9529c4b27ad6da13ddd80a5d904106e32c04a956 100644 (file)
@@ -44,7 +44,7 @@ static pte_t *minicache_pte;
 unsigned long map_page_minicache(unsigned long virt)
 {
        set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
-       flush_kern_tlb_page(minicache_address);
+       flush_tlb_kernel_page(minicache_address);
 
        return minicache_address;
 }
index d3b439b6816a1c1590cdfc7841889075d7c0736d..67cb6ae4df400134a44046a5273d5e0f107a81e3 100644 (file)
@@ -723,7 +723,6 @@ ENTRY(xscale_processor_functions)
        .word   cpu_xscale_set_pgd
        .word   cpu_xscale_set_pmd
        .word   cpu_xscale_set_pte
-
        .size   xscale_processor_functions, . - xscale_processor_functions
 
        .type   cpu_80200_info, #object
@@ -779,6 +778,5 @@ __pxa250_proc_info:
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   v5te_mc_user_fns
-       .size   __cotulla_proc_info, . - __cotulla_proc_info
        .size   __pxa250_proc_info, . - __pxa250_proc_info
 
index 0d48947b8354622ceb25a0593adc55fe2c8bf1fc..270108bb40c0767f5c2dc46b4fb6cc6013f3df17 100644 (file)
@@ -53,6 +53,7 @@ ENTRY(v3_flush_user_tlb_range)
        act_mm  r3                              @ get current->active_mm
        teq     r2, r3                          @ == mm ?
        movne   pc, lr                          @ no, we dont do anything
+ENTRY(v3_flush_kern_tlb_range)
        bic     r0, r0, #0x0ff
        bic     r0, r0, #0xf00
 1:     mcr     p15, 0, r0, c6, c0, 0           @ invalidate TLB entry
@@ -87,5 +88,6 @@ ENTRY(v3_tlb_fns)
        .long   v3_flush_user_tlb_mm
        .long   v3_flush_user_tlb_range
        .long   v3_flush_user_tlb_page
+       .long   v3_flush_kern_tlb_range
        .long   v3_flush_kern_tlb_page
        .size   v3_tlb_fns, . - v3_tlb_fns
index 294059991d3ec31cfe5e9fa2dd293f33f18676c2..d697d1f09b3b883ad91fe5d744761a7d18ae67e2 100644 (file)
@@ -42,7 +42,7 @@ ENTRY(v4_flush_kern_tlb_all)
 /*
  *     v4_flush_user_tlb_range(start, end, mm)
  *
- *     Invalidate a range of TLB entries in the specified address space.
+ *     Invalidate a range of TLB entries in the specified user address space.
  *
  *     - start - range start address
  *     - end   - range end address
@@ -85,6 +85,27 @@ ENTRY(v4_flush_user_tlb_page)
        mcrne   p15, 0, r3, c8, c5, 0           @ invalidate I TLB
        mov     pc, lr
 
+/*
+ *     v4_flush_kern_tlb_range(start, end)
+ *
+ *     Invalidate a range of TLB entries in the specified kernel
+ *     address range.
+ *
+ *     - start - virtual address (may not be aligned)
+ *     - end   - virtual address (may not be aligned)
+ */
+       .align  5
+ENTRY(v4_flush_kern_tlb_range)
+       bic     r0, r0, #0x0ff
+       bic     r0, r0, #0xf00
+1:     mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
+       add     r0, r0, #PAGE_SZ
+       cmp     r0, r1
+       blo     1b
+       mcr     p15, 0, r3, c8, c5, 0           @ invalidate I TLB
+       mov     pc, lr
+
+
 /*
  *     v4_flush_kern_tlb_page(kaddr)
  *
@@ -106,5 +127,6 @@ ENTRY(v4_tlb_fns)
        .long   v4_flush_user_tlb_mm
        .long   v4_flush_user_tlb_range
        .long   v4_flush_user_tlb_page
+       .long   v4_flush_kern_tlb_range
        .long   v4_flush_kern_tlb_page
        .size   v4_tlb_fns, . - v4_tlb_fns
index 3cc408421c400af6a7de9807d5271a8a64474988..3cdca44fcfb2ac51c0cd5336cb00c3fc2bbac15d 100644 (file)
@@ -88,7 +88,41 @@ ENTRY(v4wb_flush_user_tlb_page)
        mcr     p15, 0, r3, c7, c10, 4          @ drain WB
        tst     r2, #VM_EXEC
        mcrne   p15, 0, r3, c8, c5, 0           @ invalidate I TLB
+       mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
+       mov     pc, lr
+
+/*
+ *     v4wb_flush_kern_tlb_range(start, end)
+ *
+ *     Invalidate a range of TLB entries in the specified kernel
+ *     address range.
+ *
+ *     - start - virtual address (may not be aligned)
+ *     - end   - virtual address (may not be aligned)
+ */
+ENTRY(v4wb_flush_kern_tlb_range)
+       mov     r3, #0
+       mcr     p15, 0, r3, c7, c10, 4          @ drain WB
+       bic     r0, r0, #0x0ff
+       bic     r0, r0, #0xf00
+       mcr     p15, 0, r3, c8, c5, 0           @ invalidate I TLB
+1:     mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
+       add     r0, r0, #PAGE_SZ
+       cmp     r0, r1
+       blo     1b
+       mov     pc, lr
+
+/*
+ *     v4wb_flush_kern_tlb_page(kaddr)
+ *
+ *     Invalidate the TLB entry for the specified page.  The address
+ *     will be in the kernels virtual memory space.  Current uses
+ *     only require the D-TLB to be invalidated.
+ *
+ *     - kaddr - Kernel virtual memory address
+ */
 ENTRY(v4wb_flush_kern_tlb_page)
+       mcr     p15, 0, r3, c8, c5, 0           @ invalidate I TLB
        mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
        mov     pc, lr
 
@@ -107,14 +141,17 @@ ENTRY(v4wb_flush_kern_tlb_page)
  */
        .align  5
 ENTRY(v4wbi_flush_user_tlb_range)
+       vma_vm_mm ip, r2
        act_mm  r3                              @ get current->active_mm
-       teq     r2, r3                          @ == mm ?
+       eors    r3, ip, r3                      @ == mm ?
        movne   pc, lr                          @ no, we dont do anything
        mov     r3, #0
        mcr     p15, 0, r3, c7, c10, 4          @ drain WB
+       vma_vm_flags r2, r2
        bic     r0, r0, #0x0ff
        bic     r0, r0, #0xf00
-1:     mcr     p15, 0, r0, c8, c5, 1           @ invalidate I TLB entry
+1:     tst     r2, #VM_EXEC
+       mcrne   p15, 0, r0, c8, c5, 1           @ invalidate I TLB entry
        mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
@@ -140,7 +177,23 @@ ENTRY(v4wbi_flush_user_tlb_page)
        mcr     p15, 0, r3, c7, c10, 4          @ drain WB
        tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c8, c5, 1           @ invalidate I TLB entry
+       mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
+       mov     pc, lr
+
+ENTRY(v4wbi_flush_kern_tlb_range)
+       mov     r3, #0
+       mcr     p15, 0, r3, c7, c10, 4          @ drain WB
+       bic     r0, r0, #0x0ff
+       bic     r0, r0, #0xf00
+1:     mcr     p15, 0, r0, c8, c5, 1           @ invalidate I TLB entry
+       mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
+       add     r0, r0, #PAGE_SZ
+       cmp     r0, r1
+       blo     1b
+       mov     pc, lr
+
 ENTRY(v4wbi_flush_kern_tlb_page)
+       mcr     p15, 0, r0, c8, c5, 1           @ invalidate I TLB entry
        mcr     p15, 0, r0, c8, c6, 1           @ invalidate D TLB entry
        mov     pc, lr
 
@@ -152,6 +205,7 @@ ENTRY(v4wb_tlb_fns)
        .long   v4wb_flush_user_tlb_mm
        .long   v4wb_flush_user_tlb_range
        .long   v4wb_flush_user_tlb_page
+       .long   v4wb_flush_kern_tlb_range
        .long   v4wb_flush_kern_tlb_page
        .size   v4wb_tlb_fns, . - v4wb_tlb_fns
 
@@ -161,5 +215,6 @@ ENTRY(v4wbi_tlb_fns)
        .long   v4wbi_flush_user_tlb_mm
        .long   v4wbi_flush_user_tlb_range
        .long   v4wbi_flush_user_tlb_page
+       .long   v4wbi_flush_kern_tlb_range
        .long   v4wbi_flush_kern_tlb_page
        .size   v4wbi_tlb_fns, . - v4wbi_tlb_fns
index d367cfe2b2833a444a0d02f05aecd04b6107d0bb..528fa710aa3464cd6bc6ba1f11f9ea36f70bb836 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/config.h>
 
 /* XXX */
+#include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
new file mode 100644 (file)
index 0000000..7e1e153
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ *  linux/include/asm-arm/cacheflush.h
+ *
+ *  Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_CACHEFLUSH_H
+#define _ASMARM_CACHEFLUSH_H
+
+#include <asm/proc/cache.h>
+
+#endif
index d527c3c568c5d930a179a5f91b2d68a91e07be8b..d12af77640aac3146b6f0c26da91b016af579374 100644 (file)
@@ -272,7 +272,7 @@ extern void consistent_sync(void *vaddr, size_t size, int rw);
 /*
  * Change "struct page" to physical address.
  */
-#ifdef CONFIG_DISCONTIG
+#ifdef CONFIG_DISCONTIGMEM
 #define page_to_phys(page)                                       \
        ((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
                  + page_zone(page)->zone_start_paddr)
index 18c0c5354f9c8d10f8cd9cde09121733817e0a30..f0e2c9f5393d2a01228eae792444ab3e6fb68e63 100644 (file)
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
-#include <linux/config.h>
-
 #include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 
-#include <asm/proc/cache.h>
 #include <asm/proc/pgalloc.h>
 
 /*
index 3d87ca3842b63c0c0eaab83d0486041d134d8913..1ed553bc0aa8f9dbf821a004313a39c4d19201a4 100644 (file)
 
 /* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */
 #define clean_cache_area(_start,_size) do { } while (0)
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- */
-#define flush_tlb_all()                                memc_update_all()
-#define flush_tlb_mm(mm)                       memc_update_mm(mm)
-#define flush_tlb_range(vma,start,end)         \
-               do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
-#define flush_tlb_page(vma, vmaddr)            do { } while (0)
-
-/*
- * The following handle the weird MEMC chip
- */
-static inline void memc_update_all(void)
-{
-       struct task_struct *p;
-
-       cpu_memc_update_all(init_mm.pgd);
-       for_each_task(p) {
-               if (!p->mm)
-                       continue;
-               cpu_memc_update_all(p->mm->pgd);
-       }
-       processor._set_pgd(current->active_mm->pgd);
-}
-
-static inline void memc_update_mm(struct mm_struct *mm)
-{
-       cpu_memc_update_all(mm->pgd);
-
-       if (mm == current->active_mm)
-               processor._set_pgd(mm->pgd);
-}
-
-static inline void
-memc_clear(struct mm_struct *mm, struct page *page)
-{
-       cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
-
-       if (mm == current->active_mm)
-               processor._set_pgd(mm->pgd);
-}
-
-static inline void
-memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
-{
-       cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
-
-       if (mm == current->active_mm)
-               processor._set_pgd(mm->pgd);
-}
-
-static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       memc_update_addr(mm, pte, addr);
-}
diff --git a/include/asm-arm/proc-armo/tlbflush.h b/include/asm-arm/proc-armo/tlbflush.h
new file mode 100644 (file)
index 0000000..f10e5b6
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+#define flush_tlb_all()                                memc_update_all()
+#define flush_tlb_mm(mm)                       memc_update_mm(mm)
+#define flush_tlb_range(vma,start,end)         \
+               do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
+#define flush_tlb_page(vma, vmaddr)            do { } while (0)
+
+/*
+ * The following handle the weird MEMC chip
+ */
+static inline void memc_update_all(void)
+{
+       struct task_struct *p;
+
+       cpu_memc_update_all(init_mm.pgd);
+       for_each_task(p) {
+               if (!p->mm)
+                       continue;
+               cpu_memc_update_all(p->mm->pgd);
+       }
+       processor._set_pgd(current->active_mm->pgd);
+}
+
+static inline void memc_update_mm(struct mm_struct *mm)
+{
+       cpu_memc_update_all(mm->pgd);
+
+       if (mm == current->active_mm)
+               processor._set_pgd(mm->pgd);
+}
+
+static inline void
+memc_clear(struct mm_struct *mm, struct page *page)
+{
+       cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
+
+       if (mm == current->active_mm)
+               processor._set_pgd(mm->pgd);
+}
+
+static inline void
+memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
+{
+       cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
+
+       if (mm == current->active_mm)
+               processor._set_pgd(mm->pgd);
+}
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       memc_update_addr(mm, pte, addr);
+}
+
index 623e262d4166c0149673958fc1ffa4f6f68404e6..fe678c2b0d7e89619153d0a63ee207d452c4d5e9 100644 (file)
@@ -132,108 +132,3 @@ static inline void flush_dcache_page(struct page *page)
        do {                                                            \
                cpu_icache_invalidate_range((_s), (_e));                \
        } while (0)
-
-/*
- *     TLB Management
- *     ==============
- *
- *     The arch/arm/mm/tlb-*.S files implement these methods.
- *
- *     The TLB specific code is expected to perform whatever tests it
- *     needs to determine if it should invalidate the TLB for each
- *     call.  Start addresses are inclusive and end addresses are
- *     exclusive; it is safe to round these addresses down.
- *
- *     flush_tlb_all()
- *
- *             Invalidate the entire TLB.
- *
- *     flush_tlb_mm(mm)
- *
- *             Invalidate all TLB entries in a particular address
- *             space.
- *             - mm    - mm_struct describing address space
- *
- *     flush_tlb_range(mm,start,end)
- *
- *             Invalidate a range of TLB entries in the specified
- *             address space.
- *             - mm    - mm_struct describing address space
- *             - start - start address (may not be aligned)
- *             - end   - end address (exclusive, may not be aligned)
- *
- *     flush_tlb_page(vaddr,vma)
- *
- *             Invalidate the specified page in the specified address range.
- *             - vaddr - virtual address (may not be aligned)
- *             - vma   - vma_struct describing address range
- *
- *     flush_kern_tlb_page(kaddr)
- *
- *             Invalidate the TLB entry for the specified page.  The address
- *             will be in the kernels virtual memory space.  Current uses
- *             only require the D-TLB to be invalidated.
- *             - kaddr - Kernel virtual memory address
- */
-
-struct cpu_tlb_fns {
-       void (*flush_kern_all)(void);
-       void (*flush_user_mm)(struct mm_struct *);
-       void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
-       void (*flush_user_page)(unsigned long, struct vm_area_struct *);
-       void (*flush_kern_page)(unsigned long);
-};
-
-/*
- * Convert calls to our calling convention.
- */
-#define flush_tlb_all()                        __cpu_flush_kern_tlb_all()
-#define flush_tlb_mm(mm)               __cpu_flush_user_tlb_mm(mm)
-#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_page(vma,vaddr)      __cpu_flush_user_tlb_page(vaddr,vma)
-#define flush_kern_tlb_page(kaddr)     __cpu_flush_kern_tlb_page(kaddr)
-
-/*
- * Now select the calling method
- */
-#ifdef MULTI_TLB
-
-extern struct cpu_tlb_fns cpu_tlb;
-
-#define __cpu_flush_kern_tlb_all       cpu_tlb.flush_kern_all
-#define __cpu_flush_user_tlb_mm                cpu_tlb.flush_user_mm
-#define __cpu_flush_user_tlb_range     cpu_tlb.flush_user_range
-#define __cpu_flush_user_tlb_page      cpu_tlb.flush_user_page
-#define __cpu_flush_kern_tlb_page      cpu_tlb.flush_kern_page
-
-#else
-
-#define __cpu_flush_kern_tlb_all       __glue(_TLB,_flush_kern_tlb_all)
-#define __cpu_flush_user_tlb_mm                __glue(_TLB,_flush_user_tlb_mm)
-#define __cpu_flush_user_tlb_range     __glue(_TLB,_flush_user_tlb_range)
-#define __cpu_flush_user_tlb_page      __glue(_TLB,_flush_user_tlb_page)
-#define __cpu_flush_kern_tlb_page      __glue(_TLB,_flush_kern_tlb_page)
-
-extern void __cpu_flush_kern_tlb_all(void);
-extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_page(unsigned long);
-
-#endif
-
-/*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
- * cache entries for the kernels virtual memory range are written
- * back to the page.
- */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
-
-/*
- * Old ARM MEMC stuff.  This supports the reversed mapping handling that
- * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
- */
-#define memc_update_all()              do { } while (0)
-#define memc_update_mm(mm)             do { } while (0)
-#define memc_update_addr(mm,pte,log)   do { } while (0)
-#define memc_clear(mm,physaddr)                do { } while (0)
diff --git a/include/asm-arm/proc-armv/tlbflush.h b/include/asm-arm/proc-armv/tlbflush.h
new file mode 100644 (file)
index 0000000..d465e95
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ *  linux/include/asm-arm/proc-armv/tlbflush.h
+ *
+ *  Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *     TLB Management
+ *     ==============
+ *
+ *     The arch/arm/mm/tlb-*.S files implement these methods.
+ *
+ *     The TLB specific code is expected to perform whatever tests it
+ *     needs to determine if it should invalidate the TLB for each
+ *     call.  Start addresses are inclusive and end addresses are
+ *     exclusive; it is safe to round these addresses down.
+ *
+ *     flush_tlb_all()
+ *
+ *             Invalidate the entire TLB.
+ *
+ *     flush_tlb_mm(mm)
+ *
+ *             Invalidate all TLB entries in a particular address
+ *             space.
+ *             - mm    - mm_struct describing address space
+ *
+ *     flush_tlb_range(vma,start,end)
+ *
+ *             Invalidate a range of TLB entries in the specified
+ *             address space.
+ *             - vma   - vma_struct describing address range
+ *             - start - start address (may not be aligned)
+ *             - end   - end address (exclusive, may not be aligned)
+ *             - start - start address (may not be aligned)
+ *             - end   - end address (exclusive, may not be aligned)
+ *
+ *     flush_tlb_page(vaddr,vma)
+ *
+ *             Invalidate the specified page in the specified address range.
+ *             - vaddr - virtual address (may not be aligned)
+ *             - vma   - vma_struct describing address range
+ *
+ *     flush_tlb_kernel_page(kaddr)
+ *
+ *             Invalidate the TLB entry for the specified page.  The address
+ *             will be in the kernels virtual memory space.  Current uses
+ *             only require the D-TLB to be invalidated.
+ *             - kaddr - Kernel virtual memory address
+ */
+
+struct cpu_tlb_fns {
+       void (*flush_kern_all)(void);
+       void (*flush_user_mm)(struct mm_struct *);
+       void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
+       void (*flush_user_page)(unsigned long, struct vm_area_struct *);
+       void (*flush_kern_range)(unsigned long, unsigned long);
+       void (*flush_kern_page)(unsigned long);
+};
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_tlb_all()                        __cpu_flush_kern_tlb_all()
+#define flush_tlb_mm(mm)               __cpu_flush_user_tlb_mm(mm)
+#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_page(vma,vaddr)      __cpu_flush_user_tlb_page(vaddr,vma)
+#define flush_tlb_kernel_range(s,e)    __cpu_flush_kern_tlb_range(s,e)
+#define flush_tlb_kernel_page(kaddr)   __cpu_flush_kern_tlb_page(kaddr)
+
+/*
+ * Now select the calling method
+ */
+#ifdef MULTI_TLB
+
+extern struct cpu_tlb_fns cpu_tlb;
+
+#define __cpu_flush_kern_tlb_all       cpu_tlb.flush_kern_all
+#define __cpu_flush_user_tlb_mm                cpu_tlb.flush_user_mm
+#define __cpu_flush_user_tlb_range     cpu_tlb.flush_user_range
+#define __cpu_flush_user_tlb_page      cpu_tlb.flush_user_page
+#define __cpu_flush_kern_tlb_range     cpu_tlb.flush_kern_range
+#define __cpu_flush_kern_tlb_page      cpu_tlb.flush_kern_page
+
+#else
+
+#define __cpu_flush_kern_tlb_all       __glue(_TLB,_flush_kern_tlb_all)
+#define __cpu_flush_user_tlb_mm                __glue(_TLB,_flush_user_tlb_mm)
+#define __cpu_flush_user_tlb_range     __glue(_TLB,_flush_user_tlb_range)
+#define __cpu_flush_user_tlb_page      __glue(_TLB,_flush_user_tlb_page)
+#define __cpu_flush_kern_tlb_range     __glue(_TLB,_flush_kern_tlb_range)
+#define __cpu_flush_kern_tlb_page      __glue(_TLB,_flush_kern_tlb_page)
+
+extern void __cpu_flush_kern_tlb_all(void);
+extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+extern void __cpu_flush_kern_tlb_page(unsigned long);
+
+#endif
+
+/*
+ * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernels virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+
+/*
+ * ARM processors do not cache TLB tables in RAM.
+ */
+#define flush_tlb_pgtables(mm,start,end)       do { } while (0)
+
+/*
+ * Old ARM MEMC stuff.  This supports the reversed mapping handling that
+ * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
+ */
+#define memc_update_all()              do { } while (0)
+#define memc_update_mm(mm)             do { } while (0)
+#define memc_update_addr(mm,pte,log)   do { } while (0)
+#define memc_clear(mm,physaddr)                do { } while (0)
+
index bfef8bfb0e15829d19e11b47b5a5eac83a4c4753..ede2ac4359b166d58c1fa9aa0a8fe40c779b3c5b 100644 (file)
@@ -102,6 +102,8 @@ static inline unsigned long __thread_saved_fp(struct thread_info *thread)
 
 #endif
 
+#define PREEMPT_ACTIVE 0x04000000
+
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE  - syscall trace active
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
new file mode 100644 (file)
index 0000000..9011f00
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ *  linux/include/asm-arm/tlbflush.h
+ *
+ *  Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_TLBFLUSH_H
+#define _ASMARM_TLBFLUSH_H
+
+#include <asm-arm/proc/tlbflush.h>
+
+#endif