 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes kernel pages
*
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux page tables are concerned, flush it too.
* -- Cort
*/
+/*
+ * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
+ * the cache operations on the bus. Hence we need to use an IPI
+ * to get the other CPU(s) to invalidate their TLBs.
+ */
+#ifdef CONFIG_SMP_750
+#define FINISH_FLUSH smp_send_tlb_invalidate(0)
+#else
+#define FINISH_FLUSH do { } while (0)
+#endif
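+/*
+ * Rough sketch of the send side of that IPI, under 2.4-era ppc SMP
+ * assumptions (the PVR version check matches 750-family CPUs; the
+ * receiving CPUs' message handler simply executes a tlbia):
+ *
+ *	void smp_send_tlb_invalidate(int cpu)
+ *	{
+ *		if (PVR_VER(mfspr(PVR)) == 8)
+ *			smp_message_pass(MSG_ALL_BUT_SELF,
+ *					 PPC_MSG_INVALIDATE_TLB, 0, 0);
+ *	}
+ */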
+
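+/*
+ * Flush the hash-table (and hence TLB) entries for all addresses
+ * in the given range of the given context, walking the Linux page
+ * tables one pmd at a time.  If there is no hash table (Hash == 0),
+ * the whole TLB is flushed instead.
+ */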
+static void flush_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long pmd_end;
+ int count;
+ unsigned int ctx = mm->context;
+
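+	/* no hash table to clean out: just flush the whole TLB */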
+ if (Hash == 0) {
+ _tlbia();
+ return;
+ }
+ start &= PAGE_MASK;
+ if (start >= end)
+ return;
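+	/* make 'end' the last byte of the last page in the range */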
+ end = (end - 1) | ~PAGE_MASK;
+ pmd = pmd_offset(pgd_offset(mm, start), start);
+ for (;;) {
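+		/* last address covered by this pmd, clamped to the flush end */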
+ pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
+ if (pmd_end > end)
+ pmd_end = end;
+ if (!pmd_none(*pmd)) {
+ count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
+ flush_hash_pages(ctx, start, pmd_val(*pmd), count);
+ }
+ if (pmd_end == end)
+ break;
+ start = pmd_end + 1;
+ ++pmd;
+ }
+}
+
/*
* Flush all tlb/hash table entries (except perhaps for those
* mapping RAM starting at PAGE_OFFSET, since they never change).
*/
void
-local_flush_tlb_all(void)
+flush_tlb_all(void)
{
- struct vm_area_struct vma;
-
- /* aargh!!! */
/*
* Just flush the kernel part of the address space, that's
* all that the current callers of this require.
* Eventually I hope to persuade the powers that be that
* we can and should dispense with flush_tlb_all().
* -- paulus.
+ *
+ * In fact this should never get called now that we
+ * have flush_tlb_kernel_range. -- paulus
*/
- vma.vm_mm = &init_mm;
- local_flush_tlb_range(&vma, TASK_SIZE, ~0UL);
+ printk(KERN_ERR "flush_tlb_all called from %p\n",
+ __builtin_return_address(0));
+ flush_range(&init_mm, TASK_SIZE, ~0UL);
+ FINISH_FLUSH;
+}
-#ifdef CONFIG_SMP
- smp_send_tlb_invalidate(0);
-#endif /* CONFIG_SMP */
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ flush_range(&init_mm, start, end);
+ FINISH_FLUSH;
}
/*
 * Flush all the (user) entries for the address space described
 * by mm.  We can't rely on mm->mmap describing all the entries
* that might be in the hash table.
*/
-void
-local_flush_tlb_mm(struct mm_struct *mm)
+void flush_tlb_mm(struct mm_struct *mm)
{
	if (Hash == 0) {
		_tlbia();
+		return;
+	}
+
if (mm->map_count) {
struct vm_area_struct *mp;
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
- local_flush_tlb_range(mp, mp->vm_start, mp->vm_end);
+ flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
} else {
- struct vm_area_struct vma;
- vma.vm_mm = mm;
- local_flush_tlb_range(&vma, 0, TASK_SIZE);
+ flush_range(mm, 0, TASK_SIZE);
}
-
-#ifdef CONFIG_SMP
- smp_send_tlb_invalidate(0);
-#endif
+ FINISH_FLUSH;
}
-void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;
+
+	if (Hash == 0) {
+		_tlbie(vmaddr);
+		return;
+	}
+	/* kernel addresses are mapped by init_mm, not by the user mm */
+	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
-#ifdef CONFIG_SMP
- smp_send_tlb_invalidate(0);
-#endif
+ FINISH_FLUSH;
}
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
* the corresponding HPTE.
*/
-void
-local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
- pmd_t *pmd;
- unsigned long pmd_end;
- int count;
- unsigned int ctx = mm->context;
-
- if (Hash == 0) {
- _tlbia();
- return;
- }
- start &= PAGE_MASK;
- if (start >= end)
- return;
- end = (end - 1) | ~PAGE_MASK;
- pmd = pmd_offset(pgd_offset(mm, start), start);
- for (;;) {
- pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
- if (pmd_end > end)
- pmd_end = end;
- if (!pmd_none(*pmd)) {
- count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
- flush_hash_pages(ctx, start, pmd_val(*pmd), count);
- }
- if (pmd_end == end)
- break;
- start = pmd_end + 1;
- ++pmd;
- }
-
-#ifdef CONFIG_SMP
- smp_send_tlb_invalidate(0);
-#endif
+ flush_range(vma->vm_mm, start, end);
+ FINISH_FLUSH;
}
#if defined(CONFIG_4xx)
#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
-static inline void local_flush_tlb_all(void)
+static inline void flush_tlb_all(void)
{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+ { __tlbia(); }
#define update_mmu_cache(vma, addr, pte) do { } while (0)
#elif defined(CONFIG_8xx)
#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
-static inline void local_flush_tlb_all(void)
+static inline void flush_tlb_all(void)
{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+ { __tlbia(); }
#define update_mmu_cache(vma, addr, pte) do { } while (0)
#else /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
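+/*
+ * flush_tlb_kernel_range() is meant for callers that modify kernel
+ * page tables (vmalloc/vfree, for example) and previously had no
+ * choice but flush_tlb_all().
+ */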
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif
-#define flush_tlb_all local_flush_tlb_all
-#define flush_tlb_mm local_flush_tlb_mm
-#define flush_tlb_page local_flush_tlb_page
-#define flush_tlb_range local_flush_tlb_range
-
/*
* This is called in munmap when we have freed up some page-table
* pages. We don't need to do anything here, there's nothing special