#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
+#include <asm/tlbflush.h>
#ifdef CONFIG_PPC_EEH
#include <asm/eeh.h>
#endif
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
/*
* Create a pte. Used during initialization only.
#include <asm/pci_dma.h>
#include <linux/pci.h>
#include <asm/Naca.h>
+#include <asm/tlbflush.h>
/* Status return values */
#define H_Success 0
#include <asm/bitops.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
-#include <linux/adb.h>
-#include <linux/cuda.h>
-#include <linux/pmu.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/hw_irq.h>
#include <asm/abs_addr.h>
-#ifdef CONFIG_SMP
#include <asm/smplock.h>
-#endif /* CONFIG_SMP */
+#include <asm/cacheflush.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/iSeries_pci.h>
#include <asm/iSeries/iSeries_proc.h>
#include <asm/pgtable.h>
#include <asm/ppcdebug.h>
#include <asm/unistd.h>
+#include <asm/cacheflush.h>
#define DEBUG_SIG 0
}
void
-local_flush_tlb_mm(struct mm_struct *mm)
+flush_tlb_all(void)
+{
+	/* Implemented to flush just the vmalloc area, since
+	 * vmalloc is the only user of flush_tlb_all.
+ */
+ __flush_tlb_range(NULL, VMALLOC_START, VMALLOC_END);
+}
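+
+/*
+ * Usage sketch for flush_tlb_all(): the vmalloc teardown path is the
+ * expected caller; roughly (names as in the 2.4 mm/vmalloc.c, shown
+ * here for illustration only):
+ *
+ *	vmfree_area_pages(VMALLOC_VMADDR(addr), size);
+ *	flush_tlb_all();
+ */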
+
+void
+flush_tlb_mm(struct mm_struct *mm)
{
if (mm->map_count) {
struct vm_area_struct *mp;
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
- local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);
+ __flush_tlb_range(mm, mp->vm_start, mp->vm_end);
} else {
/* MIKEC: It is not clear why this is needed */
/* paulus: it is needed to clear out stale HPTEs
* when an address space (represented by an mm_struct)
* is being destroyed. */
- local_flush_tlb_range(mm, USER_START, USER_END);
+ __flush_tlb_range(mm, USER_START, USER_END);
}
/* XXX are there races with checking cpu_vm_mask? - Anton */
/*
 * Callers should hold the mm->page_table_lock
 */
void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
unsigned long context = 0;
pgd_t *pgd;
break;
default:
- panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
+ panic("flush_tlb_page: invalid region 0x%016lx", vmaddr);
}
struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
void
-local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
pgd_t *pgd;
pmd_t *pmd;
--- /dev/null
+#ifndef _PPC64_CACHEFLUSH_H
+#define _PPC64_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * No cache flushing is required when address mappings are
+ * changed, because the caches on PowerPCs are physically
+ * addressed.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+
+extern void flush_dcache_page(struct page *page);
+extern void flush_icache_range(unsigned long, unsigned long);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr,
+ int len);
+extern void __flush_dcache_icache(void *page_va);
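+
+/*
+ * Usage sketch for flush_icache_range() (illustrative only): after
+ * storing instructions into memory, e.g. when loading a module or
+ * writing a signal trampoline, make the icache coherent with the
+ * dcache:
+ *
+ *	memcpy(dst, insns, len);
+ *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
+ */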
+
+#endif /* _PPC64_CACHEFLUSH_H */
pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
}
-struct mm_struct;
-struct vm_area_struct;
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void local_flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end);
-
-#define flush_tlb_all local_flush_tlb_all
-#define flush_tlb_mm local_flush_tlb_mm
-#define flush_tlb_page local_flush_tlb_page
-#define flush_tlb_range(vma, start, end) local_flush_tlb_range(vma->vm_mm, start, end)
-
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* PPC has hw page tables. */
-}
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, a, b) do { } while (0)
-#define flush_cache_page(vma, p) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr, int len);
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void __flush_dcache_icache(void *page_va);
-extern void flush_dcache_page(struct page *page);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif
-extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
- int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
-
-/* TLB flush batching */
-#define MAX_BATCH_FLUSH 128
-struct tlb_batch_data {
- pte_t pte;
- unsigned long addr;
-};
-extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
-
/* Encode and de-code a swap entry */
#define SWP_TYPE(entry) (((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry) ((entry).val >> 8)
--- /dev/null
+#ifndef _PPC64_TLBFLUSH_H
+#define _PPC64_TLBFLUSH_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes' TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void __flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+#define flush_tlb_range(vma, start, end) \
+	__flush_tlb_range((vma)->vm_mm, (start), (end))
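+
+/*
+ * Usage sketch for flush_tlb_range() (illustrative only): generic mm
+ * code tears down the ptes for a region and then drops the stale
+ * hash table entries, e.g.:
+ *
+ *	zap_page_range(vma->vm_mm, start, end - start);
+ *	flush_tlb_range(vma, start, end);
+ */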
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ /* PPC has hw page tables. */
+}
+
+extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
+ int local);
+extern void flush_hash_range(unsigned long context, unsigned long number,
+			     int local);
+
+/* TLB flush batching */
+#define MAX_BATCH_FLUSH 128
+struct tlb_batch_data {
+ pte_t pte;
+ unsigned long addr;
+};
+extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
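+
+/*
+ * Batching sketch (illustrative, assuming a per-cpu fill-then-flush
+ * pattern): a flusher collects up to MAX_BATCH_FLUSH (pte, addr)
+ * pairs for the current cpu, then invalidates them in one pass over
+ * the hash table:
+ *
+ *	struct tlb_batch_data *batch = tlb_batch_array[smp_processor_id()];
+ *	batch[i].pte  = pte;
+ *	batch[i].addr = addr;
+ *	...
+ *	flush_hash_range(context, i, local);
+ */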
+
+#endif /* _PPC64_TLBFLUSH_H */