if (agp_special_page)
	printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
#endif /* defined(CONFIG_ALL_PPC) */
#ifdef CONFIG_PPC_ISERIES
	create_virtual_bus_tce_table();
-
#endif /* CONFIG_PPC_ISERIES */
+
+	/* Make sure all our pagetable pages have page->mapping
+	   and page->index set correctly. */
+	for (addr = KERNELBASE; addr != 0; addr += PGDIR_SIZE) {
+		struct page *pg;
+		pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);
+		if (pmd_present(*pmd)) {
+			pg = pmd_page(*pmd);
+			pg->mapping = (void *) &init_mm;
+			pg->index = addr;
+		}
+	}
+
	mem_init_done = 1;
}
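The loop above tags every kernel pagetable page with the owning mm (page->mapping) and the first virtual address its PTEs map (page->index), so later code can recover "which addresses does this PTE page cover?" from nothing but the struct page. A minimal sketch of how such a tag could be consumed; the helper name and flush loop are illustrative only, not part of this patch:

/* Hypothetical consumer of the tags set up in mem_init() above.
 * On ppc32's two-level page tables, one PTE page maps PGDIR_SIZE
 * bytes of address space.
 */
static void flush_hptes_for_ptepage(struct page *pg)
{
	struct mm_struct *mm = (struct mm_struct *) pg->mapping;
	unsigned long addr = pg->index;		/* first address mapped */
	unsigned long end = addr + PGDIR_SIZE;

	for (; addr < end; addr += PAGE_SIZE) {
		/* e.g. invalidate the HPTE shadowing the PTE for addr,
		 * using mm->context to select the MMU context */
	}
}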
	clear_bit(PG_arch_1, &page->flags);
}
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long phys;
-
-	if (page->mapping && !PageReserved(page)
-	    && !test_bit(PG_arch_1, &page->flags)) {
-		phys = page_to_pfn(page) << PAGE_SHIFT;
-		__flush_dcache_icache_phys(phys);
-		set_bit(PG_arch_1, &page->flags);
-	}
-}
-
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);
+	clear_bit(PG_arch_1, &pg->flags);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);
-	__flush_dcache_icache(vto);
+	clear_bit(PG_arch_1, &pg->flags);
}
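The two hunks above invert the i-cache protocol: instead of flushing eagerly in copy_user_page(), writing fresh data into a page merely clears PG_arch_1, and the flush is deferred to the point where the page is actually mapped in (update_mmu_cache() below). Roughly, with hypothetical helper names:

/* Sketch of the deferred-flush invariant, assuming:
 *   PG_arch_1 set   -> i-cache and d-cache agree for this page
 *   PG_arch_1 clear -> contents changed; flush before any execute
 */
static void mark_page_modified(struct page *pg)
{
	clear_bit(PG_arch_1, &pg->flags);	/* i-cache now stale */
}

static void make_page_coherent(struct page *pg, void *kaddr)
{
	if (!test_bit(PG_arch_1, &pg->flags)) {
		__flush_dcache_icache(kaddr);	/* writeback + invalidate */
		set_bit(PG_arch_1, &pg->flags);
	}
}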
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
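flush_icache_user_range() exists for callers such as ptrace, which modify another task's text through a kernel mapping and then need i-cache coherency for just the bytes written. A hedged sketch of such a caller; the function name is made up for illustration:

/* Illustrative only, modelled on access_process_vm()-style code. */
static void poke_user_text(struct vm_area_struct *vma, struct page *page,
			   unsigned long addr, void *buf, int len)
{
	char *maddr = kmap(page);

	memcpy(maddr + (addr & ~PAGE_MASK), buf, len);
	kunmap(page);
	flush_icache_user_range(vma, page, addr, len);
}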
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux PTE.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t pte)
+{
+	/* handle i-cache coherency */
+	unsigned long pfn = pte_pfn(pte);
+
+	if (pfn_valid(pfn)) {
+		struct page *page = pfn_to_page(pfn);
+		if (!PageReserved(page)
+		    && !test_bit(PG_arch_1, &page->flags)) {
+			__flush_dcache_icache((void *) address);
+			set_bit(PG_arch_1, &page->flags);
+		}
+	}
+
+#ifdef CONFIG_PPC_STD_MMU
+	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
+	if (Hash != 0 && pte_young(pte)) {
+		struct mm_struct *mm;
+		pmd_t *pmd;
+
+		mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
+		pmd = pmd_offset(pgd_offset(mm, address), address);
+		if (!pmd_none(*pmd))
+			add_hash_page(mm->context, address, pmd_val(*pmd));
+	}
+#endif
+}
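For reference, the generic fault path that reaches the new function looks roughly like this (simplified sketch, not the exact 2.5 call chain):

/*
 * handle_mm_fault()
 *   -> handle_pte_fault()           installs or updates the linux PTE
 *   -> update_mmu_cache(vma, addr, pte)
 *        1. if PG_arch_1 is clear, flush the d-cache/i-cache pair for
 *           the page and set the bit (i-cache coherency);
 *        2. on hash-table MMUs, preload the HPTE so the first access
 *           does not take the otherwise-inevitable hash-miss fault.
 */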
	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a PTE in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux PTE.
- */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t pte)
-{
-	struct mm_struct *mm;
-	pmd_t *pmd;
-	static int nopreload;
-
-	if (Hash == 0 || nopreload)
-		return;
-	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-	if (!pte_young(pte))
-		return;
-	mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
-	pmd = pmd_offset(pgd_offset(mm, address), address);
-	if (!pmd_none(*pmd))
-		add_hash_page(mm->context, address, pmd_val(*pmd));
-}
#define flush_cache_range(vma, a, b) do { } while (0)
#define flush_cache_page(vma, p) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
extern void flush_dcache_page(struct page *page);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
	struct page *page, unsigned long addr, int len);
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{ _tlbia(); }
-#define update_mmu_cache(vma, addr, pte) do { } while (0)
#elif defined(CONFIG_8xx)
#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{ __tlbia(); }
-#define update_mmu_cache(vma, addr, pte) do { } while (0)
#else /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif
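The removals above drop the per-family no-op update_mmu_cache() stubs: since the function now also handles i-cache coherency, every CPU family must call the single real implementation added in the init.c hunk. Roughly what compiles in per family (illustrative summary, keyed off CONFIG_PPC_STD_MMU as above):

/*
 *   4xx/8xx (CONFIG_PPC_STD_MMU unset): PG_arch_1 i-cache flush only
 *   6xx/7xx/7xxx (hash MMU):            i-cache flush + HPTE preload
 *                                       (when Hash != 0 && pte_young())
 */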
/*
{
}
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
#endif /* _PPC_TLBFLUSH_H */
#endif /*__KERNEL__ */