Another processor could be walking the page table in the middle of the
PTE page to be freed. Synchronize with hash_page using the lock.
.comm mmu_hash_lock,4
#endif /* CONFIG_SMP */
+/*
+ * Sync CPUs with hash_page taking & releasing the hash
+ * table lock
+ */
+#ifdef CONFIG_SMP
+ .text
+_GLOBAL(hash_page_sync)
+ /* r8 = &mmu_hash_lock */
+ lis r8,mmu_hash_lock@h
+ ori r8,r8,mmu_hash_lock@l
+ /* r0 = lock token (0x0fff0000); presumably the same owner value
+  * hash_page writes while it holds the lock -- TODO confirm against
+  * the hash_page locking code */
+ lis r0,0x0fff
+ b 10f
+ /* busy-wait with a plain load until the lock word reads zero,
+  * to avoid hammering the reservation */
+11: lwz r6,0(r8)
+ cmpwi 0,r6,0
+ bne 11b
+ /* atomic test-and-set: reservation load, re-check, conditional store */
+10: lwarx r6,0,r8
+ cmpwi 0,r6,0
+ bne- 11b
+ stwcx. r0,0,r8
+ bne- 10b
+ /* isync: acquire barrier -- nothing executes ahead of taking the lock */
+ isync
+ /* eieio: order prior stores before the releasing store below */
+ eieio
+ li r0,0
+ /* release: clear the lock word */
+ stw r0,0(r8)
+ blr
+#endif
+
/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
extern char etext[], _stext[];
+#ifdef CONFIG_SMP
+extern void hash_page_sync(void);
+#endif
+
#ifdef HAVE_BATS
extern unsigned long v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(unsigned long pa);
void pte_free_kernel(pte_t *pte)
{
+#ifdef CONFIG_SMP
+	/*
+	 * Rendezvous with hash_page: another CPU may still be walking
+	 * this PTE page; taking/releasing the hash table lock in
+	 * hash_page_sync() ensures it has finished before we free.
+	 */
+	hash_page_sync();
+#endif
	free_page((unsigned long)pte);
}
void pte_free(struct page *pte)
{
+#ifdef CONFIG_SMP
+	/*
+	 * Same synchronization as pte_free_kernel: wait out any CPU
+	 * still mid-walk in hash_page before releasing the page.
+	 */
+	hash_page_sync();
+#endif
	__free_page(pte);
}