 static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
 {
 	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
+	unsigned long nr;
+
 	tlb->mm = mm;
 	tlb->freed = 0;
-	/* Use fast mode if there is only one user of this mm (this process) */
-	tlb->nr = (atomic_read(&(mm)->mm_users) == 1) ? ~0UL : 0UL;
+
+	/* Use fast mode if this mm only exists on this CPU */
+	nr = ~0UL;
+#ifdef CONFIG_SMP
+	if (mm->cpu_vm_mask != (1UL << smp_processor_id()))
+		nr = 0UL;
+#endif
+	tlb->nr = nr;
 	return tlb;
 }
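The nr field does double duty here: ~0UL marks the gather as being in fast mode, anything else counts batched pages. Below is a minimal, stand-alone sketch of that split; the names (struct gather, gather_remove_page(), the stub free/flush helpers) are illustrative, not the kernel's mmu_gather API. In fast mode a page can be freed as soon as its PTE is cleared, because no other CPU can hold a stale TLB entry for this mm; otherwise pages must be batched and remote TLBs shot down before the memory is reused.

#include <stddef.h>

#define GATHER_BATCH	16
#define GATHER_FAST	(~0UL)	/* sentinel: fast mode, free immediately */

struct gather {
	unsigned long	nr;	/* GATHER_FAST, or count of batched pages */
	void		*pages[GATHER_BATCH];
};

static void free_page_now(void *page)    { /* stub: return page to allocator */ }
static void shoot_down_remote_tlbs(void) { /* stub: cross-CPU TLB invalidate */ }

static void gather_flush(struct gather *tlb)
{
	unsigned long i;

	/* Another CPU may still have stale translations cached, so
	 * invalidate the TLBs before the pages can be reused. */
	shoot_down_remote_tlbs();
	for (i = 0; i < tlb->nr; i++)
		free_page_now(tlb->pages[i]);
	tlb->nr = 0;
}

static void gather_remove_page(struct gather *tlb, void *page)
{
	if (tlb->nr == GATHER_FAST) {
		/* The mm lives only on this CPU: nobody else can hold
		 * a stale TLB entry, so free the page at once. */
		free_page_now(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr == GATHER_BATCH)
		gather_flush(tlb);
}

Encoding the mode as a sentinel in nr rather than a separate flag keeps the hot-path test to a single comparison, which is presumably why the patch computes nr up front. The next hunk applies the gather to exit_mmap() and reorders the VMA teardown around it.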
 	release_segments(mm);
 	spin_lock(&mm->page_table_lock);
-	mpnt = mm->mmap;
-	mm->mmap = mm->mmap_cache = NULL;
-	mm->mm_rb = RB_ROOT;
-	mm->rss = 0;
-	mm->total_vm = 0;
-	mm->locked_vm = 0;
 	tlb = tlb_gather_mmu(mm);
 	flush_cache_mm(mm);
+	mpnt = mm->mmap;
 	while (mpnt) {
-		struct vm_area_struct * next = mpnt->vm_next;
 		unsigned long start = mpnt->vm_start;
 		unsigned long end = mpnt->vm_end;
-		if (mpnt->vm_ops) {
-			if (mpnt->vm_ops->close)
-				mpnt->vm_ops->close(mpnt);
-		}
 		mm->map_count--;
 		remove_shared_vm_struct(mpnt);
 		unmap_page_range(tlb, mpnt, start, end);
-		if (mpnt->vm_file)
-			fput(mpnt->vm_file);
-		kmem_cache_free(vm_area_cachep, mpnt);
-		mpnt = next;
+		mpnt = mpnt->vm_next;
 	}
 	/* This is just debugging */
 	if (mm->map_count)
 		BUG();
 	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
 	tlb_finish_mmu(tlb, FIRST_USER_PGD_NR*PGDIR_SIZE, USER_PTRS_PER_PGD*PGDIR_SIZE);
+
+	mpnt = mm->mmap;
+	mm->mmap = mm->mmap_cache = NULL;
+	mm->mm_rb = RB_ROOT;
+	mm->rss = 0;
+	mm->total_vm = 0;
+	mm->locked_vm = 0;
+
 	spin_unlock(&mm->page_table_lock);
+
+	/*
+	 * Walk the list again, actually closing and freeing it
+	 * without holding any MM locks.
+	 */
+	while (mpnt) {
+		struct vm_area_struct * next = mpnt->vm_next;
+		if (mpnt->vm_ops) {
+			if (mpnt->vm_ops->close)
+				mpnt->vm_ops->close(mpnt);
+		}
+		if (mpnt->vm_file)
+			fput(mpnt->vm_file);
+		kmem_cache_free(vm_area_cachep, mpnt);
+		mpnt = next;
+	}
 }
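The point of the two-pass rewrite is that vm_ops->close() and fput() can sleep (the final fput() of a file can end up doing real work in the filesystem), which must not happen while a spinlock is held. So the patch only unlinks and unmaps under mm->page_table_lock and runs the callbacks afterwards. Here is a minimal user-space sketch of the same "detach under the lock, destroy outside it" pattern, assuming a hypothetical node list guarded by a pthread mutex:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical resource list; "close" stands in for callbacks like
 * vm_ops->close()/fput() that may sleep or take other locks. */
struct node {
	struct node	*next;
	void		(*close)(struct node *);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void teardown(void)
{
	struct node *mpnt, *next;

	/* The lock only guards the links: detach the whole list while
	 * holding it, so concurrent walkers see either the full list
	 * or an empty one, never a half-destroyed node. */
	pthread_mutex_lock(&list_lock);
	mpnt = list_head;
	list_head = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Now run the potentially blocking destructors lock-free. */
	while (mpnt) {
		next = mpnt->next;
		if (mpnt->close)
			mpnt->close(mpnt);
		free(mpnt);
		mpnt = next;
	}
}

A side benefit in exit_mmap() is that the page_table_lock hold time shrinks: only pointer manipulation and the unmap itself remain inside the critical section.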
 /* Insert vm structure into process list sorted by address