From: Andrew Morton
Date: Sat, 10 Aug 2002 09:44:47 +0000 (-0700)
Subject: [PATCH] sync get_user_pages with 2.4
X-Git-Tag: v2.5.31~3^2~2
X-Git-Url: http://git.neil.brown.name/?a=commitdiff_plain;h=6df888f672d95369572a6d813790ab9afdf0cec6;p=history.git

[PATCH] sync get_user_pages with 2.4

Forward port of the get_user_pages() change from 2.4.

- If the vma is marked VM_IO, fail the map.

  This prevents the kernel deadlocks which occur when applications that
  have frame buffers mapped try to dump core.  It also prevents a kernel
  oops when a debugger is attached to a process which has an IO mmap.

- Check that the mapped page is inside mem_map[] (pfn_valid).

- Inline follow_page() and remove the preempt_disable()s.  It has only a
  single callsite and is called under the page_table_lock spinlock.

(A caller-side usage sketch is appended after the diff.)
---

diff --git a/mm/memory.c b/mm/memory.c
index 83b3a9915689..f1c4f9c2dcbd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -429,9 +429,11 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
 }
 
 /*
- * Do a quick page-table lookup for a single page.
+ * Do a quick page-table lookup for a single page.
+ * mm->page_table_lock must be held.
  */
-static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
+static inline struct page *
+follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -446,19 +448,14 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
 	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		goto out;
 
-	preempt_disable();
 	ptep = pte_offset_map(pmd, address);
-	if (!ptep) {
-		preempt_enable();
+	if (!ptep)
 		goto out;
-	}
 
 	pte = *ptep;
 	pte_unmap(ptep);
-	preempt_enable();
 	if (pte_present(pte)) {
-		if (!write ||
-		    (pte_write(pte) && pte_dirty(pte))) {
+		if (!write || (pte_write(pte) && pte_dirty(pte))) {
 			pfn = pte_pfn(pte);
 			if (pfn_valid(pfn))
 				return pfn_to_page(pfn);
@@ -475,13 +472,17 @@ out:
  * with IO-aperture pages in kiobufs.
  */
 
-static inline struct page * get_page_map(struct page *page)
+static inline struct page *get_page_map(struct page *page)
 {
+	if (!pfn_valid(page_to_pfn(page)))
+		return 0;
 	return page;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int flags;
@@ -493,14 +494,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 	i = 0;
-	
 
 	do {
 		struct vm_area_struct *	vma;
 
 		vma = find_extend_vma(mm, start);
 
-		if ( !vma || !(flags & vma->vm_flags) )
+		if (!vma || (pages && (vma->vm_flags & VM_IO))
+				|| !(flags & vma->vm_flags))
 			return i ? : -EFAULT;
 
 		spin_lock(&mm->page_table_lock);
@@ -508,7 +509,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 			struct page *map;
 			while (!(map = follow_page(mm, start, write))) {
 				spin_unlock(&mm->page_table_lock);
-				switch (handle_mm_fault(mm, vma, start, write)) {
+				switch (handle_mm_fault(mm,vma,start,write)) {
 				case VM_FAULT_MINOR:
 					tsk->min_flt++;
 					break;
@@ -526,11 +527,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 			}
 			if (pages) {
 				pages[i] = get_page_map(map);
-				/* FIXME: call the correct function,
-				 * depending on the type of the found page
-				 */
-				if (pages[i])
-					page_cache_get(pages[i]);
+				if (!pages[i]) {
+					spin_unlock(&mm->page_table_lock);
+					while (i--)
+						page_cache_release(pages[i]);
+					i = -EFAULT;
+					goto out;
+				}
+				page_cache_get(pages[i]);
 			}
 			if (vmas)
 				vmas[i] = vma;
@@ -540,6 +544,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
 		} while(len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
 	} while(len);
+out:
 	return i;
 }
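
Not part of the patch: a minimal caller-side sketch, for illustration only, of
how get_user_pages() is typically used with the 2.5 interface shown above (pin
under mm->mmap_sem, use the pages, drop the references with
page_cache_release()).  The helper pin_one_page() is hypothetical.  With this
change, a lookup that lands in a VM_IO mapping (e.g. a mapped frame buffer) or
on a page outside mem_map[] now fails with -EFAULT instead of handing back an
unusable struct page.

#include <linux/mm.h>		/* get_user_pages(), struct page */
#include <linux/sched.h>	/* current, struct mm_struct */
#include <linux/pagemap.h>	/* page_cache_get(), page_cache_release() */

/* Hypothetical helper: pin a single user page for write access. */
static int pin_one_page(unsigned long uaddr, struct page **page)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);	/* get_user_pages() walks the vma list */
	ret = get_user_pages(current, mm, uaddr & PAGE_MASK,
				1,	/* len: one page */
				1,	/* write */
				0,	/* force */
				page, NULL);
	up_read(&mm->mmap_sem);

	/* A VM_IO vma, an invalid pfn or an unresolved fault all end up here. */
	return (ret == 1) ? 0 : -EFAULT;
}

/*
 * The caller then uses *page (kmap() it, copy data, etc.) and must drop the
 * reference taken by get_user_pages() when it is done:
 *
 *	page_cache_release(*page);
 */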