git.neil.brown.name Git - history.git/commitdiff
[PATCH] rmap 3 arches + mapping_mapped
authorAndrew Morton <akpm@osdl.org>
Mon, 12 Apr 2004 07:54:31 +0000 (00:54 -0700)
committerLinus Torvalds <torvalds@ppc970.osdl.org>
Mon, 12 Apr 2004 07:54:31 +0000 (00:54 -0700)
From: Hugh Dickins <hugh@veritas.com>

Some arches refer to page->mapping for their dcache flushing: use
page_mapping(page) for safety, to avoid confusion on anon pages, which will
store a different pointer there - though in most cases flush_dcache_page is
being applied to pagecache pages.

arm has a useful mapping_mapped macro: move that to generic, and add
mapping_writably_mapped, to avoid explicit list_empty checks on i_mmap and
i_mmap_shared in several places.

Very tempted to add page_mapped(page) tests, perhaps along with the
mapping_writably_mapped tests in do_generic_mapping_read and
do_shmem_file_read, to cut down on wasted flush_dcache effort; but the
serialization is not obvious, too unsafe to do in a hurry.

14 files changed:
arch/arm/mm/fault-armv.c
arch/mips/mm/cache.c
arch/parisc/kernel/cache.c
arch/sparc64/kernel/smp.c
arch/sparc64/mm/init.c
fs/locks.c
fs/xfs/linux/xfs_vnode.h
include/asm-arm/cacheflush.h
include/asm-parisc/cacheflush.h
include/asm-sh/pgalloc.h
include/linux/fs.h
mm/filemap.c
mm/shmem.c
mm/vmscan.c

index 7aa6398abdb04886989e3cc82a3d9e0e6d4f73a9..8c5ad6a4c2c0525c9735bcb9a03cf6e3bcd4b5e1 100644 (file)
@@ -191,7 +191,7 @@ void __flush_dcache_page(struct page *page)
 
        __cpuc_flush_dcache_page(page_address(page));
 
-       if (!page->mapping)
+       if (!page_mapping(page))
                return;
 
        /*
@@ -292,7 +292,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
        if (!pfn_valid(pfn))
                return;
        page = pfn_to_page(pfn);
-       if (page->mapping) {
+       if (page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
                if (dirty)
index d384be0cb00ec1a6b4e581b7c42496f0f6de822a..5c9e9855caa8df85de77811a7e67dc87c8f93969 100644 (file)
@@ -57,16 +57,13 @@ void flush_dcache_page(struct page *page)
 {
        unsigned long addr;
 
-       if (page->mapping &&
-           list_empty(&page->mapping->i_mmap) &&
-           list_empty(&page->mapping->i_mmap_shared)) {
+       if (page_mapping(page) && !mapping_mapped(page->mapping)) {
                SetPageDcacheDirty(page);
-
                return;
        }
 
        /*
-        * We could delay the flush for the !page->mapping case too.  But that
+        * We could delay the flush for the !page_mapping case too.  But that
         * case is for exec env/arg pages and those are %99 certainly going to
         * get faulted into the tlb (and thus flushed) anyways.
         */
@@ -81,7 +78,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn, addr;
 
        pfn = pte_pfn(pte);
-       if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page->mapping) &&
+       if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
            Page_dcache_dirty(page)) {
                if (pages_do_alias((unsigned long)page_address(page),
                                   address & PAGE_MASK)) {
index a23bb15dc2f89edab8df6a3d53edf043e92bf789..ac36c927ab5bc219c6d7127877a70a6a73ba2741 100644 (file)
@@ -68,7 +68,7 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
        struct page *page = pte_page(pte);
 
-       if (VALID_PAGE(page) && page->mapping &&
+       if (VALID_PAGE(page) && page_mapping(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
 
                flush_kernel_dcache_page(page_address(page));
@@ -234,7 +234,7 @@ void __flush_dcache_page(struct page *page)
 
        flush_kernel_dcache_page(page_address(page));
 
-       if (!page->mapping)
+       if (!page_mapping(page))
                return;
        /* check shared list first if it's not empty...it's usually
         * the shortest */
index ce479585c484934c3db0f39493e8e4f5d1338aa2..88fe647652f48700ef001925510304f1a7016e57 100644 (file)
@@ -671,9 +671,9 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
 #if (L1DCACHE_SIZE > PAGE_SIZE)
        __flush_dcache_page(page->virtual,
                            ((tlb_type == spitfire) &&
-                            page->mapping != NULL));
+                            page_mapping(page) != NULL));
 #else
-       if (page->mapping != NULL &&
+       if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page->virtual));
 #endif
@@ -694,7 +694,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                if (tlb_type == spitfire) {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_spitfire);
-                       if (page->mapping != NULL)
+                       if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                        spitfire_xcall_deliver(data0,
                                               __pa(page->virtual),
@@ -727,7 +727,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
                goto flush_self;
        if (tlb_type == spitfire) {
                data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-               if (page->mapping != NULL)
+               if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
                spitfire_xcall_deliver(data0,
                                       __pa(page->virtual),
index 690120faf6c87a4f4da5414d90efcb71f284ac47..81e68ee52f8d5ef915fb6605427ea85dd073baa6 100644 (file)
@@ -139,9 +139,9 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #if (L1DCACHE_SIZE > PAGE_SIZE)
        __flush_dcache_page(page->virtual,
                            ((tlb_type == spitfire) &&
-                            page->mapping != NULL));
+                            page_mapping(page) != NULL));
 #else
-       if (page->mapping != NULL &&
+       if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page->virtual));
 #endif
@@ -203,7 +203,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
        pfn = pte_pfn(pte);
        if (pfn_valid(pfn) &&
-           (page = pfn_to_page(pfn), page->mapping) &&
+           (page = pfn_to_page(pfn), page_mapping(page)) &&
            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
 
@@ -227,9 +227,7 @@ void flush_dcache_page(struct page *page)
        int dirty = test_bit(PG_dcache_dirty, &page->flags);
        int dirty_cpu = dcache_dirty_cpu(page);
 
-       if (page->mapping &&
-           list_empty(&page->mapping->i_mmap) &&
-           list_empty(&page->mapping->i_mmap_shared)) {
+       if (page_mapping(page) && !mapping_mapped(page->mapping)) {
                if (dirty) {
                        if (dirty_cpu == smp_processor_id())
                                return;
@@ -237,7 +235,7 @@ void flush_dcache_page(struct page *page)
                }
                set_dcache_dirty(page);
        } else {
-               /* We could delay the flush for the !page->mapping
+               /* We could delay the flush for the !page_mapping
                 * case too.  But that case is for exec env/arg
                 * pages and those are %99 certainly going to get
                 * faulted into the tlb (and thus flushed) anyways.
@@ -279,7 +277,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
                        if (!pfn_valid(pfn))
                                continue;
                        page = pfn_to_page(pfn);
-                       if (PageReserved(page) || !page->mapping)
+                       if (PageReserved(page) || !page_mapping(page))
                                continue;
                        pgaddr = (unsigned long) page_address(page);
                        uaddr = address + offset;
index c6a6010a7218599d362f2df8a2cfcfcc44163ccf..da593493962c037ea03bca134ab3e1ef39a6b985 100644 (file)
@@ -1453,13 +1453,10 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
         * and shared.
         */
        if (IS_MANDLOCK(inode) &&
-           (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
-               struct address_space *mapping = filp->f_mapping;
-
-               if (!list_empty(&mapping->i_mmap_shared)) {
-                       error = -EAGAIN;
-                       goto out;
-               }
+           (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+           mapping_writably_mapped(filp->f_mapping)) {
+               error = -EAGAIN;
+               goto out;
        }
 
        error = flock_to_posix_lock(filp, file_lock, &flock);
@@ -1591,13 +1588,10 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
         * and shared.
         */
        if (IS_MANDLOCK(inode) &&
-           (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
-               struct address_space *mapping = filp->f_mapping;
-
-               if (!list_empty(&mapping->i_mmap_shared)) {
-                       error = -EAGAIN;
-                       goto out;
-               }
+           (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+           mapping_writably_mapped(filp->f_mapping)) {
+               error = -EAGAIN;
+               goto out;
        }
 
        error = flock64_to_posix_lock(filp, file_lock, &flock);
index 6736f7aa2b975215000c4bc334e3d253d6150abb..af0b65fe5136e507637707ba1b9e3586133b2123 100644 (file)
@@ -596,9 +596,7 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
 /*
  * Some useful predicates.
  */
-#define VN_MAPPED(vp)  \
-       (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap)) || \
-       (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
+#define VN_MAPPED(vp)  mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
 #define VN_CACHED(vp)  (LINVFS_GET_IP(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)   mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
                                        PAGECACHE_TAG_DIRTY)
index 6968e8e90c3ed4a0d766c65f0f03e28261f39104..91b16cc3f5028270dee218efee8fc02e07ed957e 100644 (file)
@@ -283,23 +283,19 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
  * flush_dcache_page is used when the kernel has written to the page
  * cache page at virtual address page->virtual.
  *
- * If this page isn't mapped (ie, page->mapping = NULL), or it has
- * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
- * then we _must_ always clean + invalidate the dcache entries associated
- * with the kernel mapping.
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
  *
  * Otherwise we can defer the operation, and clean the cache when we are
  * about to change to user space.  This is the same method as used on SPARC64.
  * See update_mmu_cache for the user space part.
  */
-#define mapping_mapped(map)    (!list_empty(&(map)->i_mmap) || \
-                                !list_empty(&(map)->i_mmap_shared))
-
 extern void __flush_dcache_page(struct page *);
 
 static inline void flush_dcache_page(struct page *page)
 {
-       if (page->mapping && !mapping_mapped(page->mapping))
+       if (page_mapping(page) && !mapping_mapped(page->mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
                __flush_dcache_page(page);
index 52b0c6a96aea582675fff5df9e3b13938671e0db..7a77986e3738ebf3512d98231c957d358e70aef4 100644 (file)
@@ -69,8 +69,7 @@ extern void __flush_dcache_page(struct page *page);
 
 static inline void flush_dcache_page(struct page *page)
 {
-       if (page->mapping && list_empty(&page->mapping->i_mmap) &&
-                       list_empty(&page->mapping->i_mmap_shared)) {
+       if (page_mapping(page) && !mapping_mapped(page->mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                __flush_dcache_page(page);
index bd02728a69d5637508b3368c079db9df11f53e21..4584c9e37a759ec36ed442fa16c96d3206652591 100644 (file)
@@ -101,8 +101,8 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
-                       if (!page->mapping
-                           || list_empty(&page->mapping->i_mmap_shared))
+                       if (!page_mapping(page) ||
+                           !mapping_writably_mapped(page->mapping))
                                __clear_bit(PG_mapped, &page->flags);
                }
        }
index c7f0052b4abdb3841ec084c4c59599941549b3e0..3d7c320d675e67ab772436a32817173c6fd93eff 100644 (file)
@@ -373,6 +373,26 @@ struct block_device {
 
 int mapping_tagged(struct address_space *mapping, int tag);
 
+/*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+       return  !list_empty(&mapping->i_mmap) ||
+               !list_empty(&mapping->i_mmap_shared);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * marks vma as VM_SHARED if it is shared, and the file was opened for
+ * writing i.e. vma may be mprotected writable even if now readonly.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+       return  !list_empty(&mapping->i_mmap_shared);
+}
+
 /*
  * Use sequence counter to get consistent i_size on 32-bit processors.
  */
index ca8fc1148296d80c8cf49970ba07d098916a6747..c83a97b5aed752d927a00ab4aa6c9c7515a37a92 100644 (file)
@@ -660,7 +660,7 @@ page_ok:
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
-               if (!list_empty(&mapping->i_mmap_shared))
+               if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
 
                /*
index 345e04cb0f6c38ca010802b69ff9521548baa19a..31001df23c3dfd68a53014a4141be8da964c0b99 100644 (file)
@@ -1340,7 +1340,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                         * virtual addresses, take care about potential aliasing
                         * before reading the page on the kernel side.
                         */
-                       if (!list_empty(&mapping->i_mmap_shared))
+                       if (mapping_writably_mapped(mapping))
                                flush_dcache_page(page);
                        /*
                         * Mark the page accessed if we read the beginning.
index 34151f9aed30a8c37e8c57a55ea38c6e3e5f585c..55d8feae21a285c38c8fc33a9b0b123c655dddf9 100644 (file)
@@ -190,13 +190,8 @@ static inline int page_mapping_inuse(struct page *page)
        if (!mapping)
                return 0;
 
-       /* File is mmap'd by somebody. */
-       if (!list_empty(&mapping->i_mmap))
-               return 1;
-       if (!list_empty(&mapping->i_mmap_shared))
-               return 1;
-
-       return 0;
+       /* File is mmap'd by somebody? */
+       return mapping_mapped(mapping);
 }
 
 static inline int is_page_cache_freeable(struct page *page)