__cpuc_flush_dcache_page(page_address(page));
- if (!page->mapping)
+ if (!page_mapping(page))
return;
/*
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- if (page->mapping) {
+ if (page_mapping(page)) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
{
unsigned long addr;
- if (page->mapping &&
- list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
SetPageDcacheDirty(page);
-
return;
}
/*
- * We could delay the flush for the !page->mapping case too. But that
+ * We could delay the flush for the !page_mapping case too. But that
* case is for exec env/arg pages and those are 99% certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
unsigned long pfn, addr;
pfn = pte_pfn(pte);
- if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page->mapping) &&
+ if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
Page_dcache_dirty(page)) {
if (pages_do_alias((unsigned long)page_address(page),
address & PAGE_MASK)) {
{
struct page *page = pte_page(pte);
- if (VALID_PAGE(page) && page->mapping &&
+ if (VALID_PAGE(page) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page(page_address(page));
flush_kernel_dcache_page(page_address(page));
- if (!page->mapping)
+ if (!page_mapping(page))
return;
/* check shared list first if it's not empty...it's usually
* the shortest */
#if (L1DCACHE_SIZE > PAGE_SIZE)
__flush_dcache_page(page->virtual,
((tlb_type == spitfire) &&
- page->mapping != NULL));
+ page_mapping(page) != NULL));
#else
- if (page->mapping != NULL &&
+ if (page_mapping(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page->virtual));
#endif
if (tlb_type == spitfire) {
data0 =
((u64)&xcall_flush_dcache_page_spitfire);
- if (page->mapping != NULL)
+ if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(page->virtual),
goto flush_self;
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
- if (page->mapping != NULL)
+ if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(page->virtual),
#if (L1DCACHE_SIZE > PAGE_SIZE)
__flush_dcache_page(page->virtual,
((tlb_type == spitfire) &&
- page->mapping != NULL));
+ page_mapping(page) != NULL));
#else
- if (page->mapping != NULL &&
+ if (page_mapping(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page->virtual));
#endif
pfn = pte_pfn(pte);
if (pfn_valid(pfn) &&
- (page = pfn_to_page(pfn), page->mapping) &&
+ (page = pfn_to_page(pfn), page_mapping(page)) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
int dirty = test_bit(PG_dcache_dirty, &page->flags);
int dirty_cpu = dcache_dirty_cpu(page);
- if (page->mapping &&
- list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
if (dirty) {
if (dirty_cpu == smp_processor_id())
return;
}
set_dcache_dirty(page);
} else {
- /* We could delay the flush for the !page->mapping
+ /* We could delay the flush for the !page_mapping
* case too. But that case is for exec env/arg
* pages and those are 99% certainly going to get
* faulted into the tlb (and thus flushed) anyways.
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
- if (PageReserved(page) || !page->mapping)
+ if (PageReserved(page) || !page_mapping(page))
continue;
pgaddr = (unsigned long) page_address(page);
uaddr = address + offset;
* and shared.
*/
if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
- struct address_space *mapping = filp->f_mapping;
-
- if (!list_empty(&mapping->i_mmap_shared)) {
- error = -EAGAIN;
- goto out;
- }
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+ mapping_writably_mapped(filp->f_mapping)) {
+ error = -EAGAIN;
+ goto out;
}
error = flock_to_posix_lock(filp, file_lock, &flock);
* and shared.
*/
if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
- struct address_space *mapping = filp->f_mapping;
-
- if (!list_empty(&mapping->i_mmap_shared)) {
- error = -EAGAIN;
- goto out;
- }
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+ mapping_writably_mapped(filp->f_mapping)) {
+ error = -EAGAIN;
+ goto out;
}
error = flock64_to_posix_lock(filp, file_lock, &flock);
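For context (not part of this patch): the condition above combines three things. IS_MANDLOCK() is true only on filesystems mounted with -o mand, the mode test selects files carrying the traditional mandatory-locking marker (setgid set, group-execute clear), and the new mapping_writably_mapped() check refuses the lock with -EAGAIN when the file already has shared mappings, since a mandatory lock cannot be enforced against writes done through an existing mapping. A minimal userspace sketch of the mode convention only; make_mandlock_candidate() is a hypothetical name, error handling omitted:

#include <sys/stat.h>

/*
 * Hypothetical helper, not from this patch: flag an existing file as a
 * mandatory-locking candidate.  This sets exactly the combination tested
 * above -- setgid on, group-execute off.  The filesystem must also be
 * mounted with -o mand for IS_MANDLOCK() to be true.
 */
static int make_mandlock_candidate(const char *path, mode_t base_mode)
{
	return chmod(path, (base_mode & ~(mode_t)S_IXGRP) | S_ISGID);
}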
/*
* Some useful predicates.
*/
-#define VN_MAPPED(vp) \
- (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap)) || \
- (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
+#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages)
#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
PAGECACHE_TAG_DIRTY)
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page->mapping = NULL), or it has
- * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
- * then we _must_ always clean + invalidate the dcache entries associated
- * with the kernel mapping.
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
-#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
- !list_empty(&(map)->i_mmap_shared))
-
extern void __flush_dcache_page(struct page *);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && !mapping_mapped(page->mapping))
+ if (page_mapping(page) && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
__flush_dcache_page(page);
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (!page->mapping
- || list_empty(&page->mapping->i_mmap_shared))
+ if (!page_mapping(page) ||
+ !mapping_writably_mapped(page->mapping))
__clear_bit(PG_mapped, &page->flags);
}
}
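The per-arch fragments above are all instances of one deferred-flush protocol: flush_dcache_page() (the write side, shown in the inlines above) marks the page PG_dcache_dirty instead of flushing when no user mapping can see it yet, and update_mmu_cache() (the map side) pays the flush off when a user pte is installed. A rough arch-neutral sketch of the map side, using only names that appear in the fragments above and omitting the cache-alias tests (e.g. pages_do_alias()) that real implementations add; illustrative only, not code from the patch:

/*
 * Sketch: called when a pte for this page is installed for userspace.
 * If the kernel wrote the page while it had no user mappings, the flush
 * was deferred by flush_dcache_page(); perform it now, before the user
 * alias becomes visible.
 */
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page);
}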
int mapping_tagged(struct address_space *mapping, int tag);
+/*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+ return !list_empty(&mapping->i_mmap) ||
+ !list_empty(&mapping->i_mmap_shared);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * marks vma as VM_SHARED if it is shared, and the file was opened for
+ * writing, i.e. vma may be mprotected writable even if now readonly.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+ return !list_empty(&mapping->i_mmap_shared);
+}
+
/*
* Use sequence counter to get consistent i_size on 32-bit processors.
*/
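A note on why mapping_writably_mapped(), added above, is deliberately conservative: it looks only at i_mmap_shared, not at current page protections, because a shared mapping created read-only from a file opened for writing can be flipped writable later with mprotect(), without the address_space being told. A minimal userspace illustration (not part of this patch; the file name is made up, error checking omitted, 4 KiB page size assumed):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/scratch", O_RDWR | O_CREAT, 0600);
	char *p;

	ftruncate(fd, 4096);

	/* Shared mapping, currently read-only: it still goes on the
	 * mapping's i_mmap_shared list. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	/* Permitted because the file was opened O_RDWR: the vma becomes
	 * writable with no new mmap() visible to the address_space. */
	mprotect(p, 4096, PROT_READ | PROT_WRITE);
	p[0] = 'x';	/* dirties the page cache through the mapping */

	munmap(p, 4096);
	close(fd);
	return 0;
}

This is the situation the callers shown elsewhere in the patch (the mandatory-locking check and the read-side flush_dcache_page() calls) must assume whenever i_mmap_shared is non-empty.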
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
- if (!list_empty(&mapping->i_mmap_shared))
+ if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
- if (!list_empty(&mapping->i_mmap_shared))
+ if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* Mark the page accessed if we read the beginning.
if (!mapping)
return 0;
- /* File is mmap'd by somebody. */
- if (!list_empty(&mapping->i_mmap))
- return 1;
- if (!list_empty(&mapping->i_mmap_shared))
- return 1;
-
- return 0;
+ /* File is mmap'd by somebody? */
+ return mapping_mapped(mapping);
}
static inline int is_page_cache_freeable(struct page *page)