git.neil.brown.name Git - history.git/commitdiff
[PATCH] Writeback page range hint
authorAndrew Morton <akpm@osdl.org>
Mon, 23 Aug 2004 05:59:27 +0000 (22:59 -0700)
committerLinus Torvalds <torvalds@ppc970.osdl.org>
Mon, 23 Aug 2004 05:59:27 +0000 (22:59 -0700)
Modify mpage_writepages to optionally only write back dirty pages within a
specified range in a file (as in the case of O_SYNC).  Cheat a little to avoid
changes to prototypes of aops - just put the <start, end> hint into the
writeback_control struct instead.  If <start, end> are not set, then default
to writing back all the mapping's dirty pages.

Signed-off-by: Suparna Bhattacharya <suparna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
fs/mpage.c
include/linux/writeback.h

index 71c7ca3a455de3687f1147152cf0f76b87f3a1f9..7dfc8c53b350a18b5c879c35afe5d3bb1a973b1f 100644 (file)
@@ -627,7 +627,9 @@ mpage_writepages(struct address_space *mapping,
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
+       pgoff_t end = -1;               /* Inclusive */
        int scanned = 0;
+       int is_range = 0;
 
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
@@ -645,9 +647,16 @@ mpage_writepages(struct address_space *mapping,
                index = 0;                        /* whole-file sweep */
                scanned = 1;
        }
+       if (wbc->start || wbc->end) {
+               index = wbc->start >> PAGE_CACHE_SHIFT;
+               end = wbc->end >> PAGE_CACHE_SHIFT;
+               is_range = 1;
+               scanned = 1;
+       }
 retry:
        while (!done && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                                       PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
+                       PAGECACHE_TAG_DIRTY,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;
 
                scanned = 1;
@@ -664,10 +673,21 @@ retry:
 
                        lock_page(page);
 
+                       if (unlikely(page->mapping != mapping)) {
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       if (unlikely(is_range) && page->index > end) {
+                               done = 1;
+                               unlock_page(page);
+                               continue;
+                       }
+
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
 
-                       if (page->mapping != mapping || PageWriteback(page) ||
+                       if (PageWriteback(page) ||
                                        !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                continue;
@@ -706,7 +726,8 @@ retry:
                index = 0;
                goto retry;
        }
-       mapping->writeback_index = index;
+       if (!is_range)
+               mapping->writeback_index = index;
        if (bio)
                mpage_bio_submit(WRITE, bio);
        return ret;
index e4450070ac78529608e65cc27014e0d5be0f1eba..48d95e59230b7e46bb281bc7937e4a981246b942 100644 (file)
@@ -29,7 +29,9 @@ enum writeback_sync_modes {
 };
 
 /*
- * A control structure which tells the writeback code what to do
+ * A control structure which tells the writeback code what to do.  These are
+ * always on the stack, and hence need no locking.  They are always initialised
+ * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
        struct backing_dev_info *bdi;   /* If !NULL, only write back this
@@ -40,10 +42,19 @@ struct writeback_control {
        long nr_to_write;               /* Write this many pages, and decrement
                                           this for each page written */
        long pages_skipped;             /* Pages which were not written */
-       int nonblocking;                /* Don't get stuck on request queues */
-       int encountered_congestion;     /* An output: a queue is full */
-       int for_kupdate;                /* A kupdate writeback */
-       int for_reclaim;                /* Invoked from the page allocator */
+
+       /*
+        * For a_ops->writepages(): if start or end are non-zero then this is
+        * a hint that the filesystem need only write out the pages inside that
+        * byterange.  The byte at `end' is included in the writeout request.
+        */
+       loff_t start;
+       loff_t end;
+
+       int nonblocking:1;              /* Don't get stuck on request queues */
+       int encountered_congestion:1;   /* An output: a queue is full */
+       int for_kupdate:1;              /* A kupdate writeback */
+       int for_reclaim:1;              /* Invoked from the page allocator */
 };
 
 /*