git.neil.brown.name Git - history.git/commitdiff
[PATCH] remove write_mapping_buffers()
author Andrew Morton <akpm@digeo.com>
Sat, 5 Oct 2002 03:35:37 +0000 (20:35 -0700)
committer Russell King <rmk@flint.arm.linux.org.uk>
Sat, 5 Oct 2002 03:35:37 +0000 (20:35 -0700)
When the global buffer LRU was present, dirty ext2 indirect blocks were
automatically scheduled for writeback alongside their data.

I added write_mapping_buffers() to replace this - the idea was to
schedule the indirects close in time to the scheduling of their data.

It works OK for small-to-medium sized files but for large, linear writes
it doesn't work: the request queue is completely full of file data and
when we later come to scheduling the indirects, their neighbouring data
has already been written.

So writeback of really huge files tends to be a bit seeky.

So.  Kill it.  Will fix this problem by other means.

fs/buffer.c
fs/ext2/inode.c
fs/ext3/inode.c
include/linux/buffer_head.h

index d1da2c0ffac87d83e8cae9af64145197a0c5e13b..b00fb755781d6718b7883da6efc47404e48393d6 100644 (file)
@@ -735,81 +735,6 @@ int sync_mapping_buffers(struct address_space *mapping)
 }
 EXPORT_SYMBOL(sync_mapping_buffers);
 
-/**
- * write_mapping_buffers - Start writeout of a mapping's "associated" buffers.
- * @mapping - the mapping which wants those buffers written.
- *
- * Starts I/O against dirty buffers which are on @mapping->private_list.
- * Those buffers must be backed by @mapping->assoc_mapping.
- *
- * The private_list buffers generally contain filesystem indirect blocks.
- * The idea is that the filesystem can start I/O against the indirects at
- * the same time as running generic_writepages(), so the indirect's
- * I/O will be merged with the data.
- *
- * We sneakliy write the buffers in probable tail-to-head order.  This is
- * because generic_writepages() writes in probable head-to-tail
- * order.  If the file is so huge that the data or the indirects overflow
- * the request queue we will at least get some merging this way.
- *
- * Any clean+unlocked buffers are de-listed.  clean/locked buffers must be
- * left on the list for an fsync() to wait on.
- *
- * Couldn't think of a smart way of avoiding livelock, so chose the dumb
- * way instead.
- *
- * FIXME: duplicates fsync_inode_buffers() functionality a bit.
- */
-int write_mapping_buffers(struct address_space *mapping)
-{
-       spinlock_t *lock;
-       struct address_space *buffer_mapping;
-       unsigned nr_to_write;   /* livelock avoidance */
-       struct list_head *lh;
-       int ret = 0;
-
-       if (list_empty(&mapping->private_list))
-               goto out;
-
-       buffer_mapping = mapping->assoc_mapping;
-       lock = &buffer_mapping->private_lock;
-       spin_lock(lock);
-       nr_to_write = 0;
-       lh = mapping->private_list.next;
-       while (lh != &mapping->private_list) {
-               lh = lh->next;
-               nr_to_write++;
-       }
-       nr_to_write *= 2;       /* Allow for some late additions */
-
-       while (nr_to_write-- && !list_empty(&mapping->private_list)) {
-               struct buffer_head *bh;
-
-               bh = BH_ENTRY(mapping->private_list.prev);
-               list_del_init(&bh->b_assoc_buffers);
-               if (!buffer_dirty(bh) && !buffer_locked(bh))
-                       continue;
-               /* Stick it on the far end of the list. Order is preserved. */
-               list_add(&bh->b_assoc_buffers, &mapping->private_list);
-               if (test_set_buffer_locked(bh))
-                       continue;
-               get_bh(bh);
-               spin_unlock(lock);
-               if (test_clear_buffer_dirty(bh)) {
-                       bh->b_end_io = end_buffer_io_sync;
-                       submit_bh(WRITE, bh);
-               } else {
-                       unlock_buffer(bh);
-                       put_bh(bh);
-               }
-               spin_lock(lock);
-       }
-       spin_unlock(lock);
-out:
-       return ret;
-}
-EXPORT_SYMBOL(write_mapping_buffers);
-
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 {
        struct address_space *mapping = inode->i_mapping;
index 99627183120ece972b72ec11f44b13ea84b2725a..d27313d1dd106989322284849bfe12a0f21e37d5 100644 (file)
@@ -629,14 +629,7 @@ ext2_direct_IO(int rw, struct inode *inode, const struct iovec *iov,
 static int
 ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-       int ret;
-       int err;
-
-       ret = write_mapping_buffers(mapping);
-       err = mpage_writepages(mapping, wbc, ext2_get_block);
-       if (!ret)
-               ret = err;
-       return ret;
+       return mpage_writepages(mapping, wbc, ext2_get_block);
 }
 
 struct address_space_operations ext2_aops = {
index 978e9e60d070f45c6b512ec0aaba3466a9e4338d..5b2c49a9b34e34568b2bbac3fae3e0fcd02e1392 100644 (file)
@@ -1477,14 +1477,7 @@ struct address_space_operations ext3_aops = {
 static int
 ext3_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-       int ret;
-       int err;
-
-       ret = write_mapping_buffers(mapping);
-       err = mpage_writepages(mapping, wbc, ext3_get_block);
-       if (!ret)
-               ret = err;
-       return ret;
+       return mpage_writepages(mapping, wbc, ext3_get_block);
 }
 #endif
 
index dd8f1bfb150e607ad44730fcd7c6008c70b12f1a..7ad9fafbea0bfcdbc0ddc08b7162d5d392d2a778 100644 (file)
@@ -140,7 +140,6 @@ void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
 void buffer_insert_list(spinlock_t *lock,
                        struct buffer_head *, struct list_head *);
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int write_mapping_buffers(struct address_space *mapping);
 int inode_has_buffers(struct inode *);
 void invalidate_inode_buffers(struct inode *);
 int fsync_buffers_list(spinlock_t *lock, struct list_head *);