]> git.neil.brown.name Git - LaFS.git/commitdiff
Simplify iolocking in get_flushable
authorNeilBrown <neilb@suse.de>
Sat, 15 Aug 2009 07:09:20 +0000 (17:09 +1000)
committerNeilBrown <neilb@suse.de>
Sat, 15 Aug 2009 07:09:20 +0000 (17:09 +1000)
The difference between data and index blocks is not really supportable,
and we cannot just avoid waiting for some blocks.

But we cannot always wait for a full iowait, as blocks that have been
allocated to a cluster do not complete until the cluster is written,
and we don't want to wait for a cluster to be written, especially as
we are the thread that is supposed to do that.

So create an intermediate iowait which waits for the iolock to be dropped
or the block to be placed on a list.  Once it is on a list we can be
sure not to lose it.

So we wait while incorporation or truncation happens, but not while
writeout happens.

checkpoint.c
cluster.c
index.c
io.c
lafs.h

index baad30e3ea888dfc6fe9a4cf393ec9564c944ac9..686d2cce21d5f161417a72de3230ab0bf10dd2c4 100644 (file)
@@ -303,30 +303,20 @@ struct block *lafs_get_flushable(struct fs *fs, int phase)
                b = list_entry(fs->phase_leafs[phase].next, struct block, lru);
        else
                b = NULL;
-       if (b) {
+       if (b)
                /* the list counted a reference.  Now we hold it */
                list_del_init(&b->lru);
-
-               if (!test_bit(B_Index, &b->flags)) {
-                       if (test_and_set_bit(B_IOLock, &b->flags)) {
-                               /* someone else is invalidating this block,
-                                * so I can just skip it.  Better drop the ref.
-                                */
-                               spin_unlock(&fs->lock);
-                               putref(b, MKREF(leaf));
-                               goto retry;
-                       } else
-                               set_iolock_info(b);
-               }
-       }
        spin_unlock(&fs->lock);
-       if (b && test_bit(B_Index, &b->flags))
-               /* we couldn't iolock in the spinlock because there might
-                * be an intermediate incorporation happening that
-                * would block us.  So we get the lock now, waiting if
-                * needed.
+       if (b)
+               /* Need an iolock, but if the list gets put on another
+                * lru (like a cluster or back on leafs) then we lose
+                * interest.
                 */
-               lafs_iolock_block(b);
+               if (lafs_iolock_block_empty(b) == 0) {
+                       /* gave up on the lock */
+                       putref(b, MKREF(leaf));
+                       goto retry;
+               }
 
        return b;
 }
index ea52e2e707da59fa0c33b624e4ee9cf4673795eb..7dd6aa807f34800bda19a200ac2faa13de4b5a50 100644 (file)
--- a/cluster.c
+++ b/cluster.c
@@ -731,7 +731,8 @@ unsigned long long lafs_cluster_allocate(struct block *b, int cnum)
                        /* won't fit */
                        // printk("Wont fit - used = %d\n", used);
                        cluster_flush(fs, cnum);
-               }
+               } else
+                       lafs_io_wake(b);
        } while (used < 0);
 
        if (used > 0)
diff --git a/index.c b/index.c
index 7a9e65b2a4574ba23cf85e7873e01d9feaa00f9c..bbbf4051bd272515bcd6535fb503fdb91f0e440a 100644 (file)
--- a/index.c
+++ b/index.c
@@ -818,6 +818,7 @@ void lafs_refile(struct block *b, int dec)
                                        getref(b, MKREF(leaf));
                        }
                        spin_unlock(&fs->lock);
+                       lafs_io_wake(b);
                }
                /* check the ->parent link */
                if (atomic_read(&b->refcnt) == dec) {
diff --git a/io.c b/io.c
index b37eafb96c18134747d837b7f9303f38b98e5c29..bc51f15b9cfd36a16d99c2e1bf0b75c271482949 100644 (file)
--- a/io.c
+++ b/io.c
@@ -187,9 +187,14 @@ lafs_super_wait(struct fs *fs)
 
 static DECLARE_WAIT_QUEUE_HEAD(block_wait); /* need more of these later FIXME */
 
-void
-_lafs_iolock_block(struct block *b)
+void lafs_io_wake(struct block *b)
+{
+       wake_up(&block_wait);
+}      
+int
+_lafs_iolock_block(struct block *b, int checkempty)
 {
+       int locked = 1;
        if (test_and_set_bit(B_IOLock, &b->flags)) {
                DEFINE_WAIT(wq);
 #ifdef DEBUG_IOLOCK
@@ -199,13 +204,17 @@ _lafs_iolock_block(struct block *b)
 #endif
                for (;;) {
                        prepare_to_wait(&block_wait, &wq, TASK_UNINTERRUPTIBLE);
-                       if (test_and_set_bit(B_IOLock, &b->flags))
-                               schedule();
-                       else
+                       if (checkempty && !list_empty_careful(&b->lru)) {
+                               locked = 0;
                                break;
+                       }
+                       if (!test_and_set_bit(B_IOLock, &b->flags))
+                               break;
+                       schedule();
                }
                finish_wait(&block_wait, &wq);
        }
+       return locked;
 }
 
 void
@@ -222,7 +231,7 @@ lafs_iounlock_block(struct block *b, int bit)
            !db->page || !db->page->private ||
            (!PageLocked(db->page) && !PageWriteback(db->page))) {
                clear_bit(B_IOLock, &b->flags);
-               wake_up(&block_wait);
+               lafs_io_wake(b);
                if (test_bit(B_Async, &b->flags))
                        lafs_wake_cleaner(fs_from_inode(b->inode));
                dprintk("unlock non-data block\n");
@@ -253,7 +262,7 @@ lafs_iounlock_block(struct block *b, int bit)
                        end_page_writeback(db->page);
        }
 
-       wake_up(&block_wait);
+       lafs_io_wake(b);
        if (test_bit(B_Async, &b->flags))
                lafs_wake_cleaner(fs_from_inode(b->inode));
 }
diff --git a/lafs.h b/lafs.h
index 4d67fff57570ac7639621327cbdc64b45dd14df3..02f88e13c4b30120b9fcb3a412b628ee5f094e1e 100644 (file)
--- a/lafs.h
+++ b/lafs.h
@@ -102,13 +102,15 @@ struct indexblock *lafs_leaf_find(struct inode *inode, u32 addr,
                                  int adopt, u32 *next, int async, REFARG);
 u32 lafs_leaf_next(struct indexblock *ib, u32 start);
 #ifdef DEBUG_IOLOCK
-#define set_iolock_info(b) do { (b)->iolock_file = __FILE__; (b)->iolock_line = __LINE__; } while (0)
+#define set_iolock_info(b) ( (b)->iolock_file = __FILE__, (b)->iolock_line = __LINE__)
 #else
-#define set_iolock_info(b) do {} while(0)
+#define set_iolock_info(b) (0)
 #endif
-#define lafs_iolock_block(b) do { _lafs_iolock_block(b); set_iolock_info(b); } while(0)
+#define lafs_iolock_block(b) do { _lafs_iolock_block(b, 0); set_iolock_info(b); } while(0)
+#define lafs_iolock_block_empty(b) (_lafs_iolock_block(b, 1) ? ( set_iolock_info(b), 1): 0)
 
-void _lafs_iolock_block(struct block *b);
+void lafs_io_wake(struct block *b);
+int _lafs_iolock_block(struct block *b, int checkempty);
 void lafs_iounlock_block(struct block *b, int bit);
 
 void lafs_super_write(struct fs *fs, int dev, u64 addr, char *buf, int size);