]> git.neil.brown.name Git - history.git/commitdiff
[PATCH] shmem fixes
authorAndrew Morton <akpm@zip.com.au>
Thu, 4 Jul 2002 15:30:44 +0000 (08:30 -0700)
committerLinus Torvalds <torvalds@home.transmeta.com>
Thu, 4 Jul 2002 15:30:44 +0000 (08:30 -0700)
A shmem cleanup/bugfix patch from Hugh Dickins.

- Minor: in try_to_unuse(), only wait on writeout if we actually
  started new writeout.  Otherwise, there is no need because a
  wait_on_page_writeback() has already been executed against this page.
  And it's locked, so no new writeback can start.

- Minor: in shmem_unuse_inode(): remove all the
  wait_on_page_writeback() logic.  We already did that in
  try_to_unuse(), and the page is locked so no new writeback can start.

- Less minor: add a missing page_cache_release() to
  shmem_get_page_locked() in the uncommon case where the page was found
  to be under writeout.

mm/shmem.c
mm/swapfile.c

index 07bdba83bdf5624420c4495ee6d8a520b558e104..8975d336f93eb6a925f1867b8cef42d4906cd650 100644 (file)
@@ -426,22 +426,15 @@ found:
        swap_free(entry);
        ptr[offset] = (swp_entry_t) {0};
 
-       while (inode && (PageWriteback(page) ||
-                       move_from_swap_cache(page, idx, inode->i_mapping))) {
+       while (inode && move_from_swap_cache(page, idx, inode->i_mapping)) {
                /*
                 * Yield for kswapd, and try again - but we're still
                 * holding the page lock - ugh! fix this up later on.
                 * Beware of inode being unlinked or truncated: just
                 * leave try_to_unuse to delete_from_swap_cache if so.
-                *
-                * AKPM: We now wait on writeback too.  Note that it's
-                * the page lock which prevents new writeback from starting.
                 */
                spin_unlock(&info->lock);
-               if (PageWriteback(page))
-                       wait_on_page_writeback(page);
-               else
-                       yield();
+               yield();
                spin_lock(&info->lock);
                ptr = shmem_swp_entry(info, idx, 0);
                if (IS_ERR(ptr))
@@ -607,6 +600,7 @@ repeat:
                        spin_unlock(&info->lock);
                        wait_on_page_writeback(page);
                        unlock_page(page);
+                       page_cache_release(page);
                        goto repeat;
                }
                error = move_from_swap_cache(page, idx, mapping);
index 175c812a63d6494a46e9641ad1806af07c3f2ab5..54d19a9a431beb7cb9742d68228465af3dfbe24e 100644 (file)
@@ -687,11 +687,10 @@ static int try_to_unuse(unsigned int type)
                if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
                        swap_writepage(page);
                        lock_page(page);
-               }
-               if (PageSwapCache(page)) {
                        wait_on_page_writeback(page);
-                       delete_from_swap_cache(page);
                }
+               if (PageSwapCache(page))
+                       delete_from_swap_cache(page);
 
                /*
                 * So we could skip searching mms once swap count went