git.neil.brown.name Git - history.git/commitdiff
[PATCH] vmscan: add lru_to_page() helper
author Andrew Morton <akpm@osdl.org>
Fri, 12 Mar 2004 00:25:47 +0000 (16:25 -0800)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Fri, 12 Mar 2004 00:25:47 +0000 (16:25 -0800)
From: Nick Piggin <piggin@cyberone.com.au>

Add a little helper macro for a common list extraction operation in vmscan.c

mm/vmscan.c

index 7768aca74d1dac48c3dbb7ecbd5ebf1c0b6da26a..be07be47f92b19e64c8bdb20d802fecc7a2d7d41 100644 (file)
 int vm_swappiness = 60;
 static long total_memory;
 
+#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+
 #ifdef ARCH_HAS_PREFETCH
 #define prefetch_prev_lru_page(_page, _base, _field)                   \
        do {                                                            \
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
-                       prev = list_entry(_page->lru.prev,              \
-                                       struct page, lru);              \
+                       prev = lru_to_page(&(_page->lru));              \
                        prefetch(&prev->_field);                        \
                }                                                       \
        } while (0)
@@ -64,8 +65,7 @@ static long total_memory;
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
-                       prev = list_entry(_page->lru.prev,              \
-                                       struct page, lru);              \
+                       prev = lru_to_page(&(_page->lru));                      \
                        prefetchw(&prev->_field);                       \
                }                                                       \
        } while (0)
@@ -260,7 +260,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
                int may_enter_fs;
                int referenced;
 
-               page = list_entry(page_list->prev, struct page, lru);
+               page = lru_to_page(page_list);
                list_del(&page->lru);
 
                if (TestSetPageLocked(page))
@@ -494,8 +494,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
 
                while (nr_scan++ < SWAP_CLUSTER_MAX &&
                                !list_empty(&zone->inactive_list)) {
-                       page = list_entry(zone->inactive_list.prev,
-                                               struct page, lru);
+                       page = lru_to_page(&zone->inactive_list);
 
                        prefetchw_prev_lru_page(page,
                                                &zone->inactive_list, flags);
@@ -540,7 +539,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
-                       page = list_entry(page_list.prev, struct page, lru);
+                       page = lru_to_page(&page_list);
                        if (TestSetPageLRU(page))
                                BUG();
                        list_del(&page->lru);
@@ -599,7 +598,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
        while (nr_pages && !list_empty(&zone->active_list)) {
-               page = list_entry(zone->active_list.prev, struct page, lru);
+               page = lru_to_page(&zone->active_list);
                prefetchw_prev_lru_page(page, &zone->active_list, flags);
                if (!TestClearPageLRU(page))
                        BUG();
@@ -650,7 +649,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
                reclaim_mapped = 1;
 
        while (!list_empty(&l_hold)) {
-               page = list_entry(l_hold.prev, struct page, lru);
+               page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
                        if (!reclaim_mapped) {
@@ -681,7 +680,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
        while (!list_empty(&l_inactive)) {
-               page = list_entry(l_inactive.prev, struct page, lru);
+               page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
                if (TestSetPageLRU(page))
                        BUG();
@@ -710,7 +709,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
 
        pgmoved = 0;
        while (!list_empty(&l_active)) {
-               page = list_entry(l_active.prev, struct page, lru);
+               page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
                if (TestSetPageLRU(page))
                        BUG();