Well, I didn't test that very well. __page_cache_release() is doing a
__free_page() on a zero-ref page, so __free_pages() sends the refcount
negative and doesn't free it. With patch #8, page_cache_release()
almost never frees pages, but it must have been leaking a little bit.
It's lucky it showed up.
This fixes it, and also adds a missing PageReserved test in put_page().
Which makes put_page() identical to page_cache_release(), but there are
header file woes. I'll fix that up later.
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
extern void FASTCALL(__page_cache_release(struct page *));
-#define put_page(p) \
- do { \
- if (put_page_testzero(p)) \
- __page_cache_release(p); \
+#define put_page(p) \
+ do { \
+ if (!PageReserved(p) && put_page_testzero(p)) \
+ __page_cache_release(p); \
} while (0)
+void FASTCALL(__free_pages_ok(struct page *page, unsigned int order));
/*
* Multiple processes may "see" the same page. E.g. for untouched
* -- wli
*/
-static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
-static void __free_pages_ok (struct page *page, unsigned int order)
+void __free_pages_ok (struct page *page, unsigned int order)
{
unsigned long index, page_idx, mask, flags;
free_area_t *area;
del_page_from_inactive_list(page);
spin_unlock_irqrestore(&_pagemap_lru_lock, flags);
}
- __free_page(page);
+ __free_pages_ok(page, 0);
}
/*