return page;
}
+/*
+ * set_page_refs - give a freshly allocated 2^order block of pages its
+ * initial reference count(s).
+ *
+ * With CONFIG_MMU only the head page is given a reference; on !MMU
+ * every constituent page gets its own count so that a get/put on any
+ * sub-page cannot free the block out from under other users (see the
+ * comment in the loop below).  'order' is intentionally unused in the
+ * CONFIG_MMU branch.
+ */
+static inline void set_page_refs(struct page *page, int order)
+{
+#ifdef CONFIG_MMU
+ set_page_count(page, 1);
+#else
+ int i;
+
+ /*
+ * We need to reference all the pages for this order, otherwise if
+ * anyone accesses one of the pages with (get/put) it will be freed.
+ */
+ for (i = 0; i < (1 << order); i++)
+ set_page_count(page+i, 1);
+#endif /* CONFIG_MMU */
+}
+
/*
* This page is about to be returned from the page allocator
*/
-static void prep_new_page(struct page *page)
+static void prep_new_page(struct page *page, int order)
{
- if ( page->mapping ||
- page_mapped(page) ||
- (page->flags & (
+ if (page->mapping || page_mapped(page) ||
+ (page->flags & (
1 << PG_private |
1 << PG_locked |
1 << PG_lru |
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
1 << PG_checked);
- set_page_count(page, 1);
+ set_page_refs(page, order);
}
/*
list_for_each(curr, &temp) {
page = list_entry(curr, struct page, list);
BUG_ON(bad_range(zone, page));
- prep_new_page(page);
+ prep_new_page(page, order);
}
list_splice(&temp, list->prev);
return allocated;
if (page != NULL) {
BUG_ON(bad_range(zone, page));
- prep_new_page(page);
+ prep_new_page(page, order);
}
return page;
}