@@ ... @@ free_pages_bulk(struct zone *zone, int count,
 		/* have to delete it as __free_pages_bulk list manipulates */
 		list_del(&page->list);
 		__free_pages_bulk(page, base, zone, area, mask, order);
-		mod_page_state(pgfree, count<<order);
 		ret++;
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
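Note what the deleted line was doing: it sat inside the per-page loop, so pgfree grew by count<<order once per page freed rather than once per call, and the update ran with zone->lock held. The accounting moves out to the two callers below, which each know exactly how much they free.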
@@ ... @@ void __free_pages_ok(struct page *page, unsigned int order)
 {
 	LIST_HEAD(list);
 
+	mod_page_state(pgfree, 1 << order);
 	free_pages_check(__FUNCTION__, page);
 	list_add(&page->list, &list);
 	free_pages_bulk(page_zone(page), 1, &list, order);
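__free_pages_ok() frees exactly one order-`order` block, so it can charge all 1 << order pages up front. For readers without the page_state machinery in their head, here is a rough userspace sketch of the idea, assuming per-CPU counters that are only summed when the statistics are read; the macro names mirror the kernel's mod_page_state()/inc_page_state(), but the bodies, NR_CPUS and current_cpu are illustrative stand-ins (the real macros hit per-CPU data with interrupts disabled):

/*
 * Sketch only: one counter struct per CPU, written locklessly by the
 * owning CPU, summed at read time.  Not the kernel implementation.
 */
#include <stdio.h>

#define NR_CPUS 4

struct page_state {
	unsigned long pgalloc;		/* pages allocated */
	unsigned long pgfree;		/* pages freed */
};

static struct page_state page_states[NR_CPUS];
static int current_cpu;			/* stand-in for smp_processor_id() */

#define mod_page_state(member, delta)	\
	(page_states[current_cpu].member += (delta))
#define inc_page_state(member)	mod_page_state(member, 1)

int main(void)
{
	unsigned long sum = 0;
	int order = 3, cpu;

	current_cpu = 0;
	mod_page_state(pgfree, 1 << order);	/* __free_pages_ok() path */
	current_cpu = 1;
	inc_page_state(pgfree);			/* free_hot_cold_page() path */

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* read side: sum all CPUs */
		sum += page_states[cpu].pgfree;
	printf("pgfree = %lu\n", sum);		/* prints "pgfree = 9" */
	return 0;
}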
@@ ... @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list)
 {
 	unsigned long flags;
-	int i, allocated = 0;
+	int i;
+	int allocated = 0;
 	struct page *page;
-	struct list_head *curr;
-	LIST_HEAD(temp);
 
 	spin_lock_irqsave(&zone->lock, flags);
 	for (i = 0; i < count; ++i) {
 		page = __rmqueue(zone, order);
 		if (page == NULL)
 			break;
-		++allocated;
-		list_add(&page->list, &temp);
+		allocated++;
+		list_add_tail(&page->list, list);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
-
-	/*
-	 * This may look inefficient because we're walking the list again,
-	 * but the cachelines are hot, so it's very cheap, and this way we
-	 * can drop the zone lock much earlier
-	 */
-	list_for_each(curr, &temp) {
-		page = list_entry(curr, struct page, list);
-		BUG_ON(bad_range(zone, page));
-		prep_new_page(page, order);
-	}
-	list_splice(&temp, list->prev);
 	return allocated;
 }
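Two simplifications here: pages now go straight onto the caller's list, and list_add_tail() hands them over in allocation order, which the old list_add()-into-temp-then-splice dance did not; and the BUG_ON()/prep_new_page() pass disappears from rmqueue_bulk(), leaving preparation to buffered_rmqueue() (visible as context in the hunk further down), so nothing but grabbing pages happens anywhere near zone->lock. A minimal userspace sketch of the ordering point, with a stripped-down list_head in the shape of include/linux/list.h and a toy struct page:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);	/* push at head: LIFO */
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);	/* append at tail: FIFO */
}

struct page {				/* toy stand-in for struct page */
	int idx;
	struct list_head list;
};

static void dump(const char *tag, struct list_head *head)
{
	struct list_head *curr;

	printf("%s:", tag);
	for (curr = head->next; curr != head; curr = curr->next)
		printf(" %d", list_entry(curr, struct page, list)->idx);
	printf("\n");
}

int main(void)
{
	struct list_head lifo = LIST_HEAD_INIT(lifo);
	struct list_head fifo = LIST_HEAD_INIT(fifo);
	struct page a[3] = { { .idx = 0 }, { .idx = 1 }, { .idx = 2 } };
	struct page b[3] = { { .idx = 0 }, { .idx = 1 }, { .idx = 2 } };
	int i;

	for (i = 0; i < 3; i++) {
		list_add(&a[i].list, &lifo);		/* old rmqueue_bulk */
		list_add_tail(&b[i].list, &fifo);	/* new rmqueue_bulk */
	}
	dump("list_add     ", &lifo);	/* prints: 2 1 0 */
	dump("list_add_tail", &fifo);	/* prints: 0 1 2 */
	return 0;
}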
@@ ... @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
 
+	inc_page_state(pgfree);
 	free_pages_check(__FUNCTION__, page);
 	pcp = &zone->pageset[get_cpu()].pcp[cold];
 	local_irq_save(flags);
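free_hot_cold_page() puts a single order-0 page into the per-cpu pool, so the one-page form of the same accounting, inc_page_state(pgfree), covers it.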
@@ ... @@ buffered_rmqueue()
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
+		mod_page_state(pgalloc, 1 << order);
 		prep_new_page(page, order);
 	}
 	return page;
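This is the counterpart of the pgfree moves above: every allocation that succeeds, whether it was satisfied from the per-cpu pool or fell back to __rmqueue(), ends up in this block, so pgalloc is charged in one spot that sees the real order and the real outcome.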
@@ ... @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	if (gfp_mask & __GFP_COLD)
 		cold = 1;
 
-	mod_page_state(pgalloc, 1<<order);
-
 	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 	classzone = zones[0];
 	if (classzone == NULL)	/* no zones in the zonelist */
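The call removed here ran before any zone had even been tried, so pgalloc used to count attempts, failed ones included; after this patch it counts pages actually handed out.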