 #define ZAP_BLOCK_SIZE (~(0UL))
 #endif
-/*
- * hugepage regions must be unmapped with HPAGE_SIZE granularity
- */
-static inline unsigned long zap_block_size(struct vm_area_struct *vma)
-{
-	if (is_vm_hugetlb_page(vma))
-		return HPAGE_SIZE;
-	return ZAP_BLOCK_SIZE;
-}
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted)
 {
-	unsigned long zap_bytes = zap_block_size(vma);
+	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
 	int ret = 0;
 		ret++;
 		while (start != end) {
-			unsigned long block = min(zap_bytes, end - start);
+			unsigned long block;
+
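+			/*
+			 * hugetlb vmas are unmapped as a single extent, so
+			 * block may exceed zap_bytes below; the (long) test
+			 * on zap_bytes keeps the resulting underflow from
+			 * being mistaken for a huge remaining byte count.
+			 */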
+			if (is_vm_hugetlb_page(vma))
+				block = end - start;
+			else
+				block = min(zap_bytes, end - start);
 			if (!tlb_start_valid) {
 				tlb_start = start;
 				tlb_start_valid = 1;
 			}
 
 			unmap_page_range(*tlbp, vma, start, start + block);
 			start += block;
 			zap_bytes -= block;
-			if (zap_bytes != 0)
+			if ((long)zap_bytes > 0)
 				continue;
 			if (need_resched()) {
 				tlb_finish_mmu(*tlbp, tlb_start, start);
 				*tlbp = tlb_gather_mmu(mm, 0);
 				tlb_start_valid = 0;
 			}
-			zap_bytes = zap_block_size(vma);
+			zap_bytes = ZAP_BLOCK_SIZE;
 		}
 		if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
 			printk("%s: VMA list is not sorted correctly!\n",