git.neil.brown.name Git - history.git/commitdiff
[PATCH] hugetlbpage MAP_FIXED fix
author: Andrew Morton <akpm@digeo.com>
Mon, 10 Feb 2003 15:36:32 +0000 (07:36 -0800)
committer: Linus Torvalds <torvalds@home.transmeta.com>
Mon, 10 Feb 2003 15:36:32 +0000 (07:36 -0800)
We need to validate that the address and length of a MAP_FIXED request are
suitable for hugetlb pages.

arch/i386/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/sparc64/mm/hugetlbpage.c
arch/x86_64/mm/hugetlbpage.c
include/linux/hugetlb.h
mm/mmap.c

index 243d844a1a7985da02ef9188dac80a0eb0f4a4ef..749cadb2e5a3547006b2937a34e53fd301ad3561 100644 (file)
@@ -88,6 +88,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
        set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
 {
index a08a64c1d39d3c6646be16c6cf087d4a93d8a9ad..c71ed65b5a2d7bf6a63da77440f15b5e2c75b177 100644 (file)
@@ -96,6 +96,18 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
        return;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
 {
index c137cb8c9d56ff5d3a51ec1238a0f867b0f667e4..63895ce0202f3b1f8e41b7a0bd3aad559628446b 100644 (file)
@@ -232,6 +232,18 @@ out_error:
        return -1;
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
 {
index f8e146193dc6e20586b7bc2fea849581d91bfe3a..e1c31afb196e4464dcf181f4c899a3331ac805f2 100644 (file)
@@ -86,6 +86,18 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struc
        set_pte(page_table, entry);
 }
 
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
 int
 copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
index 370411eaaba2028dd0fb72ebc641d10e6e6a40fd..7c31efc0b61bad6dfad6ac94dbf324bbfb3dfeac 100644 (file)
@@ -26,6 +26,7 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
                                        unsigned long address);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int write);
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 
 extern int htlbpage_max;
@@ -56,6 +57,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #define hugepage_vma(mm, addr)                 0
 #define mark_mm_hugetlb(mm, vma)               do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)  0
+#define is_aligned_hugepage_range(addr, len)   0
 #define pmd_huge(x)    0
 
 #ifndef HPAGE_MASK
index 07e2417185fffb9091082966f2bc7696016f6ba7..7696c40185bdf36ec42745e6bc162de3326bce3c 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -801,6 +801,13 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                        return -ENOMEM;
                if (addr & ~PAGE_MASK)
                        return -EINVAL;
+               if (is_file_hugepages(file)) {
+                       unsigned long ret;
+
+                       ret = is_aligned_hugepage_range(addr, len);
+                       if (ret)
+                               return ret;
+               }
                return addr;
        }
 
@@ -1224,8 +1231,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        /* we have  start < mpnt->vm_end  */
 
        if (is_vm_hugetlb_page(mpnt)) {
-               if ((start & ~HPAGE_MASK) || (len & ~HPAGE_MASK))
-                       return -EINVAL;
+               int ret = is_aligned_hugepage_range(start, len);
+
+               if (ret)
+                       return ret;
        }
 
        /* if it doesn't overlap, we have nothing.. */