git.neil.brown.name Git - history.git/commitdiff
[PATCH] i386 mm init cleanup part 1
author: Brian Gerst <bgerst@didntduck.org>
Tue, 28 May 2002 15:02:27 +0000 (08:02 -0700)
committer: Patrick Mochel <mochel@osdl.org>
Tue, 28 May 2002 15:02:27 +0000 (08:02 -0700)
This revised patch starts untangling the mess in arch/i386/mm/init.c
- Pull setting bits in cr4 out of the loop
- Make __PAGE_KERNEL a variable and cache the global bit there.
- New pfn_pmd() for large pages.

arch/i386/kernel/i386_ksyms.c
arch/i386/mm/init.c
include/asm-i386/pgtable-2level.h
include/asm-i386/pgtable-3level.h
include/asm-i386/pgtable.h

index eb9d6ed7117563d6aea37c29c83b0c887a0e3493..92fdeebc797b678e11265d02312cacaeae93258e 100644 (file)
@@ -174,3 +174,5 @@ EXPORT_SYMBOL(atomic_dec_and_lock);
 
 extern int is_sony_vaio_laptop;
 EXPORT_SYMBOL(is_sony_vaio_laptop);
+
+EXPORT_SYMBOL(__PAGE_KERNEL);
index 4ce59a3223ccb7b90cb44a6b9add623b9b73fac7..8dab03ca7428d85c2bdc25484873cce0fdbb8665 100644 (file)
@@ -177,6 +177,8 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
        }
 }
 
+unsigned long __PAGE_KERNEL = _PAGE_KERNEL;
+
 static void __init pagetable_init (void)
 {
        unsigned long vaddr, end;
@@ -196,6 +198,14 @@ static void __init pagetable_init (void)
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page)));
 #endif
+       if (cpu_has_pse) {
+               set_in_cr4(X86_CR4_PSE);
+       }
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __PAGE_KERNEL |= _PAGE_GLOBAL;
+       }
+
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;
 
@@ -216,17 +226,7 @@ static void __init pagetable_init (void)
                        if (end && (vaddr >= end))
                                break;
                        if (cpu_has_pse) {
-                               unsigned long __pe;
-
-                               set_in_cr4(X86_CR4_PSE);
-                               boot_cpu_data.wp_works_ok = 1;
-                               __pe = _KERNPG_TABLE + _PAGE_PSE + __pa(vaddr);
-                               /* Make it "global" too if supported */
-                               if (cpu_has_pge) {
-                                       set_in_cr4(X86_CR4_PGE);
-                                       __pe += _PAGE_GLOBAL;
-                               }
-                               set_pmd(pmd, __pmd(__pe));
+                               set_pmd(pmd, pfn_pmd(__pa(vaddr) >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                                continue;
                        }
 
@@ -358,14 +358,17 @@ static int do_test_wp_bit(unsigned long vaddr);
 
 void __init test_wp_bit(void)
 {
-/*
- * Ok, all PSE-capable CPUs are definitely handling the WP bit right.
- */
        const unsigned long vaddr = PAGE_OFFSET;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, old_pte;
 
+       if (cpu_has_pse) {
+               /* Ok, all PSE-capable CPUs are definitely handling the WP bit right. */
+               boot_cpu_data.wp_works_ok = 1;
+               return;
+       }
+
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");
 
        pgd = swapper_pg_dir + __pgd_offset(vaddr);
index 3b59d04f9d367e33202e02659fb4a68ccb22f082..e22db0cc68249659d04c813ea7d40a7f91ede7fa 100644 (file)
@@ -60,5 +60,6 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 #define pte_none(x)            (!(x).pte_low)
 #define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #endif /* _I386_PGTABLE_2LEVEL_H */
index c396e7454930013eb344b44a51505edcb552981a..bb2eaea63fdea47e0435278ce6848697e29b494c 100644 (file)
@@ -99,4 +99,9 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
        return pte;
 }
 
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+       return __pmd(((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
 #endif /* _I386_PGTABLE_3LEVEL_H */
index 42dd1a33d618a2e0c4cd8c204e31967694e4fcd6..8785225614da0701825f4e09eb63112ef728d3a8 100644 (file)
@@ -132,27 +132,18 @@ extern void pgtable_cache_init(void);
 #define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 
-#define __PAGE_KERNEL \
+#define _PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
-       (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-# define MAKE_GLOBAL(x)                                                \
-       ({                                                      \
-               pgprot_t __ret;                                 \
-                                                               \
-               if (cpu_has_pge)                                \
-                       __ret = __pgprot((x) | _PAGE_GLOBAL);   \
-               else                                            \
-                       __ret = __pgprot(x);                    \
-               __ret;                                          \
-       })
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+
+extern unsigned long __PAGE_KERNEL;
+#define __PAGE_KERNEL_RO       (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE  (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE    (__PAGE_KERNEL | _PAGE_PSE)
+
+#define PAGE_KERNEL            __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
 
 /*
  * The i386 can't do page protection for execute, and considers that