#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif
+#ifndef CONFIG_X86_WP_WORKS_OK
EXPORT_SYMBOL(__verify_write);
+#endif
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(dump_extended_fpu);
extern int console_loglevel;
+#ifndef CONFIG_X86_WP_WORKS_OK
/*
* Ugly, ugly, but the gotos result in better assembly..
*/
int __verify_write(const void * addr, unsigned long size)
{
+ struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long start = (unsigned long) addr;
- if (!size)
+ if (!size || segment_eq(get_fs(),KERNEL_DS))
return 1;
+ down_read(&mm->mmap_sem);
vma = find_vma(current->mm, start);
if (!vma)
goto bad_area;
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
}
+ /*
+ * We really need to hold mmap_sem over the whole access to
+ * userspace, else another thread could change permissions.
+ * This is unfixable, so don't use i386-class machines for
+ * critical servers.
+ */
+ up_read(&mm->mmap_sem);
return 1;
check_stack:
goto good_area;
bad_area:
+ up_read(&mm->mmap_sem);
return 0;
out_of_memory:
}
goto bad_area;
}
+#endif
/*
* Unlock any spinlocks which will prevent us from getting the
* This function cannot be __init, since exceptions don't work in that
* section.
*/
-static int do_test_wp_bit(unsigned long vaddr);
+static int do_test_wp_bit(void);
void __init test_wp_bit(void)
{
- const unsigned long vaddr = PAGE_OFFSET;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte, old_pte;
-
if (cpu_has_pse) {
/* Ok, all PSE-capable CPUs are definitely handling the WP bit right. */
boot_cpu_data.wp_works_ok = 1;
printk("Checking if this processor honours the WP bit even in supervisor mode... ");
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pmd = pmd_offset(pgd, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- old_pte = *pte;
- *pte = pfn_pte(0, PAGE_READONLY);
- local_flush_tlb();
-
- boot_cpu_data.wp_works_ok = do_test_wp_bit(vaddr);
-
- *pte = old_pte;
- local_flush_tlb();
+ /* Any page-aligned address will do, the test is non-destructive */
+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
+ clear_fixmap(FIX_WP_TEST);
if (!boot_cpu_data.wp_works_ok) {
printk("No.\n");
#endif
/* Put this after the callers, so that it cannot be inlined */
-static int do_test_wp_bit(unsigned long vaddr)
+static int do_test_wp_bit(void)
{
char tmp_reg;
int flag;
" .align 4 \n"
" .long 1b,2b \n"
".previous \n"
- :"=m" (*(char *) vaddr),
+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
"=q" (tmp_reg),
"=r" (flag)
:"2" (1)
idx = FIX_BTMAP_BEGIN;
while (nrpages > 0) {
- __set_fixmap(idx, 0, __pgprot(0));
+ clear_fixmap(idx);
--idx;
--nrpages;
}
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
+ * in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* TLB entries of such buffers will not be flushed across
* task switches.
*/
-
-/*
- * on UP currently we will have no trace of the fixmap mechanizm,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
enum fixed_addresses {
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
#define NR_FIX_BTMAPS 16
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+ FIX_WP_TEST,
__end_of_fixed_addresses
};
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, __pgprot(0))
+
/*
* used by vmalloc.c.
*
#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
- segment_eq(get_fs(),KERNEL_DS) || \
__verify_write((void *)(addr),(size))))
#endif