.word 0x9200 # data read/write
.word 0x00CF # granularity = 4096, 386
# (+5th nibble of limit)
-# this is 64bit descriptor for code
- .word 0xFFFF
- .word 0
- .word 0x9A00 # code read/exec
- .word 0x00AF # as above, but it is long mode and with D=0
- # it does not seem to do the trick.
-
idt_48:
.word 0 # idt limit = 0
.word 0, 0 # idt base = 0L
stack_t uss,uoss;
int ret;
mm_segment_t seg;
+ if (uss_ptr) {
+ u32 ptr;
if (!access_ok(VERIFY_READ,uss_ptr,sizeof(stack_ia32_t)) ||
- __get_user(ptr_to_u32(uss.ss_sp), &uss_ptr->ss_sp) ||
- __get_user((u32)uss.ss_flags, &uss_ptr->ss_flags) ||
- __get_user((u32)uss.ss_size, &uss_ptr->ss_size))
+ __get_user(ptr, &uss_ptr->ss_sp) ||
+ __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
+ __get_user(uss.ss_size, &uss_ptr->ss_size))
return -EFAULT;
+ uss.ss_sp = (void *)(u64)ptr;
+ }
seg = get_fs();
set_fs(KERNEL_DS);
- ret = do_sigaltstack(&uss, &uoss, regs.rsp);
+ ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs.rsp);
set_fs(seg);
if (ret >= 0 && uoss_ptr) {
- if (!access_ok(VERIFY_WRITE,uss_ptr,sizeof(stack_ia32_t)) ||
- __put_user(ptr_to_u32(uss.ss_sp), &uss_ptr->ss_sp) ||
- __put_user((u32)uss.ss_flags, &uss_ptr->ss_flags) ||
- __put_user((u32)uss.ss_size, &uss_ptr->ss_size))
+ if (!access_ok(VERIFY_WRITE,uoss_ptr,sizeof(stack_ia32_t)) ||
+		__put_user((u32)(u64)uoss.ss_sp, &uoss_ptr->ss_sp) ||
+		__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
+		__put_user(uoss.ss_size, &uoss_ptr->ss_size))
ret = -EFAULT;
}
return ret;
.quad compat_sys_futex /* 240 */
.quad sys32_sched_setaffinity
.quad sys32_sched_getaffinity
- .quad sys_set_thread_area
- .quad sys_get_thread_area
+ .quad sys32_set_thread_area
+ .quad sys32_get_thread_area
.quad sys32_io_setup
.quad sys_io_destroy
.quad sys32_io_getevents
#include <asm/errno.h>
#include <asm/debugreg.h>
#include <asm/i387.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
#include <asm/fpu32.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
- /* XXX: Ouch, how to find this out??? */
- d->si_int = s->si_int;
+ memcpy(&d->si_int, &s->si_int,
+ sizeof(siginfo_t) - offsetof(siginfo_t,si_int));
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
- /* XXX: Ouch, how to find this out??? */
- d->si_int = s->si_int;
+ memcpy(&d->si_int,
+ &s->si_int,
+ sizeof(siginfo_t) - offsetof(siginfo_t, si_int));
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/desc.h>
+#include <asm/system.h>
+#include <asm/ldt.h>
+#include <asm/processor.h>
+#include <asm/proto.h>
+
+/*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+static int get_free_idx(void)
+{
+	struct thread_struct *t = &current->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ * When you want addresses > 32bit use arch_prctl()
+ */
+int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info)
+{
+ struct user_desc info;
+ struct n_desc_struct *desc;
+ int cpu, idx;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and
+ * allocate an empty descriptor:
+ */
+ if (idx == -1) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+
+ /*
+ * We must not get preempted while modifying the TLS.
+ */
+ cpu = get_cpu();
+
+ if (LDT_empty(&info)) {
+ desc->a = 0;
+ desc->b = 0;
+ } else {
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+	if (t == &current->thread)
+ load_TLS(t, cpu);
+
+ put_cpu();
+ return 0;
+}
+
+asmlinkage long sys32_set_thread_area(struct user_desc *u_info)
+{
+	return do_set_thread_area(&current->thread, u_info);
+}
+
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+ (((desc)->a >> 16) & 0x0000ffff) | \
+ (((desc)->b << 16) & 0x00ff0000) | \
+ ( (desc)->b & 0xff000000) )
+
+#define GET_LIMIT(desc) ( \
+ ((desc)->a & 0x0ffff) | \
+ ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)->b >> 23) & 1)
+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
+#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
+
+int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info)
+{
+ struct user_desc info;
+ struct n_desc_struct *desc;
+ int idx;
+
+ if (get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
+
+ memset(&info, 0, sizeof(struct user_desc));
+ info.entry_number = idx;
+ info.base_addr = GET_BASE(desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+ info.lm = GET_LONGMODE(desc);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+asmlinkage long sys32_get_thread_area(struct user_desc *u_info)
+{
+	return do_get_thread_area(&current->thread, u_info);
+}
+
+
+int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
+{
+ struct n_desc_struct *desc;
+ struct user_desc info, *cp;
+ int idx;
+
+ cp = (void *)childregs->rsi;
+ if (copy_from_user(&info, cp, sizeof(info)))
+ return -EFAULT;
+ if (LDT_empty(&info))
+ return -EINVAL;
+
+ idx = info.entry_number;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+
+ return 0;
+}
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
- static unsigned long last_warning;
- static unsigned long skipped;
-
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
ack_APIC_irq();
+#if 0
+ static unsigned long last_warning;
+ static unsigned long skipped;
+
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
- if (last_warning+30*HZ < jiffies) {
+ if (time_before(last_warning+30*HZ,jiffies)) {
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
smp_processor_id(), skipped);
last_warning = jiffies;
} else {
skipped++;
}
+#endif
irq_exit();
}
clear_bss();
pda_init(0);
copy_bootdata(real_mode_data);
+ /* default console: */
+ if (!strstr(saved_command_line, "console="))
+ strcat(saved_command_line, " console=tty0");
s = strstr(saved_command_line, "earlyprintk=");
if (s != NULL)
setup_early_printk(s+12);
*/
static int __init timer_irq_works(void)
{
- unsigned int t1 = jiffies;
+ unsigned long t1 = jiffies;
local_irq_enable();
/* Let ten ticks pass... */
* might have cached one ExtINT interrupt. Finally, at
* least one tick may be lost due to delays.
*/
+
+ /* jiffies wrap? */
if (jiffies - t1 > 4)
return 1;
return 0;
#define __KERNEL_SYSCALLS__
#include <stdarg.h>
-#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
#include <linux/unistd.h>
-#include <linux/ptrace.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
-#include <linux/config.h>
#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ctype.h>
-#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/ptrace.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
-#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
-
-#include <linux/irq.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);
-int sys_arch_prctl(int code, unsigned long addr);
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
}
}
+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
+{
+ struct user_desc ud = {
+ .base_addr = addr,
+ .limit = 0xfffff,
+ .seg_32bit = 1,
+ .limit_in_pages = 1,
+ .useable = 1,
+ };
+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ desc->a = LDT_entry_a(&ud);
+ desc->b = LDT_entry_b(&ud);
+}
+
+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
+{
+ struct desc_struct *desc = (void *)t->thread.tls_array;
+ desc += tls;
+ return desc->base0 |
+ (((u32)desc->base1) << 16) |
+ (((u32)desc->base2) << 24);
+}
+
+
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
+ int err;
struct pt_regs * childregs;
struct task_struct *me = current;
p->thread.rsp = (unsigned long) childregs;
p->thread.rsp0 = (unsigned long) (childregs+1);
- p->thread.userrsp = current->thread.userrsp;
+ p->thread.userrsp = me->thread.userrsp;
p->thread.rip = (unsigned long) ret_from_fork;
asm("movl %%es,%0" : "=m" (p->thread.es));
asm("movl %%ds,%0" : "=m" (p->thread.ds));
- unlazy_fpu(current);
- p->thread.i387 = current->thread.i387;
+ unlazy_fpu(me);
+ p->thread.i387 = me->thread.i387;
if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
p->thread.io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS) {
- struct n_desc_struct *desc;
- struct user_desc info;
- int idx;
-
- if (copy_from_user(&info, test_thread_flag(TIF_IA32) ?
- (void *)childregs->rsi :
- (void *)childregs->rdx, sizeof(info)))
- return -EFAULT;
- if (LDT_empty(&info))
- return -EINVAL;
-
- idx = info.entry_number;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_IA32))
+ err = ia32_child_tls(p, childregs);
+ else
+#endif
+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r10);
+ if (err)
+ goto out;
}
-
- return 0;
+ err = 0;
+out:
+ if (err && p->thread.io_bitmap_ptr)
+ kfree(p->thread.io_bitmap_ptr);
+ return err;
}
/*
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
if (next->io_bitmap_ptr) {
/*
- * 4 cachelines copy ... not good, but not that
+ * 2 cachelines copy ... not good, but not that
* bad either. Anyone got something better?
* This only affects processes which use ioperm().
*/
#undef last_sched
#undef first_sched
-int sys_arch_prctl(int code, unsigned long addr)
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
int ret = 0;
+ int doit = task == current;
+ int cpu;
switch (code) {
case ARCH_SET_GS:
if (addr >= TASK_SIZE)
return -EPERM;
- get_cpu();
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, GS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ load_gs_index(GS_TLS_SEL);
+ }
+ task->thread.gsindex = GS_TLS_SEL;
+ task->thread.gs = 0;
+ } else {
+ task->thread.gsindex = 0;
+ task->thread.gs = addr;
+ if (doit) {
load_gs_index(0);
- current->thread.gsindex = 0;
- current->thread.gs = addr;
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+ }
+ }
put_cpu();
break;
case ARCH_SET_FS:
with gs */
if (addr >= TASK_SIZE)
return -EPERM;
- get_cpu();
+ cpu = get_cpu();
+ /* handle small bases via the GDT because that's faster to
+ switch. */
+ if (addr <= 0xffffffff) {
+ set_32bit_tls(task, FS_TLS, addr);
+ if (doit) {
+ load_TLS(&task->thread, cpu);
+ asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
+ }
+ task->thread.fsindex = FS_TLS_SEL;
+ task->thread.fs = 0;
+ } else {
+ task->thread.fsindex = 0;
+ task->thread.fs = addr;
+ if (doit) {
+ /* set the selector to 0 to not confuse
+ __switch_to */
asm volatile("movl %0,%%fs" :: "r" (0));
- current->thread.fsindex = 0;
- current->thread.fs = addr;
ret = checking_wrmsrl(MSR_FS_BASE, addr);
+ }
+ }
put_cpu();
break;
-
- /* Returned value may not be correct when the user changed fs/gs */
- case ARCH_GET_FS:
- ret = put_user(current->thread.fs, (unsigned long *)addr);
+ case ARCH_GET_FS: {
+ unsigned long base;
+ if (task->thread.fsindex == FS_TLS_SEL)
+ base = read_32bit_tls(task, FS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_FS_BASE, base);
+ } else
+ base = task->thread.fs;
+ ret = put_user(base, (unsigned long *)addr);
break;
-
- case ARCH_GET_GS:
- ret = put_user(current->thread.gs, (unsigned long *)addr);
+ }
+ case ARCH_GET_GS: {
+ unsigned long base;
+ if (task->thread.gsindex == GS_TLS_SEL)
+ base = read_32bit_tls(task, GS_TLS);
+ else if (doit) {
+ rdmsrl(MSR_KERNEL_GS_BASE, base);
+ } else
+ base = task->thread.gs;
+ ret = put_user(base, (unsigned long *)addr);
break;
+ }
default:
ret = -EINVAL;
return ret;
}
-/*
- * sys_alloc_thread_area: get a yet unused TLS descriptor index.
- */
-static int get_free_idx(void)
-{
-	struct thread_struct *t = &current->thread;
- int idx;
-
- for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
- if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
- return idx + GDT_ENTRY_TLS_MIN;
- return -ESRCH;
-}
-
-/*
- * Set a given TLS descriptor:
- * When you want addresses > 32bit use arch_prctl()
- */
-int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int cpu, idx;
-
- if (copy_from_user(&info, u_info, sizeof(info)))
- return -EFAULT;
-
- idx = info.entry_number;
-
- /*
- * index -1 means the kernel should try to find and
- * allocate an empty descriptor:
- */
- if (idx == -1) {
- idx = get_free_idx();
- if (idx < 0)
- return idx;
- if (put_user(idx, &u_info->entry_number))
- return -EFAULT;
- }
-
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- /*
- * We must not get preempted while modifying the TLS.
- */
- cpu = get_cpu();
-
- if (LDT_empty(&info)) {
- desc->a = 0;
- desc->b = 0;
- } else {
- desc->a = LDT_entry_a(&info);
- desc->b = LDT_entry_b(&info);
- }
-	if (t == &current->thread)
- load_TLS(t, cpu);
-
- put_cpu();
- return 0;
-}
-
-asmlinkage int sys_set_thread_area(struct user_desc *u_info)
-{
-	return do_set_thread_area(&current->thread, u_info);
-}
-
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-#define GET_BASE(desc) ( \
- (((desc)->a >> 16) & 0x0000ffff) | \
- (((desc)->b << 16) & 0x00ff0000) | \
- ( (desc)->b & 0xff000000) )
-
-#define GET_LIMIT(desc) ( \
- ((desc)->a & 0x0ffff) | \
- ((desc)->b & 0xf0000) )
-
-#define GET_32BIT(desc) (((desc)->b >> 23) & 1)
-#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
-#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
-
-int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info)
-{
- struct user_desc info;
- struct n_desc_struct *desc;
- int idx;
-
- if (get_user(idx, &u_info->entry_number))
- return -EFAULT;
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
- desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
-
- memset(&info, 0, sizeof(struct user_desc));
- info.entry_number = idx;
- info.base_addr = GET_BASE(desc);
- info.limit = GET_LIMIT(desc);
- info.seg_32bit = GET_32BIT(desc);
- info.contents = GET_CONTENTS(desc);
- info.read_exec_only = !GET_WRITABLE(desc);
- info.limit_in_pages = GET_LIMIT_PAGES(desc);
- info.seg_not_present = !GET_PRESENT(desc);
- info.useable = GET_USEABLE(desc);
- info.lm = GET_LONGMODE(desc);
-
- if (copy_to_user(u_info, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-asmlinkage int sys_get_thread_area(struct user_desc *u_info)
+long sys_arch_prctl(int code, unsigned long addr)
{
-	return do_get_thread_area(&current->thread, u_info);
+ return do_arch_prctl(current, code, addr);
}
/*
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
+#include <asm/ia32.h>
/*
* does not yet catch signals sent when the child dies.
wake_up_process(child);
ret = 0;
break;
+ }
+
+#ifdef CONFIG_IA32_EMULATION
+ /* This makes only sense with 32bit programs. Allow a
+ 64bit debugger to fully examine them too. Better
+ don't use it against 64bit processes, use
+ PTRACE_ARCH_PRCTL instead. */
case PTRACE_SET_THREAD_AREA: {
int old;
get_user(old, &((struct user_desc *)data)->entry_number);
put_user(old, &((struct user_desc *)data)->entry_number);
break;
}
- }
+#endif
+ /* normal 64bit interface to access TLS data.
+ Works just like arch_prctl, except that the arguments
+ are reversed. */
+ case PTRACE_ARCH_PRCTL:
+ ret = do_arch_prctl(child, data, addr);
+ break;
/*
* make the child exit. Best I can do is send it a sigkill.
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/mtrr.h>
+#include <asm/proto.h>
char x86_boot_params[2048] __initdata = {0,};
struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
-extern void system_call(void);
-extern void ia32_cstar_target(void);
-
extern struct task_struct init_task;
extern unsigned char __per_cpu_start[], __per_cpu_end[];
#define EXCEPTION_STK_ORDER 0 /* >= N_EXCEPTION_STACKS*EXCEPTION_STKSZ */
char boot_exception_stacks[N_EXCEPTION_STACKS*EXCEPTION_STKSZ];
+void syscall_init(void)
+{
+ /*
+ * LSTAR and STAR live in a bit strange symbiosis.
+ * They both write to the same internal register. STAR allows to set CS/DS
+ * but only a 32bit target. LSTAR sets the 64bit rip.
+ */
+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+ wrmsrl(MSR_LSTAR, system_call);
+
+#ifdef CONFIG_IA32_EMULATION
+ wrmsrl(MSR_CSTAR, ia32_cstar_target);
+#endif
+
+ /* Flags to clear on syscall */
+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
+}
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
- /*
- * LSTAR and STAR live in a bit strange symbiosis.
- * They both write to the same internal register. STAR allows to set CS/DS
- * but only a 32bit target. LSTAR sets the 64bit rip.
- */
- wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
- wrmsrl(MSR_LSTAR, system_call);
-
-#ifdef CONFIG_IA32_EMULATION
- wrmsrl(MSR_CSTAR, ia32_cstar_target);
-#endif
-
- /* Flags to clear on syscall */
- wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
+ syscall_init();
wrmsrl(MSR_FS_BASE, 0);
wrmsrl(MSR_KERNEL_GS_BASE, 0);
#define COPY(x) err |= __get_user(regs->x, &sc->x)
- {
- unsigned int seg;
- err |= __get_user(seg, &sc->gs);
- load_gs_index(seg);
- err |= __get_user(seg, &sc->fs);
- loadsegment(fs,seg);
- }
-
COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
COPY(rdx); COPY(rcx); COPY(rip);
COPY(r8);
static inline int
setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
{
- int tmp, err = 0;
+ int err = 0;
- tmp = 0;
- __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int *)&sc->gs);
- __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
- err |= __put_user(tmp, (unsigned int *)&sc->fs);
+ err |= __put_user(0, &sc->gs);
+ err |= __put_user(0, &sc->fs);
err |= __put_user(regs->rdi, &sc->rdi);
err |= __put_user(regs->rsi, &sc->rsi);
#include <asm/acpi.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
+#include <asm/proto.h>
static struct saved_context saved_context;
asm volatile ("movw %%gs, %0" : "=m" (saved_context.gs));
asm volatile ("movw %%ss, %0" : "=m" (saved_context.ss));
- asm volatile ("swapgs");
- rdmsrl(0xc0000100, saved_context.fs_base);
- rdmsrl(0xc0000101, saved_context.gs_base);
- asm volatile ("swapgs");
+ rdmsrl(MSR_FS_BASE, saved_context.fs_base);
+ rdmsrl(MSR_GS_BASE, saved_context.gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
/*
* control registers
load_gs_index(saved_context.gs);
asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));
- asm volatile ("swapgs");
- wrmsrl(0xc0000100, saved_context.fs_base);
- wrmsrl(0xc0000101, saved_context.gs_base);
- asm volatile ("swapgs");
+ wrmsrl(MSR_FS_BASE, saved_context.fs_base);
+ wrmsrl(MSR_GS_BASE, saved_context.gs_base);
+ wrmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
/*
* now restore the descriptor tables to their proper values
#include <asm/apic.h>
#endif
-u64 jiffies_64;
+u64 jiffies_64 = INITIAL_JIFFIES;
extern int using_apic_timer;
struct hpet_data __hpet __section_hpet; /* address, quotient, trigger, hz */
-volatile unsigned long __jiffies __section_jiffies;
-unsigned long __wall_jiffies __section_wall_jiffies;
+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
#include <asm/thread_info.h>
.text
- .p2align
+ .p2align 4
.globl __get_user_1
__get_user_1:
GET_THREAD_INFO(%rbx)
xorq %rax,%rax
ret
- .p2align
+ .p2align 4
.globl __get_user_2
__get_user_2:
GET_THREAD_INFO(%rbx)
xorq %rax,%rax
ret
- .p2align
+ .p2align 4
.globl __get_user_4
__get_user_4:
GET_THREAD_INFO(%rbx)
xorq %rax,%rax
ret
- .p2align
+ .p2align 4
.globl __get_user_8
__get_user_8:
GET_THREAD_INFO(%rbx)
static void flush_kernel_map(void *address)
{
- if (address && cpu_has_clflush) {
+ if (0 && address && cpu_has_clflush) {
/* is this worth it? */
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
static int __devinit pci_sanity_check(struct pci_ops *o)
{
u32 x = 0;
- struct pci_bus bus; /* Fake bus and device */
- struct pci_dev dev;
+ int retval = 0;
+ struct pci_bus *bus; /* Fake bus and device */
+ struct pci_dev *dev;
if (pci_probe & PCI_NO_CHECKS)
return 1;
- bus.number = 0;
- dev.bus = &bus;
- for(dev.devfn=0; dev.devfn < 0x100; dev.devfn++)
- if ((!o->read(&bus, dev.devfn, PCI_CLASS_DEVICE, 2, &x) &&
+
+ bus = kmalloc(sizeof(*bus), GFP_ATOMIC);
+ dev = kmalloc(sizeof(*dev), GFP_ATOMIC);
+ if (!bus || !dev) {
+ printk(KERN_ERR "Out of memory in %s\n", __FUNCTION__);
+ goto exit;
+ }
+
+ bus->number = 0;
+ dev->bus = bus;
+ for(dev->devfn=0; dev->devfn < 0x100; dev->devfn++)
+ if ((!o->read(bus, dev->devfn, PCI_CLASS_DEVICE, 2, &x) &&
(x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)) ||
- (!o->read(&bus, dev.devfn, PCI_VENDOR_ID, 2, &x) &&
- (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ)))
- return 1;
+ (!o->read(bus, dev->devfn, PCI_VENDOR_ID, 2, &x) &&
+ (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ))) {
+ retval = 1;
+ goto exit;
+ }
DBG("PCI: Sanity check failed\n");
- return 0;
+exit:
+ kfree(dev);
+ kfree(bus);
+ return retval;
}
static int __init pci_direct_init(void)
unsigned int tmp;
unsigned long flags;
- local_save_flags(flags); local_irq_disable();
+ local_irq_save(flags);
/*
* Check if configuration type 1 works.
}
local_irq_restore(flags);
- pci_root_ops = NULL;
return 0;
}
*/
if (busmap[i] && pci_scan_bus(i, pci_root_bus->ops, NULL))
printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
- pcibios_last_bus = -1;
+ //pcibios_last_bus = -1;
}
/*
{
irq = read_config_nybble(router, 0x56, pirq - 1);
}
- printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d get irq : %2d\n",
+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
return irq;
}
static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
if (pirq <= 4)
{
/*
* legacy.c - traditional, old school PCI bus probing
*/
-#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/pci.h>
#include "pci.h"
/*
void __devinit pcibios_fixup_peer_bridges(void)
{
int n;
- struct pci_bus bus;
- struct pci_dev dev;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
u16 l;
- if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
+ if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
+
+ bus = kmalloc(sizeof(*bus), GFP_ATOMIC);
+ dev = kmalloc(sizeof(*dev), GFP_ATOMIC);
+ if (!bus || !dev) {
+ printk(KERN_ERR "Out of memory in %s\n", __FUNCTION__);
+ goto exit;
+ }
+
for (n=0; n <= pcibios_last_bus; n++) {
if (pci_bus_exists(&pci_root_buses, n))
continue;
- bus.number = n;
- bus.ops = pci_root_ops;
- dev.bus = &bus;
- for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
- if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
+ bus->number = n;
+ bus->ops = pci_root_ops;
+ dev->bus = bus;
+ for (dev->devfn=0; dev->devfn<256; dev->devfn += 8)
+ if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
- DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
+ DBG("Found device at %02x:%02x [%04x]\n", n, dev->devfn, l);
printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
pci_scan_bus(n, pci_root_ops, NULL);
break;
}
}
+exit:
+ kfree(dev);
+ kfree(bus);
}
static int __init pci_legacy_init(void)
extern int pcibios_scanned;
extern spinlock_t pci_config_lock;
-void pcibios_fixup_irqs(void);
int pirq_enable_irq(struct pci_dev *dev);
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
static inline void set_seg_base(unsigned cpu, int entry, void *base)
{
struct desc_struct *d = &cpu_gdt_table[cpu][entry];
- d->base0 = PTR_LOW(base);
- d->base1 = PTR_MIDDLE(base);
- d->base2 = PTR_HIGH(base);
+ u32 addr = (u32)(u64)base;
+ BUG_ON((u64)base >> 32);
+ d->base0 = addr & 0xffff;
+ d->base1 = (addr >> 16) & 0xff;
+ d->base2 = (addr >> 24) & 0xff;
}
#define LDT_entry_a(info) \
#define IA32_PAGE_OFFSET 0xffffe000
#define IA32_STACK_TOP IA32_PAGE_OFFSET
+#ifdef __KERNEL__
+struct user_desc;
+int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info);
+int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info);
+int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
+#endif
+
#endif /* !CONFIG_IA32_SUPPORT */
#endif
extern unsigned long end_pfn;
extern unsigned long table_start, table_end;
-struct thread_struct;
-struct user_desc;
+extern void syscall_init(void);
-int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info);
-int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info);
+struct pt_regs;
+
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))
#define PTRACE_GETFPXREGS 18
#define PTRACE_SETFPXREGS 19
+/* only useful for access 32bit programs */
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
+#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#define user_mode(regs) (!!((regs)->cs & 3))
#define __KERNEL32_CS 0x38
-#define __USER_LONGBASE ((GDT_ENTRY_LONGBASE * 8) | 3)
-
/*
* we cannot use the same code segment descriptor for user and kernel
* -- not even in the long flat mode, because of different DPL /kkeil
#define GDT_ENTRY_TLS_ENTRIES 3
+/* TLS indexes for 64bit - hardcoded in arch_prctl */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+
#define IDT_ENTRIES 256
#define GDT_ENTRIES 16
#define GDT_SIZE (GDT_ENTRIES * 8)
/* image of the saved processor state */
struct saved_context {
u16 ds, es, fs, gs, ss;
- unsigned long gs_base, fs_base;
+ unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4;
u16 gdt_pad;
u16 gdt_limit;
* User space memory access functions
*/
#include <linux/config.h>
+#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#define access_ok(type,addr,size) (__range_not_ok(addr,size) == 0)
-extern inline int verify_area(int type, const void * addr, unsigned long size)
+extern __force_inline int verify_area(int type, const void * addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
extern unsigned long copy_to_user(void *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void *from, unsigned len);
-static inline int __copy_from_user(void *dst, const void *src, unsigned size)
+static __force_inline int __copy_from_user(void *dst, const void *src, unsigned size)
{
if (!__builtin_constant_p(size))
return copy_user_generic(dst,src,size);
}
}
-static inline int __copy_to_user(void *dst, const void *src, unsigned size)
+static __force_inline int __copy_to_user(void *dst, const void *src, unsigned size)
{
if (!__builtin_constant_p(size))
return copy_user_generic(dst,src,size);
#define __NR_sched_getaffinity 204
__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
#define __NR_set_thread_area 205
-__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
+__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
#define __NR_io_setup 206
__SYSCALL(__NR_io_setup, sys_io_setup)
#define __NR_io_destroy 207
#define __NR_io_cancel 210
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#define __NR_get_thread_area 211
-__SYSCALL(__NR_get_thread_area, sys_get_thread_area)
+__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
#define __NR_lookup_dcookie 212
__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
#define __NR_epoll_create 213