Add a warning if enable_kernel_{fp,altivec} is called with preempt
enabled since this is always an error, and make sure the alignment
exception handler properly disables preempt when doing FP operations.
* the kernel with -msoft-float so it doesn't use the
* fp regs for copying 8-byte objects. */
case LD+F+S:
+ preempt_disable();
enable_kernel_fp();
		cvt_fd(&data.f, &current->thread.fpr[reg], &current->thread.fpscr);
/* current->thread.fpr[reg] = data.f; */
+ preempt_enable();
break;
case ST+F+S:
+ preempt_disable();
enable_kernel_fp();
		cvt_df(&current->thread.fpr[reg], &data.f, &current->thread.fpscr);
/* data.f = current->thread.fpr[reg]; */
+ preempt_enable();
break;
default:
printk("align: can't handle flags=%x\n", flags);
void
enable_kernel_altivec(void)
{
- preempt_disable();
+ WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
#else
giveup_altivec(last_task_used_altivec);
#endif /* __SMP __ */
- preempt_enable();
}
+EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */
void
enable_kernel_fp(void)
{
- preempt_disable();
+ WARN_ON(current_thread_info()->preempt_count == 0 && !irqs_disabled());
+
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
#else
giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
- preempt_enable();
}
+EXPORT_SYMBOL(enable_kernel_fp);
int
dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)