if (fpu_owner)
ia64_flush_fph(fpu_owner);
-
- ia64_set_fpu_owner(current);
}
#endif /* !CONFIG_SMP */
+ ia64_set_fpu_owner(current);
if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
__ia64_load_fpu(current->thread.fph);
psr->mfh = 0;
__u64 map_base; /* base address for get_unmapped_area() */
__u64 task_size; /* limit for task size */
struct siginfo *siginfo; /* current siginfo struct for ptrace() */
+ __u64 last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
#ifdef CONFIG_IA32_SUPPORT
__u64 eflag; /* IA32 EFLAGS reg */
}
}
-#ifndef CONFIG_SMP
-
/*
 * Return the task (if any) recorded as the owner of the FPU high
 * partition (f32-f127) on this CPU.  The owner is stashed in kernel
 * register IA64_KR_FPU_OWNER by ia64_set_fpu_owner(); the new SMP
 * switch_to() compares this against `next` to decide whether the
 * high FP registers are still live on this CPU.
 *
 * NOTE(review): the original body called ia64_set_kr() with an
 * undefined variable `t` and fell off the end of a pointer-returning
 * function (undefined behavior).  That body belongs to
 * ia64_set_fpu_owner(); this getter must read the kernel register
 * back and return it.
 */
static inline struct task_struct *
ia64_get_fpu_owner (void)
{
	return (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
}
-#endif /* !CONFIG_SMP */
-
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
* task->thread.fph, avoiding the complication of having to fetch
* the latest fph state from another CPU.
*/
-# define switch_to(prev,next,last) do { \
- if (ia64_psr(ia64_task_regs(prev))->mfh) { \
- ia64_psr(ia64_task_regs(prev))->mfh = 0; \
- (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
- __ia64_save_fpu((prev)->thread.fph); \
- } \
- ia64_psr(ia64_task_regs(prev))->dfh = 1; \
- __switch_to(prev,next,last); \
/*
 * (review note — patch hunk, `+` prefixes are diff markers, kept verbatim)
 *
 * SMP-aware switch_to():
 *  - Switch-out: if `prev` modified the high FP partition (psr.mfh set),
 *    save f32-f127 into prev->thread.fph, mark IA64_THREAD_FPH_VALID,
 *    and record which CPU did the save in thread.last_fph_cpu (the new
 *    field added earlier in this patch).
 *  - Switch-in: if `next` has valid saved fph state, re-enable the high
 *    partition directly (dfh=0, mfh=0) ONLY when we are still on the CPU
 *    that saved it AND the per-CPU FPU-owner kernel register still names
 *    `next` — i.e. the registers are provably still live here.  Otherwise
 *    set dfh=1 so access to f32-f127 traps and the state is reloaded
 *    from thread.fph lazily (presumably via the disabled-FP fault
 *    handler — the fph-load code appears earlier in this patch).
 */
+# define switch_to(prev,next,last) do { \
+ if (ia64_psr(ia64_task_regs(prev))->mfh) { \
+ ia64_psr(ia64_task_regs(prev))->mfh = 0; \
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
+ __ia64_save_fpu((prev)->thread.fph); \
+ (prev)->thread.last_fph_cpu = smp_processor_id(); \
+ } \
+ if ((next)->thread.flags & IA64_THREAD_FPH_VALID) { \
+ if (((next)->thread.last_fph_cpu == smp_processor_id()) \
+ && (ia64_get_fpu_owner() == next)) \
+ { \
+ ia64_psr(ia64_task_regs(next))->dfh = 0; \
+ ia64_psr(ia64_task_regs(next))->mfh = 0; \
+ } else \
+ ia64_psr(ia64_task_regs(next))->dfh = 1; \
+ } \
+ __switch_to(prev,next,last); \
} while (0)
#else
# define switch_to(prev,next,last) do { \