git.neil.brown.name Git - history.git/commitdiff
[PATCH] make raid5 checksums preempt-safe, take two
author: Robert Love <rml@tech9.net>
Thu, 29 Aug 2002 06:24:16 +0000 (23:24 -0700)
committer: Linus Torvalds <torvalds@penguin.transmeta.com>
Thu, 29 Aug 2002 06:24:16 +0000 (23:24 -0700)
The raid5 xor checksums use MMX/SSE state and are not preempt-safe.

Attached patch disables preemption in FPU_SAVE and XMMS_SAVE and
restores it in FPU_RESTORE and XMMS_RESTORE - preventing preemption
while in fp mode.

include/asm-i386/xor.h
include/asm-x86_64/xor.h

index 3e022e344b06ce37fb9541de4bd432af7c2c6063..29b02468e0e16827d18eea65eea765c1fd14d26a 100644 (file)
@@ -20,6 +20,7 @@
 
 #define FPU_SAVE                                                       \
   do {                                                                 \
+       preempt_disable();                                              \
        if (!test_thread_flag(TIF_USEDFPU))                             \
                __asm__ __volatile__ (" clts;\n");                      \
        __asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0]));    \
@@ -30,6 +31,7 @@
        __asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0]));         \
        if (!test_thread_flag(TIF_USEDFPU))                             \
                stts();                                                 \
+       preempt_enable();                                               \
   } while (0)
 
 #define LD(x,y)                "       movq   8*("#x")(%1), %%mm"#y"   ;\n"
@@ -542,7 +544,8 @@ static struct xor_block_template xor_block_p5_mmx = {
  * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
  */
 
-#define XMMS_SAVE                              \
+#define XMMS_SAVE do {                         \
+       preempt_disable();                      \
        __asm__ __volatile__ (                  \
                "movl %%cr0,%0          ;\n\t"  \
                "clts                   ;\n\t"  \
@@ -552,9 +555,10 @@ static struct xor_block_template xor_block_p5_mmx = {
                "movups %%xmm3,0x30(%1) ;\n\t"  \
                : "=&r" (cr0)                   \
                : "r" (xmm_save)                \
-               : "memory")
+               : "memory");                    \
+} while(0)
 
-#define XMMS_RESTORE                           \
+#define XMMS_RESTORE do {                      \
        __asm__ __volatile__ (                  \
                "sfence                 ;\n\t"  \
                "movups (%1),%%xmm0     ;\n\t"  \
@@ -564,7 +568,9 @@ static struct xor_block_template xor_block_p5_mmx = {
                "movl   %0,%%cr0        ;\n\t"  \
                :                               \
                : "r" (cr0), "r" (xmm_save)     \
-               : "memory")
+               : "memory");                    \
+       preempt_enable();                       \
+} while(0)
 
 #define ALIGN16 __attribute__((aligned(16)))
 
index d46897456538584719b31395553a57f7b35bbd22..d7d59512c833e176b484761807f08437c0bcf1f8 100644 (file)
@@ -37,8 +37,9 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
 
 /* Doesn't use gcc to save the XMM registers, because there is no easy way to 
    tell it to do a clts before the register saving. */
-#define XMMS_SAVE                              \
-       asm volatile (                  \
+#define XMMS_SAVE do {                         \
+       preempt_disable();                      \
+       asm volatile (                          \
                "movq %%cr0,%0          ;\n\t"  \
                "clts                   ;\n\t"  \
                "movups %%xmm0,(%1)     ;\n\t"  \
@@ -47,10 +48,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
                "movups %%xmm3,0x30(%1) ;\n\t"  \
                : "=r" (cr0)                    \
                : "r" (xmm_save)                \
-               : "memory")
+               : "memory");                    \
+} while(0)
 
 #define XMMS_RESTORE                           \
-       asm volatile (                  \
+       asm volatile (                          \
                "sfence                 ;\n\t"  \
                "movups (%1),%%xmm0     ;\n\t"  \
                "movups 0x10(%1),%%xmm1 ;\n\t"  \
@@ -59,7 +61,9 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
                "movq   %0,%%cr0        ;\n\t"  \
                :                               \
                : "r" (cr0), "r" (xmm_save)     \
-               : "memory")
+               : "memory");                    \
+       preempt_enable();                       \
+} while(0)
 
 #define OFFS(x)                "16*("#x")"
 #define PF_OFFS(x)     "256+16*("#x")"