git.neil.brown.name Git - history.git/commitdiff
[PATCH] Clean up __cacheline_aligned
author Alexander Viro <viro@parcelfarce.linux.theplanet.co.uk>
Thu, 26 Feb 2004 02:07:47 +0000 (18:07 -0800)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Thu, 26 Feb 2004 02:07:47 +0000 (18:07 -0800)
arm-26, ppc, sparc, sparc64 and sh have per-arch definitions of
__cacheline_aligned that are identical to default.  And yes, removal is
safe - all users of __cacheline_aligned actually pull linux/cache.h in.

include/asm-arm26/cache.h
include/asm-ppc/cache.h
include/asm-sh/cache.h
include/asm-sparc/cache.h
include/asm-sparc64/cache.h

index d95112e028a640a32dad499a12647f8fe88da513..f52ca1b808cd02398efbf7f3d582e30c21673e47 100644 (file)
@@ -8,12 +8,4 @@
 #define        L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define        SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned                                    \
-  __attribute__((__aligned__(L1_CACHE_BYTES),                  \
-                __section__(".data.cacheline_aligned")))
-#endif
-
 #endif
index 6a3f4c05dc6777e5c39b2926519243a983ab7bb4..1fcf0f3e7b87c3633b66b1d294f3e9a59455d457 100644 (file)
 #define        L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define        L1_CACHE_PAGES          8
 
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned                                    \
-  __attribute__((__aligned__(L1_CACHE_BYTES),                  \
-                __section__(".data.cacheline_aligned")))
-#endif
-
 #ifndef __ASSEMBLY__
 extern void clean_dcache_range(unsigned long start, unsigned long stop);
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
index f8cf61f9869a5adf8d4f83c38d9c8c13109e9908..9decb1ced21783cfca4b7d57901a7bc495f2e2a6 100644 (file)
 
 #define L1_CACHE_ALIGN(x)      (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned                                    \
-  __attribute__((__aligned__(L1_CACHE_BYTES),                  \
-                __section__(".data.cacheline_aligned")))
-#endif
-
 #define L1_CACHE_SHIFT_MAX     5       /* largest L1 which this arch supports */
 
 struct cache_info {
index 86fd491b42a65e60d16164f38de92fd749e95a81..e6316fd7e1a4085880796216fc45e9368b90c391 100644 (file)
 
 #define SMP_CACHE_BYTES 32
 
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#else
-#define __cacheline_aligned                                    \
-  __attribute__((__aligned__(SMP_CACHE_BYTES),                 \
-                __section__(".data.cacheline_aligned")))
-#endif
-
 /* Direct access to the instruction cache is provided through and
  * alternate address space.  The IDC bit must be off in the ICCR on
  * HyperSparcs for these accesses to work.  The code below does not do
index c4ba581b7af14ca6b017fa8599749a0844774e2d..ade5ec3bfd5a23ca18e08acc4185ed3d908589b1 100644 (file)
 #define        SMP_CACHE_BYTES_SHIFT   6
 #define        SMP_CACHE_BYTES         (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
 
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#else
-#define __cacheline_aligned                                    \
-  __attribute__((__aligned__(SMP_CACHE_BYTES),                 \
-                __section__(".data.cacheline_aligned")))
-#endif
-
 #endif