} \
})
-#define ia64_fetch_and_add(i,v) \
-({ \
- __u64 _tmp; \
- volatile __typeof__(*(v)) *_v = (v); \
- switch (i) { \
- case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
- case -8: IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); break; \
- case -4: IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); break; \
- case -1: IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); break; \
- case 1: IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); break; \
- case 4: IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); break; \
- case 8: IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); break; \
- case 16: IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); break; \
- default: \
- _tmp = __bad_increment_for_ia64_fetch_and_add(); \
- break; \
- } \
- (__typeof__(*(v))) (_tmp + (i)); /* return new value */ \
+/*
+ * Atomically add increment "i" to *v and return the NEW value of *v.
+ * "i" must be a compile-time constant equal to one of the increments the
+ * fetchadd instruction supports: -16, -8, -4, -2, -1, 1, 2, 4, 8, or 16
+ * (note -2/2 are newly handled below; the old switch lacked them).  Any
+ * other value resolves to the undefined function
+ * __bad_increment_for_ia64_fetch_and_add(), turning the mistake into a
+ * link-time error.  _tmp receives the value *v held BEFORE the add
+ * (presumably fetchadd's fetch-old semantics -- hence the "+ (i)" to
+ * produce the new value; confirm against IA64_FETCHADD).
+ */
+#define ia64_fetch_and_add(i,v) \
+({ \
+ __u64 _tmp; \
+ volatile __typeof__(*(v)) *_v = (v); \
+ /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
+ if ((i) == -16) \
+ IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); \
+ else if ((i) == -8) \
+ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \
+ else if ((i) == -4) \
+ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \
+ else if ((i) == -2) \
+ IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v))); \
+ else if ((i) == -1) \
+ IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \
+ else if ((i) == 1) \
+ IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \
+ else if ((i) == 2) \
+ IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v))); \
+ else if ((i) == 4) \
+ IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \
+ else if ((i) == 8) \
+ IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); \
+ else if ((i) == 16) \
+ IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); \
+ else \
+ _tmp = __bad_increment_for_ia64_fetch_and_add(); \
+ (__typeof__(*(v))) (_tmp + (i)); /* return new value */ \
/*
* waiting (in which case it goes to sleep).
*/
-#ifndef _IA64_RWSEM_H
-#define _IA64_RWSEM_H
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
-#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-static inline void init_rwsem(struct rw_semaphore *sem)
+static inline void
+init_rwsem (struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
/*
* lock for reading
*/
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void
+__down_read (struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
/*
* lock for writing
*/
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void
+__down_write (struct rw_semaphore *sem)
{
int old, new;
/*
* unlock after reading
*/
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void
+__up_read (struct rw_semaphore *sem)
{
int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
/*
* unlock after writing
*/
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void
+__up_write (struct rw_semaphore *sem)
{
int old, new;
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int
+__down_read_trylock (struct rw_semaphore *sem)
{
int tmp;
while ((tmp = sem->count) >= 0) {
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int
+__down_write_trylock (struct rw_semaphore *sem)
{
int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
* downgrade write lock to read lock
*/
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void
+__downgrade_write (struct rw_semaphore *sem)
{
int old, new;
}
/*
- * implement atomic add functionality
+ * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
+ * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
+ *
+ * rwsem_atomic_add(delta, sem):    atomically add delta to sem->count (no return value).
+ * rwsem_atomic_update(delta, sem): atomically add delta and return the new count.
+ *
+ * NOTE(review): both cast &sem->count to (atomic_t *), which assumes the count
+ * field is layout-compatible with atomic_t's counter -- verify against the
+ * struct rw_semaphore definition for this architecture.
 */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
- atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
- return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
+#define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
-#endif /* __KERNEL__ */
-#endif /* _IA64_RWSEM_H */
+#endif /* _ASM_IA64_RWSEM_H */