void down(struct semaphore *sem)
{
+ might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
{
int ret = 0;
+ might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
static inline void __down(struct semaphore *sem)
{
- long count = atomic_dec_return(&sem->count);
+ long count;
+ might_sleep();
+ count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
__down_failed(sem);
}
static inline int __down_interruptible(struct semaphore *sem)
{
- long count = atomic_dec_return(&sem->count);
+ long count;
+ might_sleep();
+ count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
return __down_failed_interruptible(sem);
return 0;
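The reason might_sleep() is hoisted above the atomic decrement, rather than placed on the failure path, is that the debug check should fire even on the uncontended fast path, where __down_failed() is never reached. A sketch of the bug class this catches under CONFIG_DEBUG_SPINLOCK_SLEEP (illustrative only; some_lock and some_sem are hypothetical):

	spin_lock(&some_lock);		/* now in atomic context */
	down(&some_sem);		/* might_sleep() warns here, even if
					 * the semaphore is free and down()
					 * never actually sleeps */
	spin_unlock(&some_lock);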
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__down_op(sem, __down_failed);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
return __down_op_ret(sem, __down_interruptible_failed);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__down_op(sem, __down_failed);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
return __down_op_ret(sem, __down_interruptible_failed);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
/* atomically decrement the semaphore's count, and if it's negative, we wait */
local_save_flags(flags);
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
/* atomically decrement the semaphore's count, and if it's negative, we wait */
local_save_flags(flags);
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
count = &(sem->count);
__asm__ __volatile__(
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
count = &(sem->count);
__asm__ __volatile__(
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
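For reference, the counting convention these fast paths rely on: a non-negative count is the number of tasks that may still acquire the semaphore, and a decrement that takes it negative means the caller must block in __down()/__down_interruptible() until an up() releases it. A minimal sketch of the idea, with a plain int standing in for the atomic_t (illustration only, not kernel code):

	int count = 1;			/* one holder allowed */

	void down_sketch(void)
	{
		if (--count < 0)	/* went negative: contended */
			;		/* sleep until up_sketch() */
	}

	void up_sketch(void)
	{
		if (++count <= 0)	/* still <= 0: someone is asleep */
			;		/* wake one waiter */
	}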
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"subql #1,%0@\n\t"
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic interruptible down operation\n\t"
"subql #1,%1@\n\t"
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"movel %0, %%a1\n\t"
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
__asm__ __volatile__(
"| atomic down operation\n\t"
"movel %1, %%a1\n\t"
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
-
+ might_sleep();
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
static inline void down(struct semaphore * sem)
{
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
{
int ret = 0;
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
ptr = &(sem->count.counter);
increment = 1;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
ptr = &(sem->count.counter);
increment = 1;
extern inline void down (struct semaphore * sem)
{
+ might_sleep();
if (atomic_dec_return (&sem->count) < 0)
__down (sem);
}
extern inline int down_interruptible (struct semaphore * sem)
{
int ret = 0;
+ might_sleep();
if (atomic_dec_return (&sem->count) < 0)
ret = __down_interruptible (sem);
return ret;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
__asm__ __volatile__(
"# atomic down operation\n\t"
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
+ might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
#define might_sleep() __might_sleep(__FILE__, __LINE__)
+#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
#else
#define might_sleep() do {} while(0)
+#define might_sleep_if(cond) do {} while (0)
#endif
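For context, __might_sleep() is the out-of-line helper both macros resolve to (it lives in kernel/sched.c). A simplified sketch of what it does when CONFIG_DEBUG_SPINLOCK_SLEEP is set; the real function also rate-limits the message and reports the in_atomic()/irqs_disabled() state:

	void __might_sleep(char *file, int line)
	{
		if (in_atomic() || irqs_disabled()) {
			printk(KERN_ERR "Debug: sleeping function called "
			       "from invalid context at %s:%d\n", file, line);
			dump_stack();
		}
	}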
extern struct notifier_block *panic_notifier_list;
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
+ might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb);
*/
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
+ might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
kfree_skb(skb); /* Free our shared copy */
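In the skbuff helpers the check has to be conditional: skb_clone() and skb_copy() may only sleep when the caller's allocation priority includes __GFP_WAIT, so might_sleep_if(pri & __GFP_WAIT) keeps GFP_ATOMIC callers (e.g. in softirq context) from raising false warnings. Illustrative contrast:

	nskb = skb_clone(skb, GFP_ATOMIC);	/* no __GFP_WAIT: check skipped */
	nskb = skb_clone(skb, GFP_KERNEL);	/* __GFP_WAIT: might_sleep() runs */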
int do_retry;
struct reclaim_state reclaim_state;
- if (wait)
- might_sleep();
+ might_sleep_if(wait);
cold = 0;
if (gfp_mask & __GFP_COLD)
struct pte_chain *ret;
struct pte_chain **pte_chainp;
- if (gfp_flags & __GFP_WAIT)
- might_sleep();
+ might_sleep_if(gfp_flags & __GFP_WAIT);
pte_chainp = &get_cpu_var(local_pte_chain);
if (*pte_chainp) {
static inline void
cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
{
- if (flags & __GFP_WAIT)
- might_sleep();
+ might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
kmem_flagcheck(cachep, flags);
#endif
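The mm call sites above (the page allocator, pte_chain_alloc() and the slab allocator) all replace the same open-coded two-line test with the new macro; behaviour is unchanged, the check just lives in one expression. Note the useful side effect: a blocking allocation made while atomic now warns immediately, even when it would in fact have been satisfied without sleeping (sketch; lock and cachep are hypothetical):

	spin_lock(&lock);
	obj = kmem_cache_alloc(cachep, GFP_KERNEL);	/* warns: __GFP_WAIT
							 * passed while atomic,
							 * even if the object
							 * comes from a per-cpu
							 * cache */
	spin_unlock(&lock);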