if (idx >= __BR_END)
__br_lock_usage_bug();
- read_lock(&__brlock_array[smp_processor_id()][idx]);
+ preempt_disable();
+ _raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
}
#else /* ! __BRLOCK_USE_ATOMICS */

static inline void br_read_lock (enum brlock_indices idx)
{
	...
	if (idx >= __BR_END)
		__br_lock_usage_bug();
+	preempt_disable();
	ctr = &__brlock_array[smp_processor_id()][idx];
	lock = &__br_write_locks[idx].lock;
again:
	...

static inline void br_read_unlock (enum brlock_indices idx)
{
	...
	ctr = &__brlock_array[smp_processor_id()][idx];
	wmb();
	(*ctr)--;
+	preempt_enable();
}
#endif /* __BRLOCK_USE_ATOMICS */
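On a preemptible kernel, the value returned by smp_processor_id() is only trustworthy while preemption is disabled; otherwise the task can be preempted and migrated to another CPU between reading the id and using it. The brlock hunks above apply exactly that rule around the per-CPU lock array. As a rough sketch of the same pattern in isolation (a hypothetical per-CPU counter, not code from the patch):

#include <linux/smp.h>

/* Hypothetical example: bump a per-CPU counter safely on a preemptible
 * kernel.  smp_processor_id() is only stable while preemption is off,
 * so bracket the access with preempt_disable()/preempt_enable(). */
static unsigned long hit_count[NR_CPUS];

static inline void count_hit(void)
{
	preempt_disable();		/* pin the task to this CPU */
	hit_count[smp_processor_id()]++;
	preempt_enable();		/* task may migrate again after this */
}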
static inline void __netif_schedule(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
unsigned long flags;
- int cpu = smp_processor_id();
+ int cpu;
local_irq_save(flags);
+ cpu = smp_processor_id();
dev->next_sched = softnet_data[cpu].output_queue;
softnet_data[cpu].output_queue = dev;
cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
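Each softnet hunk makes the same change: smp_processor_id() is read only after local_irq_save(), because with local interrupts off the task can no longer be preempted and migrated, so cpu indexes this CPU's softnet_data entry for the whole critical section. A condensed sketch of the two orderings (illustration only, not patch context):

	/* Racy on a preemptible kernel: the task can be preempted and
	 * moved to another CPU between these two lines, so "cpu" may
	 * point at the wrong softnet_data entry. */
	cpu = smp_processor_id();
	local_irq_save(flags);

	/* Safe: once local interrupts are off this task cannot be
	 * preempted, so the CPU number stays valid until
	 * local_irq_restore(). */
	local_irq_save(flags);
	cpu = smp_processor_id();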
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
if (atomic_dec_and_test(&skb->users)) {
- int cpu = smp_processor_id();
+ int cpu;
unsigned long flags;
local_irq_save(flags);
+ cpu = smp_processor_id();
skb->next = softnet_data[cpu].completion_queue;
softnet_data[cpu].completion_queue = skb;
cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
static inline void __netif_rx_schedule(struct net_device *dev)
{
unsigned long flags;
- int cpu = smp_processor_id();
+ int cpu;
local_irq_save(flags);
+ cpu = smp_processor_id();
dev_hold(dev);
list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
if (dev->quota < 0)
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
if (netif_rx_schedule_prep(dev)) {
unsigned long flags;
- int cpu = smp_processor_id();
+ int cpu;
dev->quota += undo;
local_irq_save(flags);
+ cpu = smp_processor_id();
list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
local_irq_restore(flags);
#define mod_page_state(member, delta) \
do { \
- preempt_disable(); \
- page_states[smp_processor_id()].member += (delta); \
- preempt_enable(); \
+ int cpu = get_cpu(); \
+ page_states[cpu].member += (delta); \
+ put_cpu(); \
} while (0)
#define inc_page_state(member) mod_page_state(member, 1UL)
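get_cpu() is shorthand for preempt_disable() followed by smp_processor_id(), and put_cpu() re-enables preemption, so the statistics update in mod_page_state() happens entirely on one known CPU. A minimal sketch of the same idiom in open code (hypothetical counter, not part of the patch):

#include <linux/smp.h>

/* Hypothetical per-CPU event counter updated with the same
 * get_cpu()/put_cpu() idiom that mod_page_state() uses. */
static unsigned long events[NR_CPUS];

static void note_event(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	events[cpu]++;
	put_cpu();		/* re-enables preemption */
}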