int cpu = smp_processor_id();
if (dev->xmit_lock_owner != cpu) {
+ /*
+ * The spin_lock effectively does a preempt lock, but
+ * we are about to drop that...
+ */
+ preempt_disable();
spin_unlock(&dev->queue_lock);
spin_lock(&dev->xmit_lock);
dev->xmit_lock_owner = cpu;
+ preempt_enable();
if (!netif_queue_stopped(dev)) {
if (netdev_nit)
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+ struct sk_buff_head *list;
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
- if (skb_queue_len(list)) {
- struct sk_buff *skb;
- unsigned long flags;
+ /*
+  * Disable local interrupts *before* reading smp_processor_id():
+  * with a preemptible kernel the task could otherwise migrate to
+  * another CPU between computing the per-CPU hot-list pointer and
+  * dequeueing from it, corrupting the (unlocked) per-CPU list.
+  * NOTE(review): assumes local_irq_save also pins the CPU here —
+  * true on this kernel since irqs-off implies no preemption.
+  */
+ local_irq_save(flags);
- local_irq_save(flags);
+ list = &skb_head_pool[smp_processor_id()].list;
+
+ if (skb_queue_len(list))
skb = __skb_dequeue(list);
- local_irq_restore(flags);
- return skb;
- }
- return NULL;
+
+ local_irq_restore(flags);
+ return skb;
}
static __inline__ void skb_head_to_pool(struct sk_buff *skb)
{
- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+ struct sk_buff_head *list;
+ unsigned long flags;
- if (skb_queue_len(list) < sysctl_hot_list_len) {
- unsigned long flags;
+ /*
+  * Same CPU-pinning fix as skb_head_from_pool(): take
+  * smp_processor_id() only after irqs are disabled, so the task
+  * cannot migrate CPUs between picking the per-CPU hot list and
+  * queueing onto it.
+  */
+ local_irq_save(flags);
+ list = &skb_head_pool[smp_processor_id()].list;
- local_irq_save(flags);
+ if (skb_queue_len(list) < sysctl_hot_list_len) {
__skb_queue_head(list, skb);
local_irq_restore(flags);
return;
}
+
+ /* Hot list full: restore irqs, then free back to the slab cache. */
+ local_irq_restore(flags);
kmem_cache_free(skbuff_head_cache, skb);
}