git.neil.brown.name Git - history.git/commitdiff
[PATCH] preempt-safe net/ code
author Robert Love <rml@tech9.net>
Tue, 28 May 2002 09:22:13 +0000 (02:22 -0700)
committer Linus Torvalds <torvalds@home.transmeta.com>
Tue, 28 May 2002 09:22:13 +0000 (02:22 -0700)
This fixes three locations in net/ where per-CPU data could bite us
under preemption.  This is the result of an audit I did and should
constitute all of the unsafe code in net/.

In net/core/skbuff.c I did not have to introduce any code - just
rearrange the grabbing of smp_processor_id() to be in the interrupt off
region.  Pretty clean fixes.

Note in the future we can use put_cpu() and get_cpu() to grab the CPU#
safely.  I will send a patch to Marcelo so we can have a 2.4 version
(which doesn't do the preempt stuff), too...

net/core/dev.c
net/core/skbuff.c

index 8c340f76aa568f3500c333781866767fcbc9b628..53b2c9c4104a9b015a12adc5e9d926658fe93063 100644 (file)
@@ -1047,9 +1047,15 @@ int dev_queue_xmit(struct sk_buff *skb)
                int cpu = smp_processor_id();
 
                if (dev->xmit_lock_owner != cpu) {
+                       /*
+                        * The spin_lock effectivly does a preempt lock, but 
+                        * we are about to drop that...
+                        */
+                       preempt_disable();
                        spin_unlock(&dev->queue_lock);
                        spin_lock(&dev->xmit_lock);
                        dev->xmit_lock_owner = cpu;
+                       preempt_enable();
 
                        if (!netif_queue_stopped(dev)) {
                                if (netdev_nit)
index 9c2bc53758802aac813c9c471653a3a90298b24a..1c0aacd9d9112c13e1dd18962dd6c7b4ee5d1860 100644 (file)
@@ -111,33 +111,37 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 
 static __inline__ struct sk_buff *skb_head_from_pool(void)
 {
-       struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+       struct sk_buff_head *list;
+       struct sk_buff *skb = NULL;
+       unsigned long flags;
 
-       if (skb_queue_len(list)) {
-               struct sk_buff *skb;
-               unsigned long flags;
+       local_irq_save(flags);
 
-               local_irq_save(flags);
+       list = &skb_head_pool[smp_processor_id()].list;
+
+       if (skb_queue_len(list))
                skb = __skb_dequeue(list);
-               local_irq_restore(flags);
-               return skb;
-       }
-       return NULL;
+
+       local_irq_restore(flags);
+       return skb;
 }
 
 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
 {
-       struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
+       struct sk_buff_head *list;
+       unsigned long flags;
 
-       if (skb_queue_len(list) < sysctl_hot_list_len) {
-               unsigned long flags;
+       local_irq_save(flags);
+       list = &skb_head_pool[smp_processor_id()].list;
 
-               local_irq_save(flags);
+       if (skb_queue_len(list) < sysctl_hot_list_len) {
                __skb_queue_head(list, skb);
                local_irq_restore(flags);
 
                return;
        }
+
+       local_irq_restore(flags);
        kmem_cache_free(skbuff_head_cache, skb);
 }