git.neil.brown.name Git - history.git/commitdiff
[PATCH] per-cpu data preempt-safing
authorRobert Love <rml@tech9.net>
Tue, 24 Sep 2002 08:37:38 +0000 (01:37 -0700)
committerLinus Torvalds <torvalds@home.transmeta.com>
Tue, 24 Sep 2002 08:37:38 +0000 (01:37 -0700)
This fixes unsafe access to per-CPU data via reordering of instructions or use
of "get_cpu()".

Before anyone balks at the brlock.h fix, note this was in the
alternative version of the code which is not used by default.

include/linux/brlock.h
include/linux/netdevice.h
include/linux/page-flags.h
mm/highmem.c

index db27bc1830d032c7cc58f62398a2782308b84abe..59880a3f38c6d4987f10860e56401f2262ff76be 100644 (file)
@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
        if (idx >= __BR_END)
                __br_lock_usage_bug();
 
-       read_lock(&__brlock_array[smp_processor_id()][idx]);
+       preempt_disable();
+       _raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }
 
 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
        if (idx >= __BR_END)
                __br_lock_usage_bug();
 
+       preempt_disable();
        ctr = &__brlock_array[smp_processor_id()][idx];
        lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 
        wmb();
        (*ctr)--;
+       preempt_enable();
 }
 #endif /* __BRLOCK_USE_ATOMICS */
 
index c73a00744bc8d462fec63a4c02a64e2d1d697757..93b4d5f1e64d38cca75495fb0ffbf0c83ee6efa5 100644 (file)
@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
                unsigned long flags;
-               int cpu = smp_processor_id();
+               int cpu;
 
                local_irq_save(flags);
+               cpu = smp_processor_id();
                dev->next_sched = softnet_data[cpu].output_queue;
                softnet_data[cpu].output_queue = dev;
                cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
        if (atomic_dec_and_test(&skb->users)) {
-               int cpu =smp_processor_id();
+               int cpu;
                unsigned long flags;
 
                local_irq_save(flags);
+               cpu = smp_processor_id();
                skb->next = softnet_data[cpu].completion_queue;
                softnet_data[cpu].completion_queue = skb;
                cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
        unsigned long flags;
-       int cpu = smp_processor_id();
+       int cpu;
 
        local_irq_save(flags);
+       cpu = smp_processor_id();
        dev_hold(dev);
        list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
        if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
        if (netif_rx_schedule_prep(dev)) {
                unsigned long flags;
-               int cpu = smp_processor_id();
+               int cpu;
 
                dev->quota += undo;
 
                local_irq_save(flags);
+               cpu = smp_processor_id();
                list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
                __cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
                local_irq_restore(flags);
index 5a4208b4651f746b9e1ea3c09fc800d9d8fb0c7f..0970c101c197f40b5b9fa4eed72314f33cebff63 100644 (file)
@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 
 #define mod_page_state(member, delta)                                  \
        do {                                                            \
-               preempt_disable();                                      \
-               page_states[smp_processor_id()].member += (delta);      \
-               preempt_enable();                                       \
+               int cpu = get_cpu();                                    \
+               page_states[cpu].member += (delta);                     \
+               put_cpu();                                              \
        } while (0)
 
 #define inc_page_state(member) mod_page_state(member, 1UL)
index 4e446a09463ebcf00c25d77320dd23abb1091031..11d599cacbd00c9a188a0b6753fd64dfeb4d5953 100644 (file)
@@ -472,6 +472,7 @@ void check_highmem_ptes(void)
 {
        int idx, type;
 
+       preempt_disable();
        for (type = 0; type < KM_TYPE_NR; type++) {
                idx = type + KM_TYPE_NR*smp_processor_id();
                if (!pte_none(*(kmap_pte-idx))) {
@@ -479,6 +480,7 @@ void check_highmem_ptes(void)
                        BUG();
                }
        }
+       preempt_enable();
 }
 #endif