From 83cccd0ebc57f310f9fed36c3bc8d97baf20b91c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 23 Aug 2002 17:56:54 +0200 Subject: [PATCH] Cleanup BKL handling and move kernel_flag definition to common code --- arch/alpha/kernel/alpha_ksyms.c | 2 - arch/alpha/kernel/smp.c | 2 - arch/arm/kernel/armksyms.c | 4 -- arch/arm/kernel/setup.c | 4 -- arch/i386/kernel/i386_ksyms.c | 1 - arch/i386/kernel/smp.c | 4 +- arch/ia64/kernel/ia64_ksyms.c | 4 -- arch/ia64/kernel/smp.c | 8 ---- arch/mips/kernel/smp.c | 1 - arch/mips64/kernel/smp.c | 1 - arch/parisc/kernel/parisc_ksyms.c | 3 -- arch/ppc/kernel/ppc_ksyms.c | 3 -- arch/ppc/kernel/smp.c | 1 - arch/ppc64/kernel/ppc_ksyms.c | 1 - arch/ppc64/kernel/smp.c | 1 - arch/s390/kernel/smp.c | 3 -- arch/s390x/kernel/smp.c | 3 -- arch/sparc/kernel/smp.c | 3 -- arch/sparc/kernel/sparc_ksyms.c | 7 ---- arch/sparc64/kernel/smp.c | 3 -- arch/sparc64/kernel/sparc64_ksyms.c | 8 +--- arch/x86_64/kernel/smp.c | 3 -- arch/x86_64/kernel/x8664_ksyms.c | 1 - include/linux/smp_lock.h | 61 +++++++++++++---------------- kernel/ksyms.c | 4 ++ kernel/sched.c | 15 ++++++- 26 files changed, 48 insertions(+), 103 deletions(-) diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index c8a663867540..f9797ec48684 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -40,7 +40,6 @@ extern struct hwrpb_struct *hwrpb; extern void dump_thread(struct pt_regs *, struct user *); extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); -extern spinlock_t kernel_flag; extern spinlock_t rtc_lock; /* these are C runtime functions with special calling conventions: */ @@ -207,7 +206,6 @@ EXPORT_SYMBOL(up); */ #ifdef CONFIG_SMP -EXPORT_SYMBOL(kernel_flag); EXPORT_SYMBOL(synchronize_irq); EXPORT_SYMBOL(flush_tlb_all); EXPORT_SYMBOL(flush_tlb_mm); diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index eb8ff9eec6d9..b328c280aa95 100644 --- a/arch/alpha/kernel/smp.c +++ 
b/arch/alpha/kernel/smp.c @@ -67,8 +67,6 @@ enum ipi_message_type { IPI_CPU_STOP, }; -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - /* Set to a secondary's cpuid when it comes online. */ static int smp_secondary_alive __initdata = 0; diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 8138a71d3626..9d287938c27e 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -273,7 +273,3 @@ EXPORT_SYMBOL_NOVERS(__down_trylock_failed); EXPORT_SYMBOL_NOVERS(__up_wakeup); EXPORT_SYMBOL(get_wchan); - -#ifdef CONFIG_PREEMPT -EXPORT_SYMBOL(kernel_flag); -#endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index f87230487d6b..4baecfe247e5 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -36,10 +36,6 @@ #define MEM_SIZE (16*1024*1024) #endif -#ifdef CONFIG_PREEMPT -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; -#endif - #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) char fpe_type[8]; diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index d31b096b4da7..2879def67aa1 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c @@ -126,7 +126,6 @@ EXPORT_SYMBOL(mmx_copy_page); #ifdef CONFIG_SMP EXPORT_SYMBOL(cpu_data); -EXPORT_SYMBOL(kernel_flag); EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL_NOVERS(__write_lock_failed); EXPORT_SYMBOL_NOVERS(__read_lock_failed); diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 19000818cf2b..0d463df558c3 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -103,9 +104,6 @@ * about nothing of note with C stepping upwards. */ -/* The 'big kernel lock' */ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... 
NR_CPUS-1] = { &init_mm, 0, }}; /* diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index d7e8085dcaad..67a3c4c86c2c 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -84,10 +84,6 @@ EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function_single); EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL(ia64_cpu_to_sapicid); - -#include -EXPORT_SYMBOL(kernel_flag); - #else /* !CONFIG_SMP */ EXPORT_SYMBOL(__flush_tlb_all); diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 7bde7122fdc2..0344a6d519e8 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -52,14 +52,6 @@ #include #include -/* - * The Big Kernel Lock. It's not supposed to be used for performance critical stuff - * anymore. But we still need to align it because certain workloads are still affected by - * it. For example, llseek() and various other filesystem related routines still use the - * BKL. - */ -spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED; - /* * Structure and data for smp_call_function(). This is designed to minimise static memory * requirements. It also looks cleaner. diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 45bff0a5ae82..f766ead50818 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -53,7 +53,6 @@ /* Ze Big Kernel Lock! 
*/ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; int smp_threads_ready; /* Not used */ int smp_num_cpus; int global_irq_holder = NO_PROC_ID; diff --git a/arch/mips64/kernel/smp.c b/arch/mips64/kernel/smp.c index 6d1855c8d9ae..4494b7d96ca6 100644 --- a/arch/mips64/kernel/smp.c +++ b/arch/mips64/kernel/smp.c @@ -53,7 +53,6 @@ static void sendintr(int destid, unsigned char status) #endif /* CONFIG_SGI_IP27 */ /* The 'big kernel lock' */ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; int smp_threads_ready; /* Not used */ atomic_t smp_commenced = ATOMIC_INIT(0); struct cpuinfo_mips cpu_data[NR_CPUS]; diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 111a2fb5a467..8d40cada7361 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -35,9 +35,6 @@ EXPORT_SYMBOL(boot_cpu_data); #ifdef CONFIG_SMP EXPORT_SYMBOL(synchronize_irq); -#include -EXPORT_SYMBOL(kernel_flag); - #include EXPORT_SYMBOL(__global_sti); EXPORT_SYMBOL(__global_cli); diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index 95b915ed328f..a1fc5194a0b2 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -93,9 +93,6 @@ EXPORT_SYMBOL(enable_irq); EXPORT_SYMBOL(disable_irq); EXPORT_SYMBOL(disable_irq_nosync); EXPORT_SYMBOL(probe_irq_mask); -#ifdef CONFIG_SMP -EXPORT_SYMBOL(kernel_flag); -#endif /* CONFIG_SMP */ EXPORT_SYMBOL(ISA_DMA_THRESHOLD); EXPORT_SYMBOL_NOVERS(DMA_MODE_READ); diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c index fd4970b2f599..efc7b9724bb9 100644 --- a/arch/ppc/kernel/smp.c +++ b/arch/ppc/kernel/smp.c @@ -47,7 +47,6 @@ struct cpuinfo_PPC cpu_data[NR_CPUS]; struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 }; atomic_t ipi_recv; atomic_t ipi_sent; -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; unsigned int prof_multiplier[NR_CPUS] = { [1 ... 
NR_CPUS-1] = 1 }; unsigned int prof_counter[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 }; unsigned long cache_decay_ticks = HZ/100; diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c index 953959974eac..4ed07f854e62 100644 --- a/arch/ppc64/kernel/ppc_ksyms.c +++ b/arch/ppc64/kernel/ppc_ksyms.c @@ -74,7 +74,6 @@ EXPORT_SYMBOL(disable_irq); EXPORT_SYMBOL(disable_irq_nosync); #ifdef CONFIG_SMP EXPORT_SYMBOL(synchronize_irq); -EXPORT_SYMBOL(kernel_flag); #endif /* CONFIG_SMP */ EXPORT_SYMBOL(register_ioctl32_conversion); diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c index d5a531bd0d88..3e602ba91951 100644 --- a/arch/ppc64/kernel/smp.c +++ b/arch/ppc64/kernel/smp.c @@ -51,7 +51,6 @@ #include int smp_threads_ready = 0; -spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED; unsigned long cache_decay_ticks; /* initialised so it doesnt end up in bss */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b460bb23ed6c..6c41cc6c7914 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -54,8 +54,6 @@ cycles_t cacheflush_time=0; int smp_threads_ready=0; /* Set when the idlers are all forked. */ static atomic_t smp_commenced = ATOMIC_INIT(0); -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - volatile unsigned long phys_cpu_present_map; volatile unsigned long cpu_online_map; unsigned long cache_decay_ticks = 0; @@ -634,7 +632,6 @@ int setup_profiling_timer(unsigned int multiplier) } EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(kernel_flag); EXPORT_SYMBOL(smp_ctl_set_bit); EXPORT_SYMBOL(smp_ctl_clear_bit); EXPORT_SYMBOL(smp_num_cpus); diff --git a/arch/s390x/kernel/smp.c b/arch/s390x/kernel/smp.c index c9ff1ff21f43..eba81ceede09 100644 --- a/arch/s390x/kernel/smp.c +++ b/arch/s390x/kernel/smp.c @@ -53,8 +53,6 @@ cycles_t cacheflush_time=0; int smp_threads_ready=0; /* Set when the idlers are all forked. 
*/ static atomic_t smp_commenced = ATOMIC_INIT(0); -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - volatile unsigned long phys_cpu_present_map; volatile unsigned long cpu_online_map; unsigned long cache_decay_ticks = 0; @@ -613,7 +611,6 @@ int setup_profiling_timer(unsigned int multiplier) } EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(kernel_flag); EXPORT_SYMBOL(smp_ctl_set_bit); EXPORT_SYMBOL(smp_ctl_clear_bit); EXPORT_SYMBOL(smp_num_cpus); diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index cb994b3a2520..b4ca31eecb20 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -66,9 +66,6 @@ cycles_t cacheflush_time = 0; /* XXX */ * instruction which is much better... */ -/* Kernel spinlock */ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - /* Used to make bitops atomic */ unsigned char bitops_spinlock = 0; diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index 832ad57aec31..1f51d96bbd5a 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -77,10 +77,6 @@ extern int __divdi3(int, int); extern void dump_thread(struct pt_regs *, struct user *); -#ifdef CONFIG_SMP -extern spinlock_t kernel_flag; -#endif - /* One thing to note is that the way the symbols of the mul/div * support routines are named is a mess, they all start with * a '.' which makes it a bitch to export, here is the trick: @@ -130,9 +126,6 @@ EXPORT_SYMBOL_PRIVATE(_clear_bit); EXPORT_SYMBOL_PRIVATE(_change_bit); #ifdef CONFIG_SMP -/* Kernel wide locking */ -EXPORT_SYMBOL(kernel_flag); - /* IRQ implementation. */ EXPORT_SYMBOL(global_irq_holder); EXPORT_SYMBOL(synchronize_irq); diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 74dc8427eeb1..ebf4cb96cff6 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -46,9 +46,6 @@ cpuinfo_sparc cpu_data[NR_CPUS]; /* Please don't make this stuff initdata!!! 
--DaveM */ static unsigned char boot_cpu_id; -/* Kernel spinlock */ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0); unsigned long cpu_online_map = 0; atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0); diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 41208bd8aa82..391c7078a944 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -101,9 +101,7 @@ extern int __ashrdi3(int, int); extern void dump_thread(struct pt_regs *, struct user *); extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); -#ifdef CONFIG_SMP -extern spinlock_t kernel_flag; -#ifdef CONFIG_DEBUG_SPINLOCK +#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK) extern void _do_spin_lock (spinlock_t *lock, char *str); extern void _do_spin_unlock (spinlock_t *lock); extern int _spin_trylock (spinlock_t *lock); @@ -112,7 +110,6 @@ extern void _do_read_unlock(rwlock_t *rw, char *str); extern void _do_write_lock(rwlock_t *rw, char *str); extern void _do_write_unlock(rwlock_t *rw); #endif -#endif extern unsigned long phys_base; extern unsigned long pfn_base; @@ -127,9 +124,6 @@ EXPORT_SYMBOL(__write_lock); EXPORT_SYMBOL(__write_unlock); #endif -/* Kernel wide locking */ -EXPORT_SYMBOL(kernel_flag); - /* Hard IRQ locking */ #ifdef CONFIG_SMP EXPORT_SYMBOL(synchronize_irq); diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c index 2078148fe8ce..f10fd18b44a4 100644 --- a/arch/x86_64/kernel/smp.c +++ b/arch/x86_64/kernel/smp.c @@ -22,9 +22,6 @@ #include #include -/* The 'big kernel lock' */ -spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; - /* * the following functions deal with sending IPIs between CPUs. 
* diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c index 2bbb7d8238b5..956ca21cbc2e 100644 --- a/arch/x86_64/kernel/x8664_ksyms.c +++ b/arch/x86_64/kernel/x8664_ksyms.c @@ -109,7 +109,6 @@ EXPORT_SYMBOL(mmx_copy_page); #ifdef CONFIG_SMP EXPORT_SYMBOL(cpu_data); -EXPORT_SYMBOL(kernel_flag); EXPORT_SYMBOL(smp_num_cpus); EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL_NOVERS(__write_lock_failed); diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 40f5358fc856..5a0b83a677d9 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h @@ -2,21 +2,10 @@ #define __LINUX_SMPLOCK_H #include - -#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT) - -#define lock_kernel() do { } while(0) -#define unlock_kernel() do { } while(0) -#define release_kernel_lock(task) do { } while(0) -#define reacquire_kernel_lock(task) do { } while(0) -#define kernel_locked() 1 - -#else - -#include -#include #include -#include +#include + +#if CONFIG_SMP || CONFIG_PREEMPT extern spinlock_t kernel_flag; @@ -26,23 +15,22 @@ extern spinlock_t kernel_flag; #define put_kernel_lock() spin_unlock(&kernel_flag) /* - * Release global kernel lock and global interrupt lock + * Release global kernel lock. */ -#define release_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - put_kernel_lock(); \ -} while (0) +static inline void release_kernel_lock(struct task_struct *task) +{ + if (unlikely(task->lock_depth >= 0)) + put_kernel_lock(); +} /* * Re-acquire the kernel lock */ -#define reacquire_kernel_lock(task) \ -do { \ - if (unlikely(task->lock_depth >= 0)) \ - get_kernel_lock(); \ -} while (0) - +static inline void reacquire_kernel_lock(struct task_struct *task) +{ + if (unlikely(task->lock_depth >= 0)) + get_kernel_lock(); +} /* * Getting the big kernel lock. @@ -51,22 +39,29 @@ do { \ * so we only need to worry about other * CPU's. 
*/ -static __inline__ void lock_kernel(void) +static inline void lock_kernel(void) { int depth = current->lock_depth+1; - if (!depth) + if (likely(!depth)) get_kernel_lock(); current->lock_depth = depth; } -static __inline__ void unlock_kernel(void) +static inline void unlock_kernel(void) { - if (current->lock_depth < 0) + if (unlikely(current->lock_depth < 0)) BUG(); - if (--current->lock_depth < 0) + if (likely(--current->lock_depth < 0)) put_kernel_lock(); } -#endif /* CONFIG_SMP */ +#else + +#define lock_kernel() do { } while(0) +#define unlock_kernel() do { } while(0) +#define release_kernel_lock(task) do { } while(0) +#define reacquire_kernel_lock(task) do { } while(0) +#define kernel_locked() 1 -#endif +#endif /* CONFIG_SMP || CONFIG_PREEMPT */ +#endif /* __LINUX_SMPLOCK_H */ diff --git a/kernel/ksyms.c b/kernel/ksyms.c index dff658338907..84b116465785 100644 --- a/kernel/ksyms.c +++ b/kernel/ksyms.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #if defined(CONFIG_PROC_FS) @@ -481,6 +482,9 @@ EXPORT_SYMBOL_GPL(idle_cpu); #if CONFIG_SMP EXPORT_SYMBOL_GPL(set_cpus_allowed); #endif +#if CONFIG_SMP || CONFIG_PREEMPT +EXPORT_SYMBOL(kernel_flag); +#endif EXPORT_SYMBOL(jiffies); EXPORT_SYMBOL(jiffies_64); EXPORT_SYMBOL(xtime); diff --git a/kernel/sched.c b/kernel/sched.c index 0d9e5bdef050..4749298e45bd 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1881,7 +1881,6 @@ void __init init_idle(task_t *idle, int cpu) } #if CONFIG_SMP - /* * This is how migration works: * @@ -2070,6 +2069,20 @@ __init int migration_init(void) #endif +#if CONFIG_SMP || CONFIG_PREEMPT +/* + * The 'big kernel lock' + * + * This spinlock is taken and released recursively by lock_kernel() + * and unlock_kernel(). It is transparently dropped and reacquired + * over schedule(). It is used to protect legacy code that hasn't + * been migrated to a proper locking design yet. + * + * Don't use in new code. 
+ */ +spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; +#endif + extern void init_timervecs(void); extern void timer_bh(void); extern void tqueue_bh(void); -- 2.39.5