git.neil.brown.name Git - history.git/commitdiff
Move x86 big-kernel-lock implementation into <linux/smp_lock.h>,
authorLinus Torvalds <torvalds@home.transmeta.com>
Tue, 20 Aug 2002 07:01:08 +0000 (00:01 -0700)
committerLinus Torvalds <torvalds@home.transmeta.com>
Tue, 20 Aug 2002 07:01:08 +0000 (00:01 -0700)
since it was generic.

Remove all architecture-specific <asm/smplock.h> files.

19 files changed:
include/asm-alpha/smplock.h [deleted file]
include/asm-arm/smplock.h [deleted file]
include/asm-cris/smplock.h [deleted file]
include/asm-generic/smplock.h [deleted file]
include/asm-i386/smplock.h [deleted file]
include/asm-ia64/smplock.h [deleted file]
include/asm-m68k/smplock.h [deleted file]
include/asm-mips/smplock.h [deleted file]
include/asm-mips64/smplock.h [deleted file]
include/asm-parisc/smplock.h [deleted file]
include/asm-ppc/smplock.h [deleted file]
include/asm-ppc64/smplock.h [deleted file]
include/asm-s390/smplock.h [deleted file]
include/asm-s390x/smplock.h [deleted file]
include/asm-sh/smplock.h [deleted file]
include/asm-sparc/smplock.h [deleted file]
include/asm-sparc64/smplock.h [deleted file]
include/asm-x86_64/smplock.h [deleted file]
include/linux/smp_lock.h

diff --git a/include/asm-alpha/smplock.h b/include/asm-alpha/smplock.h
deleted file mode 100644 (file)
index cfd3645..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-static __inline__ void release_kernel_lock(struct task_struct *task)
-{
-       if (unlikely(task->lock_depth >= 0))
-               spin_unlock(&kernel_flag);
-}
-
-/*
- * Re-acquire the kernel lock
- */
-static __inline__ void reacquire_kernel_lock(struct task_struct *task)
-{
-       if (unlikely(task->lock_depth >= 0))
-               spin_lock(&kernel_flag);
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void lock_kernel(void)
-{
-#ifdef CONFIG_PREEMPT
-       if (current->lock_depth == -1)
-               spin_lock(&kernel_flag);
-       ++current->lock_depth;
-#else
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-#endif
-}
-
-static __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-arm/smplock.h b/include/asm-arm/smplock.h
deleted file mode 100644 (file)
index 7b70d46..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#ifdef CONFIG_PREEMPT
-#define kernel_locked()                preempt_get_count()
-#else
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-#endif
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu)         \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_unlock(&kernel_flag);      \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static inline void lock_kernel(void)
-{
-#ifdef CONFIG_PREEMPT
-       if (current->lock_depth == -1)
-               spin_lock(&kernel_flag);
-       ++current->lock_depth;
-#else
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-#endif
-}
-
-static inline void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-cris/smplock.h b/include/asm-cris/smplock.h
deleted file mode 100644 (file)
index 3985620..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __ASM_CRIS_SMPLOCK_H
-#define __ASM_CRIS_SMPLOCK_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/config.h>
-
-#ifndef CONFIG_SMP
-
-#define lock_kernel()                           do { } while(0)
-#define unlock_kernel()                         do { } while(0)
-#define release_kernel_lock(task, cpu, depth)   ((depth) = 1)
-#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
-
-#else
-
-#error "We do not support SMP on CRIS"
-
-#endif
-
-#endif
diff --git a/include/asm-generic/smplock.h b/include/asm-generic/smplock.h
deleted file mode 100644 (file)
index f02afc9..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_unlock(&kernel_flag); \
-       local_irq_enable(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-i386/smplock.h b/include/asm-i386/smplock.h
deleted file mode 100644 (file)
index 2134982..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * i386 SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <asm/current.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                (current->lock_depth >= 0)
-
-#define get_kernel_lock()      spin_lock(&kernel_flag)
-#define put_kernel_lock()      spin_unlock(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               put_kernel_lock();              \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               get_kernel_lock();              \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void lock_kernel(void)
-{
-       int depth = current->lock_depth+1;
-       if (!depth)
-               get_kernel_lock();
-       current->lock_depth = depth;
-}
-
-static __inline__ void unlock_kernel(void)
-{
-       if (current->lock_depth < 0)
-               BUG();
-       if (--current->lock_depth < 0)
-               put_kernel_lock();
-}
diff --git a/include/asm-ia64/smplock.h b/include/asm-ia64/smplock.h
deleted file mode 100644 (file)
index 103185f..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-
-#include <asm/current.h>
-#include <asm/hardirq.h>
-
-extern spinlock_t kernel_flag;
-
-#ifdef CONFIG_SMP
-# define kernel_locked()       spin_is_locked(&kernel_flag)
-#else
-# define kernel_locked()       (1)
-#endif
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_unlock(&kernel_flag);      \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void
-lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-static __inline__ void
-unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-m68k/smplock.h b/include/asm-m68k/smplock.h
deleted file mode 100644 (file)
index 3e98a6a..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_unlock(&kernel_flag); \
-       release_irqlock(cpu); \
-       local_irq_enable(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-mips/smplock.h b/include/asm-mips/smplock.h
deleted file mode 100644 (file)
index 43da07e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Default SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_unlock(&kernel_flag); \
-       release_irqlock(cpu); \
-       local_irq_enable(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-mips64/smplock.h b/include/asm-mips64/smplock.h
deleted file mode 100644 (file)
index 68345b0..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#ifndef _ASM_SMPLOCK_H
-#define _ASM_SMPLOCK_H
-
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                        spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
-{
-       if (task->lock_depth >= 0)
-               spin_unlock(&kernel_flag);
-       release_irqlock(cpu);
-       local_irq_enable();
-}
-
-/*
- * Re-acquire the kernel lock
- */
-static __inline__ void reacquire_kernel_lock(struct task_struct *task)
-{
-       if (task->lock_depth >= 0)
-               spin_lock(&kernel_flag);
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-static __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
-
-#endif /* _ASM_SMPLOCK_H */
diff --git a/include/asm-parisc/smplock.h b/include/asm-parisc/smplock.h
deleted file mode 100644 (file)
index 06fb015..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_unlock(&kernel_flag); \
-       release_irqlock(cpu); \
-       local_irq_enable(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-       if (task->lock_depth >= 0) \
-               spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-ppc/smplock.h b/include/asm-ppc/smplock.h
deleted file mode 100644 (file)
index 8e8ec92..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * BK Id: %F% %I% %G% %U% %#%
- */
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#ifdef __KERNEL__
-#ifndef __ASM_SMPLOCK_H__
-#define __ASM_SMPLOCK_H__
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#ifdef CONFIG_SMP
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-#elif defined(CONFIG_PREEMPT)
-#define kernel_locked()                preempt_count()
-#endif
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_unlock(&kernel_flag);      \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void lock_kernel(void)
-{
-#ifdef CONFIG_PREEMPT
-       if (current->lock_depth == -1)
-               spin_lock(&kernel_flag);
-       ++current->lock_depth;
-#else
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-#endif /* CONFIG_PREEMPT */
-}
-
-static __inline__ void unlock_kernel(void)
-{
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
-#endif /* __ASM_SMPLOCK_H__ */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/smplock.h b/include/asm-ppc64/smplock.h
deleted file mode 100644 (file)
index 16b0b2f..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_unlock(&kernel_flag);      \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-static __inline__ void lock_kernel(void)
-{
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-}
-
-static __inline__ void unlock_kernel(void)
-{
-       if (current->lock_depth < 0)
-               BUG();
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-}
diff --git a/include/asm-s390/smplock.h b/include/asm-s390/smplock.h
deleted file mode 100644 (file)
index a12df4a..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *  include/asm-s390/smplock.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/smplock.h"
- */
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu)     \
-do {                                       \
-       if (task->lock_depth >= 0)         \
-               spin_unlock(&kernel_flag); \
-       release_irqlock(cpu);              \
-       local_irq_enable();                           \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)        \
-do {                                       \
-       if (task->lock_depth >= 0)         \
-               spin_lock(&kernel_flag);   \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-        if (!++current->lock_depth)
-                spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-        if (--current->lock_depth < 0)
-                spin_unlock(&kernel_flag);
-}
-
diff --git a/include/asm-s390x/smplock.h b/include/asm-s390x/smplock.h
deleted file mode 100644 (file)
index a12df4a..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *  include/asm-s390/smplock.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/smplock.h"
- */
-
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu)     \
-do {                                       \
-       if (task->lock_depth >= 0)         \
-               spin_unlock(&kernel_flag); \
-       release_irqlock(cpu);              \
-       local_irq_enable();                           \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)        \
-do {                                       \
-       if (task->lock_depth >= 0)         \
-               spin_lock(&kernel_flag);   \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-        if (!++current->lock_depth)
-                spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-        if (--current->lock_depth < 0)
-                spin_unlock(&kernel_flag);
-}
-
diff --git a/include/asm-sh/smplock.h b/include/asm-sh/smplock.h
deleted file mode 100644 (file)
index 3349981..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __ASM_SH_SMPLOCK_H
-#define __ASM_SH_SMPLOCK_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/config.h>
-
-#ifndef CONFIG_SMP
-
-#define lock_kernel()                          do { } while(0)
-#define unlock_kernel()                                do { } while(0)
-#define release_kernel_lock(task, cpu, depth)  ((depth) = 1)
-#define reacquire_kernel_lock(task, cpu, depth)        do { } while(0)
-
-#else
-#error "We do not support SMP on SH"
-#endif /* CONFIG_SMP */
-
-#endif /* __ASM_SH_SMPLOCK_H */
diff --git a/include/asm-sparc/smplock.h b/include/asm-sparc/smplock.h
deleted file mode 100644 (file)
index bd931bb..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#define kernel_locked()                        \
-       (spin_is_locked(&kernel_flag) &&\
-        (current->lock_depth >= 0))
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu)         \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0)) {  \
-               spin_unlock(&kernel_flag);      \
-               release_irqlock(cpu);           \
-               local_irq_enable();                     \
-       }                                       \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-#define lock_kernel()                          \
-do {                                           \
-       if (!++current->lock_depth)             \
-               spin_lock(&kernel_flag);        \
-} while(0)
-
-#define unlock_kernel()                                \
-do {                                           \
-       if (--current->lock_depth < 0)          \
-               spin_unlock(&kernel_flag);      \
-} while(0)
diff --git a/include/asm-sparc64/smplock.h b/include/asm-sparc64/smplock.h
deleted file mode 100644 (file)
index b7edf01..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * <asm/smplock.h>
- *
- * Default SMP lock implementation
- */
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-#ifdef CONFIG_SMP
-#define kernel_locked()                        \
-       (spin_is_locked(&kernel_flag) &&\
-        (current->lock_depth >= 0))
-#else
-#ifdef CONFIG_PREEMPT
-#define kernel_locked()                        preempt_get_count()
-#else
-#define kernel_locked()                        1
-#endif
-#endif
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task)              \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_unlock(&kernel_flag);      \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task)            \
-do {                                           \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag);        \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-#define lock_kernel()                          \
-do {                                           \
-       if (!++current->lock_depth)             \
-               spin_lock(&kernel_flag);        \
-} while(0)
-
-#define unlock_kernel()                                \
-do {                                           \
-       if (--current->lock_depth < 0)          \
-               spin_unlock(&kernel_flag);      \
-} while(0)
diff --git a/include/asm-x86_64/smplock.h b/include/asm-x86_64/smplock.h
deleted file mode 100644 (file)
index 6c0b652..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * <asm/smplock.h>
- */
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <asm/current.h>
-
-extern spinlock_t kernel_flag;
-
-#ifdef CONFIG_SMP
-#define kernel_locked()                spin_is_locked(&kernel_flag)
-#define check_irq_holder(cpu)  \
-       if (global_irq_holder == (cpu)) \
-                       BUG();                 
-#else
-#ifdef CONFIG_PREEMPT
-#define kernel_locked()                preempt_get_count()
-#define global_irq_holder      0
-#define check_irq_holder(cpu) do {} while(0)
-#else
-#define kernel_locked()                1
-#define check_irq_holder(cpu)  \
-       if (global_irq_holder == (cpu)) \
-                       BUG();                 
-#endif
-#endif
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-       if (unlikely(task->lock_depth >= 0)) {  \
-               spin_unlock(&kernel_flag); \
-               check_irq_holder(cpu);  \
-       }                                       \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-       if (unlikely(task->lock_depth >= 0))    \
-               spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-#ifdef CONFIG_PREEMPT
-       if (current->lock_depth == -1)
-               spin_lock(&kernel_flag);
-       ++current->lock_depth;
-#else
-#if 1
-       if (!++current->lock_depth)
-               spin_lock(&kernel_flag);
-#else
-       __asm__ __volatile__(
-               "incl %1\n\t"
-               "jne 9f"
-               spin_lock_string
-               "\n9:"
-               :"=m" (__dummy_lock(&kernel_flag)),
-                "=m" (current->lock_depth));
-#endif
-#endif
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-       if (current->lock_depth < 0)
-               BUG();
-#if 1
-       if (--current->lock_depth < 0)
-               spin_unlock(&kernel_flag);
-#else
-       __asm__ __volatile__(
-               "decl %1\n\t"
-               "jns 9f\n\t"
-               spin_unlock_string
-               "\n9:"
-               :"=m" (__dummy_lock(&kernel_flag)),
-                "=m" (current->lock_depth));
-#endif
-}
index cfb23f363e6110e77ec11dbccae704b28be3c7ed..40f5358fc856220a43988787ef643ff5f518f1ec 100644 (file)
 
 #else
 
-#include <asm/smplock.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked()                (current->lock_depth >= 0)
+
+#define get_kernel_lock()      spin_lock(&kernel_flag)
+#define put_kernel_lock()      spin_unlock(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task)              \
+do {                                           \
+       if (unlikely(task->lock_depth >= 0))    \
+               put_kernel_lock();              \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task)            \
+do {                                           \
+       if (unlikely(task->lock_depth >= 0))    \
+               get_kernel_lock();              \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void lock_kernel(void)
+{
+       int depth = current->lock_depth+1;
+       if (!depth)
+               get_kernel_lock();
+       current->lock_depth = depth;
+}
+
+static __inline__ void unlock_kernel(void)
+{
+       if (current->lock_depth < 0)
+               BUG();
+       if (--current->lock_depth < 0)
+               put_kernel_lock();
+}
 
 #endif /* CONFIG_SMP */