VERSION = 2
PATCHLEVEL = 1
-SUBLEVEL = 113
+SUBLEVEL = 114
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
/*
* bios32.c - Low-Level PCI Access
*
- * $Id: bios32.c,v 1.42 1998/07/26 09:33:07 mj Exp $
+ * $Id: bios32.c,v 1.43 1998/08/03 15:59:20 mj Exp $
*
* Copyright 1993, 1994 Drew Eckhardt
* Visionary Computing
struct pci_bus *b = &pci_root;
int i;
+ /*
+ * Don't search for peer host bridges if we use config type 2
+ * since it reads bogus values for non-existent busses and
+ * chipsets supporting multiple primary busses use conf1 anyway.
+ */
+ if (access_pci == &pci_direct_conf2)
+ return;
do {
int n = b->subordinate+1;
u16 l;
/*
* Don't enable VGA-compatible cards since they have
* fixed I/O and memory space.
+ *
+ * Don't enable disabled IDE interfaces either because
+ * some BIOSes may reallocate the same address when they
+ * find that no devices are attached.
*/
- if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
+ if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) &&
+ ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (has_io && !(cmd & PCI_COMMAND_IO)) {
printk("PCI: Enabling I/O for device %02x:%02x\n",
* patches and reporting/debugging problems patiently!
*/
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <asm/i82489.h>
-#include <linux/smp.h>
#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
-#include <asm/pgtable.h>
-#include <asm/bitops.h>
-#include <asm/pgtable.h>
-#include <asm/smp.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include "irq.h"
/*
- * volatile is justified in this case, it might change
- * spontaneously, GCC should not cache it
+ * volatile is justified in this case, IO-APIC register contents
+ * might change spontaneously, GCC should not cache it
*/
#define IO_APIC_BASE ((volatile int *)fix_to_virt(FIX_IO_APIC_BASE))
-enum mp_irq_source_types {
- mp_INT = 0,
- mp_NMI = 1,
- mp_SMI = 2,
- mp_ExtINT = 3
-};
-
-enum ioapic_irq_destination_types {
- dest_Fixed = 0,
- dest_LowestPrio = 1,
- dest_ExtINT = 7
-};
-
/*
* The structure of the IO-APIC:
*/
+
struct IO_APIC_reg_00 {
__u32 __reserved_2 : 24,
ID : 4,
__reserved_1 : 4;
} __attribute__ ((packed));
+/*
+ * # of IRQ routing registers
+ */
+int nr_ioapic_registers = 0;
+
+enum ioapic_irq_destination_types {
+ dest_Fixed = 0,
+ dest_LowestPrio = 1,
+ dest_ExtINT = 7
+};
+
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
} __attribute__ ((packed));
-#define UNEXPECTED_IO_APIC() \
- { \
- printk(" WARNING: unexpected IO-APIC, please mail\n"); \
- printk(" to linux-smp@vger.rutgers.edu\n"); \
- }
+/*
+ * MP-BIOS irq configuration table structures:
+ */
+
+enum mp_irq_source_types {
+ mp_INT = 0,
+ mp_NMI = 1,
+ mp_SMI = 2,
+ mp_ExtINT = 3
+};
-int nr_ioapic_registers = 0; /* # of IRQ routing registers */
int mp_irq_entries = 0; /* # of MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* MP IRQ source entries */
*/
static int irq_2_pin[NR_IRQS];
-unsigned int io_apic_read (unsigned int reg)
+static inline unsigned int io_apic_read(unsigned int reg)
{
*IO_APIC_BASE = reg;
return *(IO_APIC_BASE+4);
}
-void io_apic_write (unsigned int reg, unsigned int value)
+static inline void io_apic_write(unsigned int reg, unsigned int value)
{
*IO_APIC_BASE = reg;
*(IO_APIC_BASE+4) = value;
* We disable IO-APIC IRQs by setting their 'destination CPU mask' to
* zero. Trick, trick.
*/
-void disable_IO_APIC_irq(unsigned int irq)
+static inline void disable_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
- *(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+ *(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
entry.dest.logical.logical_dest = 0x0;
- io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
io_apic_sync();
}
}
-void enable_IO_APIC_irq(unsigned int irq)
+static inline void enable_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
- *(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+ *(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
entry.dest.logical.logical_dest = 0xff;
- io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
}
}
-void mask_IO_APIC_irq(unsigned int irq)
+static inline void mask_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
- *(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+ *(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
entry.mask = 1;
- io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
io_apic_sync();
}
}
-void unmask_IO_APIC_irq(unsigned int irq)
+static inline void unmask_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
- *(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+ *(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
entry.mask = 0;
- io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
}
}
-void clear_IO_APIC_pin (unsigned int pin)
+static void __init clear_IO_APIC_pin(unsigned int pin)
{
struct IO_APIC_route_entry entry;
*/
memset(&entry, 0, sizeof(entry));
entry.mask = 1;
- io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
- io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
+ io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
}
int pirq_entries [MAX_PIRQS];
int pirqs_enabled;
-__initfunc(void ioapic_pirq_setup(char *str, int *ints))
+void __init ioapic_pirq_setup(char *str, int *ints)
{
int i, max;
- for (i=0; i<MAX_PIRQS; i++)
- pirq_entries[i]=-1;
+ for (i = 0; i < MAX_PIRQS; i++)
+ pirq_entries[i] = -1;
if (!ints) {
- pirqs_enabled=0;
- printk("PIRQ redirection SETUP, trusting MP-BIOS.\n");
+ pirqs_enabled = 0;
+ printk("PIRQ redirection, trusting MP-BIOS.\n");
} else {
- pirqs_enabled=1;
- printk("PIRQ redirection SETUP, working around broken MP-BIOS.\n");
+ pirqs_enabled = 1;
+ printk("PIRQ redirection, working around broken MP-BIOS.\n");
max = MAX_PIRQS;
if (ints[0] < MAX_PIRQS)
max = ints[0];
- for (i=0; i < max; i++) {
+ for (i = 0; i < max; i++) {
printk("... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
/*
* PIRQs are mapped upside down, usually.
*/
- pirq_entries[MAX_PIRQS-i-1]=ints[i+1];
+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
}
}
}
/*
* Find the IRQ entry number of a certain pin.
*/
-__initfunc(static int find_irq_entry(int pin, int type))
+static int __init find_irq_entry(int pin, int type)
{
int i;
- for (i=0; i<mp_irq_entries; i++)
+ for (i = 0; i < mp_irq_entries; i++)
if ( (mp_irqs[i].mpc_irqtype == type) &&
(mp_irqs[i].mpc_dstirq == pin))
/*
* Find the pin to which IRQ0 (ISA) is connected
*/
-__initfunc(int find_timer_pin (int type))
+static int __init find_timer_pin(int type)
{
int i;
- for (i=0; i<mp_irq_entries; i++) {
+ for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA) &&
* Find a specific PCI IRQ entry.
* Not an initfunc, possibly needed by modules
*/
-int IO_APIC_get_PCI_irq_vector (int bus, int slot, int pci_pin)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
{
int i;
- for (i=0; i<mp_irq_entries; i++) {
+ for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
if (IO_APIC_IRQ(mp_irqs[i].mpc_dstirq) &&
* to be accepted. Yes, ugh.
*/
-static int MPBIOS_polarity(int idx)
+static int __init MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
return polarity;
}
-
-static int MPBIOS_trigger(int idx)
+static int __init MPBIOS_trigger(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int trigger;
return trigger;
}
-static int trigger_flag_broken (int idx)
+static int __init trigger_flag_broken(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity = MPBIOS_polarity(idx);
return 0;
}
-static int irq_polarity (int idx)
+static inline int irq_polarity(int idx)
{
/*
* There are no known BIOS bugs wrt polarity. yet.
return MPBIOS_polarity(idx);
}
-static int irq_trigger (int idx)
+static inline int irq_trigger(int idx)
{
int trigger = MPBIOS_trigger(idx);
- if (trigger_flag_broken (idx))
+ if (trigger_flag_broken(idx))
trigger = 0;
return trigger;
}
-static int pin_2_irq (int idx, int pin)
+static int __init pin_2_irq(int idx, int pin)
{
int irq;
int bus = mp_irqs[idx].mpc_srcbus;
/*
* PCI IRQ command line redirection. Yes, limits are hardcoded.
*/
- if ((pin>=16) && (pin<=23)) {
+ if ((pin >= 16) && (pin <= 23)) {
if (pirq_entries[pin-16] != -1) {
if (!pirq_entries[pin-16]) {
printk("disabling PIRQ%d\n", pin-16);
return irq;
}
-int IO_APIC_irq_trigger (int irq)
+static inline int IO_APIC_irq_trigger(int irq)
{
int idx, pin;
- for (pin=0; pin<nr_ioapic_registers; pin++) {
+ for (pin = 0; pin < nr_ioapic_registers; pin++) {
idx = find_irq_entry(pin,mp_INT);
if ((idx != -1) && (irq == pin_2_irq(idx,pin)))
- return (irq_trigger(idx));
+ return irq_trigger(idx);
}
/*
* nonexistent IRQs are edge default
return 0;
}
-__initfunc(static int assign_irq_vector(int irq))
+static int __init assign_irq_vector(int irq)
{
static int current_vector = IRQ0_TRAP_VECTOR, offset = 0;
if (IO_APIC_VECTOR(irq) > 0)
return current_vector;
}
-__initfunc(void setup_IO_APIC_irqs (void))
+void __init setup_IO_APIC_irqs(void)
{
struct IO_APIC_route_entry entry;
- int pin, idx, bus, irq, first_notcon=1;
+ int pin, idx, bus, irq, first_notcon = 1;
printk("init IO_APIC IRQs\n");
- for (pin=0; pin<nr_ioapic_registers; pin++) {
+ for (pin = 0; pin < nr_ioapic_registers; pin++) {
/*
* add it to the IO-APIC irq-routing table:
if (idx == -1) {
if (first_notcon) {
printk(" IO-APIC pin %d", pin);
- first_notcon=0;
+ first_notcon = 0;
} else
printk(", %d", pin);
continue;
printk(" not connected.\n");
}
-__initfunc(void setup_IO_APIC_irq_ISA_default (unsigned int irq))
+void __init setup_IO_APIC_irq_ISA_default(unsigned int irq)
{
struct IO_APIC_route_entry entry;
entry.vector = assign_irq_vector(irq);
- entry.polarity=0;
- entry.trigger=0;
+ entry.polarity = 0;
+ entry.trigger = 0;
io_apic_write(0x10+2*irq, *(((int *)&entry)+0));
io_apic_write(0x11+2*irq, *(((int *)&entry)+1));
/*
* Set up a certain pin as ExtINT delivered interrupt
*/
-__initfunc(void setup_ExtINT_pin (unsigned int pin))
+void __init setup_ExtINT_pin(unsigned int pin)
{
struct IO_APIC_route_entry entry;
entry.vector = 0; /* it's ignored */
- entry.polarity=0;
- entry.trigger=0;
+ entry.polarity = 0;
+ entry.trigger = 0;
io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
}
-void print_IO_APIC (void)
+void __init UNEXPECTED_IO_APIC(void)
+{
+ printk(" WARNING: unexpected IO-APIC, please mail\n");
+ printk(" to linux-smp@vger.rutgers.edu\n");
+}
+
+void __init print_IO_APIC(void)
{
int i;
struct IO_APIC_reg_00 reg_00;
printk(" NR Log Phy ");
printk("Mask Trig IRR Pol Stat Dest Deli Vect: \n");
- for (i=0; i<=reg_01.entries; i++) {
+ for (i = 0; i <= reg_01.entries; i++) {
struct IO_APIC_route_entry entry;
*(((int *)&entry)+0) = io_apic_read(0x10+i*2);
}
printk("IRQ to pin mappings:\n");
- for (i=0; i<NR_IRQS; i++)
+ for (i = 0; i < NR_IRQS; i++)
printk("%d->%d ", i, irq_2_pin[i]);
printk("\n");
return;
}
-__initfunc(static void init_sym_mode (void))
+static void __init init_sym_mode(void)
{
int i, pin;
- for (i=0; i<NR_IRQS; i++)
+ for (i = 0; i < NR_IRQS; i++)
irq_2_pin[i] = -1;
if (!pirqs_enabled)
- for (i=0; i<MAX_PIRQS; i++)
- pirq_entries[i]=-1;
+ for (i = 0; i < MAX_PIRQS; i++)
+ pirq_entries[i] = -1;
printk("enabling symmetric IO mode... ");
/*
* Do not trust the IO-APIC being empty at bootup
*/
- for (pin=0; pin<nr_ioapic_registers; pin++)
- clear_IO_APIC_pin (pin);
+ for (pin = 0; pin < nr_ioapic_registers; pin++)
+ clear_IO_APIC_pin(pin);
}
/*
* Not an initfunc, needed by the reboot code
*/
-void init_pic_mode (void)
+void init_pic_mode(void)
{
printk("disabling symmetric IO mode... ");
- outb_p (0x70, 0x22);
- outb_p (0x00, 0x23);
+ outb_p(0x70, 0x22);
+ outb_p(0x00, 0x23);
printk("...done.\n");
}
char * product_id;
};
-struct ioapic_list_entry ioapic_whitelist [] = {
+struct ioapic_list_entry __initdata ioapic_whitelist [] = {
{ "INTEL " , "PR440FX " },
{ "INTEL " , "82440FX " },
{ 0 , 0 }
};
-struct ioapic_list_entry ioapic_blacklist [] = {
+struct ioapic_list_entry __initdata ioapic_blacklist [] = {
{ "OEM00000" , "PROD00000000" },
{ 0 , 0 }
};
-__initfunc(static int in_ioapic_list (struct ioapic_list_entry * table))
+static int __init in_ioapic_list(struct ioapic_list_entry * table)
{
- for (;table->oem_id; table++)
+ for ( ; table->oem_id ; table++)
if ((!strcmp(table->oem_id,ioapic_OEM_ID)) &&
(!strcmp(table->product_id,ioapic_Product_ID)))
return 1;
return 0;
}
-__initfunc(static int ioapic_whitelisted (void))
+static int __init ioapic_whitelisted(void)
{
/*
* Right now, whitelist everything to see whether the new parsing
#endif
}
-__initfunc(static int ioapic_blacklisted (void))
+static int __init ioapic_blacklisted(void)
{
return in_ioapic_list(ioapic_blacklist);
}
-__initfunc(static void setup_ioapic_id (void))
+static void __init setup_ioapic_id(void)
{
struct IO_APIC_reg_00 reg_00;
panic("could not set ID");
}
-__initfunc(static void construct_default_ISA_mptable (void))
+static void __init construct_default_ISA_mptable(void)
{
- int i, pos=0;
+ int i, pos = 0;
- for (i=0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
if (!IO_APIC_IRQ(i))
continue;
* - if this function detects that timer IRQs are defunct, then we fall
* back to ISA timer IRQs
*/
-__initfunc(static int timer_irq_works (void))
+static int __init timer_irq_works(void)
{
- unsigned int t1=jiffies;
- unsigned long flags;
+ unsigned int t1 = jiffies;
- save_flags(flags);
sti();
-
udelay(10*10000);
if (jiffies-t1>1)
return 0;
}
-#ifdef __SMP__
-
/*
* In the SMP+IOAPIC case it might happen that there are an unspecified
* number of pending IRQ events unhandled. These cases are very rare,
* better to do it this way as thus we do not have to be aware of
* 'pending' interrupts in the IRQ path, except at this point.
*/
-static inline void self_IPI (unsigned int irq)
+static inline void self_IPI(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
irq_exit(cpu, irq);
}
-static void do_level_ioapic_IRQ (unsigned int irq, int cpu,
- struct pt_regs * regs)
+static void do_level_ioapic_IRQ(unsigned int irq, int cpu,
+ struct pt_regs * regs)
{
irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
disable_level_ioapic_irq
};
-void init_IO_APIC_traps(void)
+static inline void init_IO_APIC_traps(void)
{
int i;
/*
}
}
}
-#endif
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
*/
-__initfunc(static void check_timer (void))
+static inline void check_timer(void)
{
int pin1, pin2;
- pin1 = find_timer_pin (mp_INT);
- pin2 = find_timer_pin (mp_ExtINT);
+ pin1 = find_timer_pin(mp_INT);
+ pin2 = find_timer_pin(mp_ExtINT);
- if (!timer_irq_works ()) {
+ if (!timer_irq_works()) {
if (pin1 != -1)
printk("..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
printk("...trying to set up timer as ExtINT... ");
if (pin2 != -1) {
printk(".. (found pin %d) ...", pin2);
- setup_ExtINT_pin (pin2);
+ setup_ExtINT_pin(pin2);
make_8259A_irq(0);
}
- if (!timer_irq_works ()) {
+ if (!timer_irq_works()) {
printk(" failed.\n");
printk("...trying to set up timer as BP IRQ...");
/*
* Just in case ...
*/
if (pin1 != -1)
- clear_IO_APIC_pin (pin1);
+ clear_IO_APIC_pin(pin1);
if (pin2 != -1)
- clear_IO_APIC_pin (pin2);
+ clear_IO_APIC_pin(pin2);
make_8259A_irq(0);
- if (!timer_irq_works ()) {
+ if (!timer_irq_works()) {
printk(" failed.\n");
panic("IO-APIC + timer doesn't work!");
}
}
}
-__initfunc(void setup_IO_APIC (void))
+void __init setup_IO_APIC(void)
{
init_sym_mode();
* Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
* mptable:
*/
- setup_IO_APIC_irqs ();
+ setup_IO_APIC_irqs();
init_IRQ_SMP();
check_timer();
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
-void enable_IO_APIC_irq (unsigned int irq);
-void disable_IO_APIC_irq (unsigned int irq);
-void unmask_IO_APIC_irq (unsigned int irq);
-void mask_IO_APIC_irq (unsigned int irq);
void set_8259A_irq_mask (unsigned int irq);
int i8259A_irq_pending (unsigned int irq);
void ack_APIC_irq (void);
void setup_IO_APIC (void);
-void init_IO_APIC_traps(void);
int IO_APIC_get_PCI_irq_vector (int bus, int slot, int fn);
-int IO_APIC_irq_trigger (int irq);
void make_8259A_irq (unsigned int irq);
void send_IPI (int dest, int vector);
void init_pic_mode (void);
*
* This extra buffer essentially acts to make for less
* "jitter" in the allocations..
+ *
+ * On SMP we don't do this right now because:
+ * - we aren't holding any locks when called, and we might
+ * as well just depend on the generic memory management
+ * to do proper locking for us instead of complicating it
+ * here.
+ * - if you use SMP you have a beefy enough machine that
+ * this shouldn't matter..
*/
+#ifndef __SMP__
#define EXTRA_TASK_STRUCT 16
static struct task_struct * task_struct_stack[EXTRA_TASK_STRUCT];
static int task_struct_stack_ptr = -1;
+#endif
struct task_struct * alloc_task_struct(void)
{
+#ifndef EXTRA_TASK_STRUCT
+ return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
+#else
int index;
struct task_struct *ret;
}
}
return ret;
+#endif
}
void free_task_struct(struct task_struct *p)
{
+#ifdef EXTRA_TASK_STRUCT
int index = task_struct_stack_ptr+1;
if (index < EXTRA_TASK_STRUCT) {
task_struct_stack[index] = p;
task_struct_stack_ptr = index;
} else
+#endif
free_pages((unsigned long) p, 1);
}
unsigned long *stack, addr, module_start, module_end;
extern char _stext, _etext;
- esp = (unsigned long) &regs->esp;
+ esp = (unsigned long) (1+regs);
ss = __KERNEL_DS;
if (regs->xcs & 3) {
in_kernel = 0;
printk("\nCode: ");
for(i=0;i<20;i++)
printk("%02x ", ((unsigned char *)regs->eip)[i]);
- printk("\n");
}
+ printk("\n");
}
spinlock_t die_lock;
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/zorro.h>
** Created: 12/10/97 by Alain Malek
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/amigayle.h>
if (result <= 0) {
#ifdef PARANOIA
- printk(KERN_ERR "NBD: %s - sock=%d at buf=%d, size=%d returned %d.\n",
- send ? "send" : "receive", (int) sock, (int) buf, size, result);
+ printk(KERN_ERR "NBD: %s - sock=%ld at buf=%ld, size=%d returned %d.\n",
+ send ? "send" : "receive", (long) sock, (long) buf, size, result);
#endif
break;
}
return 0;
#ifdef PARANOIA
case NBD_PRINT_DEBUG:
- printk(KERN_INFO "NBD device %d: head = %x, tail = %x. Global: in %d, out %d\n",
- dev, (int) lo->head, (int) lo->tail, requests_in, requests_out);
+ printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
+ dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
return 0;
#endif
}
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/blk.h>
#include <linux/sched.h>
#include <linux/version.h>
-#include <linux/config.h>
#include <linux/zorro.h>
#include <asm/setup.h>
nlm_shutdown_hosts();
nlmsvc_pid = 0;
} else
- printk("lockd: new process, skipping host shutdown\n");
+ printk(KERN_DEBUG
+ "lockd: new process, skipping host shutdown\n");
wake_up(&lockd_exit);
/* Exit the RPC thread */
int
lockd_up(void)
{
+ static int warned = 0;
struct svc_serv * serv;
int error = 0;
* we should be the first user ...
*/
if (nlmsvc_users > 1)
- printk("lockd_up: no pid, %d users??\n", nlmsvc_users);
+ printk(KERN_WARNING
+ "lockd_up: no pid, %d users??\n", nlmsvc_users);
error = -ENOMEM;
serv = svc_create(&nlmsvc_program, 0, NLMSVC_XDRSIZE);
if (!serv) {
- printk("lockd_up: create service failed\n");
+ printk(KERN_WARNING "lockd_up: create service failed\n");
goto out;
}
if ((error = svc_makesock(serv, IPPROTO_UDP, 0)) < 0
|| (error = svc_makesock(serv, IPPROTO_TCP, 0)) < 0) {
- printk("lockd_up: makesock failed, error=%d\n", error);
+ if (warned++ == 0)
+ printk(KERN_WARNING
+ "lockd_up: makesock failed, error=%d\n", error);
goto destroy_and_out;
- }
+ }
+ warned = 0;
/*
* Create the kernel thread and wait for it to start.
*/
error = svc_create_thread(lockd, serv);
if (error) {
- printk("lockd_up: create thread failed, error=%d\n", error);
+ printk(KERN_WARNING
+ "lockd_up: create thread failed, error=%d\n", error);
goto destroy_and_out;
}
sleep_on(&lockd_start);
void
lockd_down(void)
{
+ static int warned = 0;
+
down(&nlmsvc_sema);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
} else
- printk("lockd_down: no users! pid=%d\n", nlmsvc_pid);
+ printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
if (!nlmsvc_pid) {
- printk("lockd_down: nothing to do!\n");
+ if (warned++ == 0)
+ printk(KERN_WARNING "lockd_down: no lockd running.\n");
goto out;
}
+ warned = 0;
kill_proc(nlmsvc_pid, SIGKILL, 1);
/*
interruptible_sleep_on(&lockd_exit);
current->timeout = 0;
if (nlmsvc_pid) {
- printk("lockd_down: lockd failed to exit, clearing pid\n");
+ printk(KERN_WARNING
+ "lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sigmask_lock);
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * i386 SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ __asm__ __volatile__(
+ "incl %1\n\t"
+ "jne 9f"
+ spin_lock_string
+ "\n9:"
+ :"=m" (__dummy_lock(&kernel_flag)),
+ "=m" (current->lock_depth));
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ __asm__ __volatile__(
+ "decl %1\n\t"
+ "jns 9f\n"
+ spin_unlock_string
+ "\n9:"
+ :"=m" (__dummy_lock(&kernel_flag)),
+ "=m" (current->lock_depth));
+}
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-#define spin_lock(lock) \
-__asm__ __volatile__( \
+#define spin_lock_string \
"\n1:\t" \
"lock ; btsl $0,%0\n\t" \
"jc 2f\n" \
"testb $1,%0\n\t" \
"jne 2b\n\t" \
"jmp 1b\n" \
- ".previous" \
+ ".previous"
+
+#define spin_unlock_string \
+ "lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ spin_lock_string \
:"=m" (__dummy_lock(lock)))
#define spin_unlock(lock) \
__asm__ __volatile__( \
- "lock ; btrl $0,%0" \
+ spin_unlock_string \
:"=m" (__dummy_lock(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
--- /dev/null
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
/* various fields */
long counter;
long priority;
- struct linux_binfmt *binfmt;
+/* SMP and runqueue state */
+ int has_cpu;
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
struct task_struct *next_task, *prev_task;
struct task_struct *next_run, *prev_run;
+
+/* task state */
+ struct linux_binfmt *binfmt;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
/* memory management info */
struct mm_struct *mm;
/* signal handlers */
+ spinlock_t sigmask_lock; /* Protects signal and blocked */
struct signal_struct *sig;
sigset_t signal, blocked;
struct signal_queue *sigqueue, **sigqueue_tail;
unsigned long sas_ss_sp;
size_t sas_ss_size;
-/* SMP state */
- int has_cpu;
- int processor;
- int last_processor;
- int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
- /* Spinlocks for various pieces or per-task state. */
- spinlock_t sigmask_lock; /* Protects signal and blocked */
};
/*
#define INIT_TASK \
/* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0, \
/* counter */ DEF_PRIORITY,DEF_PRIORITY, \
-/* binfmt */ NULL, \
+/* SMP */ 0,0,0,-1, \
/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* binfmt */ NULL, \
/* ec,brk... */ 0,0,0,0,0,0, \
/* pid etc.. */ 0,0,0,0,0, \
/* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
/* fs */ &init_fs, \
/* files */ &init_files, \
/* mm */ &init_mm, \
-/* signals */ &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, \
- 0, 0, \
-/* SMP */ 0,0,0,0, \
-/* locks */ INIT_LOCKS \
+/* signals */ INIT_LOCKS, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
}
union task_union {
#else
-#include <linux/interrupt.h>
-#include <asm/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth) \
- spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
- struct task_struct *tsk = current;
- int lock_depth;
-
- lock_depth = tsk->lock_depth;
- tsk->lock_depth = lock_depth+1;
- if (!lock_depth)
- spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
- struct task_struct *tsk = current;
- int lock_depth;
-
- lock_depth = tsk->lock_depth-1;
- tsk->lock_depth = lock_depth;
- if (!lock_depth)
- spin_unlock(&kernel_flag);
-}
+#include <asm/smplock.h>
#endif /* __SMP__ */
{
if (p != current) {
#ifdef __SMP__
- /* FIXME! Cheesy, but kills the window... -DaveM */
- do {
- barrier();
- } while (p->has_cpu);
- spin_unlock_wait(&scheduler_lock);
+ /*
+ * Wait to make sure the process isn't active on any
+ * other CPU
+ */
+ for (;;) {
+ int has_cpu;
+ spin_lock(&scheduler_lock);
+ has_cpu = p->has_cpu;
+ spin_unlock(&scheduler_lock);
+ if (!has_cpu)
+ break;
+ do {
+ barrier();
+ } while (p->has_cpu);
+ }
#endif
charge_uid(p, -1);
nr_tasks--;
add_free_taskslot(p->tarray_ptr);
- {
- write_lock_irq(&tasklist_lock);
- unhash_pid(p);
- REMOVE_LINKS(p);
- write_unlock_irq(&tasklist_lock);
- }
+
+ write_lock_irq(&tasklist_lock);
+ unhash_pid(p);
+ REMOVE_LINKS(p);
+ write_unlock_irq(&tasklist_lock);
+
release_thread(p);
current->cmin_flt += p->min_flt + p->cmin_flt;
current->cmaj_flt += p->maj_flt + p->cmaj_flt;
NORET_TYPE void do_exit(long code)
{
+ struct task_struct *tsk = current;
+
if (in_interrupt())
printk("Aiee, killing interrupt handler\n");
- if (current == task[0])
+ if (!tsk->pid)
panic("Attempted to kill the idle task!");
+ tsk->flags |= PF_EXITING;
+ del_timer(&tsk->real_timer);
+
+ lock_kernel();
fake_volatile:
- current->flags |= PF_EXITING;
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
- del_timer(&current->real_timer);
sem_exit();
- __exit_mm(current);
+ __exit_mm(tsk);
#if CONFIG_AP1000
- exit_msc(current);
+ exit_msc(tsk);
#endif
- __exit_files(current);
- __exit_fs(current);
- __exit_sighand(current);
+ __exit_files(tsk);
+ __exit_fs(tsk);
+ __exit_sighand(tsk);
exit_thread();
- current->state = TASK_ZOMBIE;
- current->exit_code = code;
+ tsk->state = TASK_ZOMBIE;
+ tsk->exit_code = code;
exit_notify();
#ifdef DEBUG_PROC_TREE
audit_ptree();
#endif
- if (current->exec_domain && current->exec_domain->module)
- __MOD_DEC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_DEC_USE_COUNT(current->binfmt->module);
+ if (tsk->exec_domain && tsk->exec_domain->module)
+ __MOD_DEC_USE_COUNT(tsk->exec_domain->module);
+ if (tsk->binfmt && tsk->binfmt->module)
+ __MOD_DEC_USE_COUNT(tsk->binfmt->module);
schedule();
/*
* In order to get rid of the "volatile function does return" message
asmlinkage int sys_exit(int error_code)
{
- lock_kernel();
do_exit((error_code&0xff)<<8);
- unlock_kernel();
}
asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
int nr;
- int error = -ENOMEM;
+ int retval = -ENOMEM;
struct task_struct *p;
 down(&current->mm->mmap_sem);
if (!p)
goto bad_fork;
- error = -EAGAIN;
+ retval = -EAGAIN;
nr = find_empty_process();
if (nr < 0)
goto bad_fork_free;
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
- p->next_run = NULL;
- p->prev_run = NULL;
+ /*
+ * This is a "shadow run" state. The process
+ * is marked runnable, but isn't actually on
+ * any run queue yet.. (that happens at the
+ * very end).
+ */
+ p->state = TASK_RUNNING;
+ p->next_run = p;
+ p->prev_run = p;
+
p->p_pptr = p->p_opptr = current;
p->p_cptr = NULL;
init_waitqueue(&p->wait_chldexit);
spin_lock_init(&p->sigmask_lock);
}
#endif
- p->lock_depth = 0;
+ p->lock_depth = -1; /* -1 = no lock */
p->start_time = jiffies;
p->tarray_ptr = &task[nr];
*p->tarray_ptr = p;
{
+ /* This makes it visible to the rest of the system */
unsigned long flags;
write_lock_irqsave(&tasklist_lock, flags);
SET_LINKS(p);
nr_tasks++;
- error = -ENOMEM;
+ retval = -ENOMEM;
/* copy all the process information */
if (copy_files(clone_flags, p))
goto bad_fork_cleanup;
goto bad_fork_cleanup_fs;
if (copy_mm(nr, clone_flags, p))
goto bad_fork_cleanup_sighand;
- error = copy_thread(nr, clone_flags, usp, p, regs);
- if (error)
+ retval = copy_thread(nr, clone_flags, usp, p, regs);
+ if (retval)
goto bad_fork_cleanup_sighand;
p->semundo = NULL;
current->counter >>= 1;
p->counter = current->counter;
- if(p->pid) {
- wake_up_process(p); /* do this last, just in case */
- } else {
- p->state = TASK_RUNNING;
- p->next_run = p->prev_run = p;
+ /* Ok, add it to the run-queues, let it rip! */
+ retval = p->pid;
+ if (retval) {
+ p->next_run = NULL;
+ p->prev_run = NULL;
+ wake_up_process(p); /* do this last */
}
++total_forks;
- error = p->pid;
bad_fork:
 up(&current->mm->mmap_sem);
unlock_kernel();
- return error;
+ return retval;
bad_fork_cleanup_sighand:
exit_sighand(p);
current->need_resched = 1;
}
-
+/*
+ * Careful!
+ *
+ * This has to add the process to the _beginning_ of the
+ * run-queue, not the end. See the comment about "This is
+ * subtle" in the scheduler proper..
+ */
static inline void add_to_runqueue(struct task_struct * p)
{
- nr_running++;
- reschedule_idle(p);
- (p->prev_run = init_task.prev_run)->next_run = p;
- p->next_run = &init_task;
- init_task.prev_run = p;
+ struct task_struct *next = init_task.next_run;
+
+ p->prev_run = &init_task;
+ init_task.next_run = p;
+ p->next_run = next;
+ next->prev_run = p;
}
static inline void del_from_runqueue(struct task_struct * p)
spin_lock_irqsave(&runqueue_lock, flags);
p->state = TASK_RUNNING;
- if (!p->next_run)
+ if (!p->next_run) {
add_to_runqueue(p);
+ reschedule_idle(p);
+ nr_running++;
+ }
spin_unlock_irqrestore(&runqueue_lock, flags);
}
ret = detach_timer(timer);
timer->next = timer->prev = 0;
spin_unlock_irqrestore(&timerlist_lock, flags);
+
+ /* Make sure the timer isn't running in parallel.. */
+ synchronize_bh();
return ret;
}
/*
* We play safe to avoid deadlocks.
*/
- spin_lock_irq(&scheduler_lock);
- spin_lock(&runqueue_lock);
+ spin_lock(&scheduler_lock);
+ spin_lock_irq(&runqueue_lock);
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
out_unlock:
read_unlock(&tasklist_lock);
- spin_unlock(&runqueue_lock);
- spin_unlock_irq(&scheduler_lock);
+ spin_unlock_irq(&runqueue_lock);
+ spin_unlock(&scheduler_lock);
out_nounlock:
return retval;
else
printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
-#if 0
- for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
- if (((unsigned long *)p->kernel_stack_page)[free])
- break;
+ {
+ unsigned long * n = (unsigned long *) (p+1);
+ while (!*n)
+ n++;
+ free = (unsigned long) n - (unsigned long)(p+1);
}
-#endif
- printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
+ printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
if (p->p_cptr)
printk("%5d ", p->p_cptr->pid);
else