# Maintained by Axel Boldt (boldt@math.ucsb.edu)
#
# This version of the Linux kernel configuration help texts
-# corresponds to the kernel versions 2.1.x. Be aware that these are
-# development kernels and need not be completely stable.
+# corresponds to the kernel versions 2.2.x.
#
# Translations of this file available on the WWW:
#
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
- The module will be called aha17400.o. If you want to compile it as a
+ The module will be called aha1740.o. If you want to compile it as a
module, say M here and read Documentation/modules.txt.
Adaptec AIC7xxx chipset SCSI controller support
- real-root-dev ==> Documentation/initrd.txt
- reboot-cmd [ SPARC only ]
- sg-big-buff [ generic SCSI device (sg) ]
+- shmmax [ sysv ipc ]
- version
- zero-paged [ PPC only ]
==============================================================
+shmmax:
+
+This value can be used to query and set the run-time limit
+on the maximum shared memory segment size that can be created.
+Shared memory segments up to 1Gb are now supported in the
+kernel. This value defaults to SHMMAX.
+
+==============================================================
+
zero-paged: (PPC only)
When enabled (non-zero), Linux-PPC will pre-zero pages in
VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION =-pre2
+EXTRAVERSION =-pre3
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
#
# Define implied options from the CPU selection here
#
-if [ "$CONFIG_M386" != "n" ]; then
- define_bool CONFIG_WP_WORKS_OK y
- define_bool CONFIG_INVLPG y
- define_bool CONFIG_BSWAP y
+if [ "$CONFIG_M386" != "y" ]; then
+ define_bool CONFIG_X86_WP_WORKS_OK y
+ define_bool CONFIG_X86_INVLPG y
+ define_bool CONFIG_X86_BSWAP y
+ define_bool CONFIG_X86_POPAD_OK y
fi
if [ "$CONFIG_M686" = "y" -o "$CONFIG_M586TSC" = "y" ]; then
- define_bool CONFIG_TSC y
+ define_bool CONFIG_X86_TSC y
fi
if [ "$CONFIG_M686" = "y" ]; then
- define_bool CONFIG_GOOD_APIC y
+ define_bool CONFIG_X86_GOOD_APIC y
fi
bool 'Math emulation' CONFIG_MATH_EMULATION
char ignore_irq13 = 0; /* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-static char Cx86_step[8]; /* decoded Cyrix step number */
/*
* Bus types ..
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
+
static char command_line[COMMAND_LINE_SIZE] = { 0, };
char saved_command_line[COMMAND_LINE_SIZE];
}
#endif
-#define VMALLOC_RESERVE (64 << 20) /* 64MB for vmalloc */
-#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
-
- if (memory_end > MAXMEM)
- memory_end = MAXMEM;
-
memory_end &= PAGE_MASK;
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
}
*to = '\0';
*cmdline_p = command_line;
+
+#define VMALLOC_RESERVE (64 << 20) /* 64MB for vmalloc */
+#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
+
+ if (memory_end > MAXMEM)
+ {
+ memory_end = MAXMEM;
+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+ MAXMEM>>20);
+ }
+
memory_end += PAGE_OFFSET;
*memory_start_p = memory_start;
*memory_end_p = memory_end;
conswitchp = &dummy_con;
#endif
#endif
+ /*
+ * Check the bugs that will bite us before we get booting
+ */
+
}
__initfunc(static int amd_model(struct cpuinfo_x86 *c))
cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
if (n < 4)
return 0;
+ cpuid(0x80000001, &dummy, &dummy, &dummy, &(c->x86_capability));
v = (unsigned int *) c->x86_model_id;
cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
}
/*
- * Use the Cyrix DEVID CPU registers if avail. to get more detailed info.
+ * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
*/
-__initfunc(static void do_cyrix_devid(struct cpuinfo_x86 *c))
+static inline void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
unsigned char ccr2, ccr3;
getCx86(0xc0); /* dummy */
if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
- c->x86_model = 0xfd;
+ *dir0 = 0xfd;
else { /* Cx486S A step */
setCx86(CX86_CCR2, ccr2);
- c->x86_model = 0xfe;
+ *dir0 = 0xfe;
}
}
else {
setCx86(CX86_CCR3, ccr3); /* restore CCR3 */
/* read DIR0 and DIR1 CPU registers */
- c->x86_model = getCx86(CX86_DIR0);
- c->x86_mask = getCx86(CX86_DIR1);
+ *dir0 = getCx86(CX86_DIR0);
+ *dir1 = getCx86(CX86_DIR1);
}
sti();
}
+/*
+ * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
+ * order to identify the Cyrix CPU model after we're out of setup.c
+ */
+unsigned char Cx86_dir0_msb __initdata = 0;
+
static char Cx86_model[][9] __initdata = {
"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
"M II ", "Unknown"
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
-static char cyrix_model_oldstep[] __initdata = "A step";
__initfunc(static void cyrix_model(struct cpuinfo_x86 *c))
{
- unsigned char dir0_msn, dir0_lsn, dir1;
+ unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
char *buf = c->x86_model_id;
const char *p = NULL;
- do_cyrix_devid(c);
+ do_cyrix_devid(&dir0, &dir1);
- dir0_msn = c->x86_model >> 4;
- dir0_lsn = c->x86_model & 0xf;
- dir1 = c->x86_mask;
+ Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+ dir0_lsn = dir0 & 0xf; /* model or clock multiplier */
- /* common case stepping number -- exceptions handled below */
- sprintf(Cx86_step, "%d.%d", (dir1 >> 4) + 1, dir1 & 0x0f);
+ /* common case step number/rev -- exceptions handled below */
+ c->x86_model = (dir1 >> 4) + 1;
+ c->x86_mask = dir1 & 0xf;
/* Now cook; the original recipe is by Channing Corn, from Cyrix.
* We do the same thing for each generation: we work out
- * the model, multiplier and stepping.
+ * the model, multiplier and stepping. Black magic included,
+ * to make the silicon step/rev numbers match the printed ones.
*/
+
switch (dir0_msn) {
unsigned char tmp;
if (dir1 > 0x21) { /* 686L */
Cx86_cb[0] = 'L';
p = Cx86_cb;
- Cx86_step[0]++;
+ (c->x86_model)++;
} else /* 686 */
p = Cx86_cb+1;
break;
case 4: /* MediaGX/GXm */
+ /*
+ * Life sometimes gets weiiiiiiiird if we use this
+ * on the MediaGX. So we turn it off for now.
+ */
+
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
amd_model(c); /* get CPU marketing name */
+ c->x86_capability&=~X86_FEATURE_TSC;
return;
}
else { /* MediaGX */
Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
p = Cx86_cb+2;
- Cx86_step[0] = (dir1 & 0x20) ? '1' : '2';
+ c->x86_model = (dir1 & 0x20) ? 1 : 2;
+ c->x86_capability&=~X86_FEATURE_TSC;
}
break;
case 5: /* 6x86MX/M II */
- /* the TSC is broken (for now) */
- c->x86_capability &= ~16;
-
if (dir1 > 7) dir0_msn++; /* M II */
tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
p = Cx86_cb+tmp;
if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
- Cx86_step[0]++;
+ (c->x86_model)++;
break;
- case 0xf: /* Cyrix 486 without DIR registers */
+ case 0xf: /* Cyrix 486 without DEVID registers */
switch (dir0_lsn) {
case 0xd: /* either a 486SLC or DLC w/o DEVID */
dir0_msn = 0;
case 0xe: /* a 486S A step */
dir0_msn = 0;
p = Cx486S_name[0];
- strcpy(Cx86_step, cyrix_model_oldstep);
- c->x86_mask = 1; /* must != 0 to print */
break;
break;
}
+
+ default: /* unknown (shouldn't happen, we know everyone ;-) */
+ dir0_msn = 7;
+ break;
}
strcpy(buf, Cx86_model[dir0_msn & 7]);
if (p) strcat(buf, p);
"486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
"Am5x86-WB" }},
{ X86_VENDOR_AMD, 5,
- { "K5/SSA5 (PR75, PR90, PR100)", "K5 (PR120, PR133)",
- "K5 (PR166)", "K5 (PR200)", NULL, NULL,
- "K6 (PR166 - PR266)", "K6 (PR166 - PR300)", "K6-2 (PR233 - PR333)",
- "K6-3 (PR300 - PR450)", NULL, NULL, NULL, NULL, NULL, NULL }},
+ { "K5/SSA5", "K5",
+ "K5", "K5", NULL, NULL,
+ "K6", "K6", "K6-2",
+ "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
{ X86_VENDOR_UMC, 4,
{ NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL }},
{ X86_VENDOR_CENTAUR, 5,
- { NULL, NULL, NULL, NULL, "C6", NULL, NULL, NULL, NULL, NULL, NULL,
+ { NULL, NULL, NULL, NULL, "C6", NULL, NULL, NULL, "C6-2", NULL, NULL,
NULL, NULL, NULL, NULL, NULL }},
{ X86_VENDOR_NEXGEN, 5,
{ "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
return;
}
+ if (c->x86_vendor == X86_VENDOR_AMD && amd_model(c))
+ return;
+
for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) {
if (c->cpuid_level > 1) {
/* supports eax=2 call */
return;
}
- if (c->x86_vendor == X86_VENDOR_AMD && amd_model(c))
- return;
-
sprintf(c->x86_model_id, "%02x/%02x", c->x86_vendor, c->x86_model);
}
+/*
+ * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+
+__initfunc(void dodgy_tsc(void))
+{
+ get_cpu_vendor(&boot_cpu_data);
+
+ if(boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)
+ {
+ return;
+ }
+ cyrix_model(&boot_cpu_data);
+}
+
+
+#define rdmsr(msr,val1,val2) \
+ __asm__ __volatile__("rdmsr" \
+ : "=a" (val1), "=d" (val2) \
+ : "c" (msr))
+
+#define wrmsr(msr,val1,val2) \
+ __asm__ __volatile__("wrmsr" \
+ : /* no outputs */ \
+ : "c" (msr), "a" (val1), "d" (val2))
+
static char *cpu_vendor_names[] __initdata = {
"Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur" };
+
__initfunc(void print_cpu_info(struct cpuinfo_x86 *c))
{
char *vendor = NULL;
else
printk("%s", c->x86_model_id);
- if (c->x86_mask) {
- if (c->x86_vendor == X86_VENDOR_CYRIX)
- printk(" stepping %s", Cx86_step);
- else
- printk(" stepping %02x", c->x86_mask);
+ if (c->x86_mask)
+ printk(" stepping %02x", c->x86_mask);
+
+ if(c->x86_vendor == X86_VENDOR_CENTAUR)
+ {
+ u32 hv,lv;
+ rdmsr(0x107, lv, hv);
+ printk("\nCentaur FSR was 0x%X ",lv);
+ lv|=(1<<8);
+ lv|=(1<<7);
+ /* lv|=(1<<6); - may help too if the board can cope */
+ printk("now 0x%X", lv);
+ wrmsr(0x107, lv, hv);
}
printk("\n");
}
c->x86_model,
c->x86_model_id[0] ? c->x86_model_id : "unknown");
- if (c->x86_mask) {
- if (c->x86_vendor == X86_VENDOR_CYRIX)
- p += sprintf(p, "stepping\t: %s\n", Cx86_step);
- else
- p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
- } else
+ if (c->x86_mask)
+ p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
+ else
p += sprintf(p, "stepping\t: unknown\n");
if (c->x86_capability & X86_FEATURE_TSC) {
- p += sprintf(p, "cpu MHz\t\t: %lu.%06lu\n",
+ p += sprintf(p, "cpu MHz\t\t: %lu.%02lu\n",
cpu_hz / 1000000, (cpu_hz % 1000000));
}
p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size);
/* Modify the capabilities according to chip type */
- if (c->x86_mask) {
- if (c->x86_vendor == X86_VENDOR_CYRIX) {
- x86_cap_flags[24] = "cxmmx";
- } else if (c->x86_vendor == X86_VENDOR_AMD) {
- x86_cap_flags[16] = "fcmov";
- x86_cap_flags[31] = "amd3d";
- } else if (c->x86_vendor == X86_VENDOR_INTEL) {
- x86_cap_flags[6] = "pae";
- x86_cap_flags[9] = "apic";
- x86_cap_flags[12] = "mtrr";
- x86_cap_flags[14] = "mca";
- x86_cap_flags[16] = "pat";
- x86_cap_flags[17] = "pse36";
- x86_cap_flags[24] = "osfxsr";
- }
+ if (c->x86_vendor == X86_VENDOR_CYRIX) {
+ x86_cap_flags[24] = "cxmmx";
+ } else if (c->x86_vendor == X86_VENDOR_AMD) {
+ x86_cap_flags[16] = "fcmov";
+ x86_cap_flags[31] = "3dnow";
+ } else if (c->x86_vendor == X86_VENDOR_INTEL) {
+ x86_cap_flags[6] = "pae";
+ x86_cap_flags[9] = "apic";
+ x86_cap_flags[12] = "mtrr";
+ x86_cap_flags[14] = "mca";
+ x86_cap_flags[16] = "pat";
+ x86_cap_flags[17] = "pse36";
+ x86_cap_flags[24] = "osfxsr";
}
sep_bug = c->x86_vendor == X86_VENDOR_INTEL &&
* scheduling on <=i486 based SMP boards.
*/
cacheflush_time = 0;
+ return;
} else {
cachesize = boot_cpu_data.x86_cache_size;
if (cachesize == -1)
cacheflush_time = cpu_hz/1024*cachesize/5000;
}
- printk("per-CPU timeslice cutoff: %ld.%ld usecs.\n",
+ printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
(long)cacheflush_time/(cpu_hz/1000000),
((long)cacheflush_time*100/(cpu_hz/1000000)) % 100);
}
* Silly serialization to work around CPU bug in P5s.
* We can safely turn it off on a 686.
*/
-#ifdef CONFIG_GOOD_APIC
+#ifdef CONFIG_X86_GOOD_APIC
# define FORCE_APIC_SERIALIZATION 0
#else
# define FORCE_APIC_SERIALIZATION 1
#define TICK_SIZE tick
-#ifndef CONFIG_TSC
+#ifndef CONFIG_X86_TSC
/* This function must be called with interrupts disabled
* It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
* to disk; this won't break the kernel, though, 'cuz we're
* smart. See arch/i386/kernel/apm.c.
*/
+ /*
+ * Firstly we have to do a CPU check for chips with
+ * a potentially buggy TSC. At this point we haven't run
+ * the ident/bugs checks so we must run this hook as it
+ * may turn off the TSC flag.
+ *
+	 * NOTE: this doesn't yet handle SMP 486 machines where only
+	 * some CPUs have a TSC. That's never worked and nobody has
+	 * moaned if you have the only one in the world - you fix it!
+ */
+
+ dodgy_tsc();
+
if (boot_cpu_data.x86_capability & X86_FEATURE_TSC) {
#ifndef do_gettimeoffset
do_gettimeoffset = do_fast_gettimeoffset;
if (boot_cpu_data.wp_works_ok < 0) {
boot_cpu_data.wp_works_ok = 0;
printk("No.\n");
-#ifdef CONFIG_WP_WORKS_OK
+#ifdef CONFIG_X86_WP_WORKS_OK
panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
} else
return 0;
if (drive->forced_geom) {
- /* bombs otherwise /axboe */
- if (drive == NULL)
- return 0;
/*
* Update the current 3D drive values.
*/
* occur during the bootup sequence
*/
timeout = jiffies+(HZ/10);
- while (timeout >= jiffies)
+ while (time_after_eq(timeout, jiffies))
;
cy_triggered = 0; /* Reset after letting things settle */
timeout = jiffies+(HZ/10);
- while (timeout >= jiffies)
+ while (time_after_eq(timeout, jiffies))
;
for (i = 0, mask = 1; i < 16; i++, mask <<= 1) {
restore_flags(flags);
timeout = jiffies+(HZ/50);
- while (timeout >= jiffies) {
+ while (time_after_eq(timeout, jiffies)) {
if (cy_irq_triggered)
break;
}
schedule_timeout(char_time);
if (signal_pending(current))
break;
- if (timeout && ((orig_jiffies + timeout) < jiffies))
+ if (timeout && time_before(orig_jiffies + timeout, jiffies))
break;
}
current->state = TASK_RUNNING;
tmp.baud_base = info->baud;
tmp.custom_divisor = 0; /*!!!*/
tmp.hub6 = 0; /*!!!*/
- copy_to_user(retinfo,&tmp,sizeof(*retinfo));
- return 0;
+ return copy_to_user(retinfo,&tmp,sizeof(*retinfo))?-EFAULT:0;
} /* get_serial_info */
get_mon_info(struct cyclades_port * info, struct cyclades_monitor * mon)
{
- copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor));
+ if(copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
+ return -EFAULT;
info->mon.int_count = 0;
info->mon.char_count = 0;
info->mon.char_max = 0;
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/init.h>
-#include <asm/irq.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
#include <linux/malloc.h>
#include <linux/interrupt.h>
+#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
struct inode *inode = file->f_dentry->d_inode;
unsigned long total_bytes_written = 0;
unsigned int flags;
+ long timeout;
int rc;
int dev = MINOR(inode->i_rdev);
/* something blocked printing, so we don't want to sleep too long,
in case we have to rekick the interrupt */
- current->timeout = jiffies + LP_TIMEOUT_POLLED;
+ timeout = LP_TIMEOUT_POLLED;
} else {
- current->timeout = jiffies + LP_TIMEOUT_INTERRUPT;
+ timeout = LP_TIMEOUT_INTERRUPT;
}
- interruptible_sleep_on(&lp_table[dev]->lp_wait_q);
+ interruptible_sleep_on_timeout(&lp_table[dev]->lp_wait_q, timeout);
restore_flags(flags);
/* we're up again and running. we first disable lp_interrupt(), then
int dev = MINOR(inode->i_rdev);
#ifdef LP_DEBUG
- if (jiffies-lp_last_call > lp_table[dev]->time) {
+ if (time_after(jiffies, lp_last_call + lp_table[dev]->time)) {
lp_total_chars = 0;
lp_max_count = 1;
}
lp_total_chars = 0;
#endif
current->state = TASK_INTERRUPTIBLE;
- current->timeout = jiffies + timeout;
- schedule();
+ schedule_timeout(timeout);
}
}
return temp - buf;
pms_i2c_write(0x8A, 0x00, colour);
break;
case PHILIPS1:
- pms_i2c_write(0x42, 012, colour);
+ pms_i2c_write(0x42, 0x12, colour);
break;
}
}
* Eastlake, Steve Crocker, and Jeff Schiller.
*/
+/*
+ * Added a check for signal pending in the extract_entropy() loop to allow
+ * the read(2) syscall to be interrupted. Copyright (C) 1998 Andrea Arcangeli
+ */
+
#include <linux/utsname.h>
#include <linux/config.h>
#include <linux/kernel.h>
buf += i;
add_timer_randomness(r, &extract_timer_state, nbytes);
if (to_user && current->need_resched)
+ {
+ if (signal_pending(current))
+ {
+ ret = -EINTR;
+ break;
+ }
schedule();
+ }
}
/* Wipe data just returned from memory */
#include <asm/bitops.h>
#include <asm/io.h>
+#include <asm/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
-#define BLOCKOUT_2
-
/* A zero-terminated list of I/O addresses to be probed.
The 3c501 can be at many locations, but here are the popular ones. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x280, 0x300, 0};
+static unsigned int netcard_portlist[] __initdata = {
+ 0x280, 0x300, 0
+};
\f
/*
struct net_local
{
- struct net_device_stats stats;
- int tx_pkt_start; /* The length of the current Tx packet. */
- int collisions; /* Tx collisions this packet */
- int loading; /* Spot buffer load collisions */
+ struct net_device_stats stats;
+ int tx_pkt_start; /* The length of the current Tx packet. */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+ spinlock_t lock; /* Serializing lock */
};
\f
__initfunc(static int el1_probe1(struct device *dev, int ioaddr))
{
+ struct net_local *lp;
const char *mname; /* Vendor name */
unsigned char station_addr[6];
int autoirq = 0;
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct net_local));
+ lp=dev->priv;
+ spin_lock_init(&lp->lock);
+
/*
* The EL1-specific entries in the device structure.
*/
dev->trans_start = jiffies;
}
- save_flags(flags);
-
/*
* Avoid incoming interrupts between us flipping tbusy and flipping
* mode as the driver assumes tbusy is a faithful indicator of card
* state
*/
- cli();
-
+ spin_lock_irqsave(&lp->lock, flags);
+
/*
* Avoid timer-based retransmission conflicts.
*/
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
{
- restore_flags(flags);
- printk("%s: Transmitter access conflict.\n", dev->name);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
}
else
{
* mean no more interrupts can be pending on the card.
*/
-#ifdef BLOCKOUT_1
- disable_irq(dev->irq);
-#endif
outb_p(AX_SYS, AX_CMD);
inb_p(RX_STATUS);
inb_p(TX_STATUS);
* loading bytes into the board
*/
- restore_flags(flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
outw(gp_start, GP_LOW); /* the board reuses the same register */
-#ifndef BLOCKOUT_1
+
if(lp->loading==2) /* A receive upset our load, despite our best efforts */
{
if(el_debug>2)
printk("%s: burped during tx load.\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
goto load_it_again_sam; /* Sigh... */
}
-#endif
outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
lp->loading=0;
-#ifdef BLOCKOUT_1
- enable_irq(dev->irq);
-#endif
dev->trans_start = jiffies;
}
if (dev == NULL || dev->irq != irq)
{
- printk ("3c501 driver: irq %d for unknown device.\n", irq);
+ printk (KERN_ERR "3c501 driver: irq %d for unknown device.\n", irq);
return;
}
ioaddr = dev->base_addr;
lp = (struct net_local *)dev->priv;
+ spin_lock(&lp->lock);
+
/*
* What happened ?
*/
*/
if (el_debug > 3)
- printk("%s: el_interrupt() aux=%#02x", dev->name, axsr);
+ printk(KERN_DEBUG "%s: el_interrupt() aux=%#02x", dev->name, axsr);
if (dev->interrupt)
- printk("%s: Reentering the interrupt driver!\n", dev->name);
+ printk(KERN_WARNING "%s: Reentering the interrupt driver!\n", dev->name);
dev->interrupt = 1;
-#ifndef BLOCKOUT_1
if(lp->loading==1 && !dev->tbusy)
- printk("%s: Inconsistent state loading while not in tx\n",
+ printk(KERN_WARNING "%s: Inconsistent state loading while not in tx\n",
dev->name);
-#endif
-#ifdef BLOCKOUT_3
- lp->loading=2; /* So we can spot loading interruptions */
-#endif
if (dev->tbusy)
{
*/
int txsr = inb(TX_STATUS);
-#ifdef BLOCKOUT_2
+
if(lp->loading==1)
{
if(el_debug > 2)
{
- printk("%s: Interrupt while loading [", dev->name);
+ printk(KERN_DEBUG "%s: Interrupt while loading [", dev->name);
printk(" txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
}
lp->loading=2; /* Force a reload */
dev->interrupt = 0;
+ spin_unlock(&lp->lock);
return;
}
-#endif
+
if (el_debug > 6)
- printk(" txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
+ printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
if ((axsr & 0x80) && (txsr & TX_READY) == 0)
{
outb(AX_XMIT, AX_CMD);
lp->stats.collisions++;
dev->interrupt = 0;
+ spin_unlock(&lp->lock);
return;
}
else
inb(RX_STATUS); /* Be certain that interrupts are cleared. */
inb(TX_STATUS);
dev->interrupt = 0;
+ spin_unlock(&lp->lock);
return;
}
ep->tx_ring[entry].bufaddr = virt_to_bus(skb->data);
ep->tx_ring[entry].buflength = skb->len;
+ /* tx_bytes counting -- Nolan Leake */
+ ep->stats.tx_bytes += ep->tx_ring[entry].txlength;
+
if (ep->cur_tx - ep->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
flag = 0x10; /* No interrupt */
clear_bit(0, (void*)&dev->tbusy);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
ep->stats.rx_packets++;
+ /* rx_bytes counting -- Nolan Leake */
+ ep->stats.rx_bytes += pkt_len;
}
work_done++;
entry = (++ep->cur_rx) % RX_RING_SIZE;
* Jan 07, 1997 Gene Kozin Initial version.
*****************************************************************************/
-#if !defined(__KERNEL__) || !defined(MODULE)
-#error This code MUST be compiled as a kernel module!
-#endif
#include <linux/kernel.h> /* printk(), and other useful stuff */
#include <linux/stddef.h> /* offsetof(), etc. */
#define DSP_BUFFCOUNT 1 /* 1 is recommended. */
#endif
-#define DMA_AUTOINIT 0x10
-
#define FM_MONO 0x388 /* This is the I/O address used by AdLib */
#ifndef CONFIG_PAS_BASE
static int free_id = 1;
static int enabled = 1;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
static rwlock_t entries_lock = RW_LOCK_UNLOCKED;
#endif
struct dentry * dentry;
char iname[128];
char *iname_addr = iname;
- int retval, fmt_flags = 0;
+ int retval;
MOD_INC_USE_COUNT;
- if (!enabled) {
- retval = -ENOEXEC;
+ retval = -ENOEXEC;
+ if (!enabled)
goto _ret;
- }
/* to keep locking time low, we copy the interpreter string */
read_lock(&entries_lock);
- if ((fmt = check_file(bprm))) {
+ fmt = check_file(bprm);
+ if (fmt) {
strncpy(iname, fmt->interpreter, 127);
iname[127] = '\0';
- fmt_flags = fmt->flags;
}
read_unlock(&entries_lock);
- if (!fmt) {
- retval = -ENOEXEC;
+ if (!fmt)
goto _ret;
- }
dput(bprm->dentry);
bprm->dentry = NULL;
bprm->argc++;
bprm->p = copy_strings(1, &iname_addr, bprm->page, bprm->p, 2);
bprm->argc++;
- if (!bprm->p) {
- retval = -E2BIG;
+ retval = -E2BIG;
+ if (!bprm->p)
goto _ret;
- }
bprm->filename = iname; /* for binfmt_script */
dentry = open_namei(iname, 0, 0);
*/
static void entry_proc_cleanup(struct binfmt_entry *e)
{
-#ifdef CONFIG_PROC_FS
remove_proc_entry(e->proc_name, bm_dir);
-#endif
}
/*
*/
static int entry_proc_setup(struct binfmt_entry *e)
{
-#ifdef CONFIG_PROC_FS
if (!(e->proc_dir = create_proc_entry(e->proc_name,
S_IFREG | S_IRUGO | S_IWUSR, bm_dir)))
return -ENOMEM;
e->proc_dir->data = (void *) (e->id);
e->proc_dir->read_proc = proc_read_status;
e->proc_dir->write_proc = proc_write_status;
-#endif
return 0;
}
int __init init_misc_binfmt(void)
{
int error = -ENOMEM;
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry *status = NULL, *reg;
bm_dir = create_proc_entry("sys/fs/binfmt_misc", S_IFDIR, NULL);
if (!reg)
goto cleanup_status;
reg->write_proc = proc_write_register;
-#endif /* CONFIG_PROC_FS */
error = register_binfmt(&misc_format);
out:
bh->b_next_free = bh->b_prev_free = NULL;
}
-static inline void remove_from_queues(struct buffer_head * bh)
+static void remove_from_queues(struct buffer_head * bh)
{
if(bh->b_dev == B_FREE) {
remove_from_free_list(bh); /* Free list entries should not be
}
}
-static inline void insert_into_queues(struct buffer_head * bh)
+static void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
if(bh->b_dev == B_FREE) {
}
/*
- * Find a candidate buffer to be reclaimed.
- * N.B. Must search the entire BUF_LOCKED list rather than terminating
- * when the first locked buffer is found. Buffers are unlocked at
- * completion of IO, and under some conditions there may be (many)
- * unlocked buffers after the first locked one.
+ * We used to try various strange things. Let's not.
*/
-static struct buffer_head *find_candidate(struct buffer_head *bh,
- int *list_len, int size)
-{
- if (!bh)
- goto no_candidate;
-
- for (; (*list_len) > 0; bh = bh->b_next_free, (*list_len)--) {
- if (size != bh->b_size && !buffer_touched(bh)) {
- /* This provides a mechanism for freeing blocks
- * of other sizes, this is necessary now that we
- * no longer have the lav code.
- */
- try_to_free_buffer(bh,&bh);
- if (!bh)
- break;
- continue;
- }
- else if (!bh->b_count &&
- !buffer_locked(bh) &&
- !buffer_protected(bh) &&
- !buffer_dirty(bh))
- return bh;
- }
-
-no_candidate:
- return NULL;
-}
-
static void refill_freelist(int size)
{
- struct buffer_head * bh, * next;
- struct buffer_head * candidate[BUF_DIRTY];
- int buffers[BUF_DIRTY];
- int i;
- int needed, obtained=0;
-
- refilled = 1;
-
- /* We are going to try to locate this much memory. */
- needed = bdf_prm.b_un.nrefill * size;
-
- while ((nr_free_pages > freepages.min*2) &&
- !buffer_over_max() &&
- grow_buffers(GFP_BUFFER, size)) {
- obtained += PAGE_SIZE;
- if (obtained >= needed)
- return;
- }
-
- /*
- * Update the needed amount based on the number of potentially
- * freeable buffers. We don't want to free more than one quarter
- * of the available buffers.
- */
- i = (nr_buffers_type[BUF_CLEAN] + nr_buffers_type[BUF_LOCKED]) >> 2;
- if (i < bdf_prm.b_un.nrefill) {
- needed = i * size;
- if (needed < PAGE_SIZE)
- needed = PAGE_SIZE;
- }
-
- /*
- * OK, we cannot grow the buffer cache, now try to get some
- * from the lru list.
- */
-repeat:
- if (obtained >= needed)
- return;
-
- /*
- * First set the candidate pointers to usable buffers. This
- * should be quick nearly all of the time. N.B. There must be
- * no blocking calls after setting up the candidate[] array!
- */
- for (i = BUF_CLEAN; i<BUF_DIRTY; i++) {
- buffers[i] = nr_buffers_type[i];
- candidate[i] = find_candidate(lru_list[i], &buffers[i], size);
- }
-
- /*
- * Select the older of the available buffers until we reach our goal.
- */
- for (;;) {
- i = BUF_CLEAN;
- if (!candidate[BUF_CLEAN]) {
- if (!candidate[BUF_LOCKED])
- break;
- i = BUF_LOCKED;
- }
- else if (candidate[BUF_LOCKED] &&
- (candidate[BUF_LOCKED]->b_lru_time <
- candidate[BUF_CLEAN ]->b_lru_time))
- i = BUF_LOCKED;
- /*
- * Free the selected buffer and get the next candidate.
- */
- bh = candidate[i];
- next = bh->b_next_free;
-
- obtained += bh->b_size;
- remove_from_queues(bh);
- put_last_free(bh);
- if (obtained >= needed)
- return;
-
- if (--buffers[i] && bh != next)
- candidate[i] = find_candidate(next, &buffers[i], size);
- else
- candidate[i] = NULL;
- }
-
- /*
- * If there are dirty buffers, do a non-blocking wake-up.
- * This increases the chances of having buffers available
- * for the next call ...
- */
- if (nr_buffers_type[BUF_DIRTY])
- wakeup_bdflush(0);
-
- /*
- * Allocate buffers to reach half our goal, if possible.
- * Since the allocation doesn't block, there's no reason
- * to search the buffer lists again. Then return if there
- * are _any_ free buffers.
- */
- while (obtained < (needed >> 1) &&
- nr_free_pages > freepages.min + 5 &&
- grow_buffers(GFP_BUFFER, size))
- obtained += PAGE_SIZE;
-
- if (free_list[BUFSIZE_INDEX(size)])
- return;
-
- /*
- * If there are dirty buffers, wait while bdflush writes
- * them out. The buffers become locked, but we can just
- * wait for one to unlock ...
- */
- if (nr_buffers_type[BUF_DIRTY])
+ if (!grow_buffers(GFP_KERNEL, size)) {
wakeup_bdflush(1);
-
- /*
- * In order to prevent a buffer shortage from exhausting
- * the system's reserved pages, we force tasks to wait
- * before using reserved pages for buffers. This is easily
- * accomplished by waiting on an unused locked buffer.
- */
- if ((bh = lru_list[BUF_LOCKED]) != NULL) {
- for (i = nr_buffers_type[BUF_LOCKED]; i--; bh = bh->b_next_free)
- {
- if (bh->b_size != size)
- continue;
- if (bh->b_count)
- continue;
- if (!buffer_locked(bh))
- continue;
- if (buffer_dirty(bh) || buffer_protected(bh))
- continue;
- if (MAJOR(bh->b_dev) == LOOP_MAJOR)
- continue;
- /*
- * We've found an unused, locked, non-dirty buffer of
- * the correct size. Claim it so no one else can,
- * then wait for it to unlock.
- */
- bh->b_count++;
- wait_on_buffer(bh);
- bh->b_count--;
- /*
- * Loop back to harvest this (and maybe other) buffers.
- */
- goto repeat;
- }
- }
-
- /*
- * Convert a reserved page into buffers ... should happen only rarely.
- */
- if (grow_buffers(GFP_ATOMIC, size)) {
-#ifdef BUFFER_DEBUG
-printk("refill_freelist: used reserve page\n");
-#endif
- return;
+ current->policy |= SCHED_YIELD;
+ schedule();
}
-
- /*
- * System is _very_ low on memory ... sleep and try later.
- */
-#ifdef BUFFER_DEBUG
-printk("refill_freelist: task %s waiting for buffers\n", current->comm);
-#endif
- schedule();
- goto repeat;
}
void init_buffer(struct buffer_head *bh, kdev_t dev, int block,
return 1;
}
-
-/* =========== Reduce the buffer memory ============= */
-
-static inline int buffer_waiting(struct buffer_head * bh)
-{
- return waitqueue_active(&bh->b_wait);
-}
+/*
+ * Can the buffer be thrown out?
+ */
+#define BUFFER_BUSY_BITS ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
+#define buffer_busy(bh) ((bh)->b_count || ((bh)->b_state & BUFFER_BUSY_BITS))
/*
- * try_to_free_buffer() checks if all the buffers on this particular page
+ * try_to_free_buffers() checks if all the buffers on this particular page
* are unused, and free's the page if so.
+ *
+ * Wake up bdflush() if this fails - if we're running low on memory due
+ * to dirty buffers, we need to flush them out as quickly as possible.
*/
-int try_to_free_buffer(struct buffer_head * bh, struct buffer_head ** bhp)
+int try_to_free_buffers(struct page * page_map)
{
- unsigned long page;
- struct buffer_head * tmp, * p;
+ struct buffer_head * tmp, * bh = page_map->buffers;
- *bhp = bh;
- page = (unsigned long) bh->b_data;
- page &= PAGE_MASK;
tmp = bh;
do {
- if (!tmp)
- return 0;
- if (tmp->b_count || buffer_protected(tmp) ||
- buffer_dirty(tmp) || buffer_locked(tmp) ||
- buffer_waiting(tmp))
- return 0;
+ struct buffer_head * p = tmp;
+
tmp = tmp->b_this_page;
+ if (!buffer_busy(p))
+ continue;
+
+ wakeup_bdflush(0);
+ return 0;
} while (tmp != bh);
tmp = bh;
do {
- p = tmp;
+ struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
nr_buffers--;
- if (p == *bhp) {
- *bhp = p->b_prev_free;
- if (p == *bhp) /* Was this the last in the list? */
- *bhp = NULL;
- }
remove_from_queues(p);
put_unused_buffer_head(p);
} while (tmp != bh);
+
/* Wake up anyone waiting for buffer heads */
wake_up(&buffer_wait);
+ /* And free the page */
buffermem -= PAGE_SIZE;
- mem_map[MAP_NR(page)].buffers = NULL;
- free_page(page);
+ page_map->buffers = NULL;
+ __free_page(page_map);
return 1;
}
MSDOS_SB(sb)->root_cluster = CF_LE_L(b->root_cluster);
MSDOS_SB(sb)->fsinfo_offset =
CF_LE_W(b->info_sector) * logical_sector_size + 0x1e0;
+ if (MSDOS_SB(sb)->fsinfo_offset + sizeof(MSDOS_SB(sb)->fsinfo_offset) >= sizeof(struct fat_boot_sector)) {
+ printk("fat_read_super: Bad fsinfo_offset\n");
+ fat_brelse(sb, bh);
+ goto out_invalid;
+ }
fsinfo = (struct fat_boot_fsinfo *)
&bh->b_data[MSDOS_SB(sb)->fsinfo_offset];
- if ((MSDOS_SB(sb)->fsinfo_offset - sizeof(MSDOS_SB(sb)->fsinfo_offset) + 1)> bh->b_size)
- printk("fat_read_super: Bad fsinfo_offset\n");
- else if (CF_LE_L(fsinfo->signature) != 0x61417272) {
+ if (CF_LE_L(fsinfo->signature) != 0x61417272) {
printk("fat_read_super: Did not find valid FSINFO "
"signature. Found 0x%x\n",
CF_LE_L(fsinfo->signature));
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_AUTOINIT 0x10
+
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
*
* Cyrix stuff, June 1998 by:
* - Rafael R. Reilova (moved everything from head.S),
+ * <rreilova@ececs.uc.edu>
* - Channing Corn (tests & fixes),
* - Andrew D. Balsa (code cleanup).
*/
printk("OK.\n");
}
-__initfunc(static void check_tlb(void))
-{
-#ifndef CONFIG_M386
- /*
- * The 386 chips don't support TLB finegrained invalidation.
- * They will fault when they hit an invlpg instruction.
- */
- if (boot_cpu_data.x86 == 3) {
- printk(KERN_EMERG "CPU is a 386 and this kernel was compiled for 486 or better.\n");
- printk("Giving up.\n");
- for (;;) ;
- }
-#endif
-}
-
/*
* Most 386 processors have a bug where a POPAD can lock the
* machine even from user space.
__initfunc(static void check_popad(void))
{
-#ifdef CONFIG_M386
+#ifndef CONFIG_X86_POPAD_OK
int res, inp = (int) &res;
printk(KERN_INFO "Checking for popad bug... ");
__asm__ __volatile__(
"movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
- : "=eax" (res)
- : "edx" (inp)
- : "eax", "ecx", "edx", "edi" );
+ : "=&a" (res)
+ : "d" (inp)
+ : "ecx", "edi" );
/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
if (res != 12345678) printk( "Buggy.\n" );
else printk( "OK.\n" );
}
/*
- * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
- * by the fact that they preserve the flags across the division of 5/2.
- * PII and PPro exhibit this behavior too, but they have cpuid available.
+ * Fix cpuid problems with Cyrix CPUs:
+ * -- on the Cx686(L) the cpuid is disabled on power up.
+ *  -- braindamaged BIOSes disable cpuid on the Cx686MX.
*/
-__initfunc(static void check_cyrix_cpu(void))
+extern unsigned char Cx86_dir0_msb; /* exported HACK from cyrix_model() */
+
+__initfunc(static void check_cx686_cpuid(void))
{
- if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
- && test_cyrix_52div()) {
+ if (boot_cpu_data.cpuid_level == -1 &&
+ ((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) {
+ int eax, dummy;
+ unsigned char ccr3, ccr4;
- /* default to an unknown Cx486, (we will differentiate later) */
- /* NOTE: using 0xff since 0x00 is a valid DIR0 value */
- strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
- boot_cpu_data.x86_model = 0xff;
- boot_cpu_data.x86_mask = 0;
+ cli();
+ ccr3 = getCx86(CX86_CCR3);
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ ccr4 = getCx86(CX86_CCR4);
+ setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ sti();
+
+ /* we have up to level 1 available on the Cx6x86(L|MX) */
+ boot_cpu_data.cpuid_level = 1;
+ cpuid(1, &eax, &dummy, &dummy,
+ &boot_cpu_data.x86_capability);
+
+ boot_cpu_data.x86 = (eax >> 8) & 15;
+ /*
+ * we already have a cooked step/rev number from DIR1
+ * so we don't use the cpuid-provided ones.
+ */
}
}
/*
- * Fix two problems with the Cyrix 6x86 and 6x86L:
- * -- the cpuid is disabled on power up, enable it, use it.
- * -- the SLOP bit needs resetting on some motherboards due to old BIOS,
- * so that the udelay loop calibration works well. Recalibrate.
+ * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
+ * BIOSes for compatibility with DOS games. This makes the udelay loop
+ * work correctly, and improves performance.
*/
extern void calibrate_delay(void) __init;
-__initfunc(static void check_cx686_cpuid_slop(void))
+__initfunc(static void check_cx686_slop(void))
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX &&
- (boot_cpu_data.x86_model & 0xf0) == 0x30) { /* 6x86(L) */
- int dummy;
- unsigned char ccr3, ccr4, ccr5;
+ if (Cx86_dir0_msb == 3) {
+ unsigned char ccr3, ccr5;
cli();
ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ccr4 = getCx86(CX86_CCR4);
- setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
ccr5 = getCx86(CX86_CCR5);
- if (ccr5 & 2) /* reset SLOP if needed, old BIOS do this wrong */
- setCx86(CX86_CCR5, ccr5 & 0xfd);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ if (ccr5 & 2)
+ setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
sti();
- boot_cpu_data.cpuid_level = 1; /* should cover all 6x86(L) */
- boot_cpu_data.x86 = 5;
-
- /* we know we have level 1 available on the 6x86(L) */
- cpuid(1, &dummy, &dummy, &dummy,
- &boot_cpu_data.x86_capability);
- /*
- * DON'T use the x86_mask and x86_model from cpuid, these are
- * not as accurate (or the same) as those from the DIR regs.
- * already in place after cyrix_model() in setup.c
- */
-
if (ccr5 & 2) { /* possible wrong calibration done */
printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
calibrate_delay();
}
/*
- * Check wether we are able to run this kernel safely with this
- * configuration. Various configs imply certain minimum requirements
- * of the machine:
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+
+__initfunc(static void check_cyrix_cpu(void))
+{
+ if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
+ && test_cyrix_52div()) {
+
+ strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
+ }
+}
+
+/*
+ * Check whether we are able to run this kernel safely on SMP.
*
* - In order to run on a i386, we need to be compiled for i386
* (for due to lack of "invlpg" and working WP on a i386)
* compiled for a Pentium or lower, as a PPro config implies
* a properly working local APIC without the need to do extra
* reads from the APIC.
- */
+*/
+
__initfunc(static void check_config(void))
{
- /* Configuring for a i386 will boot on anything */
-#ifndef CONFIG_M386
- /* Configuring for an i486 only implies 'invlpg' and a working WP bit */
+/*
+ * We'd better not be an i386 if we're configured to use some
+ * i486+ only features! (WP works in supervisor mode and the
+ * new "invlpg" and "bswap" instructions)
+ */
+#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
if (boot_cpu_data.x86 == 3)
panic("Kernel requires i486+ for 'invlpg' and other features");
+#endif
-#ifndef CONFIG_M486
-
-#ifndef CONFIG_M586
- /* Configuring for a PPro implies that we have an IO-APIC without the read-before-write bug */
-
-#endif /* CONFIG_M586 */
-#endif /* CONFIG_M486 */
-#endif /* CONFIG_M386 */
-
-/* If we configured ourselves for a TSC, we'd better have one! */
-#ifdef CONFIG_TSC
+/*
+ * If we configured ourselves for a TSC, we'd better have one!
+ */
+#ifdef CONFIG_X86_TSC
if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC))
panic("Kernel compiled for Pentium+, requires TSC");
#endif
-/* If we were told we had a good APIC for SMP, we'd better be a PPro */
-#ifdef CONFIG_GOOD_APIC
+/*
+ * If we were told we had a good APIC for SMP, we'd better be a PPro
+ */
+#if defined(CONFIG_X86_GOOD_APIC) && defined(CONFIG_SMP)
if (smp_found_config && boot_cpu_data.x86 <= 5)
panic("Kernel compiled for PPro+, assumes local APIC without read-before-write bug");
#endif
{
check_cyrix_cpu();
identify_cpu(&boot_cpu_data);
- check_config();
+ check_cx686_cpuid();
+ check_cx686_slop();
#ifndef __SMP__
printk("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
- check_cx686_cpuid_slop();
- check_tlb();
+ check_config();
check_fpu();
check_hlt();
check_popad();
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
-#ifdef CONFIG_BSWAP
+#ifdef CONFIG_X86_BSWAP
__asm__("bswap %0" : "=r" (x) : "0" (x));
#else
__asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_AUTOINIT 0x10
+
extern spinlock_t dma_spin_lock;
#define SHMGET 23
#define SHMCTL 24
+/* Used by the DIPC package, try and avoid reusing it */
+#define DIPC 25
+
#define IPCCALL(version,op) ((version)<<16 | (op))
#endif
#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
-#ifndef CONFIG_INVLPG
+#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) flush_tlb()
#else
#define __flush_tlb_one(addr) \
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void dodgy_tsc(void);
/*
* Generic CPUID function
static inline cycles_t get_cycles (void)
{
-#ifndef CONFIG_TSC
+#ifndef CONFIG_X86_TSC
return 0;
#else
unsigned long eax, edx;
:"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
flag; })
-#ifdef CONFIG_WP_WORKS_OK
+#ifdef CONFIG_X86_WP_WORKS_OK
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
extern void refile_buffer(struct buffer_head * buf);
extern void set_writetime(struct buffer_head * buf, int flag);
-extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**);
+extern int try_to_free_buffers(struct page *);
extern int nr_buffers;
extern int buffermem;
int i2c_write(struct i2c_bus *bus, unsigned char addr,
unsigned char b1, unsigned char b2, int both);
+int i2c_init(void);
#endif /* I2C_H */
#define buffer_under_min() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.min_percent * num_physpages)
-#define buffer_under_borrow() ((buffermem >> PAGE_SHIFT) * 100 < \
- buffer_mem.borrow_percent * num_physpages)
-#define buffer_under_max() ((buffermem >> PAGE_SHIFT) * 100 < \
- buffer_mem.max_percent * num_physpages)
-#define buffer_over_min() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.min_percent * num_physpages)
-#define buffer_over_borrow() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.borrow_percent * num_physpages)
-#define buffer_over_max() ((buffermem >> PAGE_SHIFT) * 100 > \
- buffer_mem.max_percent * num_physpages)
#define pgcache_under_min() (page_cache_size * 100 < \
page_cache.min_percent * num_physpages)
-#define pgcache_under_borrow() (page_cache_size * 100 < \
- page_cache.borrow_percent * num_physpages)
-#define pgcache_under_max() (page_cache_size * 100 < \
- page_cache.max_percent * num_physpages)
-#define pgcache_over_min() (page_cache_size * 100 > \
- page_cache.min_percent * num_physpages)
-#define pgcache_over_borrow() (page_cache_size * 100 > \
- page_cache.borrow_percent * num_physpages)
-#define pgcache_over_max() (page_cache_size * 100 > \
- page_cache.max_percent * num_physpages)
#endif /* __KERNEL__ */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
int swappable:1;
+ int trashing_memory:1;
unsigned long swap_address;
unsigned long old_maj_flt; /* old value of maj_flt */
unsigned long dec_flt; /* page fault count of the last time */
/* utime */ {0,0,0,0},0, \
/* per CPU times */ {0, }, {0, }, \
/* flt */ 0,0,0,0,0,0, \
-/* swp */ 0,0,0,0,0, \
+/* swp */ 0,0,0,0,0,0, \
/* process credentials */ \
/* uid etc */ 0,0,0,0,0,0,0,0, \
/* suppl grps*/ 0, {0,}, \
/* ok, now we should be set up.. */
p->swappable = 1;
+ p->trashing_memory = 0;
p->exit_signal = clone_flags & CSIGNAL;
p->pdeath_signal = 0;
struct page * page;
int count;
- count = (limit<<1) >> (priority);
+ count = limit >> priority;
page = mem_map + clock;
do {
/* Is it a buffer page? */
if (page->buffers) {
- struct buffer_head *bh = page->buffers;
if (buffer_under_min())
continue;
- if (!try_to_free_buffer(bh, &bh))
+ if (!try_to_free_buffers(page))
continue;
return 1;
}
return 0;
}
-/*
- * This is called from try_to_swap_out() when we try to get rid of some
- * pages.. If we're unmapping the last occurrence of this page, we also
- * free it from the page hash-queues etc, as we don't want to keep it
- * in-core unnecessarily.
- */
-unsigned long page_unuse(struct page * page)
-{
- int count = atomic_read(&page->count);
-
- if (count != 2)
- return count;
- if (!page->inode)
- return count;
- if (PageSwapCache(page))
- panic ("Doing a normal page_unuse of a swap cache page");
- remove_inode_page(page);
- return 1;
-}
-
/*
* Update a page cache copy, when we're doing a "write()" system call
* See also "update_vm_cache()".
goto nopage;
}
- if (freepages.min > nr_free_pages) {
- int freed;
- freed = try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX);
- /*
- * Low priority (user) allocations must not
- * succeed if we didn't have enough memory
- * and we couldn't get more..
- */
- if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
- goto nopage;
+ /*
+ * Avoid going back-and-forth between allocating
+ * memory and trying to free it. If we get into
+ * a bad memory situation, we're better off trying
+ * to free things up until things are better.
+ *
+ * Normally we shouldn't ever have to do this, with
+ * kswapd doing this in the background.
+ *
+ * Most notably, this puts most of the onus of
+ * freeing up memory on the processes that _use_
+ * the most memory, rather than on everybody.
+ */
+ if (nr_free_pages > freepages.min) {
+ if (!current->trashing_memory)
+ goto ok_to_allocate;
+ if (nr_free_pages > freepages.low) {
+ current->trashing_memory = 0;
+ goto ok_to_allocate;
+ }
}
+ /*
+ * Low priority (user) allocations must not
+ * succeed if we are having trouble allocating
+ * memory.
+ */
+ current->trashing_memory = 1;
+ if (!try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX) && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
+ goto nopage;
}
+ok_to_allocate:
spin_lock_irqsave(&page_alloc_lock, flags);
RMQUEUE(order, (gfp_mask & GFP_DMA));
spin_unlock_irqrestore(&page_alloc_lock, flags);
swapstat_t swapstats = {0};
buffer_mem_t buffer_mem = {
- 5, /* minimum percent buffer */
+ 2, /* minimum percent buffer */
10, /* borrow percent buffer */
60 /* maximum percent buffer */
};
buffer_mem_t page_cache = {
- 5, /* minimum percent page cache */
+ 2, /* minimum percent page cache */
15, /* borrow percent page cache */
75 /* maximum */
};
void free_page_and_swap_cache(unsigned long addr)
{
struct page *page = mem_map + MAP_NR(addr);
+
/*
* If we are the only user, then free up the swap cache.
*/
delete_from_swap_cache(page);
}
- free_page(addr);
+ __free_page(page);
}
* copy in memory, so we add it to the swap
* cache. */
if (PageSwapCache(page_map)) {
- free_page(page);
+ __free_page(page_map);
return (atomic_read(&page_map->count) == 0);
}
add_to_swap_cache(page_map, entry);
* asynchronously. That's no problem, shrink_mmap() can
* correctly clean up the occassional unshared page
* which gets left behind in the swap cache. */
- free_page(page);
+ __free_page(page_map);
return 1; /* we slept: the process may not exist any more */
}
set_pte(page_table, __pte(entry));
flush_tlb_page(vma, address);
swap_duplicate(entry);
- free_page(page);
+ __free_page(page_map);
return (atomic_read(&page_map->count) == 0);
}
/*
priority = 5;
do {
- shrink_dcache_memory(priority, gfp_mask);
free_memory(shrink_mmap(priority, gfp_mask));
free_memory(shm_swap(priority, gfp_mask));
free_memory(swap_out(priority, gfp_mask));
+ shrink_dcache_memory(priority, gfp_mask);
} while (--priority >= 0);
retval = 0;
done:
proc_unregister(proc_net_ip_masq, ent->low_ino);
}
-/*
- * Wrapper over inet_select_addr()
- */
-u32 ip_masq_select_addr(struct device *dev, u32 dst, int scope)
-{
- return inet_select_addr(dev, dst, scope);
-}
__initfunc(static void masq_proc_init(void))
{
}
}
#endif /* CONFIG_PROC_FS */
+/*
+ * Wrapper over inet_select_addr()
+ */
+u32 ip_masq_select_addr(struct device *dev, u32 dst, int scope)
+{
+ return inet_select_addr(dev, dst, scope);
+}
/*
* Initialize ip masquerading
/* We got an ack, but it's not a good ack. */
if(!tcp_ack(sk,th, TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(skb)->ack_seq, len)) {
- sk->err = ECONNRESET;
- sk->state_change(sk);
- tcp_statistics.TcpAttemptFails++;
+ TCP_SKB_CB(skb)->ack_seq, len))
return 1;
- }
if(th->rst) {
tcp_reset(sk);
goto discard;
}
- if(!th->syn) {
- /* A valid ack from a different connection
- * start. Shouldn't happen but cover it.
- */
- sk->err = ECONNRESET;
- sk->state_change(sk);
- tcp_statistics.TcpAttemptFails++;
- return 1;
- }
+ if(!th->syn)
+ goto discard;
/* Ok.. it's good. Set up sequence numbers and
* move to established.
{
struct sock *sk;
- sk = sk_alloc(PF_IPX, GFP_KERNEL, 1);
+ /*
+ * Called on connection receive so cannot be GFP_KERNEL
+ */
+
+ sk = sk_alloc(PF_IPX, GFP_ATOMIC, 1);
if(sk == NULL)
return (-ENOMEM);