if (setup_sectors < SETUP_SECTS)
setup_sectors = SETUP_SECTS;
fprintf(stderr, "Setup is %d bytes.\n", i);
- memset(buf, sizeof(buf), 0);
+ memset(buf, 0, sizeof(buf));
while (i < setup_sectors * 512) {
c = setup_sectors * 512 - i;
if (c > sizeof(buf))
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- handle_mm_fault(tsk, vma, address, write);
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ if (!handle_mm_fault(tsk, vma, address, write))
+ goto bad_area;
/*
* Did it hit the DOS screen memory VA from vm86 mode?
* Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
* Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
*/
+#include <linux/config.h>
#include <linux/init.h>
#include <linux/kbd_ll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
* Andrea Arcangeli
*
* based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
+ *
+ * Cleaned up include files - Russell King <linux@arm.uk.linux.org>
*/
/* This driver should work with any hardware that is broadly compatible
static int irq_probe_ECP(struct parport *pb)
{
int irqs, i;
-
+
sti();
irqs = probe_irq_on();
- parport_pc_write_econtrol(pb, 0x00); /* Reset FIFO */
- parport_pc_write_econtrol(pb, 0xd0); /* TEST FIFO + nErrIntrEn */
+ parport_pc_write_econtrol(pb, 0x00); /* Reset FIFO */
+ parport_pc_write_econtrol(pb, 0xd0); /* TEST FIFO + nErrIntrEn */
/* If Full FIFO sure that WriteIntrThresold is generated */
for (i=0; i < 1024 && !(parport_pc_read_econtrol(pb) & 0x02) ; i++)
*
* based on work by Grant Guenther <grant@torque.net>
* and Philip Blundell
+ *
+ * Cleaned up include files - Russell King <linux@arm.uk.linux.org>
*/
-#include <linux/stddef.h>
-#include <linux/tasks.h>
-#include <linux/ctype.h>
-#include <asm/ptrace.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
+#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/proc_fs.h>
#include <linux/parport.h>
+#include <linux/ctype.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
struct proc_dir_entry *base = NULL;
tmp->ops = ops;
tmp->number = portnum;
memset (&tmp->probe_info, 0, sizeof (struct parport_device_info));
- spin_lock_init(&tmp->cad_lock);
+ tmp->cad_lock = RW_LOCK_UNLOCKED;
spin_lock_init(&tmp->waitlist_lock);
spin_lock_init(&tmp->pardevice_lock);
v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
other cleanups. -djb
Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
+ Rick Payne: Fixed SMP race condition
*/
static char *version = "3c509.c:1.12 6/4/97 becker@cesdis.gsfc.nasa.gov\n";
#include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */
+#include <asm/spinlock.h>
#include <asm/bitops.h>
#include <asm/io.h>
struct el3_private {
struct enet_statistics stats;
struct device *next_dev;
+ spinlock_t lock;
/* skb send-queue */
int head, size;
struct sk_buff *queue[SKB_QUEUE_SIZE];
outw(RxReset, ioaddr + EL3_CMD);
outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ /* Set the spinlock before grabbing IRQ! */
+ ((struct el3_private *)dev->priv)->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+
if (request_irq(dev->irq, &el3_interrupt, 0, "3c509", dev)) {
return -EAGAIN;
}
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
+ unsigned long flags;
+
+ /* Spin on the lock, until we're clear of an IRQ */
+ spin_lock_irqsave(&lp->lock, flags);
+
/* Put out the doubleword header... */
outw(skb->len, ioaddr + TX_FIFO);
outw(0x00, ioaddr + TX_FIFO);
} else
/* Interrupt us when the FIFO has room for max-sized packet. */
outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
dev_kfree_skb (skb);
el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct device *dev = (struct device *)dev_id;
+ struct el3_private *lp;
int ioaddr, status;
int i = INTR_WORK;
return;
}
+ lp = (struct el3_private *)dev->priv;
+ spin_lock(&lp->lock);
+
if (dev->interrupt)
printk("%s: Re-entering the interrupt handler.\n", dev->name);
dev->interrupt = 1;
printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
inw(ioaddr + EL3_STATUS));
}
-
+ spin_unlock(&lp->lock);
dev->interrupt = 0;
return;
}
"ne2.c:v0.90 Oct 14 1998 David Weinehall <tao@acc.umu.se>\n";
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/version.h>
#include <linux/kernel.h>
+Wed Oct 21 21:00 1998 Gerard Roudier (groudier@club-internet.fr)
+ * revision 3.1a
+ - Changes from Eddie Dost for Sparc and Alpha:
+ ioremap/iounmap support for Sparc.
+ pcivtophys changed to bus_dvma_to_phys.
+	- Add the 53c876 description to the chip table. This is only useful
+ for printing the right name of the controller.
+ - DEL-441 Item 2 work-around for the 53c876 rev <= 5 (0x15).
+	- Add additional checking of INQUIRY data:
+ Check INQUIRY data received length is at least 7. Byte 7 of
+ inquiry data contains device features bits and the driver might
+ be confused by garbage. Also check peripheral qualifier.
+ - Cleanup of the SCSI tasks management:
+ Remove the special case for 32 tags. Now the driver only uses the
+ scheme that allows up to 64 tags per LUN.
+ Merge some code from the 896 driver.
+ Use a 1,3,5,...MAXTAGS*2+1 tag numbering. Previous driver could
+	  use any tag number from 1 to 253 and some non-conformant devices
+ might have problems with large tag numbers.
+ - 'no_sync' changed to 'no_disc' in the README file. This is an old
+ and trivial mistake that seems to demonstrate the README file is
+ not often read. :)
+
Sun Oct 4 14:00 1998 Gerard Roudier (groudier@club-internet.fr)
* revision 3.0i
- Cosmetic changes for sparc (but not for the driver) that needs
21 Rue Carnot
95170 DEUIL LA BARRE - FRANCE
-27 June 1998
+18 October 1998
===============================================================================
1. Introduction
8.4 Set order type for tagged command
8.5 Set debug mode
8.6 Clear profile counters
- 8.7 Set flag (no_sync)
+ 8.7 Set flag (no_disc)
8.8 Set verbose level
9. Configuration parameters
10. Boot setup commands
The "clearprof" command allows you to clear these counters at any time.
-8.7 Set flag (no_sync)
+8.7 Set flag (no_disc)
setflag <target> <flag>
For the moment, only one flag is available:
- no_sync: not allow target to disconnect.
+ no_disc: not allow target to disconnect.
Do not specify any flag in order to reset the flag. For example:
- setflag 4
- will reset no_sync flag for target 4, so will allow it disconnections.
+ will reset no_disc flag for target 4, so will allow it disconnections.
- setflag all
will allow disconnection for all devices on the SCSI bus.
Will enable fast synchronous data transfer negotiation for all targets.
- echo "setflag 3" >/proc/scsi/ncr53c8xx/0
- Will reset flags (no_sync) for target 3, and so will allow it to disconnect
+ Will reset flags (no_disc) for target 3, and so will allow it to disconnect
the SCSI Bus.
- echo "settags 3 8" >/proc/scsi/ncr53c8xx/0
*/
/*
-** October 4 1998, version 3.0i
+** October 21 1998, version 3.1a
**
** Supported SCSI-II features:
** Synchronous negotiation
#endif
/*
-** Define the BSD style u_int32 type
+** Define the BSD style u_int32 and u_int64 type.
+** Are in fact u_int32_t and u_int64_t :-)
*/
typedef u32 u_int32;
+typedef u64 u_int64;
#include "ncr53c8xx.h"
#define NO_TAG (255)
/*
-** For more than 32 TAGS support, we do some address calculation
-** from the SCRIPTS using 2 additionnal SCR_COPY's and a fiew
-** bit handling on 64 bit integers. For these reasons, support for
-** 32 up to 64 TAGS is compiled conditionnaly.
+** Choose appropriate type for tag bitmap.
*/
-
-#if SCSI_NCR_MAX_TAGS <= 32
-struct nlink {
- ncrcmd l_cmd;
- ncrcmd l_paddr;
-};
+#if SCSI_NCR_MAX_TAGS > 32
+typedef u_int64 tagmap_t;
#else
-struct nlink {
- ncrcmd l_paddr;
-};
-typedef u64 u_int64;
+typedef u_int32 tagmap_t;
#endif
-
/*
** Number of targets supported by the driver.
** n permits target numbers 0..n-1.
#define iounmap vfree
#endif
-#ifdef __sparc__
+#if defined (__sparc__)
#include <asm/irq.h>
-#define remap_pci_mem(base, size) ((vm_offset_t) __va(base))
-#define unmap_pci_mem(vaddr, size)
-#define pcivtophys(p) ((p) & pci_dvma_mask)
-#else
-#if defined(__alpha__)
-#define pcivtophys(p) ((p) & 0xfffffffful)
+#elif defined (__alpha__)
+#define bus_dvma_to_mem(p) ((p) & 0xfffffffful)
#else
-#define pcivtophys(p) (p)
+#define bus_dvma_to_mem(p) (p)
#endif
#ifndef NCR_IOMAPPED
iounmap((void *) (vaddr & PAGE_MASK));
}
#endif /* !NCR_IOMAPPED */
-#endif /* __sparc__ */
/*
** Insert a delay in micro-seconds and milli-seconds.
** 64 possible tags.
**----------------------------------------------------------------
*/
- struct nlink jump_ccb_0; /* Default table if no tags */
- struct nlink *jump_ccb; /* Virtual address */
+ u_int32 jump_ccb_0; /* Default table if no tags */
+ u_int32 *jump_ccb; /* Virtual address */
/*----------------------------------------------------------------
** CCB queue management.
*/
u_char ia_tag; /* Allocation index */
u_char if_tag; /* Freeing index */
-#if SCSI_NCR_MAX_TAGS <= 32
- u_char cb_tags[32]; /* Circular tags buffer */
-#else
- u_char cb_tags[64]; /* Circular tags buffer */
-#endif
+ u_char cb_tags[SCSI_NCR_MAX_TAGS]; /* Circular tags buffer */
u_char usetags; /* Command queuing is active */
u_char maxtags; /* Max nr of tags asked by user */
u_char numtags; /* Current number of tags */
** QUEUE FULL control and ORDERED tag control.
**----------------------------------------------------------------
*/
+ /*----------------------------------------------------------------
+ ** QUEUE FULL and ORDERED tag control.
+ **----------------------------------------------------------------
+ */
u_short num_good; /* Nr of GOOD since QUEUE FULL */
-#if SCSI_NCR_MAX_TAGS <= 32
- u_int tags_umap; /* Used tags bitmap */
- u_int tags_smap; /* Tags in use at 'tag_stime' */
-#else
- u_int64 tags_umap; /* Used tags bitmap */
- u_int64 tags_smap; /* Tags in use at 'tag_stime' */
-#endif
+ tagmap_t tags_umap; /* Used tags bitmap */
+ tagmap_t tags_smap; /* Tags in use at 'tag_stime' */
u_long tags_stime; /* Last time we set smap=umap */
ccb_p held_ccb; /* CCB held for QUEUE FULL */
};
ncrcmd loadpos1 [ 4];
#endif
ncrcmd resel_lun [ 6];
-#if SCSI_NCR_MAX_TAGS <= 32
- ncrcmd resel_tag [ 8];
-#else
ncrcmd resel_tag [ 6];
ncrcmd jump_to_nexus [ 4];
ncrcmd nexus_indirect [ 4];
-#endif
-#if SCSI_NCR_MAX_TAGS <= 32
- ncrcmd resel_notag [ 4];
-#else
ncrcmd resel_notag [ 4];
-#endif
ncrcmd data_in [MAX_SCATTERL * 4];
ncrcmd data_in2 [ 4];
ncrcmd data_out [MAX_SCATTERL * 4];
/*
** Read the TAG from the SIDL.
** Still an aggressive optimization. ;-)
+ ** Compute the CCB indirect jump address which
+ ** is (#TAG*2 & 0xfc) due to tag numbering using
+ ** 1,3,5..MAXTAGS*2+1 actual values.
*/
- SCR_FROM_REG (sidl),
- 0,
- /*
- ** JUMP indirectly to the restart point of the CCB.
- */
-#if SCSI_NCR_MAX_TAGS <= 32
- SCR_SFBR_REG (temp, SCR_AND, 0xf8),
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
0,
- SCR_RETURN,
- 0,
-#else
SCR_SFBR_REG (temp, SCR_AND, 0xfc),
0,
}/*-------------------------< JUMP_TO_NEXUS >-------------------*/,{
RADDR (temp),
SCR_RETURN,
0,
-#endif
}/*-------------------------< RESEL_NOTAG >-------------------*/,{
/*
** No tag expected.
*/
SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
NADDR (msgin),
-#if SCSI_NCR_MAX_TAGS <= 32
- SCR_RETURN,
- 0,
-#else
SCR_JUMP,
PADDR (jump_to_nexus),
-#endif
}/*-------------------------< DATA_IN >--------------------*/,{
/*
** Because the size depends on the
switch (old & RELOC_MASK) {
case RELOC_REGISTER:
new = (old & ~RELOC_MASK)
- + pcivtophys(np->paddr);
+ + bus_dvma_to_mem(np->paddr);
break;
case RELOC_LABEL:
new = (old & ~RELOC_MASK) + np->p_script;
np->scripth = np->scripth0;
np->p_scripth = vtophys(np->scripth);
- np->p_script = (np->paddr2) ? pcivtophys(np->paddr2) : vtophys(np->script0);
+ np->p_script = (np->paddr2) ? bus_dvma_to_mem(np->paddr2) : vtophys(np->script0);
ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
}
}
msgptr[msglen++] = order;
-#if SCSI_NCR_MAX_TAGS <= 32
- msgptr[msglen++] = (cp->tag << 3) + 1;
-#else
- msgptr[msglen++] = (cp->tag << 2) + 1;
-#endif
-
+ /*
+ ** Actual tags are numbered 1,3,5,..2*MAXTAGS+1,
+ ** since we may have to deal with devices that have
+ ** problems with #TAG 0 or too great #TAG numbers.
+ */
+ msgptr[msglen++] = (cp->tag << 1) + 1;
}
switch (nego) {
++lp->queuedccbs;
cp = xpt_que_entry(qp, struct ccb, link_ccbq);
xpt_insque_tail(qp, &lp->busy_ccbq);
- lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag].l_paddr =
+ lp->jump_ccb[cp->tag == NO_TAG ? 0 : cp->tag] =
cpu_to_scr(CCB_PHYS (cp, restart));
ncr_put_start_queue(np, cp);
}
#ifdef DEBUG_NCR53C8XX
printk("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp);
#endif
- if (lp->maxnxs > 1)
+ if (lp->jump_ccb != &lp->jump_ccb_0)
m_free(lp->jump_ccb, 256);
m_free(lp, sizeof(*lp));
}
/*
** On standard INQUIRY response (EVPD and CmDt
** not set), setup logical unit according to
- ** announced capabilities.
+ ** announced capabilities (we need the 1rst 7 bytes).
*/
- if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3)) {
+ if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) &&
+ cmd->cmnd[4] >= 7) {
ncr_setup_lcb (np, cmd->target, cmd->lun,
(char *) cmd->request_buffer);
}
np->scsi_mode = INB (nc_stest4) & SMODE;
}
+ /*
+ ** DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+ ** Disable overlapped arbitration.
+ */
+ if (np->device_id == PCI_DEVICE_ID_NCR_53C875 &&
+ np->revision_id >= 0x10 && np->revision_id <= 0x15)
+ OUTB (nc_ctest0, (1<<5));
+
/*
** Fill in target structure.
** Reinitialize usrsync.
** We just assume lun=0, 1 CCB, no tag.
*/
if (tp->lp[0]) {
- OUTL (nc_dsp, scr_to_cpu(tp->lp[0]->jump_ccb[0].l_paddr));
+ OUTL (nc_dsp, scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
if (lp) {
if (tag != NO_TAG) {
++lp->ia_tag;
-#if SCSI_NCR_MAX_TAGS <= 32
- if (lp->ia_tag == 32)
-#else
- if (lp->ia_tag == 64)
-#endif
+ if (lp->ia_tag == SCSI_NCR_MAX_TAGS)
lp->ia_tag = 0;
-#if SCSI_NCR_MAX_TAGS <= 32
- lp->tags_umap |= (1u << tag);
-#else
- lp->tags_umap |= (((u_int64) 1) << tag);
-#endif
+ lp->tags_umap |= (((tagmap_t) 1) << tag);
}
}
if (lp) {
if (cp->tag != NO_TAG) {
lp->cb_tags[lp->if_tag++] = cp->tag;
-#if SCSI_NCR_MAX_TAGS <= 32
- if (lp->if_tag == 32)
-#else
- if (lp->if_tag == 64)
-#endif
+ if (lp->if_tag == SCSI_NCR_MAX_TAGS)
lp->if_tag = 0;
-#if SCSI_NCR_MAX_TAGS <= 32
- lp->tags_umap &= ~(1u << cp->tag);
-#else
- lp->tags_umap &= ~(((u_int64) 1) << cp->tag);
-#endif
+ lp->tags_umap &= ~(((tagmap_t) 1) << cp->tag);
lp->tags_smap &= lp->tags_umap;
- lp->jump_ccb[cp->tag].l_paddr =
+ lp->jump_ccb[cp->tag] =
cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l_q));
} else {
- lp->jump_ccb[0].l_paddr =
+ lp->jump_ccb[0] =
cpu_to_scr(NCB_SCRIPTH_PHYS(np, bad_i_t_l));
}
}
#define ncr_reg_bus_addr(r) \
- (pcivtophys(np->paddr) + offsetof (struct ncr_reg, r))
+ (bus_dvma_to_mem(np->paddr) + offsetof (struct ncr_reg, r))
/*------------------------------------------------------------------------
** Initialize the fixed part of a CCB structure.
}
-/*------------------------------------------------------------------------
-** Reselection JUMP table initialisation.
-**------------------------------------------------------------------------
-** The SCRIPTS processor jumps on reselection to the entry
-** corresponding to the CCB using the tag as offset.
-**------------------------------------------------------------------------
-*/
-static void ncr_setup_jump_ccb(ncb_p np, lcb_p lp)
-{
- int i;
-
- lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
- for (i = 0 ; i < lp->maxnxs ; i++) {
-#if SCSI_NCR_MAX_TAGS <= 32
- lp->jump_ccb[i].l_cmd = cpu_to_scr(SCR_JUMP);
-#endif
- lp->jump_ccb[i].l_paddr =
- cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q));
- lp->cb_tags[i] = i;
- }
-}
-
/*------------------------------------------------------------------------
** Lun control block allocation and initialization.
**------------------------------------------------------------------------
xpt_que_init(&lp->skip_ccbq);
/*
- ** Set max CCBs to 1 and use the default jump table
- ** by default.
+ ** Set max CCBs to 1 and use the default 1 entry
+ ** jump table by default.
*/
- lp->maxnxs = 1;
- lp->jump_ccb = &lp->jump_ccb_0;
- ncr_setup_jump_ccb(np, lp);
+ lp->maxnxs = 1;
+ lp->jump_ccb = &lp->jump_ccb_0;
+ lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
/*
	**	Initialize the reselect script:
if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
inq_byte7 = inq_data[7];
+ /*
+ ** Throw away announced LUN capabilities if we are told
+ ** that there is no real device supported by the logical unit.
+ */
+ if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f)
+ inq_byte7 &= (INQ7_SYNC | INQ7_WIDE16);
+
/*
** If user is wanting SYNC, force this feature.
*/
** If unit supports tagged commands, allocate the
** CCB JUMP table if not yet.
*/
- if ((inq_byte7 & INQ7_QUEUE) && lp->maxnxs < 2) {
- struct nlink *jumps;
- jumps = m_alloc(256, 8);
- if (!jumps)
+ if ((inq_byte7 & INQ7_QUEUE) && lp->jump_ccb == &lp->jump_ccb_0) {
+ int i;
+ lp->jump_ccb = m_alloc(256, 8);
+ if (!lp->jump_ccb) {
+ lp->jump_ccb = &lp->jump_ccb_0;
goto fail;
-#if SCSI_NCR_MAX_TAGS <= 32
- lp->maxnxs = 32;
-#else
- lp->maxnxs = 64;
-#endif
- lp->jump_ccb = jumps;
- ncr_setup_jump_ccb(np, lp);
- lp->tags_stime = jiffies;
+ }
+ lp->p_jump_ccb = cpu_to_scr(vtophys(lp->jump_ccb));
+ for (i = 0 ; i < 64 ; i++)
+ lp->jump_ccb[i] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l_q));
+ for (i = 0 ; i < SCSI_NCR_MAX_TAGS ; i++)
+ lp->cb_tags[i] = i;
+ lp->maxnxs = SCSI_NCR_MAX_TAGS;
+ lp->tags_stime = jiffies;
}
/*
/*
** Name and revision of the driver
*/
-#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 3.0i"
+#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 3.1a"
/*
** Check supported Linux versions
{PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
, \
- {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, \
+ {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
, \
{PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
#define SD_MINOR_NUMBER(i) ((i) & 255)
#define MKDEV_SD_PARTITION(i) MKDEV(SD_MAJOR_NUMBER(i), (i) & 255)
#define MKDEV_SD(index) MKDEV_SD_PARTITION((index) << 4)
-#define N_USED_SD_MAJORS ((sd_template.dev_max + SCSI_DISKS_PER_MAJOR - 1) / SCSI_DISKS_PER_MAJOR)
+#define N_USED_SCSI_DISKS (sd_template.dev_max + SCSI_DISKS_PER_MAJOR - 1)
+#define N_USED_SD_MAJORS (N_USED_SCSI_DISKS / SCSI_DISKS_PER_MAJOR)
#define MAX_RETRIES 5
scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
for (i=0; i <= sd_template.dev_max / SCSI_DISKS_PER_MAJOR; i++)
- unregister_blkdev(SD_MAJOR(i),"sd");
+ unregister_blkdev(SD_MAJOR(i),"sd");
sd_registered--;
if( rscsi_disks != NULL )
for (sdgd = gendisk_head; sdgd; sdgd = sdgd->next)
{
- if (sdgd->next >= sd_gendisks && sdgd->next <= LAST_SD_GENDISK)
+ if (sdgd->next >= sd_gendisks && sdgd->next <= LAST_SD_GENDISK.max_nr)
removed++, sdgd->next = sdgd->next->next;
else sdgd = sdgd->next;
}
- if (removed != N_USED_SCSI_DISKS)
+ if (removed != N_USED_SD_MAJORS)
printk("%s %d sd_gendisks in disk chain",
- removed > N_USED_SCSI_DISKS ? "total" : "just", removed);
+ removed > N_USED_SD_MAJORS ? "total" : "just", removed);
}
/*****************************************************************************/
+#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/string.h>
if (!inode)
goto exit;
- error = -EACCES;
+ error = -ELOOP;
if (S_ISLNK(inode->i_mode))
goto exit;
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
*/
+#include <linux/config.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/errno.h>
int c0, c, nc;
struct utf8_table *t;
- printk("utf8_mbtowc\n");
nc = 0;
c0 = *s;
l = c0;
const __u8 *ip;
int size;
- printk("\nutf8_mbstowcs: n=%d\n", n);
op = pwcs;
ip = s;
while (*ip && n > 0) {
- printk(" %02x", *ip);
if (*ip & 0x80) {
size = utf8_mbtowc(op, ip, n);
if (size == -1) {
int retval;
int i;
- lock_kernel();
-
wait = NULL;
current->timeout = timeout;
if (timeout) {
- struct poll_table_entry *entry = (struct poll_table_entry *)
- __get_free_page(GFP_KERNEL);
- if (!entry) {
- retval = -ENOMEM;
- goto out_nowait;
- }
+ struct poll_table_entry *entry = (struct poll_table_entry *) __get_free_page(GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
wait_table.nr = 0;
wait_table.entry = entry;
wait = &wait_table;
}
+ lock_kernel();
+
retval = max_select_fd(n, fds);
if (retval < 0)
goto out;
#ifndef __ASM_MIPS_FLOPPY_H
#define __ASM_MIPS_FLOPPY_H
-#include <linux/config.h>
#include <asm/bootinfo.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
extern void vmtruncate(struct inode * inode, unsigned long offset);
-extern void handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access);
+extern int handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access);
extern void make_pages_present(unsigned long addr, unsigned long end);
extern int pgt_cache_water[2];
*/
extern int free_memory_available(void);
extern struct task_struct * kswapd_task;
-
-extern inline void kswapd_notify(unsigned int gfp_mask)
-{
- if (kswapd_task) {
- wake_up_process(kswapd_task);
- if (gfp_mask & __GFP_WAIT) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
- }
-}
-
+#define wakeup_kswapd() do { \
+ if (kswapd_task->state & TASK_INTERRUPTIBLE) \
+ wake_up_process(kswapd_task); \
+} while (0)
+
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
{
page_cache_size++;
- set_bit(PG_referenced, &page->flags);
page->age = PAGE_AGE_VALUE;
if((page->next_hash = *p) != NULL)
(*p)->pprev_hash = &page->next_hash;
int number; /* port index - the `n' in `parportn' */
spinlock_t pardevice_lock;
spinlock_t waitlist_lock;
- spinlock_t cad_lock;
+ rwlock_t cad_lock;
};
/* parport_register_port registers a new parallel port at the given address (if
struct page **hash)
{
atomic_inc(&page->count);
- page->flags &= ~((1 << PG_uptodate) | (1 << PG_error));
+ page->flags = (page->flags & ~((1 << PG_uptodate) | (1 << PG_error))) | (1 << PG_referenced);
page->offset = offset;
add_page_to_inode_queue(inode, page);
__add_page_to_hash_queue(page, hash);
*/
page = mem_map + MAP_NR(page_cache);
add_to_page_cache(page, inode, offset, hash);
- set_bit(PG_referenced, &page->flags);
inode->i_op->readpage(file, page);
page_cache = 0;
}
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*/
-static void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
+static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table)
{
pte_t pte;
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
free_page(old_page);
flush_tlb_page(vma, address);
- return;
+ return 1;
}
flush_cache_page(vma, address);
set_pte(page_table, BAD_PAGE);
flush_tlb_page(vma, address);
free_page(old_page);
oom(tsk);
- return;
+ return 0;
}
if (PageSwapCache(page_map))
delete_from_swap_cache(page_map);
flush_cache_page(vma, address);
set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
flush_tlb_page(vma, address);
+end_wp_page:
if (new_page)
free_page(new_page);
- return;
+ return 1;
+
bad_wp_page:
printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
send_sig(SIGKILL, tsk, 1);
-end_wp_page:
if (new_page)
free_page(new_page);
- return;
+ return 0;
}
/*
}
-static inline void do_swap_page(struct task_struct * tsk,
+static int do_swap_page(struct task_struct * tsk,
struct vm_area_struct * vma, unsigned long address,
pte_t * page_table, pte_t entry, int write_access)
{
- pte_t page;
-
+ lock_kernel();
if (!vma->vm_ops || !vma->vm_ops->swapin) {
swap_in(tsk, vma, page_table, pte_val(entry), write_access);
flush_page_to_ram(pte_page(*page_table));
- return;
+ } else {
+ pte_t page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
+ if (pte_val(*page_table) != pte_val(entry)) {
+ free_page(pte_page(page));
+ } else {
+ if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) > 1 &&
+ !(vma->vm_flags & VM_SHARED))
+ page = pte_wrprotect(page);
+ ++vma->vm_mm->rss;
+ ++tsk->maj_flt;
+ flush_page_to_ram(pte_page(page));
+ set_pte(page_table, page);
+ }
}
- page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
- if (pte_val(*page_table) != pte_val(entry)) {
- free_page(pte_page(page));
- return;
+ unlock_kernel();
+ return 1;
+}
+
+/*
+ * This only needs the MM semaphore
+ */
+static int do_anonymous_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t *page_table, int write_access)
+{
+ pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
+ if (write_access) {
+ unsigned long page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return 0;
+ clear_page(page);
+ entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ vma->vm_mm->rss++;
+ tsk->min_flt++;
+ flush_page_to_ram(page);
}
- if (atomic_read(&mem_map[MAP_NR(pte_page(page))].count) > 1 &&
- !(vma->vm_flags & VM_SHARED))
- page = pte_wrprotect(page);
- ++vma->vm_mm->rss;
- ++tsk->maj_flt;
- flush_page_to_ram(pte_page(page));
- set_pte(page_table, page);
- return;
+ put_page(page_table, entry);
+ return 1;
}
/*
*
* As this is called only for pages that do not currently exist, we
* do not need to flush old virtual caches or the TLB.
+ *
+ * This is called with the MM semaphore held, but without the kernel
+ * lock.
*/
-static void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
- unsigned long address, int write_access, pte_t *page_table, pte_t entry)
+static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
+ unsigned long address, int write_access, pte_t *page_table)
{
unsigned long page;
+ pte_t entry;
- if (!pte_none(entry))
- goto swap_page;
- address &= PAGE_MASK;
if (!vma->vm_ops || !vma->vm_ops->nopage)
- goto anonymous_page;
+ return do_anonymous_page(tsk, vma, page_table, write_access);
+
/*
* The third argument is "no_share", which tells the low-level code
* to copy, not share the page even if sharing is possible. It's
- * essentially an early COW detection
+ * essentially an early COW detection.
+ *
+ * We need to grab the kernel lock for this..
*/
- page = vma->vm_ops->nopage(vma, address,
+ lock_kernel();
+ page = vma->vm_ops->nopage(vma, address & PAGE_MASK,
(vma->vm_flags & VM_SHARED)?0:write_access);
+ unlock_kernel();
if (!page)
- goto sigbus;
+ return 0;
+
++tsk->maj_flt;
++vma->vm_mm->rss;
/*
entry = pte_wrprotect(entry);
put_page(page_table, entry);
/* no need to invalidate: a not-present page shouldn't be cached */
- return;
-
-anonymous_page:
- entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
- if (write_access) {
- unsigned long page = __get_free_page(GFP_KERNEL);
- if (!page)
- goto sigbus;
- clear_page(page);
- entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- vma->vm_mm->rss++;
- tsk->min_flt++;
- flush_page_to_ram(page);
- }
- put_page(page_table, entry);
- return;
-
-sigbus:
- force_sig(SIGBUS, current);
- put_page(page_table, BAD_PAGE);
- /* no need to invalidate, wasn't present */
- return;
-
-swap_page:
- do_swap_page(tsk, vma, address, page_table, entry, write_access);
- return;
+ return 1;
}
/*
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*/
-static inline void handle_pte_fault(struct task_struct *tsk,
+static inline int handle_pte_fault(struct task_struct *tsk,
struct vm_area_struct * vma, unsigned long address,
int write_access, pte_t * pte)
{
pte_t entry = *pte;
if (!pte_present(entry)) {
- do_no_page(tsk, vma, address, write_access, pte, entry);
- return;
+ if (pte_none(entry))
+ return do_no_page(tsk, vma, address, write_access, pte);
+ return do_swap_page(tsk, vma, address, pte, entry, write_access);
}
+
entry = pte_mkyoung(entry);
set_pte(pte, entry);
flush_tlb_page(vma, address);
if (!write_access)
- return;
+ return 1;
+
if (pte_write(entry)) {
entry = pte_mkdirty(entry);
set_pte(pte, entry);
flush_tlb_page(vma, address);
- return;
+ return 1;
}
- do_wp_page(tsk, vma, address, pte);
+ return do_wp_page(tsk, vma, address, pte);
}
/*
* By the time we get here, we already hold the mm semaphore
*/
-void handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
+int handle_mm_fault(struct task_struct *tsk, struct vm_area_struct * vma,
unsigned long address, int write_access)
{
pgd_t *pgd;
pmd_t *pmd;
- pte_t *pte;
pgd = pgd_offset(vma->vm_mm, address);
pmd = pmd_alloc(pgd, address);
- if (!pmd)
- goto no_memory;
- pte = pte_alloc(pmd, address);
- if (!pte)
- goto no_memory;
- lock_kernel();
- handle_pte_fault(tsk, vma, address, write_access, pte);
- unlock_kernel();
- update_mmu_cache(vma, address, *pte);
- return;
-no_memory:
- oom(tsk);
+ if (pmd) {
+ pte_t * pte = pte_alloc(pmd, address);
+ if (pte) {
+ if (handle_pte_fault(tsk, vma, address, write_access, pte)) {
+ update_mmu_cache(vma, address, *pte);
+ return 1;
+ }
+ }
+ }
+ return 0;
}
/*
/*
* If we failed to find anything, we'll return NULL, but we'll
- * wake up kswapd _now_ and even wait for it synchronously if
- * we can.. This way we'll at least make some forward progress
+ * wake up kswapd _now_ and even yield to it if we can..
+ * This way we'll at least make some forward progress
* over time.
*/
- kswapd_notify(gfp_mask);
+ wakeup_kswapd();
+ if (gfp_mask & __GFP_WAIT) {
+ current->policy |= SCHED_YIELD;
+ schedule();
+ }
+
nopage:
return 0;
}
}
if (pte_young(pte)) {
+ /*
+ * Transfer the "accessed" bit from the page
+ * tables to the global page map.
+ */
set_pte(page_table, pte_mkold(pte));
- touch_page(page_map);
+ set_bit(PG_referenced, &page_map->flags);
+
/*
* We should test here to see if we want to recover any
* swap cache page here. We do this if the page seeing
return 0;
}
- age_page(page_map);
- if (page_map->age)
- return 0;
-
if (pte_dirty(pte)) {
if (vma->vm_ops && vma->vm_ops->swapout) {
pid_t pid = tsk->pid;
}
static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
- pgd_t *pgdir, unsigned long start, int gfp_mask)
+ unsigned long address, int gfp_mask)
{
+ pgd_t *pgdir;
unsigned long end;
/* Don't swap out areas like shared memory which have their
if (vma->vm_flags & (VM_SHM | VM_LOCKED))
return 0;
+ pgdir = pgd_offset(tsk->mm, address);
+
end = vma->vm_end;
- while (start < end) {
- int result = swap_out_pgd(tsk, vma, pgdir, start, end, gfp_mask);
+ while (address < end) {
+ int result = swap_out_pgd(tsk, vma, pgdir, address, end, gfp_mask);
if (result)
return result;
- start = (start + PGDIR_SIZE) & PGDIR_MASK;
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
pgdir++;
}
return 0;
* Find the proper vm-area
*/
vma = find_vma(p->mm, address);
- if (!vma) {
- p->swap_address = 0;
- return 0;
+ if (vma) {
+ if (address < vma->vm_start)
+ address = vma->vm_start;
+
+ for (;;) {
+ int result = swap_out_vma(p, vma, address, gfp_mask);
+ if (result)
+ return result;
+ vma = vma->vm_next;
+ if (!vma)
+ break;
+ address = vma->vm_start;
+ }
}
- if (address < vma->vm_start)
- address = vma->vm_start;
- for (;;) {
- int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, gfp_mask);
- if (result)
- return result;
- vma = vma->vm_next;
- if (!vma)
- break;
- address = vma->vm_start;
- }
+ /* We didn't find anything for the process */
+ p->swap_cnt = 0;
p->swap_address = 0;
return 0;
}
}
pbest->swap_cnt--;
- switch (swap_out_process(pbest, gfp_mask)) {
- case 0:
- /*
- * Clear swap_cnt so we don't look at this task
- * again until we've tried all of the others.
- * (We didn't block, so the task is still here.)
- */
- pbest->swap_cnt = 0;
- break;
- case 1:
+ /*
+ * Nonzero means we cleared out something, but only "1" means
+ * that we actually free'd up a page as a result.
+ */
+ if (swap_out_process(pbest, gfp_mask) == 1)
return 1;
- default:
- break;
- };
}
out:
return 0;
init_swap_timer();
kswapd_task = current;
while (1) {
- int tries;
+ unsigned long start_time;
current->state = TASK_INTERRUPTIBLE;
flush_signals(current);
schedule();
swapstats.wakeups++;
- /*
- * Do the background pageout: be
- * more aggressive if we're really
- * low on free memory.
- *
- * We try page_daemon.tries_base times, divided by
- * an 'urgency factor'. In practice this will mean
- * a value of pager_daemon.tries_base / 8 or 4 = 64
- * or 128 pages at a time.
- * This gives us 64 (or 128) * 4k * 4 (times/sec) =
- * 1 (or 2) MB/s swapping bandwidth in low-priority
- * background paging. This number rises to 8 MB/s
- * when the priority is highest (but then we'll be
- * woken up more often and the rate will be even
- * higher).
- */
- tries = pager_daemon.tries_base;
- tries >>= 4*free_memory_available();
-
+ start_time = jiffies;
do {
do_try_to_free_page(0);
- /*
- * Syncing large chunks is faster than swapping
- * synchronously (less head movement). -- Rik.
- */
- if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
- run_task_queue(&tq_disk);
if (free_memory_available() > 1)
break;
- } while (--tries > 0);
+ } while (jiffies != start_time);
}
/* As if we could ever get here - maybe we want to make this killable */
kswapd_task = NULL;
#
# 090398 Axel Boldt (boldt@math.ucsb.edu) - allow for empty lines in help
# texts.
+#
+# 102598 Michael Chastain (mec@shout.net) - put temporary files in
+# current directory, not in /tmp.
#
# Make sure we're really running bash.
echo "# Using defaults found in" $DEFAULTS
echo "#"
. $DEFAULTS
- sed -e 's/# \(.*\) is not.*/\1=n/' < $DEFAULTS > /tmp/conf.$$
- . /tmp/conf.$$
- rm /tmp/conf.$$
+ sed -e 's/# \(.*\) is not.*/\1=n/' < $DEFAULTS > .config-is-not.$$
+ . .config-is-not.$$
+ rm .config-is-not.$$
else
echo "#"
echo "# No defaults found"