VERSION = 1
PATCHLEVEL = 1
-SUBLEVEL = 36
+SUBLEVEL = 37
all: Version zImage
.c.s:
$(CC) $(CFLAGS) -S -o $*.s $<
.s.o:
- $(AS) -c -o $*.o $<
+ $(AS) -o $*.o $<
.c.o:
$(CC) $(CFLAGS) -c -o $*.o $<
-IDE Performance Enhancements Version 2.0
+IDE Performance Enhancements Version 2.1
============================ ===========
+What's new in version 2.1:
+ -- Support added for E-IDE BIOSs, for systems with IDE drives
+    that "have more than 16 logical heads" (according to the BIOS).
+ -- the HDIO_SETUNMASKINTR and HDIO_SETMULTCOUNT ioctls now permit
+ only the superuser to change settings, and no longer permit
+ minor device numbers to be used.
+
This version of hd.c includes support for two optional features:
(1) The disk I/O routines can now run with interrupts unmasked
Drives which support "Multiple Sector Mode" are identified by the
kernel at boot time, and a message is displayed indicating the
-largest possible setting for "MaxMult". I recommend using settings
-of 8, 16, or 32. Many drives also support non-powers of two,
-but many other drives do not -- try strange values at your own risk!
+largest possible setting for "MaxMult" (max sector count for
+"Multiple Sector Mode").
For more detailed boot-time information about your drive, change
the definition of VERBOSE_DRIVE_INFO from 0 to 1 near the top
correctly, and data corruption may occur.. but if you wait long enough
the error recovery logic *should* be able to recover eventually.
+I recommend using settings of 8, 16, or 32. Many drives also support
+non-powers of two, but other drives do not -- try strange values at
+your own risk!
+
To try this out more safely, mount the drive's partitions read-only
before using hdparm (see below) for the first time. If it doesn't
work, email me (mlord@bnr.ca) with the drive name as displayed at
boot time, so I can warn others and possibly add a hook to the code.
-To enable the features, a small program is included: hdparm.c
+To enable the features, a small program is included below: hdparm.c
This one is *different* from previous versions -- be sure to recompile it!
Compile this using cc -O -o /usr/bin/hdparm hdparm.c
Enjoy,
mlord@bnr.ca
+
+**** CUT HERE for hdparm.c ****
+
+/* make using: cc -O -o /usr/bin/hdparm hdparm.c */
+
+#include <linux/hdreg.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+/* extern int hd_ioctl(struct inode * inode, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ */
+
+
+void main (int argc, char *argv[])
+{
+ int fd, mrc, irc;
+ static long mcount, irqmode;
+
+ if (argc != 4 && argc != 2) {
+ fprintf(stderr,"Usage: %s <device>"
+ " [<MultModeCount:0-64> <unmask:0/1>]\n", *argv);
+ exit (1);
+ }
+
+ fd = open (*++argv, O_RDONLY);
+ if (fd < 0) {
+ printf ("open failed - '%s' - %d\n", *argv, fd);
+ exit(1);
+ }
+ if (argc == 4) {
+ mcount = atoi(*++argv);
+ mrc = ioctl (fd, HDIO_SETMULTCOUNT, &mcount);
+ /* note that the new mcount does not take effect
+ until the next disk I/O operation, so if we were
+ to query it before then, the old value will show.
+ Also, the drive may reject the new value, which will
+ result in multiple mode being turned OFF completely! */
+ irqmode = atoi(*++argv);
+ irc = ioctl (fd, HDIO_SETUNMASKINTR, &irqmode);
+ }
+ else {
+ mrc = ioctl (fd, HDIO_GETMULTCOUNT, &mcount);
+ irc = ioctl (fd, HDIO_GETUNMASKINTR, &irqmode);
+ }
+ printf("MultModeCount=%d, rc=%d\n", mcount, mrc);
+ printf("unmask=%d, rc=%d\n", irqmode, irc);
+}
if ((bh = req->bh) != NULL) {
req->bh = bh->b_reqnext;
bh->b_reqnext = NULL;
- bh->b_uptodate = uptodate;
+ bh->b_uptodate = uptodate;
+ if (!uptodate) bh->b_req = 0; /* So no "Weird" errors */
unlock_buffer(bh);
if ((bh = req->bh) != NULL) {
req->current_nr_sectors = bh->b_size >> 9;
* in the early extended-partition checks and added DM partitions
*
* IDE IRQ-unmask & drive-id & multiple-mode code added by Mark Lord.
+ *
+ * Support for E-IDE BIOS drive geometry translation added by Mark Lord.
+ * -- hd.c no longer chokes on E-IDE drives with "more than 16 heads".
*/
#define DEFAULT_MULT_COUNT 0 /* set to 0 to disable multiple mode at boot */
unsigned int head,sect,cyl,wpcom,lzone,ctl;
};
#ifdef HD_TYPE
-struct hd_i_struct hd_info[] = { HD_TYPE };
+static struct hd_i_struct hd_info[] = { HD_TYPE };
+struct hd_i_struct bios_info[] = { HD_TYPE };
static int NR_HD = ((sizeof (hd_info))/(sizeof (struct hd_i_struct)));
#else
-struct hd_i_struct hd_info[] = { {0,0,0,0,0,0},{0,0,0,0,0,0} };
+static struct hd_i_struct hd_info[] = { {0,0,0,0,0,0},{0,0,0,0,0,0} };
+struct hd_i_struct bios_info[] = { {0,0,0,0,0,0},{0,0,0,0,0,0} };
static int NR_HD = 0;
#endif
if (ints[0] != 3)
return;
- if (hd_info[0].head != 0)
+ if (bios_info[0].head != 0)
hdind=1;
- hd_info[hdind].head = ints[2];
- hd_info[hdind].sect = ints[3];
- hd_info[hdind].cyl = ints[1];
- hd_info[hdind].wpcom = 0;
- hd_info[hdind].lzone = ints[1];
- hd_info[hdind].ctl = (ints[2] > 8 ? 8 : 0);
+ bios_info[hdind].head = hd_info[hdind].head = ints[2];
+ bios_info[hdind].sect = hd_info[hdind].sect = ints[3];
+ bios_info[hdind].cyl = hd_info[hdind].cyl = ints[1];
+ bios_info[hdind].wpcom = hd_info[hdind].wpcom = 0;
+ bios_info[hdind].lzone = hd_info[hdind].lzone = ints[1];
+ bios_info[hdind].ctl = hd_info[hdind].ctl = (ints[2] > 8 ? 8 : 0);
NR_HD = hdind+1;
}
printk ("\n Config={");
for (i=0; i<=15; i++) if (ib[0] & (1<<i)) printk (cfg_str[i]);
printk (" }\n");
- printk (" Default c/h/s=%d/%d/%d, TrkSize=%d, SectSize=%d, ECCbytes=%d\n",
+ printk (" Default CHS=%d/%d/%d, TrkSize=%d, SectSize=%d, ECCbytes=%d\n",
ib[1],ib[3],ib[6],ib[4],ib[5], ib[22]);
dmpstr (" BuffType=",ib[20],BuffType,3);
ib[47] &= 0xFF;
dmpstr (", tPIO=",ib[51]>>8,SlowMedFast,2);
if (ib[49]&0x100 && (ib[53]&1))
dmpstr (", tDMA=",ib[52]>>8,SlowMedFast,2);
- printk ("\n (%s): Current c/h/s=%d/%d/%d, TotSect=%d",
+ printk ("\n (%s): Current CHS=%d/%d/%d, TotSect=%d",
(((ib[53]&1)==0)?"maybe":"valid"),
ib[54],ib[55],ib[56],*(int *)&ib[57]);
if (ib[49]&0x200)
printk (" hd%c: ", dev+'a');
rawstring(NULL, (char *)&ib[27], 40);
max_mult[dev] = ib[47] & 0xff;
- printk (" (%dMB IDE w/%dKB Cache, MaxMult=%d)\n",
- ib[1]*ib[3]*ib[6] / 2048, ib[21]>>1, max_mult[dev]);
+ if (ib[53]&1 && ib[54] && ib[55] && ib[56]) {
+ /*
+ * Extract the physical drive geometry for our use.
+ * Note that we purposely do *not* update the bios_info.
+ * This way, programs that use it (like fdisk) will
+ * still have the same logical view as the BIOS does,
+ * which keeps the partition table from being screwed.
+ */
+ hd_info[dev].cyl = ib[54];
+ hd_info[dev].head = ib[55];
+ hd_info[dev].sect = ib[56];
+ }
+ printk (" (%dMB IDE w/%dKB Cache, MaxMult=%d, CHS=%d/%d/%d)\n",
+ ib[1]*ib[3]*ib[6] / 2048, ib[21]>>1, max_mult[dev],
+ hd_info[dev].cyl, hd_info[dev].head, hd_info[dev].sect);
insw(HD_DATA,(char *)ib,64); /* flush remaining 384 ID bytes */
insw(HD_DATA,(char *)ib,64);
insw(HD_DATA,(char *)ib,64);
static void read_intr(void)
{
unsigned int dev = DEVICE_NR(CURRENT->dev);
- int i, retries = 100000, msect, nsect;
+ int i, retries = 100000, msect = mult_count[dev], nsect;
if (unmask_intr[dev])
sti(); /* permit other IRQs during xfer */
+read_next:
do {
i = (unsigned) inb_p(HD_STATUS);
if (i & BUSY_STAT)
hd_request();
return;
ok_to_read:
- msect = mult_count[dev];
-read_next:
if (msect) {
if ((nsect = CURRENT->current_nr_sectors) > msect)
nsect = msect;
goto repeat;
return;
}
+ if (hd_info[dev].head > 16) {
+ printk ("hd%c: cannot handle device with more than 16 heads - giving up\n", dev+'a');
+ end_request(0);
+ goto repeat;
+ }
if (CURRENT->cmd == READ) {
unsigned int cmd = mult_count[dev] > 1 ? WIN_MULTREAD : WIN_READ;
hd_out(dev,nsect,sec,head,cyl,cmd,&read_intr);
err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
if (err)
return err;
- put_fs_byte(hd_info[dev].head,
+ put_fs_byte(bios_info[dev].head,
(char *) &loc->heads);
- put_fs_byte(hd_info[dev].sect,
+ put_fs_byte(bios_info[dev].sect,
(char *) &loc->sectors);
- put_fs_word(hd_info[dev].cyl,
+ put_fs_word(bios_info[dev].cyl,
(short *) &loc->cylinders);
put_fs_long(hd[MINOR(inode->i_rdev)].start_sect,
(long *) &loc->start);
return revalidate_hddisk(inode->i_rdev, 1);
case HDIO_SETUNMASKINTR:
+ if (!suser()) return -EACCES;
+ if (MINOR(inode->i_rdev) & 0x3F) return -EINVAL;
if (!arg) return -EINVAL;
err = verify_area(VERIFY_READ, (long *) arg, sizeof(long));
if (err)
case HDIO_SETMULTCOUNT:
{
unsigned long flags;
+ if (!suser()) return -EACCES;
if (!arg) return -EINVAL;
+ if (MINOR(inode->i_rdev) & 0x3F) return -EINVAL;
err = verify_area(VERIFY_READ, (long *) arg, sizeof(long));
if (err)
return err;
hd, /* hd struct */
hd_sizes, /* block sizes */
0, /* number */
- (void *) hd_info, /* internal */
+ (void *) bios_info, /* internal */
NULL /* next */
};
if (!NR_HD) {
for (drive=0 ; drive<2 ; drive++) {
- hd_info[drive].cyl = *(unsigned short *) BIOS;
- hd_info[drive].head = *(2+BIOS);
- hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
- hd_info[drive].ctl = *(8+BIOS);
- hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
- hd_info[drive].sect = *(14+BIOS);
+ bios_info[drive].cyl = hd_info[drive].cyl = *(unsigned short *) BIOS;
+ bios_info[drive].head = hd_info[drive].head = *(2+BIOS);
+ bios_info[drive].wpcom = hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
+ bios_info[drive].ctl = hd_info[drive].ctl = *(8+BIOS);
+ bios_info[drive].lzone = hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
+ bios_info[drive].sect = hd_info[drive].sect = *(14+BIOS);
#ifdef does_not_work_for_everybody_with_scsi_but_helps_ibm_vp
if (hd_info[drive].cyl && NR_HD == drive)
NR_HD++;
i = NR_HD;
while (i-- > 0) {
hd[i<<6].nr_sects = 0;
- if (hd_info[i].head > 16) {
- printk("hd.c: ST-506 interface disk with more than 16 heads detected,\n");
- printk(" probably due to non-standard sector translation. Giving up.\n");
- printk(" (disk %d: cyl=%d, sect=%d, head=%d)\n", i,
- hd_info[i].cyl,
- hd_info[i].sect,
- hd_info[i].head);
- if (i+1 == NR_HD)
- NR_HD--;
- continue;
+ if (bios_info[i].head > 16) {
+ /*
+ * The newer E-IDE BIOSs handle drives larger than 1024
+ * cylinders by increasing the number of logical heads
+ * to keep the number of logical cylinders below the
+ * sacred INT13 limit of 1024 (10 bits). If that is
+ * what's happening here, we'll find out and correct
+ * it later when "identifying" the drive.
+ */
+ printk("hd.c: IDE/ST-506 disk with more than 16 heads detected.\n");
+ printk(" (hd%c: cyl=%d, sect=%d, head=%d)\n", i+'a',
+ bios_info[i].cyl,
+ bios_info[i].sect,
+ bios_info[i].head);
}
- hd[i<<6].nr_sects = hd_info[i].head*
- hd_info[i].sect*hd_info[i].cyl;
+ hd[i<<6].nr_sects = bios_info[i].head *
+ bios_info[i].sect * bios_info[i].cyl;
}
if (NR_HD) {
if (irqaction(HD_IRQ,&hd_sigaction)) {
#define DEVICE_BUSY busy[target]
#define USAGE access_count[target]
-#define CAPACITY (hd_info[target].head*hd_info[target].sect*hd_info[target].cyl)
+#define CAPACITY (bios_info[target].head*bios_info[target].sect*bios_info[target].cyl)
/* We assume that the the bios parameters do not change, so the disk capacity
will not change */
#undef MAYBE_REINIT
return -EAGAIN;
vma->vm_inode = inode;
inode->i_count++;
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
return 0;
}
return -EINVAL;
if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
return 0;
}
/* #define INITIALIZE_DEVICE */
-#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
+#include <linux/timer.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/delay.h>
+
#include <linux/sched.h>
#include <asm/dma.h>
} \
}
+/* Similar to WAIT, except we use the udelay call to regulate the
+ amount of time we wait. */
+#define WAITd(port, mask, allof, noneof, timeout) \
+ { register WAITbits; \
+ register WAITtimeout = timeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ udelay(1000); \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
+
static void aha1542_stat(void)
{
/* int s = inb(STATUS), i = inb(INTRFLAGS);
return 1;
}
+/* Similar to aha1542_in, except that we wait a very short period of time.
+ We use this if we know the board is alive and awake, but we are not sure
   if the board will respond to the command we are about to send or not */
+static int aha1542_in1(unsigned int base, unchar *cmdp, int len)
+{
+ cli();
+ while (len--)
+ {
+ WAITd(STATUS(base), DF, DF, 0, 100);
+ *cmdp++ = inb(DATA(base));
+ }
+ sti();
+ return 0;
+ fail:
+ sti();
+ return 1;
+}
+
static int makecode(unsigned hosterr, unsigned scsierr)
{
switch (hosterr) {
mbenable_cmd[0]=CMD_EXTBIOS;
aha1542_out(base,mbenable_cmd,1);
- aha1542_in(base,mbenable_result,2);
- WAIT(INTRFLAGS(base),INTRMASK,HACC,0);
+ if(aha1542_in1(base,mbenable_result,2))
+ return retval;
+ WAITd(INTRFLAGS(base),INTRMASK,HACC,0,100);
aha1542_intr_reset(base);
if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
return 1;
};
- /* 1542C returns 0x44, 1542CF returns 0x45 */
- if (inquiry_result[0] == 0x44 || inquiry_result[0] == 0x45)
- { /* Detect 1542C */
- *transl = aha1542_mbenable(base_io);
- };
+ /* Always call this - boards that do not support extended bios translation
+ will ignore the command, and we will set the proper default */
+
+ *transl = aha1542_mbenable(base_io);
+
return 0;
}
* $Log: generic_NCR5380.c,v $
*/
-#include <linux/config.h>
-#if defined(CONFIG_SCSI_GENERIC_NCR5380)
-/* Standard option */
#define AUTOPROBE_IRQ
#include <asm/system.h>
}
#include "NCR5380.c"
-
-#endif /* defined(CONFIG_SCSI_GENERIC_NCR5380) */
*/
struct Scsi_Host * scsi_hostlist = NULL;
+struct Scsi_Device_Template * scsi_devicelist;
int max_scsi_hosts = 0;
static int next_host = 0;
return retval;
}
+int
+scsi_register_device(struct Scsi_Device_Template * sdpnt)
+{
+ if(sdpnt->next) panic("Device already registered");
+ sdpnt->next = scsi_devicelist;
+ scsi_devicelist = sdpnt;
+ return 0;
+}
+
unsigned int scsi_init()
{
static int called = 0;
}
printk ("scsi : %d hosts.\n", count);
+ /* Now attach the high level drivers */
+#ifdef CONFIG_BLK_DEV_SD
+ scsi_register_device(&sd_template);
+#endif
+#ifdef CONFIG_BLK_DEV_SR
+ scsi_register_device(&sr_template);
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ scsi_register_device(&st_template);
+#endif
+#ifdef CONFIG_CHR_DEV_SG
+ scsi_register_device(&sg_template);
+#endif
+
max_scsi_hosts = count;
return 0;
}
-
-#ifndef CONFIG_BLK_DEV_SD
-unsigned long sd_init(unsigned long memory_start, unsigned long memory_end){
- return memory_start;
-};
-void sd_init1(){
- return;
-};
-void sd_attach(Scsi_Device * SDp){
-};
-int NR_SD=-1;
-int MAX_SD=0;
-#endif
-
-
-#ifndef CONFIG_BLK_DEV_SR
-unsigned long sr_init(unsigned long memory_start, unsigned long memory_end){
- return memory_start;
-};
-void sr_init1(){
- return;
-};
-void sr_attach(Scsi_Device * SDp){
-};
-int NR_SR=-1;
-int MAX_SR=0;
-#endif
-
-
-#ifndef CONFIG_CHR_DEV_ST
-unsigned long st_init(unsigned long memory_start, unsigned long memory_end){
- return memory_start;
-};
-void st_init1(){
- return;
-};
-void st_attach(Scsi_Device * SDp){
-};
-int NR_ST=-1;
-int MAX_ST=0;
-#endif
-
-#ifndef CONFIG_CHR_DEV_SG
-unsigned long sg_init(unsigned long memory_start, unsigned long memory_end){
- return memory_start;
-};
-void sg_init1(){
- return;
-};
-void sg_attach(Scsi_Device * SDp){
-};
-int NR_SG=-1;
-int MAX_SG=0;
-#endif
};
extern struct Scsi_Host * scsi_hostlist;
+extern struct Scsi_Device_Template * scsi_devicelist;
extern Scsi_Host_Template * scsi_hosts;
#define BLANK_HOST {"", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
#endif
+
+struct Scsi_Device_Template
+{
+ struct Scsi_Device_Template * next;
+ char * name;
+ char * tag;
+ unsigned char scsi_type;
+ unsigned char major;
+ unsigned char nr_dev; /* Number currently attached */
+ unsigned char dev_noticed; /* Number of devices detected. */
+ unsigned char dev_max; /* Current size of arrays */
+ unsigned blk:1; /* 0 if character device */
+ int (*detect)(Scsi_Device *); /* Returns 1 if we can attach this device */
+ void (*init)(void); /* Sizes arrays based upon number of devices detected */
+ void (*finish)(void); /* Perform initialization after attachment */
+ void (*attach)(Scsi_Device *); /* Attach devices to arrays */
+ void (*detach)(Scsi_Device *);
+};
+
+extern struct Scsi_Device_Template sd_template;
+extern struct Scsi_Device_Template st_template;
+extern struct Scsi_Device_Template sr_template;
+extern struct Scsi_Device_Template sg_template;
+
+int scsi_register_device(struct Scsi_Device_Template * sdpnt);
/*
global variables :
- NR_SCSI_DEVICES is the number of SCSI devices we have detected,
scsi_devices an array of these specifing the address for each
(host, id, LUN)
*/
-int NR_SCSI_DEVICES=0;
-
Scsi_Device * scsi_devices = NULL;
static unsigned char generic_sense[6] = {REQUEST_SENSE, 0,0,0, 255, 0};
unsigned char scsi_cmd [12];
unsigned char scsi_result [256];
Scsi_Device * SDpnt, *SDtail;
+ struct Scsi_Device_Template * sdtpnt;
Scsi_Cmnd SCmd;
++in_scan;
SDpnt->lun = lun;
SDpnt->device_wait = NULL;
SDpnt->next = NULL;
+ SDpnt->attached = 0;
/*
* Assume that the device will have handshaking problems, and then
* fix this field later if it turns out it doesn't.
if (type != -1)
{
+ struct Scsi_Device_Template * sdtpnt;
print_inquiry(scsi_result);
- switch(type){
- case TYPE_TAPE:
- printk("Detected scsi tape st%d at scsi%d, id %d, lun %d\n", MAX_ST,
- shpnt->host_no , dev, lun);
- if(NR_ST != -1) ++MAX_ST;
- break;
- case TYPE_ROM:
- printk("Detected scsi CD-ROM sr%d at scsi%d, id %d, lun %d\n", MAX_SR,
- shpnt->host_no , dev, lun);
- if(NR_SR != -1) ++MAX_SR;
- break;
- case TYPE_DISK:
- case TYPE_MOD:
- printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n", 'a'+MAX_SD,
- shpnt->host_no , dev, lun);
- if(NR_SD != -1) ++MAX_SD;
- break;
- default:
- break;
- };
-
- if(NR_SG != -1) ++MAX_SG;
-
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->detect) SDpnt->attached +=
+ (*sdtpnt->detect)(SDpnt);
+
SDpnt->scsi_level = scsi_result[2] & 0x07;
if (SDpnt->scsi_level >= 2 ||
(SDpnt->scsi_level == 1 &&
scsi_devices = SDpnt;
SDtail = SDpnt;
- ++NR_SCSI_DEVICES;
SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof (Scsi_Device));
/* Some scsi devices cannot be polled for lun != 0
due to firmware bugs */
shpnt->host_queue = NULL; /* No longer needed here */
printk("scsi : detected ");
- if(NR_SD != -1)
- printk("%d SCSI disk%s ", MAX_SD, (MAX_SD != 1) ? "s" : "");
-
- if(NR_ST != -1)
- printk("%d tape%s ", MAX_ST, (MAX_ST != 1) ? "s" : "");
-
- if(NR_SR != -1)
- printk("%d CD-ROM drive%s ", MAX_SR, (MAX_SR != 1) ? "s" : "");
-
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->dev_noticed && sdtpnt->name)
+ printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
+ (sdtpnt->dev_noticed != 1) ? "s" : "");
+
printk("total.\n");
/* Last device block does not exist. Free memory. */
struct Scsi_Host * host = NULL;
Scsi_Device * SDpnt;
struct Scsi_Host * shpnt;
+ struct Scsi_Device_Template * sdtpnt;
Scsi_Cmnd * SCpnt;
#ifdef FOO_ON_YOU
return;
for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
scan_scsis(shpnt); /* scan for scsi devices */
- sd_init1();
- st_init1();
- sr_init1();
- sg_init1();
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
int j;
SDpnt->scsi_request_fn = NULL;
- switch (SDpnt->type)
- {
- case TYPE_TAPE :
- st_attach(SDpnt);
- break;
- case TYPE_ROM:
- sr_attach(SDpnt);
- break;
- case TYPE_DISK:
- case TYPE_MOD:
- sd_attach(SDpnt);
- default:
- break;
- };
- sg_attach(SDpnt);
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
if(SDpnt->type != -1){
for(j=0;j<SDpnt->host->hostt->cmd_per_lun;j++){
SCpnt = (Scsi_Cmnd *) scsi_init_malloc(sizeof(Scsi_Cmnd));
};
};
- memory_start = scsi_init_memory_start;
- if (NR_SD > 0 || NR_SR > 0 || NR_ST > 0)
+ if (scsi_devicelist)
dma_sectors = 16; /* Base value we use */
for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
dma_sectors = (dma_sectors + 15) & 0xfff0;
dma_free_sectors = dma_sectors; /* This must be a multiple of 16 */
- memory_start = (memory_start + 3) & 0xfffffffc;
- dma_malloc_freelist = (unsigned short *) memory_start;
- memory_start += dma_sectors >> 3;
+ scsi_init_memory_start = (scsi_init_memory_start + 3) & 0xfffffffc;
+ dma_malloc_freelist = (unsigned short *)
+ scsi_init_malloc(dma_sectors >> 3);
memset(dma_malloc_freelist, 0, dma_sectors >> 3);
- if(memory_start & 1) memory_start++; /* Some host adapters require
- buffers to be word aligned */
- dma_malloc_buffer = (unsigned char *) memory_start;
- memory_start += dma_sectors << 9;
+ /* Some host adapters require buffers to be word aligned */
+ if(scsi_init_memory_start & 1) scsi_init_memory_start++;
- memory_start = sd_init(memory_start, memory_end); /* init scsi disks */
- memory_start = st_init(memory_start, memory_end); /* init scsi tapes */
- memory_start = sr_init(memory_start, memory_end); /* init scsi CDROMs */
- memory_start = sg_init(memory_start, memory_end); /* init scsi generic */
+ dma_malloc_buffer = (unsigned char *)
+ scsi_init_malloc(dma_sectors << 9);
+
+ /* OK, now we finish the initialization by doing spin-up, read
+ capacity, etc, etc */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
scsi_loadable_module_flag = 1;
- return memory_start;
+ return scsi_init_memory_start;
}
static void print_inquiry(unsigned char *data)
typedef struct scsi_device {
struct scsi_device * next; /* Used for linked list */
unsigned char id, lun;
+ int attached; /* # of high level drivers attached to this */
int access_count; /* Count of open channels/mounts */
struct wait_queue * device_wait; /* Used to wait if device is busy */
struct Scsi_Host * host;
These are the SCSI devices available on the system.
*/
-extern int NR_SCSI_DEVICES;
extern Scsi_Device * scsi_devices;
/*
Initializes all SCSI devices. This scans all scsi busses.
extern int scsi_reset (Scsi_Cmnd *);
extern int max_scsi_hosts;
-extern int MAX_SD, NR_SD, MAX_ST, NR_ST, MAX_SR, NR_SR, NR_SG, MAX_SG;
-extern unsigned long sd_init(unsigned long, unsigned long);
-extern void sd_init1(void);
-extern void sd_attach(Scsi_Device *);
-
-extern unsigned long sr_init(unsigned long, unsigned long);
-extern void sr_init1(void);
-extern void sr_attach(Scsi_Device *);
-
-extern unsigned long st_init(unsigned long, unsigned long);
-extern void st_init1(void);
-extern void st_attach(Scsi_Device *);
-
-extern unsigned long sg_init(unsigned long, unsigned long);
-extern void sg_init1(void);
-extern void sg_attach(Scsi_Device *);
#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
static void end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
struct hd_struct * sd;
-int NR_SD=0;
-int MAX_SD=0;
Scsi_Disk * rscsi_disks;
static int * sd_sizes;
static int * sd_blocksizes;
static void requeue_sd_request (Scsi_Cmnd * SCpnt);
+static void sd_init(void);
+static void sd_finish(void);
+static void sd_attach(Scsi_Device *);
+static int sd_detect(Scsi_Device *);
+
+struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
+ SCSI_DISK_MAJOR, 0, 0, 0, 1,
+ sd_detect, sd_init,
+ sd_finish, sd_attach, NULL};
+
static int sd_open(struct inode * inode, struct file * filp)
{
int target;
target = DEVICE_NR(MINOR(inode->i_rdev));
- if(target >= NR_SD || !rscsi_disks[target].device)
+ if(target >= sd_template.dev_max || !rscsi_disks[target].device)
return -ENXIO; /* No such device */
/* Make sure that only one process can do a check_change_disk at one time.
{
int i;
- for (i = 0; i < NR_SD; ++i)
- sd[i << 4].nr_sects = rscsi_disks[i].capacity;
- sd_gendisk.nr_real = NR_SD;
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if(rscsi_disks[i].device)
+ sd[i << 4].nr_sects = rscsi_disks[i].capacity;
+ sd_gendisk.nr_real = sd_template.dev_max;
}
/*
to have the interrupts off when monkeying with the request list, because
otherwise the kernel might try and slip in a request inbetween somewhere. */
- if (!SCpnt && NR_SD > 1){
+ if (!SCpnt && sd_template.nr_dev > 1){
struct request *req1;
req1 = NULL;
cli();
printk("Doing sd request, dev = %d, block = %d\n", dev, block);
#endif
- if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
+ if (dev >= (sd_template.dev_max << 4) ||
+ !rscsi_disks[DEVICE_NR(dev)].device ||
+ block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
{
end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
target = DEVICE_NR(MINOR(full_dev));
- if (target >= NR_SD) {
+ if (target >= sd_template.dev_max ||
+ !rscsi_disks[target].device) {
printk("SCSI disk request error: invalid device.\n");
return 0;
};
rscsi_disks[i].capacity = 0;
} else {
printk ("scsi : deleting disk entry.\n");
- for (j=i; j < NR_SD - 1;)
- rscsi_disks[j] = rscsi_disks[++j];
- --i;
- --NR_SD;
- scsi_free(buffer, 512);
+ rscsi_disks[j].device = NULL;
+ sd_template.nr_dev--;
return i;
};
}
*/
-unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
+static void sd_init()
{
int i;
+ static int sd_registered = 0;
+
+ if (sd_template.dev_noticed == 0) return;
- if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
- printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
- return memory_start;
+ if(!sd_registered) {
+ if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
+ printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
+ return;
+ }
+ sd_registered++;
}
- if (MAX_SD == 0) return memory_start;
- sd_sizes = (int *) memory_start;
- memory_start += (MAX_SD << 4) * sizeof(int);
- memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+
+ sd_template.dev_max = sd_template.dev_noticed;
+
+ rscsi_disks = (Scsi_Disk *)
+ scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
- sd_blocksizes = (int *) memory_start;
- memory_start += (MAX_SD << 4) * sizeof(int);
- for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
+ sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int));
+ memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
+
+ sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int));
+ for(i=0;i<(sd_template.dev_max << 4);i++) sd_blocksizes[i] = 1024;
blksize_size[MAJOR_NR] = sd_blocksizes;
- sd = (struct hd_struct *) memory_start;
- memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);
+ sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(struct hd_struct));
+
- sd_gendisk.max_nr = MAX_SD;
+ sd_gendisk.max_nr = sd_template.dev_max;
sd_gendisk.part = sd;
sd_gendisk.sizes = sd_sizes;
sd_gendisk.real_devices = (void *) rscsi_disks;
- for (i = 0; i < NR_SD; ++i)
- i = sd_init_onedisk(i);
+}
+
+static void sd_finish()
+{
+ int i;
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if (rscsi_disks[i].device) i = sd_init_onedisk(i);
blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
sd_gendisk.next = gendisk_head;
gendisk_head = &sd_gendisk;
- return memory_start;
+ return;
}
-void sd_init1(){
- rscsi_disks = (Scsi_Disk *) scsi_init_malloc(MAX_SD * sizeof(Scsi_Disk));
-};
+static int sd_detect(Scsi_Device * SDp){
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return 0;
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+ printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
+ 'a'+ (sd_template.dev_noticed++),
+ SDp->host->host_no , SDp->id, SDp->lun);
+
+ return 1;
+
+}
+
+static void sd_attach(Scsi_Device * SDp){
+ Scsi_Disk * dpnt;
+ int i;
+
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
+
+ if(sd_template.nr_dev >= sd_template.dev_max)
+ panic ("scsi_devices corrupt (sd)");
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(!dpnt->device) break;
+
+ if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
-void sd_attach(Scsi_Device * SDp){
- SDp->scsi_request_fn = do_sd_request;
- rscsi_disks[NR_SD++].device = SDp;
- if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
+ SDp->scsi_request_fn = do_sd_request;
+ rscsi_disks[i].device = SDp;
+ sd_template.nr_dev++;
};
#define DEVICE_BUSY rscsi_disks[target].device->busy
* transfer rate if handshaking isn't working correctly.
*/
-#include <linux/config.h>
-#if defined(CONFIG_SCSI_SEAGATE) || defined(CONFIG_SCSI_FD_8xx)
#include <asm/io.h>
#include <asm/system.h>
#include <linux/signal.h>
return result;
}
-#endif /* defined(CONFIG_SCSI_SEGATE) */
-
#include "../block/blk.h"
#include "scsi.h"
+#include "hosts.h"
#include "scsi_ioctl.h"
#include "sg.h"
-int NR_SG=0;
-int MAX_SG=0;
+static void sg_init(void);
+static void sg_attach(Scsi_Device *);
+static int sg_detect(Scsi_Device *);
+
+
+struct Scsi_Device_Template sg_template = {NULL, NULL, "sg", 0xff,
+ SCSI_GENERIC_MAJOR, 0, 0, 0, 0,
+ sg_detect, sg_init,
+ NULL, sg_attach, NULL};
#ifdef SG_BIG_BUFF
static char *big_buff;
unsigned int cmd_in, unsigned long arg)
{
int dev = MINOR(inode->i_rdev);
- if ((dev<0) || (dev>=NR_SG))
+ if ((dev<0) || (dev>=sg_template.dev_max))
return -ENXIO;
switch(cmd_in)
{
{
int dev=MINOR(inode->i_rdev);
int flags=filp->f_flags;
- if (dev>=NR_SG)
+ if (dev>=sg_template.dev_max || !scsi_generics[dev].device)
return -ENXIO;
if (O_RDWR!=(flags & O_ACCMODE))
return -EACCES;
};
+static int sg_detect(Scsi_Device * SDp){
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return 0;
+
+ ++sg_template.dev_noticed;
+ return 1;
+}
+
/* Driver initialization */
-unsigned long sg_init(unsigned long mem_start, unsigned long mem_end)
+static void sg_init()
{
- if (register_chrdev(SCSI_GENERIC_MAJOR,"sg",&sg_fops))
- {
- printk("Unable to get major %d for generic SCSI device\n",
- SCSI_GENERIC_MAJOR);
- return mem_start;
+ static int sg_registered = 0;
+
+ if (sg_template.dev_noticed == 0) return;
+
+ if(!sg_registered) {
+ if (register_chrdev(SCSI_GENERIC_MAJOR,"sg",&sg_fops))
+ {
+ printk("Unable to get major %d for generic SCSI device\n",
+ SCSI_GENERIC_MAJOR);
+ return;
+ }
+ sg_registered++;
}
- if (NR_SG == 0) return mem_start;
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
#ifdef DEBUG
printk("sg: Init generic device.\n");
#endif
#ifdef SG_BIG_BUFF
- big_buff= (char *) mem_start;
- mem_start+=SG_BIG_BUFF;
+ big_buff= (char *) scsi_init_malloc(SG_BIG_BUFF);
#endif
- return mem_start;
+
+ scsi_generics = (struct scsi_generic *)
+ scsi_init_malloc(sg_template.dev_noticed * sizeof(struct scsi_generic));
+
+ sg_template.dev_max = sg_template.dev_noticed;
}
-void sg_init1()
+static void sg_attach(Scsi_Device * SDp)
{
- scsi_generics = (struct scsi_generic *)
- scsi_init_malloc(MAX_SG * sizeof(struct scsi_generic));
- };
+ struct scsi_generic * gpnt;
+ int i;
-void sg_attach(Scsi_Device * SDp)
- {
- if(NR_SG >= MAX_SG)
- panic ("scsi_devices corrupt (sg)");
- scsi_generics[NR_SG].device=SDp;
- scsi_generics[NR_SG].users=0;
- scsi_generics[NR_SG].generic_wait=NULL;
- scsi_generics[NR_SG].read_wait=NULL;
- scsi_generics[NR_SG].write_wait=NULL;
- scsi_generics[NR_SG].exclude=0;
- scsi_generics[NR_SG].pending=0;
- scsi_generics[NR_SG].timeout=SG_DEFAULT_TIMEOUT;
- NR_SG++;
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+
+ if(sg_template.nr_dev >= sg_template.dev_max)
+ panic ("scsi_devices corrupt (sg)");
+
+ for(gpnt = scsi_generics, i=0; i<sg_template.dev_max; i++, gpnt++)
+ if(!gpnt->device) break;
+
+ if(i >= sg_template.dev_max) panic ("scsi_devices corrupt (sg)");
+
+ scsi_generics[i].device=SDp;
+ scsi_generics[i].users=0;
+ scsi_generics[i].generic_wait=NULL;
+ scsi_generics[i].read_wait=NULL;
+ scsi_generics[i].write_wait=NULL;
+ scsi_generics[i].exclude=0;
+ scsi_generics[i].pending=0;
+ scsi_generics[i].timeout=SG_DEFAULT_TIMEOUT;
+ sg_template.nr_dev++;
};
#include "scsi_ioctl.h" /* For the door lock/unlock commands */
#include "constants.h"
-#define MAX_RETRIES 1
-#define SR_TIMEOUT 500
+#define MAX_RETRIES 3
+#define SR_TIMEOUT 5000
+
+static void sr_init(void);
+static void sr_finish(void);
+static void sr_attach(Scsi_Device *);
+static int sr_detect(Scsi_Device *);
+
+struct Scsi_Device_Template sr_template = {NULL, "cdrom", "sr", TYPE_ROM,
+ SCSI_CDROM_MAJOR, 0, 0, 0, 1,
+ sr_detect, sr_init,
+ sr_finish, sr_attach, NULL};
-int NR_SR=0;
-int MAX_SR=0;
Scsi_CD * scsi_CDs;
static int * sr_sizes;
target = MINOR(full_dev);
- if (target >= NR_SR) {
+ if (target >= sr_template.nr_dev) {
printk("CD-ROM request error: invalid device.\n");
return 0;
};
static int sr_open(struct inode * inode, struct file * filp)
{
- if(MINOR(inode->i_rdev) >= NR_SR ||
+ if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
!scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
if (filp->f_mode & 2)
sti();
return;
};
-
+
INIT_SCSI_REQUEST;
if (flag++ == 0)
to have the interrupts off when monkeying with the request list, because
otherwise the kernel might try and slip in a request inbetween somewhere. */
- if (!SCpnt && NR_SR > 1){
+ if (!SCpnt && sr_template.nr_dev > 1){
struct request *req1;
req1 = NULL;
cli();
buffer = NULL;
this_count = 0;
- if (dev >= NR_SR)
+ if (dev >= sr_template.nr_dev)
{
/* printk("CD-ROM request error: invalid device.\n"); */
end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
}
else
{
- if (realcount > 0xff)
- {
- realcount = 0xff;
- this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
- }
-
- cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
- cmd[2] = (unsigned char) ((block >> 8) & 0xff);
- cmd[3] = (unsigned char) block & 0xff;
- cmd[4] = (unsigned char) realcount;
- cmd[5] = 0;
+ if (realcount > 0xff)
+ {
+ realcount = 0xff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) realcount;
+ cmd[5] = 0;
}
#ifdef DEBUG
-{
- int i;
- printk("ReadCD: %d %d %d %d\n",block, realcount, buffer, this_count);
- printk("Use sg: %d\n", SCpnt->use_sg);
- printk("Dumping command: ");
- for(i=0; i<12; i++) printk("%2.2x ", cmd[i]);
- printk("\n");
-};
+ {
+ int i;
+ printk("ReadCD: %d %d %d %d\n",block, realcount, buffer, this_count);
+ printk("Use sg: %d\n", SCpnt->use_sg);
+ printk("Dumping command: ");
+ for(i=0; i<12; i++) printk("%2.2x ", cmd[i]);
+ printk("\n");
+ };
#endif
-
+
SCpnt->this_count = this_count;
scsi_do_cmd (SCpnt, (void *) cmd, buffer,
realcount * scsi_CDs[dev].sector_size,
rw_intr, SR_TIMEOUT, MAX_RETRIES);
}
-void sr_init1(){
- scsi_CDs = (Scsi_CD *) scsi_init_malloc(MAX_SR * sizeof(Scsi_CD));
-};
+static int sr_detect(Scsi_Device * SDp){
+
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return 0;
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 0;
-void sr_attach(Scsi_Device * SDp){
+ printk("Detected scsi CD-ROM sr%d at scsi%d, id %d, lun %d\n",
+ ++sr_template.dev_noticed,
+ SDp->host->host_no , SDp->id, SDp->lun);
+
+ return 1;
+}
+
+static void sr_attach(Scsi_Device * SDp){
+ Scsi_CD * cpnt;
+ int i;
+
+ /* We do not support attaching loadable devices yet. */
+
+ if(scsi_loadable_module_flag) return;
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return;
+
+ if (sr_template.nr_dev >= sr_template.dev_max)
+ panic ("scsi_devices corrupt (sr)");
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(!cpnt->device) break;
+
+ if(i >= sr_template.dev_max) panic ("scsi_devices corrupt (sr)");
+
SDp->scsi_request_fn = do_sr_request;
- scsi_CDs[NR_SR++].device = SDp;
- if(NR_SR > MAX_SR) panic ("scsi_devices corrupt (sr)");
-};
+ scsi_CDs[i].device = SDp;
+ sr_template.nr_dev++;
+ if(sr_template.nr_dev > sr_template.dev_max)
+ panic ("scsi_devices corrupt (sr)");
+}
+
static void sr_init_done (Scsi_Cmnd * SCpnt)
{
};
}
-unsigned long sr_init(unsigned long memory_start, unsigned long memory_end)
+static void sr_init()
{
int i;
+ static int sr_registered = 0;
+
+ if(sr_template.dev_noticed == 0) return;
- if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
- printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
- return memory_start;
+ if(!sr_registered) {
+ if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
+ printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
+ return;
+ }
}
- if(MAX_SR == 0) return memory_start;
- sr_sizes = (int *) memory_start;
- memory_start += MAX_SR * sizeof(int);
- memset(sr_sizes, 0, MAX_SR * sizeof(int));
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+
+ sr_template.dev_max = sr_template.dev_noticed;
+ scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD));
+
+ sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int));
+ memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
- sr_blocksizes = (int *) memory_start;
- memory_start += MAX_SR * sizeof(int);
- for(i=0;i<MAX_SR;i++) sr_blocksizes[i] = 2048;
+ sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
+ sizeof(int));
+ for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
blksize_size[MAJOR_NR] = sr_blocksizes;
- for (i = 0; i < NR_SR; ++i)
+}
+
+void sr_finish()
+{
+ int i;
+
+ for (i = 0; i < sr_template.nr_dev; ++i)
{
get_sectorsize(i);
printk("Scd sectorsize = %d bytes\n", scsi_CDs[i].sector_size);
else
read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
- return memory_start;
+ return;
}
int result, target, err;
target = MINOR(dev);
- if (target >= NR_SR) return -ENXIO;
+
+ if (target >= sr_template.nr_dev ||
+ !scsi_CDs[target].device) return -ENXIO;
switch (cmd)
{
#define MAJOR_NR SCSI_TAPE_MAJOR
#include "../block/blk.h"
#include "scsi.h"
+#include "hosts.h"
#include "scsi_ioctl.h"
#include "st.h"
#include "constants.h"
static int st_max_buffers = ST_MAX_BUFFERS;
static Scsi_Tape * scsi_tapes;
-int NR_ST=0;
-int MAX_ST=0;
+
+static void st_init(void);
+static void st_attach(Scsi_Device *);
+static int st_detect(Scsi_Device *);
+
+struct Scsi_Device_Template st_template = {NULL, "tape", "st", TYPE_TAPE,
+ SCSI_TAPE_MAJOR, 0, 0, 0, 0,
+ st_detect, st_init,
+ NULL, st_attach, NULL};
static int st_int_ioctl(struct inode * inode,struct file * file,
unsigned int cmd_in, unsigned long arg);
int st_nbr, remainder;
Scsi_Tape * STp;
- if ((st_nbr = SCpnt->request.dev) < NR_ST && st_nbr >= 0) {
+ if ((st_nbr = SCpnt->request.dev) < st_template.nr_dev && st_nbr >= 0) {
STp = &(scsi_tapes[st_nbr]);
if ((STp->buffer)->writing &&
(SCpnt->sense_buffer[0] & 0x70) == 0x70 &&
Scsi_Tape * STp;
dev = MINOR(inode->i_rdev) & 127;
- if (dev >= NR_ST)
+ if (dev >= st_template.dev_max || !scsi_tapes[dev].device)
return (-ENXIO);
STp = &(scsi_tapes[dev]);
if (STp->in_use) {
NULL /* fsync */
};
-void st_attach(Scsi_Device * SDp){
- scsi_tapes[NR_ST++].device = SDp;
- if(NR_ST > MAX_ST) panic ("scsi_devices corrupt (st)");
-};
+static void st_attach(Scsi_Device * SDp){
+ Scsi_Tape * tpnt;
+ int i;
+
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+ if(SDp->type != TYPE_TAPE) return;
-void st_init1(){
- scsi_tapes = (Scsi_Tape *) scsi_init_malloc(MAX_ST * sizeof(Scsi_Tape));
+ if(st_template.nr_dev >= st_template.dev_max)
+ panic ("scsi_devices corrupt (st)");
+
+ for(tpnt = scsi_tapes, i=0; i<st_template.dev_max; i++, tpnt++)
+ if(!tpnt->device) break;
+
+ if(i >= st_template.dev_max) panic ("scsi_devices corrupt (st)");
+
+ scsi_tapes[i].device = SDp;
+ st_template.nr_dev++;
};
+static int st_detect(Scsi_Device * SDp){
+
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return 0;
+ if(SDp->type != TYPE_TAPE) return 0;
+
+ printk("Detected scsi tape st%d at scsi%d, id %d, lun %d\n",
+ ++st_template.dev_noticed,
+ SDp->host->host_no , SDp->id, SDp->lun);
+
+ return 1;
+}
+
/* Driver initialization */
-unsigned long st_init(unsigned long mem_start, unsigned long mem_end)
+static void st_init()
{
int i;
Scsi_Tape * STp;
Scsi_Device * SDp;
+ static int st_registered = 0;
+
+ if (st_template.dev_noticed == 0) return;
- if (register_chrdev(MAJOR_NR,"st",&st_fops)) {
- printk("Unable to get major %d for SCSI tapes\n",MAJOR_NR);
- return mem_start;
+ if(!st_registered) {
+ if (register_chrdev(MAJOR_NR,"st",&st_fops)) {
+ printk("Unable to get major %d for SCSI tapes\n",MAJOR_NR);
+ return;
+ }
+ st_registered++;
}
- if (NR_ST == 0) return mem_start;
+
+ /* We do not support attaching loadable devices yet. */
+ if(scsi_loadable_module_flag) return;
+
+ scsi_tapes = (Scsi_Tape *) scsi_init_malloc(st_template.dev_noticed *
+ sizeof(Scsi_Tape));
+ st_template.dev_max = st_template.dev_noticed;
#ifdef DEBUG
printk("st: Buffer size %d bytes, write threshold %d bytes.\n",
st_buffer_size, st_write_threshold);
#endif
- for (i=0, SDp = scsi_devices; i < NR_ST; ++i) {
+ for (i=0, SDp = scsi_devices; i < st_template.dev_noticed; ++i) {
STp = &(scsi_tapes[i]);
STp->capacity = 0xfffff;
STp->dirty = 0;
STp->write_threshold = st_write_threshold;
STp->drv_block = 0;
STp->moves_after_eof = 1;
- STp->mt_status = (struct mtget *) mem_start;
- mem_start += sizeof(struct mtget);
+ STp->mt_status = (struct mtget *) scsi_init_malloc(sizeof(struct mtget));
/* Initialize status */
memset((void *) scsi_tapes[i].mt_status, 0, sizeof(struct mtget));
for (; SDp; SDp = SDp->next)
}
/* Allocate the buffers */
- st_nbr_buffers = NR_ST;
+ st_nbr_buffers = st_template.dev_noticed;
if (st_nbr_buffers > st_max_buffers)
st_nbr_buffers = st_max_buffers;
- st_buffers = (ST_buffer **)mem_start;
- mem_start += st_nbr_buffers * sizeof(ST_buffer *);
+ st_buffers = (ST_buffer **) scsi_init_malloc(st_nbr_buffers *
+ sizeof(ST_buffer *));
for (i=0; i < st_nbr_buffers; i++) {
- st_buffers[i] = (ST_buffer *) mem_start;
+ st_buffers[i] = (ST_buffer *) scsi_init_malloc(sizeof(ST_buffer) -
+ 1 + st_buffer_size);
#ifdef DEBUG
/* printk("st: Buffer address: %p\n", st_buffers[i]); */
#endif
- mem_start += sizeof(ST_buffer) - 1 + st_buffer_size;
st_buffers[i]->in_use = 0;
st_buffers[i]->writing = 0;
}
-
- return mem_start;
+ return;
}
OBJS= open.o read_write.o inode.o devices.o file_table.o buffer.o super.o \
block_dev.o stat.o exec.o pipe.o namei.o fcntl.o ioctl.o \
- select.o fifo.o locks.o filesystems.o $(BINFMTS)
+ select.o fifo.o locks.o filesystems.o dcache.o $(BINFMTS)
all: fs.o filesystems.a
--- /dev/null
+/*
+ * linux/fs/dcache.c
+ *
+ * (C) Copyright 1994 Linus Torvalds
+ */
+
+/*
+ * The directory cache is a "two-level" cache, each level doing LRU on
+ * its entries. Adding new entries puts them at the end of the LRU
+ * queue on the first-level cache, while the second-level cache is
+ * fed by any cache hits.
+ *
+ * The idea is that new additions (from readdir(), for example) will not
+ * flush the cache of entries that have really been used.
+ *
+ * There is a global hash-table over both caches that hashes the entries
+ * based on the directory inode number and device as well as on a
+ * string-hash computed over the name.
+ */
+
+#include <stddef.h>
+
+#include <linux/fs.h>
+#include <linux/string.h>
+
+/*
+ * Don't bother caching long names.. They just take up space in the cache, and
+ * for a name cache you just want to cache the "normal" names anyway which tend
+ * to be short.
+ */
+#define DCACHE_NAME_LEN 15
+#define DCACHE_SIZE 64
+
+struct hash_list {
+ struct dir_cache_entry * next;
+ struct dir_cache_entry * prev;
+};
+
+/*
+ * The dir_cache_entry must be in this order: we do ugly things with the pointers
+ */
+struct dir_cache_entry {
+ struct hash_list h;
+ unsigned long dev;
+ unsigned long dir;
+ unsigned long version;
+ unsigned long ino;
+ unsigned char name_len;
+ char name[DCACHE_NAME_LEN];
+ struct dir_cache_entry ** lru_head;
+ struct dir_cache_entry * next_lru, * prev_lru;
+};
+
+#define COPYDATA(de, newde) \
+memcpy((void *) &newde->dev, (void *) &de->dev, \
+4*sizeof(unsigned long) + 1 + DCACHE_NAME_LEN)
+
+static struct dir_cache_entry level1_cache[DCACHE_SIZE];
+static struct dir_cache_entry level2_cache[DCACHE_SIZE];
+
+/*
+ * The LRU-lists are doubly-linked circular lists, and do not change in size
+ * so these pointers always have something to point to (after _init)
+ */
+static struct dir_cache_entry * level1_head;
+static struct dir_cache_entry * level2_head;
+
+/*
+ * The hash-queues are also doubly-linked circular lists, but the head is
+ * itself on the doubly-linked list, not just a pointer to the first entry.
+ */
+#define DCACHE_HASH_QUEUES 19
+#define hash_fn(dev,dir,namehash) (((dev) ^ (dir) ^ (namehash)) % DCACHE_HASH_QUEUES)
+
+static struct hash_list hash_table[DCACHE_HASH_QUEUES];
+
+/*
+ * Unlink an entry from its circular doubly-linked LRU list.
+ * The entry's own link fields are left untouched; every caller
+ * re-links it right away (add_lru) or overwrites it.
+ */
+static inline void remove_lru(struct dir_cache_entry * de)
+{
+	de->next_lru->prev_lru = de->prev_lru;
+	de->prev_lru->next_lru = de->next_lru;
+}
+
+/*
+ * Insert "de" just before "head", i.e. at the tail of the circular
+ * LRU list (head is the least-recently-used slot, head->prev_lru the
+ * most-recently-used one).
+ */
+static inline void add_lru(struct dir_cache_entry * de, struct dir_cache_entry *head)
+{
+	de->next_lru = head;
+	de->prev_lru = head->prev_lru;
+	de->prev_lru->next_lru = de;
+	head->prev_lru = de;
+}
+
+/*
+ * Move "de" to the most-recently-used end of its LRU list.  When it
+ * happens to be the current list head (the least-recently-used slot),
+ * simply advancing the head pointer achieves the same rotation with
+ * no relinking, because the list is circular.
+ */
+static inline void update_lru(struct dir_cache_entry * de)
+{
+	if (de == *de->lru_head)
+		*de->lru_head = de->next_lru;
+	else {
+		remove_lru(de);
+		add_lru(de,*de->lru_head);
+	}
+}
+
+/*
+ * Stupid name"hash" algorithm. Write something better if you want to,
+ * but I doubt it matters that much
+ */
+static inline unsigned long namehash(const char * name, int len)
+{
+	/* only the first character and the length enter the hash */
+	return len * *(unsigned char *) name;
+}
+
+/*
+ * Hash queue manipulation. Look out for the casts..
+ */
+/*
+ * Unlink "de" from its hash queue, if it is on one.  h.next == NULL
+ * marks an unhashed entry -- the initial state, since both cache
+ * arrays live in zero-initialized static storage.
+ */
+static inline void remove_hash(struct dir_cache_entry * de)
+{
+	if (de->h.next) {
+		de->h.next->h.prev = de->h.prev;
+		de->h.prev->h.next = de->h.next;
+		de->h.next = NULL;
+	}
+}
+
+/*
+ * Push "de" onto the front of a hash queue.  The queue head is a bare
+ * hash_list cast to dir_cache_entry; that is safe because hash_list is
+ * the first member of dir_cache_entry (see the struct comment above)
+ * and only the two link fields of the head are ever touched.
+ */
+static inline void add_hash(struct dir_cache_entry * de, struct hash_list * hash)
+{
+	de->h.next = hash->next;
+	de->h.prev = (struct dir_cache_entry *) hash;
+	hash->next->h.prev = de;
+	hash->next = de;
+}
+
+/*
+ * Find a directory cache entry given all the necessary info.
+ */
+static struct dir_cache_entry * find_entry(struct inode * dir, const char * name, int len, struct hash_list * hash)
+{
+	/* the for-loop below initializes de; the old extra "= hash->next"
+	   at the declaration was a dead store */
+	struct dir_cache_entry * de;
+
+	/* walk the circular hash queue; a match requires device, directory
+	   inode, directory version stamp, name length and name to agree */
+	for (de = hash->next ; de != (struct dir_cache_entry *) hash ; de = de->h.next) {
+		if (de->dev != dir->i_dev)
+			continue;
+		if (de->dir != dir->i_ino)
+			continue;
+		if (de->version != dir->i_version)
+			continue;
+		if (de->name_len != len)
+			continue;
+		if (memcmp(de->name, name, len))
+			continue;
+		return de;
+	}
+	return NULL;
+}
+
+/*
+ * Move a successfully used entry to level2. If already at level2,
+ * move it to the end of the LRU queue..
+ */
+static inline void move_to_level2(struct dir_cache_entry * old_de, struct hash_list * hash)
+{
+	struct dir_cache_entry * de;
+
+	/* already at level2: just bump it to the hot end of that LRU */
+	if (old_de->lru_head == &level2_head) {
+		update_lru(old_de);
+		return;
+	}
+	/*
+	 * Recycle the least-recently-used level2 slot (the head), advance
+	 * the head past it, and overwrite it with a copy of the level1
+	 * entry.  The level1 original is left in place to age out normally.
+	 */
+	de = level2_head;
+	level2_head = de->next_lru;
+	remove_hash(de);
+	COPYDATA(old_de, de);
+	add_hash(de, hash);
+}
+
+/*
+ * Look up a name in the directory cache.  Returns the cached inode
+ * number, or 0 on a miss (names longer than DCACHE_NAME_LEN are never
+ * cached).  A hit promotes the entry into the level2 cache.
+ */
+unsigned long dcache_lookup(struct inode * dir, const char * name, int len)
+{
+	struct hash_list * hash;
+	struct dir_cache_entry *de;
+
+	if (len > DCACHE_NAME_LEN)
+		return 0;
+	hash = hash_table + hash_fn(dir->i_dev, dir->i_ino, namehash(name,len));
+	de = find_entry(dir, name, len, hash);
+	if (!de)
+		return 0;
+	move_to_level2(de, hash);
+	return de->ino;
+}
+
+/*
+ * Add a name/inode pair to the directory cache.  If the pair is
+ * already cached, just refresh its LRU position.  New entries recycle
+ * the least-recently-used level1 slot; over-long names are not cached.
+ */
+void dcache_add(struct inode * dir, const char * name, int len, unsigned long ino)
+{
+	struct hash_list * hash;
+	struct dir_cache_entry *de;
+
+	if (len > DCACHE_NAME_LEN)
+		return;
+	hash = hash_table + hash_fn(dir->i_dev, dir->i_ino, namehash(name,len));
+	if ((de = find_entry(dir, name, len, hash)) != NULL) {
+		update_lru(de);
+		return;
+	}
+	/* reuse the oldest level1 entry and advance the level1 head */
+	de = level1_head;
+	level1_head = de->next_lru;
+	remove_hash(de);
+	de->dev = dir->i_dev;
+	de->dir = dir->i_ino;
+	de->version = dir->i_version;
+	de->ino = ino;
+	de->name_len = len;
+	memcpy(de->name, name, len);
+	add_hash(de, hash);
+}
+
+/*
+ * Initialize the name cache: link both cache levels into circular LRU
+ * lists and empty all hash queues.  Takes and returns the bootstrap
+ * memory pointer unchanged -- everything lives in static storage.
+ */
+unsigned long name_cache_init(unsigned long mem_start, unsigned long mem_end)
+{
+	int i;
+	struct dir_cache_entry * p;
+
+	/*
+	 * Init level1 LRU lists..
+	 */
+	p = level1_cache;
+	do {
+		p[1].prev_lru = p;
+		p[0].next_lru = p+1;
+		p[0].lru_head = &level1_head;
+	} while (++p < level1_cache + DCACHE_SIZE-1);
+	level1_cache[0].prev_lru = p;
+	p[0].next_lru = &level1_cache[0];
+	p[0].lru_head = &level1_head;
+	level1_head = level1_cache;
+
+	/*
+	 * Init level2 LRU lists..
+	 */
+	p = level2_cache;
+	do {
+		p[1].prev_lru = p;
+		p[0].next_lru = p+1;
+		p[0].lru_head = &level2_head;
+	} while (++p < level2_cache + DCACHE_SIZE-1);
+	level2_cache[0].prev_lru = p;
+	p[0].next_lru = &level2_cache[0];
+	p[0].lru_head = &level2_head;
+	level2_head = level2_cache;
+
+	/*
+	 * Empty hash queues..  Both links of each head must point back at
+	 * the head itself.  (The previous code assigned .next twice --
+	 * "next = next = head" -- leaving .prev NULL until the first
+	 * add_hash() happened to patch it up.)
+	 */
+	for (i = 0 ; i < DCACHE_HASH_QUEUES ; i++)
+		hash_table[i].next = hash_table[i].prev =
+			(struct dir_cache_entry *) &hash_table[i];
+	return mem_start;
+}
.s.o:
$(AS) -o $*.o $<
-OBJS= acl.o balloc.o bitmap.o dcache.o dir.o file.o fsync.o \
- ialloc.o inode.o ioctl.o namei.o super.o symlink.o truncate.o
+OBJS= acl.o balloc.o bitmap.o dir.o file.o fsync.o ialloc.o \
+ inode.o ioctl.o namei.o super.o symlink.o truncate.o
ext2.o: $(OBJS)
$(LD) -r -o ext2.o $(OBJS)
put_fs_long (de->inode, &dirent->d_ino);
put_fs_byte (0, de->name_len + dirent->d_name);
put_fs_word (de->name_len, &dirent->d_reclen);
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (inode->i_dev, inode->i_ino,
- de->name, de->name_len,
+ dcache_add(inode, de->name, de->name_len,
de->inode);
-#endif
i = de->name_len;
brelse (bh);
if (!IS_RDONLY(inode)) {
inode->u.ext2_i.i_dtime = raw_inode->i_dtime;
inode->i_blksize = inode->i_sb->s_blocksize;
inode->i_blocks = raw_inode->i_blocks;
+ inode->i_version = ++event;
inode->u.ext2_i.i_flags = raw_inode->i_flags;
inode->u.ext2_i.i_faddr = raw_inode->i_faddr;
inode->u.ext2_i.i_frag = raw_inode->i_frag;
iput (dir);
return -ENOENT;
}
-#ifndef DONT_USE_DCACHE
- if (!(ino = ext2_dcache_lookup (dir->i_dev, dir->i_ino, name, len))) {
-#endif
+ if (!(ino = dcache_lookup(dir, name, len))) {
if (!(bh = ext2_find_entry (dir, name, len, &de))) {
iput (dir);
return -ENOENT;
}
ino = de->inode;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name,
- de->name_len, ino);
-#endif
+ dcache_add(dir, de->name, de->name_len, ino);
brelse (bh);
-#ifndef DONT_USE_DCACHE
}
-#endif
if (!(*result = iget (dir->i_sb, ino))) {
iput (dir);
return -EACCES;
return err;
}
de->inode = inode->i_ino;
- dir->i_version++;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name, de->name_len,
- de->inode);
-#endif
+ dir->i_version = ++event;
+ dcache_add(dir, de->name, de->name_len, de->inode);
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
return err;
}
de->inode = inode->i_ino;
- dir->i_version++;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name, de->name_len,
- de->inode);
-#endif
+ dir->i_version = ++event;
+ dcache_add(dir, de->name, de->name_len, de->inode);
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
return err;
}
de->inode = inode->i_ino;
- dir->i_version++;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name, de->name_len,
- de->inode);
-#endif
+ dir->i_version = ++event;
+ dcache_add(dir, de->name, de->name_len, de->inode);
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
inode->i_size = 0;
}
retval = ext2_delete_entry (de, bh);
- dir->i_version++;
+ dir->i_version = ++event;
}
up(&inode->i_sem);
if (retval)
ll_rw_block (WRITE, 1, &bh);
wait_on_buffer (bh);
}
-#ifndef DONT_USE_DCACHE
- ext2_dcache_remove(inode->i_dev, inode->i_ino, ".", 1);
- ext2_dcache_remove(inode->i_dev, inode->i_ino, "..", 2);
-#endif
if (inode->i_nlink != 2)
ext2_warning (inode->i_sb, "ext2_rmdir",
"empty directory has nlink!=2 (%d)",
inode->i_nlink);
-#ifndef DONT_USE_DCACHE
- ext2_dcache_remove (dir->i_dev, dir->i_ino, de->name, de->name_len);
-#endif
+ inode->i_version = ++event;
inode->i_nlink = 0;
inode->i_dirt = 1;
dir->i_nlink--;
retval = ext2_delete_entry (de, bh);
if (retval)
goto end_unlink;
- dir->i_version++;
+ dir->i_version = ++event;
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
wait_on_buffer (bh);
}
-#ifndef DONT_USE_DCACHE
- ext2_dcache_remove (dir->i_dev, dir->i_ino, de->name, de->name_len);
-#endif
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
dir->i_dirt = 1;
inode->i_nlink--;
return err;
}
de->inode = inode->i_ino;
- dir->i_version++;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name, de->name_len,
- de->inode);
-#endif
+ dir->i_version = ++event;
+ dcache_add(dir, de->name, de->name_len, de->inode);
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
return err;
}
de->inode = oldinode->i_ino;
- dir->i_version++;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_add (dir->i_dev, dir->i_ino, de->name, de->name_len,
- de->inode);
-#endif
+ dir->i_version = ++event;
+ dcache_add(dir, de->name, de->name_len, de->inode);
mark_buffer_dirty(bh, 1);
if (IS_SYNC(dir)) {
ll_rw_block (WRITE, 1, &bh);
* ok, that's it
*/
new_de->inode = old_inode->i_ino;
-#ifndef DONT_USE_DCACHE
- ext2_dcache_remove (old_dir->i_dev, old_dir->i_ino, old_de->name,
- old_de->name_len);
- ext2_dcache_add (new_dir->i_dev, new_dir->i_ino, new_de->name,
- new_de->name_len, new_de->inode);
-#endif
+ dcache_add(new_dir, new_de->name, new_de->name_len, new_de->inode);
retval = ext2_delete_entry (old_de, old_bh);
if (retval == -ENOENT)
goto try_again;
if (retval)
goto end_rename;
- new_dir->i_version++;
- old_dir->i_version++;
+ new_dir->i_version = ++event;
+ old_dir->i_version = ++event;
if (new_inode) {
new_inode->i_nlink--;
new_inode->i_ctime = CURRENT_TIME;
sb->u.ext2_sb.s_es->s_state = sb->u.ext2_sb.s_mount_state;
mark_buffer_dirty(sb->u.ext2_sb.s_sbh, 1);
}
-#ifndef DONT_USE_DCACHE
- ext2_dcache_invalidate (sb->s_dev);
-#endif
sb->s_dev = 0;
for (i = 0; i < EXT2_MAX_GROUP_DESC; i++)
if (sb->u.ext2_sb.s_group_desc[i])
clear_inode(inode);
inode->i_count = 1;
inode->i_nlink = 1;
+ inode->i_version = ++event;
inode->i_sem.count = 1;
nr_free_inodes--;
if (nr_free_inodes < 0) {
offset &= 1023; \
if(offset + cont_size >= 1024) { \
bh = bread(DEV->i_dev, block++, ISOFS_BUFFER_SIZE(DEV)); \
- memcpy(buffer, bh->b_data + offset, 1024 - offset); \
- brelse(bh); \
- offset1 = 1024 - offset; \
- offset = 0; \
+ if(!bh) {printk("Unable to read continuation Rock Ridge record\n"); \
+ kfree(buffer); \
+ buffer = NULL; } else { \
+ memcpy(buffer, bh->b_data + offset, 1024 - offset); \
+ brelse(bh); \
+ offset1 = 1024 - offset; \
+ offset = 0;} \
} \
}; \
- bh = bread(DEV->i_dev, block, ISOFS_BUFFER_SIZE(DEV)); \
- if(bh){ \
- memcpy(buffer + offset1, bh->b_data + offset, cont_size - offset1); \
- brelse(bh); \
- chr = (unsigned char *) buffer; \
- len = cont_size; \
- cont_extent = 0; \
- cont_size = 0; \
- cont_offset = 0; \
- goto LABEL; \
- }; \
+ if(buffer) { \
+ bh = bread(DEV->i_dev, block, ISOFS_BUFFER_SIZE(DEV)); \
+ if(bh){ \
+ memcpy(buffer + offset1, bh->b_data + offset, cont_size - offset1); \
+ brelse(bh); \
+ chr = (unsigned char *) buffer; \
+ len = cont_size; \
+ cont_extent = 0; \
+ cont_size = 0; \
+ cont_offset = 0; \
+ goto LABEL; \
+ }; \
+ } \
printk("Unable to read rock-ridge attributes\n"); \
}}
inode->i_dirt = 1;
}
- unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
vma->vm_ops = &msdos_file_mmap;
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
return 0;
}
inode->i_dirt = 1;
}
- unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
vma->vm_ops = &nfs_file_mmap;
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
return 0;
}
$(AS) -o $*.o $<
OBJS= ialloc.o balloc.o inode.o file.o dir.o symlink.o namei.o \
- fsync.o truncate.o # mmap.o
+ fsync.o truncate.o mmap.o
sysv.o: $(OBJS)
$(LD) -r -o sysv.o $(OBJS)
- SystemV/386 FS,
- Coherent FS.
-This is version beta 1.
+This is version beta 2.
To install:
* Answer the 'System V and Coherent filesystem support' question with 'y'
#include <linux/fs.h>
#include <linux/sysv_fs.h>
-static int sysv_file_read(struct inode *, struct file *, char *, int);
static int sysv_file_write(struct inode *, struct file *, char *, int);
/*
NULL, /* readdir - bad */
NULL, /* select - default */
NULL, /* ioctl - default */
- NULL, /* mmap */
+ sysv_mmap, /* mmap */
NULL, /* no special open is needed */
NULL, /* release */
sysv_sync_file /* fsync */
char * bh_data;
};
-static int sysv_file_read(struct inode * inode, struct file * filp, char * buf, int count)
+int sysv_file_read(struct inode * inode, struct file * filp, char * buf, int count)
{
struct super_block * sb = inode->i_sb;
int read,left,chars;
--- /dev/null
+/*
+ * linux/fs/sysv/mmap.c
+ *
+ * mm/memory.c, mm/mmap.c
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ *
+ * nfs/mmap.c
+ * Copyright (C) 1993 Jon Tombs
+ *
+ * fs/msdos/mmap.c
+ * Copyright (C) 1994 Jacques Gelinas
+ *
+ * fs/sysv/mmap.c
+ * Copyright (C) 1994 Bruno Haible
+ *
+ * SystemV/Coherent mmap handling
+ */
+
+#include <asm/segment.h>
+
+#include <linux/fs.h>
+#include <linux/sysv_fs.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/malloc.h>
+
+/*
+ * Fill in the supplied page for mmap
+ */
+static unsigned long sysv_file_mmap_nopage (struct vm_area_struct * area,
+	unsigned long address, unsigned long page, int no_share)
+{
+	int remaining, count, old_fs;
+	struct file filp;
+
+	address &= PAGE_MASK;
+	/* prepare a file pointer positioned at the file offset that backs
+	   this faulting page */
+	filp.f_pos = address - area->vm_start + area->vm_offset;
+	filp.f_reada = 0;
+	remaining = area->vm_end - address;
+	if (remaining > PAGE_SIZE)
+		remaining = PAGE_SIZE;
+	/* read from the file. page is in kernel space, not user space. */
+	old_fs = get_fs(); set_fs(get_ds());
+	count = sysv_file_read (area->vm_inode, &filp, (char *)page, remaining);
+	set_fs(old_fs);
+	if (count < 0)
+		count = 0;	/* do nothing on I/O error ?? */
+	else
+		remaining -= count;
+	/* zero-fill whatever the read did not cover (short read, EOF,
+	   or the error case above) */
+	if (remaining > 0)
+		memset((char *)page + count, 0, remaining);
+	return page;
+}
+
+static struct vm_operations_struct sysv_file_mmap = {
+ NULL, /* open */
+ NULL, /* close */
+ sysv_file_mmap_nopage, /* nopage */
+ NULL, /* wppage */
+ NULL, /* share */
+ NULL, /* unmap */
+};
+
+/*
+ * mmap a SystemV/Coherent regular file.  Only read-only / copy-on-write
+ * mappings are supported, and the mapped file offset must be aligned to
+ * the filesystem block size.
+ */
+int sysv_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
+{
+	if (vma->vm_page_prot & PAGE_RW)	/* only PAGE_COW or read-only supported right now */
+		return -EINVAL;
+	/* validate i_sb BEFORE dereferencing it: the old code read
+	   inode->i_sb->s_blocksize first and NULL-checked i_sb after */
+	if (!inode->i_sb || !S_ISREG(inode->i_mode))
+		return -EACCES;
+	if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
+		return -EINVAL;
+	if (!IS_RDONLY(inode)) {
+		inode->i_atime = CURRENT_TIME;
+		inode->i_dirt = 1;
+	}
+
+	/* the mapping keeps its own reference to the inode */
+	vma->vm_inode = inode;
+	inode->i_count++;
+	vma->vm_ops = &sysv_file_mmap;
+	return 0;
+}
mpnt->vm_start = PAGE_MASK & (unsigned long) p;
mpnt->vm_end = TASK_SIZE;
mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY;
+ mpnt->vm_flags = VM_STACK_FLAGS;
mpnt->vm_share = NULL;
+ mpnt->vm_ops = NULL;
mpnt->vm_inode = NULL;
mpnt->vm_offset = 0;
- mpnt->vm_ops = NULL;
+ mpnt->vm_pte = 0;
insert_vm_struct(current, mpnt);
- current->mm->stk_vma = mpnt;
}
sp = (unsigned long *) (0xfffffffc & (unsigned long) p);
if(exec) sp -= DLINFO_ITEMS*2;
*/
#undef EXT2FS_PRE_02B_COMPAT
-/*
- * Define DONT_USE_DCACHE to inhibit the directory cache
- */
-#define DONT_USE_DCACHE
-
/*
* Define EXT2_PREALLOCATE to preallocate data blocks for expanding files
*/
/* bitmap.c */
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
-#ifndef DONT_USE_DCACHE
-/* dcache.c */
-extern void ext2_dcache_invalidate (unsigned short);
-extern unsigned long ext2_dcache_lookup (unsigned short, unsigned long,
- const char *, int);
-extern void ext2_dcache_add (unsigned short, unsigned long, const char *,
- int, unsigned long);
-extern void ext2_dcache_remove (unsigned short, unsigned long, const char *,
- int);
-#endif
-
/* dir.c */
extern int ext2_check_dir_entry (char *, struct inode *,
struct ext2_dir_entry *, struct buffer_head *,
extern void buffer_init(void);
extern unsigned long inode_init(unsigned long start, unsigned long end);
extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
#define MAJOR(a) (int)((unsigned short)(a) >> 8)
#define MINOR(a) (int)((unsigned short)(a) & 0xFF)
extern int block_fsync(struct inode *, struct file *);
extern int file_fsync(struct inode *, struct file *);
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern unsigned long dcache_lookup(struct inode *, const char *, int);
+
extern inline struct inode * iget(struct super_block * sb,int nr)
{
return __iget(sb,nr,1);
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
-#define PAGE_SIZE ((unsigned long)1<<PAGE_SHIFT)
+#define PGDIR_SHIFT 22
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#ifdef __KERNEL__
#define BITS_PER_PTR (8*sizeof(unsigned long))
/* to mask away the intra-page address bits */
#define PAGE_MASK (~(PAGE_SIZE-1))
+ /* to mask away the intra-page address bits */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
/* to align the pointer to a pointer address */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
-#define NEW_SWAP
-
/*
* define DEBUG if you want the wait-queues to have some extra
* debugging code. It's not normally used, but might catch some
extern int wp_works_ok;
extern unsigned long intr_count;
+extern unsigned long event;
#define start_bh_atomic() \
__asm__ __volatile__("incl _intr_count")
unsigned long rss;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
int swappable:1;
-#ifdef NEW_SWAP
+ unsigned long swap_address;
unsigned long old_maj_flt; /* old value of maj_flt */
unsigned long dec_flt; /* page fault count of the last time */
unsigned long swap_cnt; /* number of pages to swap on next pass */
- short swap_table; /* current page table */
- short swap_page; /* current page */
-#endif NEW_SWAP
struct vm_area_struct * mmap;
};
0, \
/* ?_flt */ 0, 0, 0, 0, \
0, \
-/* swap */ 0, 0, 0, 0, 0, \
+/* swap */ 0, 0, 0, 0, \
NULL }
struct task_struct {
extern struct buffer_head * sysv_getblk(struct inode *, unsigned int, int, char* *);
extern struct buffer_head * sysv_file_bread(struct inode *, int, int, char* *);
+extern int sysv_file_read(struct inode *, struct file *, char *, int);
extern void sysv_truncate(struct inode *);
extern void sysv_put_super(struct super_block *);
extern void sysv_statfs(struct super_block *, struct statfs *);
extern int sysv_sync_inode(struct inode *);
extern int sysv_sync_file(struct inode *, struct file *);
-#if 0
extern int sysv_mmap(struct inode *, struct file *, unsigned long, size_t, int, unsigned long);
-#endif
extern struct inode_operations sysv_file_inode_operations;
extern struct inode_operations sysv_file_inode_operations_with_bmap;
#endif
memory_start = inode_init(memory_start,memory_end);
memory_start = file_table_init(memory_start,memory_end);
+ memory_start = name_cache_init(memory_start,memory_end);
mem_init(low_memory_start,memory_start,memory_end);
buffer_init();
time_init();
.c.s:
$(CC) $(CFLAGS) -S $<
.s.o:
- $(AS) -c -o $*.o $<
+ $(AS) -o $*.o $<
.c.o:
$(CC) $(CFLAGS) -c $<
X(verify_area),
X(do_mmap),
X(do_munmap),
- X(insert_vm_struct),
X(zeromap_page_range),
X(unmap_page_range),
- X(merge_segments),
/* internal kernel memory management */
X(__get_free_pages),
long time_adjust_step = 0;
int need_resched = 0;
+unsigned long event = 0;
/*
* Tell us the machine setup..
if ((p->state == TASK_UNINTERRUPTIBLE) ||
(p->state == TASK_INTERRUPTIBLE)) {
p->state = TASK_RUNNING;
- if (p->counter > current->counter)
+ if (p->counter > current->counter + 3)
need_resched = 1;
}
}
if ((p = tmp->task) != NULL) {
if (p->state == TASK_INTERRUPTIBLE) {
p->state = TASK_RUNNING;
- if (p->counter > current->counter)
+ if (p->counter > current->counter + 3)
need_resched = 1;
}
}
int old_ruid = current->uid;
if (ruid != (uid_t) -1) {
- if ((current->euid==ruid) ||
- (old_ruid == ruid) ||
+ if ((old_ruid == ruid) ||
+ (current->euid==ruid) ||
suser())
current->uid = ruid;
else
if (euid != (uid_t) -1) {
if ((old_ruid == euid) ||
(current->euid == euid) ||
+ (current->suid == euid) ||
suser())
current->euid = euid;
else {
else
error = anon_map(NULL, NULL, vma);
- if (!error)
- return addr;
-
- kfree(vma);
- if (!current->errno)
- current->errno = -error;
- return -1;
+ if (error) {
+ kfree(vma);
+ if (!current->errno)
+ current->errno = -error;
+ return -1;
+ }
+ insert_vm_struct(current, vma);
+ merge_segments(current->mm->mmap);
+ return addr;
}
asmlinkage int sys_mmap(unsigned long *buffer)
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
extern struct vm_operations_struct file_mmap;
- struct buffer_head * bh;
if (vma->vm_page_prot & PAGE_RW) /* only PAGE_COW or read-only supported right now */
return -EINVAL;
return -EACCES;
if (!inode->i_op || !inode->i_op->bmap)
return -ENOEXEC;
- if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
- return -EACCES;
if (!IS_RDONLY(inode)) {
inode->i_atime = CURRENT_TIME;
inode->i_dirt = 1;
}
- brelse(bh);
-
- unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
vma->vm_ops = &file_mmap;
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
-
return 0;
}
/*
- * Insert vm structure into process list
- * This makes sure the list is sorted by start address, and
- * some some simple overlap checking.
- * JSGF
+ * Insert vm structure into process list sorted by address.
*/
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
- struct vm_area_struct **nxtpp, *mpnt;
+ struct vm_area_struct **p, *mpnt;
- nxtpp = &t->mm->mmap;
-
- for(mpnt = t->mm->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
- {
+ p = &t->mm->mmap;
+ while ((mpnt = *p) != NULL) {
if (mpnt->vm_start > vmp->vm_start)
break;
- nxtpp = &mpnt->vm_next;
-
- if ((vmp->vm_start >= mpnt->vm_start &&
- vmp->vm_start < mpnt->vm_end) ||
- (vmp->vm_end >= mpnt->vm_start &&
- vmp->vm_end < mpnt->vm_end))
- printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
- vmp->vm_start, vmp->vm_end,
- mpnt->vm_start, vmp->vm_end);
+ if (mpnt->vm_end > vmp->vm_start)
+ printk("insert_vm_struct: overlapping memory areas\n");
+ p = &mpnt->vm_next;
}
-
vmp->vm_next = mpnt;
-
- *nxtpp = vmp;
+ *p = vmp;
}
/*
{
if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -ENOMEM;
-
- insert_vm_struct(current, vma);
- merge_segments(current->mm->mmap);
return 0;
}
*
* (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
*/
-#ifdef NEW_SWAP
+
/*
* These are the miminum and maximum number of pages to swap from one process,
* before proceeding to the next:
*/
#define SWAP_RATIO 128
-static int swap_out(unsigned int priority)
+static int swap_out_process(struct task_struct * p)
{
- static int swap_task;
- int table;
- int page;
- long pg_table;
- int loop;
- int counter = NR_TASKS * 2 >> priority;
- struct task_struct *p;
-
- counter = NR_TASKS * 2 >> priority;
- for(; counter >= 0; counter--, swap_task++) {
- /*
- * Check that swap_task is suitable for swapping. If not, look for
- * the next suitable process.
- */
- loop = 0;
- while(1) {
- if(swap_task >= NR_TASKS) {
- swap_task = 1;
- if(loop)
- /* all processes are unswappable or already swapped out */
- return 0;
- loop = 1;
- }
-
- p = task[swap_task];
- if(p && p->mm->swappable && p->mm->rss)
- break;
-
- swap_task++;
- }
-
- /*
- * Determine the number of pages to swap from this process.
- */
- if(! p->mm->swap_cnt) {
- p->mm->dec_flt = (p->mm->dec_flt * 3) / 4 + p->mm->maj_flt - p->mm->old_maj_flt;
- p->mm->old_maj_flt = p->mm->maj_flt;
-
- if(p->mm->dec_flt >= SWAP_RATIO / SWAP_MIN) {
- p->mm->dec_flt = SWAP_RATIO / SWAP_MIN;
- p->mm->swap_cnt = SWAP_MIN;
- } else if(p->mm->dec_flt <= SWAP_RATIO / SWAP_MAX)
- p->mm->swap_cnt = SWAP_MAX;
- else
- p->mm->swap_cnt = SWAP_RATIO / p->mm->dec_flt;
- }
+ unsigned long address;
+ unsigned long offset;
+ unsigned long *pgdir;
+ unsigned long pg_table;
/*
* Go through process' page directory.
*/
- for(table = p->mm->swap_table; table < 1024; table++) {
- pg_table = ((unsigned long *) p->tss.cr3)[table];
- if(pg_table >= high_memory)
- continue;
- if(mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
- continue;
- if(!(PAGE_PRESENT & pg_table)) {
- printk("swap_out: bad page-table at pg_dir[%d]: %08lx\n",
- table, pg_table);
- ((unsigned long *) p->tss.cr3)[table] = 0;
- continue;
- }
- pg_table &= 0xfffff000;
-
- /*
- * Go through this page table.
- */
- for(page = p->mm->swap_page; page < 1024; page++) {
- switch(try_to_swap_out(page + (unsigned long *) pg_table)) {
- case 0:
- break;
+ address = p->mm->swap_address;
+ pgdir = (address >> PGDIR_SHIFT) + (unsigned long *) p->tss.cr3;
+ offset = address & ~PGDIR_MASK;
+ address &= PGDIR_MASK;
+ for ( ; address < TASK_SIZE ;
+ pgdir++, address = address + PGDIR_SIZE, offset = 0) {
+ pg_table = *pgdir;
+ if (pg_table >= high_memory)
+ continue;
+ if (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)
+ continue;
+ if (!(PAGE_PRESENT & pg_table)) {
+ printk("swap_out_process (%s): bad page-table at vm %08lx: %08lx\n",
+ p->comm, address + offset, pg_table);
+ *pgdir = 0;
+ continue;
+ }
+ pg_table &= 0xfffff000;
- case 1:
- p->mm->rss--;
- /* continue with the following page the next time */
- p->mm->swap_table = table;
- p->mm->swap_page = page + 1;
- if((--p->mm->swap_cnt) == 0)
- swap_task++;
- return 1;
+ /*
+ * Go through this page table.
+ */
+ for( ; offset < ~PGDIR_MASK ; offset += PAGE_SIZE) {
+ switch(try_to_swap_out((unsigned long *) (pg_table + (offset >> 10)))) {
+ case 0:
+ break;
- default:
- p->mm->rss--;
- break;
- }
- }
+ case 1:
+ p->mm->rss--;
+ /* continue with the following page the next time */
+ p->mm->swap_address = address + offset + PAGE_SIZE;
+ return 1;
- p->mm->swap_page = 0;
+ default:
+ p->mm->rss--;
+ break;
+ }
+ }
}
-
/*
* Finish work with this process, if we reached the end of the page
* directory. Mark restart from the beginning the next time.
*/
- p->mm->swap_table = 0;
- }
- return 0;
+ p->mm->swap_address = 0;
+ return 0;
}
-#else /* old swapping procedure */
-
-/*
- * Go through the page tables, searching for a user page that
- * we can swap out.
- *
- * We now check that the process is swappable (normally only 'init'
- * is un-swappable), allowing high-priority processes which cannot be
- * swapped out (things like user-level device drivers (Not implemented)).
- */
static int swap_out(unsigned int priority)
{
- static int swap_task = 1;
- static int swap_table = 0;
- static int swap_page = 0;
- int counter = NR_TASKS*8;
- int pg_table;
- struct task_struct * p;
-
- counter >>= priority;
-check_task:
- if (counter-- < 0)
- return 0;
- if (swap_task >= NR_TASKS) {
- swap_task = 1;
- goto check_task;
- }
- p = task[swap_task];
- if (!p || !p->mm->swappable) {
- swap_task++;
- goto check_task;
- }
-check_dir:
- if (swap_table >= PTRS_PER_PAGE) {
- swap_table = 0;
- swap_task++;
- goto check_task;
- }
- pg_table = ((unsigned long *) p->tss.cr3)[swap_table];
- if (pg_table >= high_memory || (mem_map[MAP_NR(pg_table)] & MAP_PAGE_RESERVED)) {
- swap_table++;
- goto check_dir;
- }
- if (!(PAGE_PRESENT & pg_table)) {
- printk("bad page-table at pg_dir[%d]: %08x\n",
- swap_table,pg_table);
- ((unsigned long *) p->tss.cr3)[swap_table] = 0;
- swap_table++;
- goto check_dir;
- }
- pg_table &= PAGE_MASK;
-check_table:
- if (swap_page >= PTRS_PER_PAGE) {
- swap_page = 0;
- swap_table++;
- goto check_dir;
- }
- switch (try_to_swap_out(swap_page + (unsigned long *) pg_table)) {
- case 0: break;
- case 1: p->mm->rss--; return 1;
- default: p->mm->rss--;
- }
- swap_page++;
- goto check_table;
-}
+ static int swap_task;
+ int loop;
+ int counter = NR_TASKS * 2 >> priority;
+ struct task_struct *p;
-#endif
+ counter = NR_TASKS * 2 >> priority;
+ for(; counter >= 0; counter--, swap_task++) {
+ /*
+ * Check that swap_task is suitable for swapping. If not, look for
+ * the next suitable process.
+ */
+ loop = 0;
+ while(1) {
+ if (swap_task >= NR_TASKS) {
+ swap_task = 1;
+ if (loop)
+ /* all processes are unswappable or already swapped out */
+ return 0;
+ loop = 1;
+ }
+
+ p = task[swap_task];
+ if (p && p->mm->swappable && p->mm->rss)
+ break;
+
+ swap_task++;
+ }
+
+ /*
+ * Determine the number of pages to swap from this process.
+ */
+ if (!p->mm->swap_cnt) {
+ p->mm->dec_flt = (p->mm->dec_flt * 3) / 4 + p->mm->maj_flt - p->mm->old_maj_flt;
+ p->mm->old_maj_flt = p->mm->maj_flt;
+
+ if (p->mm->dec_flt >= SWAP_RATIO / SWAP_MIN) {
+ p->mm->dec_flt = SWAP_RATIO / SWAP_MIN;
+ p->mm->swap_cnt = SWAP_MIN;
+ } else if (p->mm->dec_flt <= SWAP_RATIO / SWAP_MAX)
+ p->mm->swap_cnt = SWAP_MAX;
+ else
+ p->mm->swap_cnt = SWAP_RATIO / p->mm->dec_flt;
+ }
+ if (swap_out_process(p)) {
+ if ((--p->mm->swap_cnt) == 0)
+ swap_task++;
+ return 1;
+ }
+ }
+ return 0;
+}
static int try_to_free_page(int priority)
{
int i=6;
while (i--) {
- if (priority != GFP_NOBUFFER && shrink_buffers(i))
+ if (priority != GFP_NOBUFFER && shrink_buffers(i))
return 1;
if (shm_swap(i))
return 1;
add_mem_queue(free_area_list+order, (struct mem_list *) addr);
}
+static inline void check_free_buffers(unsigned long addr)
+{
+ struct buffer_head * bh;
+
+ bh = buffer_pages[MAP_NR(addr)];
+ if (bh) {
+ struct buffer_head *tmp = bh;
+ do {
+ if (tmp->b_list == BUF_SHARED && tmp->b_dev != 0xffff)
+ refile_buffer(tmp);
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+ }
+}
+
void free_pages(unsigned long addr, unsigned long order)
{
if (addr < high_memory) {
delete_from_swap_cache(addr);
}
restore_flags(flag);
- if(*map == 1) {
- int j;
- struct buffer_head * bh, *tmp;
-
- bh = buffer_pages[MAP_NR(addr)];
- if(bh)
- for(j = 0, tmp = bh; tmp && (!j || tmp != bh);
- tmp = tmp->b_this_page, j++)
- if(tmp->b_list == BUF_SHARED && tmp->b_dev != 0xffff)
- refile_buffer(tmp);
- }
+ if (*map == 1)
+ check_free_buffers(addr);
}
return;
}
return 0;
}
restore_flags(flags);
- if (priority != GFP_BUFFER && try_to_free_page(priority))
+ if (priority != GFP_BUFFER && try_to_free_page(priority))
goto repeat;
return 0;
}