Horrors with open/reread_partition exclusion are starting to get fixed.
It's not the final variant, but at least we are getting the logic into
one place; switch to final variant will happen once we get per-disk
analog of gendisks. New fields - ->bd_part_sem and ->bd_part_count.
The latter counts the amount of opened partitions. The former protects
said count _and_ is held while we are rereading partition tables.
Helpers - dev_lock_part()/dev_unlock_part() (currently taking kdev_t; that
will change pretty soon). No more ->open() and ->release() for partitions,
all that logic went to generic code. Lock hierarchy is currently messy:
->bd_sem for partitions -> ->bd_part_sem -> ->bd_sem for entire disks
Ugly, but that'll go away and to get the final variant of locking right
now would take _really_ big patch - with a lot of steps glued together.
The damn thing is large as it is...
#define NEED_1_RECAL -2
#define NEED_2_RECAL -3
int cylinder;
- unsigned int access_count;
- unsigned int busy;
struct {
char recal;
char report;
static struct hd_struct mfm[MFM_MAXDRIVES << 6];
static int mfm_sizes[MFM_MAXDRIVES << 6];
-static DECLARE_WAIT_QUEUE_HEAD(mfm_wait_open);
/* Stuff from the assembly routines */
extern unsigned int hdc63463_baseaddress; /* Controller base address */
static int mfm_open(struct inode *inode, struct file *file)
{
int dev = DEVICE_NR(minor(inode->i_rdev));
-
if (dev >= mfm_drives)
return -ENODEV;
-
- while (mfm_info[dev].busy)
- sleep_on (&mfm_wait_open);
-
- mfm_info[dev].access_count++;
- return 0;
-}
-
-/*
- * Releasing a block device means we sync() it, so that it can safely
- * be forgotten about...
- */
-static int mfm_release(struct inode *inode, struct file *file)
-{
- mfm_info[DEVICE_NR(minor(inode->i_rdev))].access_count--;
return 0;
}
{
owner: THIS_MODULE,
open: mfm_open,
- release: mfm_release,
ioctl: mfm_ioctl,
};
/*
* This routine is called to flush all partitions and partition tables
* for a changed MFM disk, and then re-read the new partition table.
- * If we are revalidating due to an ioctl, we have USAGE == 1.
*/
static int mfm_reread_partitions(kdev_t dev)
{
- unsigned int start, i, maxp, target = DEVICE_NR(minor(dev));
- unsigned long flags;
-
- local_irq_save(flags);
- if (mfm_info[target].busy || mfm_info[target].access_count > 1) {
- local_irq_restore (flags);
- return -EBUSY;
- }
- mfm_info[target].busy = 1;
- local_irq_restore (flags);
-
- maxp = 1 << mfm_gendisk.minor_shift;
- start = target << mfm_gendisk.minor_shift;
-
- wipe_partitions(mk_kdev(MAJOR_NR, start));
-
+ unsigned int unit = DEVICE_NR(minor(dev));
+ kdev_t device = mk_kdev(MAJOR_NR, unit << mfm_gendisk.minor_shift);
+ int err = dev_lock_part(device);
+ if (err)
+ return err;
+ wipe_partitions(device);
/* Divide by 2, since sectors are 2 times smaller than usual ;-) */
-
- grok_partitions(&mfm_gendisk, target, 1<<6, mfm_info[target].heads *
- mfm_info[target].cylinders * mfm_info[target].sectors / 2);
-
- mfm_info[target].busy = 0;
- wake_up (&mfm_wait_open);
+ grok_partitions(device, mfm_info[unit].heads *
+ mfm_info[unit].cylinders * mfm_info[unit].sectors / 2);
+ dev_unlock_part(device);
return 0;
}
static int acsi_sizes[MAX_DEV<<4] = { 0, };
static struct hd_struct acsi_part[MAX_DEV<<4] = { {0,0}, };
static int access_count[MAX_DEV] = { 0, };
-static char busy[MAX_DEV] = { 0, };
-static DECLARE_WAIT_QUEUE_HEAD(busy_wait);
static int CurrentNReq;
static int CurrentNSect;
if (device >= NDevices)
return -ENXIO;
aip = &acsi_info[device];
- while (busy[device])
- sleep_on(&busy_wait);
if (access_count[device] == 0 && aip->removable) {
#if 0
}
#endif
-#define DEVICE_BUSY busy[device]
-#define USAGE access_count[device]
-#define GENDISK_STRUCT acsi_gendisk
-
/*
* This routine is called to flush all partitions and partition tables
* for a changed scsi disk, and then re-read the new partition table.
static int revalidate_acsidisk( int dev, int maxusage )
{
- int device;
- struct gendisk * gdev;
- int res;
- struct acsi_info_struct *aip;
-
- device = DEVICE_NR(minor(dev));
- aip = &acsi_info[device];
- gdev = &GENDISK_STRUCT;
+ int unit = DEVICE_NR(minor(dev));
+ struct acsi_info_struct *aip = &acsi_info[unit];
+ kdev_t device = mk_kdev(MAJOR_NR, unit<<4);
+ int res = dev_lock_part(device);
- cli();
- if (DEVICE_BUSY || USAGE > maxusage) {
- sti();
- return -EBUSY;
- };
- DEVICE_BUSY = 1;
- sti();
+ if (res < 0)
+ return res;
- res = wipe_partitions(dev);
+ res = wipe_partitions(device);
stdma_lock( NULL, NULL );
stdma_release();
if (!res)
- grok_partitions(dev, aip->size);
+ grok_partitions(device, aip->size);
- DEVICE_BUSY = 0;
- wake_up(&busy_wait);
+ dev_unlock_part(device);
return res;
}
}
/* Borrowed and adapted from sd.c */
+/*
+ * FIXME: we are missing the exclusion with ->open() here - it can happen
+ * just as we are rereading partition tables.
+ */
static int revalidate_logvol(kdev_t dev, int maxusage)
{
int ctlr, target;
hba[ctlr]->drv[logvol].usage_count = 0;
max_p = 1 << gdev->minor_shift;
start = logvol<< gdev->minor_shift;
+ kdev = mk_kdev(MAJOR_NR + ctlr, logvol<< gdev->minor_shift);
- wipe_partitions(MAJOR_NR + ctlr, start);
-
+ wipe_partitions(kdev);
++hba[ctlr]->num_luns;
gdev->nr_real = hba[ctlr]->highest_lun + 1;
/* setup partitions per disk */
- kdev = mk_kdev(MAJOR_NR + ctlr, logvol<< gdev->minor_shift);
grok_partitions(kdev, hba[ctlr]->drv[logvol].nr_blocks);
kfree(ld_buff);
}
/* Borrowed and adapted from sd.c */
+/*
+ * FIXME: exclusion with ->open()
+ */
static int revalidate_logvol(kdev_t dev, int maxusage)
{
int ctlr, target;
}
static int pd_open (struct inode *inode, struct file *file)
+{
+ int unit = DEVICE_NR(inode->i_rdev);
-{ int unit = DEVICE_NR(inode->i_rdev);
-
- if ((unit >= PD_UNITS) || (!PD.present)) return -ENODEV;
-
- wait_event (pd_wait_open, pd_valid);
+ if ((unit >= PD_UNITS) || (!PD.present))
+ return -ENODEV;
PD.access++;
}
static int pd_release (struct inode *inode, struct file *file)
-
-{ kdev_t devp;
- int unit;
-
- devp = inode->i_rdev;
- unit = DEVICE_NR(devp);
+{
+ int unit = DEVICE_NR(inode->i_rdev);
if ((unit >= PD_UNITS) || (PD.access <= 0))
return -EINVAL;
static int pd_revalidate(kdev_t dev)
{
- int unit, res;
- long flags;
+ int unit = DEVICE_NR(dev);
+ kdev_t device = mk_kdev(MAJOR_NR, unit << PD_BITS);
+ int res;
- unit = DEVICE_NR(dev);
if ((unit >= PD_UNITS) || !PD.present)
return -ENODEV;
- save_flags(flags);
- cli();
- if (PD.access > 1) {
- restore_flags(flags);
- return -EBUSY;
- }
- pd_valid = 0;
- restore_flags(flags);
-
- res = wipe_partitions(dev);
+ res = dev_lock_part(device);
+ if (res < 0)
+ return res;
+ res = wipe_partitions(device);
if (res == 0 && pd_identify(unit))
- grok_partitions(dev, PD.capacity);
+ grok_partitions(device, PD.capacity);
- pd_valid = 1;
- wake_up(&pd_wait_open);
+ dev_unlock_part(device);
return res;
}
static int ps2esdi_open(struct inode *inode, struct file *file);
-static int ps2esdi_release(struct inode *inode, struct file *file);
-
static int ps2esdi_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg);
static u_int dma_arb_level; /* DMA arbitration level */
static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_int);
-static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_wait_open);
static int no_int_yet;
-static int access_count[MAX_HD];
-static char ps2esdi_valid[MAX_HD];
static int ps2esdi_sizes[MAX_HD << 6];
static int ps2esdi_drives;
static struct hd_struct ps2esdi[MAX_HD << 6];
{
owner: THIS_MODULE,
open: ps2esdi_open,
- release: ps2esdi_release,
ioctl: ps2esdi_ioctl,
};
}
blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 128);
- for (i = 0; i < ps2esdi_drives; i++) {
+ for (i = 0; i < ps2esdi_drives; i++)
register_disk(&ps2esdi_gendisk,mk_kdev(MAJOR_NR,i<<6),1<<6,
&ps2esdi_fops,
ps2esdi_info[i].head * ps2esdi_info[i].sect *
ps2esdi_info[i].cyl);
- ps2esdi_valid[i] = 1;
- }
return 0;
err_out3:
static int ps2esdi_open(struct inode *inode, struct file *file)
{
int dev = DEVICE_NR(inode->i_rdev);
-
- if (dev < ps2esdi_drives) {
- while (!ps2esdi_valid[dev])
- sleep_on(&ps2esdi_wait_open);
-
- access_count[dev]++;
-
- return (0);
- } else
- return (-ENODEV);
-}
-
-
-
-static int ps2esdi_release(struct inode *inode, struct file *file)
-{
- int dev = DEVICE_NR(inode->i_rdev);
-
- if (dev < ps2esdi_drives) {
- access_count[dev]--;
- }
+ if (dev >= ps2esdi_drives)
+ return -ENODEV;
return 0;
}
-
-
static int ps2esdi_ioctl(struct inode *inode,
struct file *file, u_int cmd, u_long arg)
{
static int ps2esdi_reread_partitions(kdev_t dev)
{
int target = DEVICE_NR(dev);
- int res;
+ kdev_t device = mk_kdev(MAJOR_NR, target << 6);
+ int res = dev_lock_part(device);
- cli();
- ps2esdi_valid[target] = (access_count[target] != 1);
- sti();
- if (ps2esdi_valid[target])
- return (-EBUSY);
+ if (res < 0)
+ return res;
- res = wipe_partitions(dev);
+ res = wipe_partitions(device);
if (res == 0)
- grok_partitions(dev, ps2esdi_info[target].head
+ grok_partitions(device, ps2esdi_info[target].head
* ps2esdi_info[target].cyl
* ps2esdi_info[target].sect);
- ps2esdi_valid[target] = 1;
- wake_up(&ps2esdi_wait_open);
-
- return (res);
+ dev_unlock_part(device);
+ return res;
}
static void ps2esdi_reset_timer(unsigned long unused)
* Note no locks taken out here. In a worst case scenario, we could drop
* a chunk of system memory. But that should never happen, since validation
* happens at open or mount time, when locks are held.
+ *
+ * That's crap, since doing that while some partitions are opened
+ * or mounted will give you really nasty results.
*/
static int mm_revalidate(kdev_t i_rdev)
{
- int i;
-
int card_number = DEVICE_NR(i_rdev);
- /* first partition, # of partitions */
- int part1 = (card_number << MM_SHIFT) + 1;
- int npart = (1 << MM_SHIFT) -1;
-
- /* first clear old partition information */
- for (i=0; i<npart ;i++) {
- mm_gendisk.sizes[part1+i]=0;
- mm_gendisk.part[part1+i].start_sect = 0;
- mm_gendisk.part[part1+i].nr_sects = 0;
- }
-
- mm_gendisk.part[card_number << MM_SHIFT].nr_sects =
- cards[card_number].mm_size << 1;
-
-
- /* then fill new info */
+ kdev_t device = mk_kdev(MAJOR_NR, card_number << MM_SHIFT);
+ int res = dev_lock_part(device);
+ if (res < 0)
+ return res;
+ wipe_partitions(device);
printk(KERN_INFO "mm partition check: (%d)\n", card_number);
- grok_partitions(mk_kdev(major_nr,part1-1),
- mm_gendisk.sizes[card_number<<MM_SHIFT]);
+ grok_partitions(device, cards[card_number].mm_size << 1);
+ dev_unlock_part(device);
return 0;
}
/*
}
/*
-----------------------------------------------------------------------------------
--- mm_do_release
------------------------------------------------------------------------------------
-*/
-static int mm_do_release(struct inode *i, struct file *filp)
-{
- return 0;
-}
-/*
------------------------------------------------------------------------------------
-- mm_fops
-----------------------------------------------------------------------------------
*/
static struct block_device_operations mm_fops = {
owner: THIS_MODULE,
open: mm_open,
- release: mm_do_release,
ioctl: mm_ioctl,
revalidate: mm_revalidate,
check_media_change: mm_check_change,
};
static struct hd_struct xd_struct[XD_MAXDRIVES << 6];
-static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES];
+static int xd_sizes[XD_MAXDRIVES << 6];
static spinlock_t xd_lock = SPIN_LOCK_UNLOCKED;
static struct block_device_operations xd_fops = {
owner: THIS_MODULE,
open: xd_open,
- release: xd_release,
ioctl: xd_ioctl,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
-static DECLARE_WAIT_QUEUE_HEAD(xd_wait_open);
-static u_char xd_valid[XD_MAXDRIVES] = { 0,0 };
static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
static u_char xd_override __initdata = 0, xd_type __initdata = 0;
static u_short xd_iobase = 0x320;
/* xd_maxsectors depends on controller - so set after detection */
blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), xd_maxsectors);
- for (i = 0; i < xd_drives; i++) {
- xd_valid[i] = 1;
+ for (i = 0; i < xd_drives; i++)
register_disk(&xd_gendisk, mk_kdev(MAJOR_NR,i<<6), 1<<6,
&xd_fops,
xd_info[i].heads * xd_info[i].cylinders *
xd_info[i].sectors);
- }
-
xd_gendisk.nr_real = xd_drives;
}
static int xd_open (struct inode *inode,struct file *file)
{
int dev = DEVICE_NR(inode->i_rdev);
-
- if (dev < xd_drives) {
- while (!xd_valid[dev])
- sleep_on(&xd_wait_open);
-
- xd_access[dev]++;
-
- return (0);
- }
-
- return -ENXIO;
+ if (dev >= xd_drives)
+ return -ENXIO;
+ return 0;
}
/* do_xd_request: handle an incoming request */
}
}
-/* xd_release: release the device */
-static int xd_release (struct inode *inode, struct file *file)
-{
- int target = DEVICE_NR(inode->i_rdev);
- if (target < xd_drives)
- xd_access[target]--;
- return 0;
-}
-
/* xd_reread_partitions: rereads the partition table from a drive */
static int xd_reread_partitions(kdev_t dev)
{
- int target;
- int res;
+ int target = DEVICE_NR(dev);
+ kdev_t device = mk_kdev(MAJOR_NR, target << 6);
+ int res = dev_lock_part(device);
- target = DEVICE_NR(dev);
-
- cli();
- xd_valid[target] = (xd_access[target] != 1);
- sti();
- if (xd_valid[target])
- return -EBUSY;
+ if (res < 0)
+ return res;
- res = wipe_partitions(dev);
+ res = wipe_partitions(device);
if (!res)
- grok_partitions(dev, xd_info[target].heads
+ grok_partitions(device, xd_info[target].heads
* xd_info[target].cylinders
* xd_info[target].sectors);
- xd_valid[target] = 1;
- wake_up(&xd_wait_open);
+ dev_unlock_part(device);
return res;
}
static int xd_open (struct inode *inode,struct file *file);
static void do_xd_request (request_queue_t * q);
static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
-static int xd_release (struct inode *inode,struct file *file);
static int xd_reread_partitions (kdev_t dev);
static int xd_readwrite (u_char operation,u_char drive,char *buffer,u_int block,u_int count);
static void xd_recalibrate (u_char drive);
static char recalibrate[MAX_HD];
static char special_op[MAX_HD];
-static int access_count[MAX_HD];
-static char busy[MAX_HD];
-static DECLARE_WAIT_QUEUE_HEAD(busy_wait);
static int reset;
static int hd_error;
static int hd_open(struct inode * inode, struct file * filp)
{
- int target;
- target = DEVICE_NR(inode->i_rdev);
-
+ int target = DEVICE_NR(inode->i_rdev);
if (target >= NR_HD)
return -ENODEV;
- while (busy[target])
- sleep_on(&busy_wait);
- access_count[target]++;
return 0;
}
* Releasing a block device means we sync() it, so that it can safely
* be forgotten about...
*/
-static int hd_release(struct inode * inode, struct file * file)
-{
- int target = DEVICE_NR(inode->i_rdev);
- access_count[target]--;
- return 0;
-}
extern struct block_device_operations hd_fops;
static struct block_device_operations hd_fops = {
.open = hd_open,
- .release = hd_release,
.ioctl = hd_ioctl,
};
return 0;
}
-#define DEVICE_BUSY busy[target]
-#define USAGE access_count[target]
#define CAPACITY (hd_info[target].head*hd_info[target].sect*hd_info[target].cyl)
/* We assume that the BIOS parameters do not change, so the disk capacity
will not change */
-#undef MAYBE_REINIT
-#define GENDISK_STRUCT hd_gendisk
/*
* This routine is called to flush all partitions and partition tables
*/
static int revalidate_hddisk(kdev_t dev, int maxusage)
{
- int target;
- struct gendisk * gdev;
- int res;
- long flags;
-
- target = DEVICE_NR(dev);
- gdev = &GENDISK_STRUCT;
-
- save_flags(flags);
- cli();
- if (DEVICE_BUSY || USAGE > maxusage) {
- restore_flags(flags);
- return -EBUSY;
- }
- DEVICE_BUSY = 1;
- restore_flags(flags);
-
- res = wipe_partitions(dev);
- if (res)
- goto leave;
-
-#ifdef MAYBE_REINIT
- MAYBE_REINIT;
-#endif
-
- grok_partitions(dev, CAPACITY);
-
-leave:
- DEVICE_BUSY = 0;
- wake_up(&busy_wait);
+ int target = DEVICE_NR(dev);
+ kdev_t device = mk_kdev(MAJOR_NR, target << 6);
+ int res = dev_lock_part(device);
+ if (res < 0)
+ return res;
+ res = wipe_partitions(device);
+ if (!res)
+ grok_partitions(device, CAPACITY);
+ dev_unlock_part(device);
return res;
}
if (drive->driver == NULL)
ide_driver_module();
- while (drive->busy)
- sleep_on(&drive->wqueue);
-
++drive->usage;
if (ata_ops(drive) && ata_ops(drive)->open)
return ata_ops(drive)->open(inode, filp, drive);
*/
int ata_revalidate(kdev_t i_rdev)
{
+ kdev_t device = mk_kdev(major(i_rdev), minor(i_rdev) & ~PARTN_MASK);
struct ata_device *drive;
- unsigned long flags;
int res;
- if ((drive = get_info_ptr(i_rdev)) == NULL)
+ if ((drive = get_info_ptr(device)) == NULL)
return -ENODEV;
- /* FIXME: The locking here doesn't make the slightest sense! */
- spin_lock_irqsave(&ide_lock, flags);
-
- if (drive->busy || (drive->usage > 1)) {
- spin_unlock_irqrestore(&ide_lock, flags);
-
- return -EBUSY;
- }
-
- drive->busy = 1;
MOD_INC_USE_COUNT;
- spin_unlock_irqrestore(&ide_lock, flags);
+ res = dev_lock_part(device);
+ if (res < 0) {
+ MOD_DEC_USE_COUNT;
+ return res;
+ }
- res = wipe_partitions(i_rdev);
+ res = wipe_partitions(device);
if (!res) {
if (ata_ops(drive) && ata_ops(drive)->revalidate) {
ata_get(ata_ops(drive));
ata_ops(drive)->revalidate(drive);
ata_put(ata_ops(drive));
} else
- grok_partitions(i_rdev, ata_capacity(drive));
+ grok_partitions(device, ata_capacity(drive));
}
- drive->busy = 0;
- wake_up(&drive->wqueue);
-
+ dev_unlock_part(device);
MOD_DEC_USE_COUNT;
-
return res;
}
static int ftl_reread_partitions(kdev_t dev)
{
- int minor = minor(dev);
- partition_t *part = myparts[minor >> 4];
- int res;
-
- DEBUG(0, "ftl_cs: ftl_reread_partition(%d)\n", minor);
- if ((atomic_read(&part->open) > 1)) {
- return -EBUSY;
- }
-
- res = wipe_partitions(dev);
- if (res)
- goto leave;
-
- scan_header(part);
-
- register_disk(&ftl_gendisk, whole >> PART_BITS, MAX_PART,
- &ftl_blk_fops, le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE);
-
- return res;
+ int minor = minor(dev);
+ partition_t *part = myparts[minor >> 4];
+ kdev_t device = mk_kdev(MAJOR_NR, minor & ~15);
+ int res = dev_lock_part(device);
+ if (res < 0)
+ return res;
+ res = wipe_partitions(device);
+ if (!res) {
+ scan_header(part);
+ grok_partitions(device,
+ le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE);
+ }
+ dev_unlock_part(device);
+ return res;
}
/*======================================================================
partition->mtd = mtd;
- if ((scan_header(partition) == 0) &&
- (build_maps(partition) == 0)) {
-
+ if ((scan_header(partition) == 0) && (build_maps(partition) == 0)) {
partition->state = FTL_FORMATTED;
atomic_set(&partition->open, 0);
myparts[device] = partition;
- ftl_reread_partitions(device << 4);
+ register_disk(&ftl_gendisk, mk_kdev(MAJOR_NR, device << 4),
+ MAX_PART, &ftl_blk_fops,
+ le32_to_cpu(partition->header.FormattedSize)/SECTOR_SIZE);
#ifdef PCMCIA_DEBUG
printk(KERN_INFO "ftl_cs: opening %d kb FTL partition\n",
le32_to_cpu(partition->header.FormattedSize) >> 10);
#endif
/* linux stuff */
- nftl->usecount = 0;
nftl->cylinders = 1024;
nftl->heads = 16;
#if LINUX_VERSION_CODE < 0x20328
resetup_one_dev(&nftl_gendisk, firstfree);
#else
- grok_partitions(mk_kdev(MAJOR_NR,firstfree<<NFTL_PARTN_BITS),
- nftl->nr_sects);
+ register_disk(&nftl_gendisk,
+ mk_kdev(MAJOR_NR,firstfree<<NFTL_PARTN_BITS),
+ 1<<NFTL_PARTN_BITS, &nftl_fops, nftl->nr_sects);
#endif
}
case BLKRRPART:
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- if (nftl->usecount > 1) return -EBUSY;
- /*
- * We have to flush all buffers and invalidate caches,
- * or we won't be able to re-use the partitions,
- * if there was a change and we don't want to reboot
- */
- res = wipe_partitions(inode->i_rdev);
+ {
+ kdev_t device = mk_kdev(MAJOR_NR,
+ minor(inode->i_rdev) & -(1<<NFTL_PARTN_BITS));
+ res = dev_lock_part(device);
+ if (res < 0)
+ return res;
+ res = wipe_partitions(device);
if (!res)
- grok_partitions(inode->i_rdev, nftl->nr_sects);
-
+ grok_partitions(device, nftl->nr_sects);
+ dev_unlock_part(device);
+ }
return res;
#if (LINUX_VERSION_CODE < 0x20303)
return -EROFS;
#endif /* !CONFIG_NFTL_RW */
- thisNFTL->usecount++;
if (!get_mtd_device(thisNFTL->mtd, -1))
return /* -E'SBUGGEREDOFF */ -ENXIO;
if (thisNFTL->mtd->sync)
thisNFTL->mtd->sync(thisNFTL->mtd);
- thisNFTL->usecount--;
put_mtd_device(thisNFTL->mtd);
*/
if (!scsi_block_when_processing_errors(sdp))
return -ENXIO;
- /*
- * Make sure that only one process can do a check_change_disk at
- * one time. This is also used to lock out further access when
- * the partition table is being re-read.
- */
-
- while (sdp->busy) {
- barrier();
- cpu_relax();
- }
/*
* The following code can sleep.
* Module unloading must be prevented
sdp->access_count++;
if (sdp->removable) {
- sdp->allow_revalidate = 1;
check_disk_change(inode->i_rdev);
- sdp->allow_revalidate = 0;
/*
* If the drive is empty, just let the open fail.
int revalidate_scsidisk(kdev_t dev, int maxusage)
{
int dsk_nr = DEVICE_NR(dev);
- int res;
Scsi_Disk * sdkp;
Scsi_Device * sdp;
+ kdev_t device = mk_kdev(major(dev), minor(dev) & ~15);
+ int res;
SCSI_LOG_HLQUEUE(3, printk("revalidate_scsidisk: dsk_nr=%d\n",
DEVICE_NR(dev)));
if ((NULL == sdkp) || (NULL == (sdp = sdkp->device)))
return -ENODEV;
- if (sdp->busy || ((sdp->allow_revalidate == 0) &&
- (sdp->access_count > maxusage))) {
- printk(KERN_WARNING "Device busy for revalidation "
- "(access_count=%d)\n", sdp->access_count);
- return -EBUSY;
- }
- sdp->busy = 1;
+ res = dev_lock_part(device);
+ if (res < 0)
+ return res;
- res = wipe_partitions(dev);
+ res = wipe_partitions(device);
if (res)
goto leave;
sd_init_onedisk(sdkp, dsk_nr);
- grok_partitions(dev, sdkp->capacity);
+ grok_partitions(device, sdkp->capacity);
leave:
- sdp->busy = 0;
+ dev_unlock_part(device);
return res;
}
new_bdev->bd_queue = NULL;
new_bdev->bd_contains = NULL;
new_bdev->bd_inode = inode;
+ new_bdev->bd_part_count = 0;
+ sema_init(&new_bdev->bd_part_sem, 1);
inode->i_mode = S_IFBLK;
inode->i_rdev = kdev;
inode->i_bdev = new_bdev;
Return the function table of a device.
Load the driver if needed.
*/
-const struct block_device_operations * get_blkfops(unsigned int major)
+struct block_device_operations * get_blkfops(unsigned int major)
{
- const struct block_device_operations *ret = NULL;
+ struct block_device_operations *ret = NULL;
/* major 0 is used for non-device mounts */
if (major && major < MAX_BLKDEV) {
int check_disk_change(kdev_t dev)
{
int i;
- const struct block_device_operations * bdops = NULL;
+ struct block_device_operations * bdops = NULL;
i = major(dev);
if (i < MAX_BLKDEV)
}
}
}
- if (current_ops->open) {
- ret = current_ops->open(inode, file);
- if (ret)
- goto out2;
+ if (bdev->bd_contains == bdev) {
+ if (current_ops->open) {
+ ret = current_ops->open(inode, file);
+ if (ret)
+ goto out2;
+ }
+ } else {
+ down(&bdev->bd_contains->bd_part_sem);
+ bdev->bd_contains->bd_part_count++;
+ up(&bdev->bd_contains->bd_part_sem);
}
if (!bdev->bd_op)
bdev->bd_op = ops;
}
if (!--bdev->bd_openers)
kill_bdev(bdev);
- if (bdev->bd_op->release)
- ret = bdev->bd_op->release(bd_inode, NULL);
+ if (bdev->bd_contains == bdev) {
+ if (bdev->bd_op->release)
+ ret = bdev->bd_op->release(bd_inode, NULL);
+ } else {
+ down(&bdev->bd_contains->bd_part_sem);
+ bdev->bd_contains->bd_part_count--;
+ up(&bdev->bd_contains->bd_part_sem);
+ }
if (!bdev->bd_openers) {
if (bdev->bd_op->owner)
__MOD_DEC_USE_COUNT(bdev->bd_op->owner);
sprintf(state->name, "p");
}
bdev = bdget(kdev_t_to_nr(dev));
- bdev->bd_contains = bdev;
- bdev->bd_inode->i_size = (loff_t)hd->part[minor(dev)].nr_sects << 9;
- if (!bdev->bd_openers) {
- struct blk_dev_struct *p = blk_dev + major(dev);
- unsigned bsize = bdev_hardsect_size(bdev);
- while (bsize < PAGE_CACHE_SIZE) {
- if (bdev->bd_inode->i_size & bsize)
- break;
- bsize <<= 1;
- }
- if (p->queue)
- bdev->bd_queue = p->queue(dev);
- else
- bdev->bd_queue = &p->request_queue;
- bdev->bd_block_size = bsize;
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
- }
+ if (blkdev_get(bdev, FMODE_READ, 0, BDEV_RAW))
+ goto out;
state->limit = 1<<hd->minor_shift;
for (i = 0; check_part[i]; i++) {
int res, j;
printk(" unknown partition table\n");
setup_devfs:
- invalidate_bdev(bdev, 1);
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
- bdput(bdev);
-
+ blkdev_put(bdev, BDEV_RAW);
+out:
/* Setup driverfs tree */
if (hd->sizes)
driverfs_create_partitions(hd, minor(dev));
struct inode * bd_inode;
dev_t bd_dev; /* not a kdev_t - it's a search key */
int bd_openers;
- const struct block_device_operations *bd_op;
+ struct block_device_operations *bd_op;
struct request_queue *bd_queue;
struct semaphore bd_sem; /* open/close mutex */
struct list_head bd_inodes;
struct block_device * bd_contains;
unsigned bd_block_size;
unsigned long bd_offset;
+ struct semaphore bd_part_sem;
+ unsigned bd_part_count;
};
struct inode {
extern void blk_run_queues(void);
/* fs/devices.c */
-extern const struct block_device_operations *get_blkfops(unsigned int);
+extern struct block_device_operations *get_blkfops(unsigned int);
extern int register_chrdev(unsigned int, const char *, struct file_operations *);
extern int unregister_chrdev(unsigned int, const char *);
extern int chrdev_open(struct inode *, struct file *);
return res;
}
+/* NOTE NOTE NOTE: this interface _will_ change in a couple of patches */
+
+static inline int dev_lock_part(kdev_t dev)
+{
+ struct block_device *bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev)
+ return -ENOMEM;
+ if (!down_trylock(&bdev->bd_part_sem)) {
+ if (!bdev->bd_part_count)
+ return 0;
+ up(&bdev->bd_part_sem);
+ }
+ bdput(bdev);
+ return -EBUSY;
+}
+
+static inline void dev_unlock_part(kdev_t dev)
+{
+ struct block_device *bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev)
+ BUG();
+ up(&bdev->bd_part_sem);
+ bdput(bdev);
+ bdput(bdev);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */