W: http://www.dandelion.com/Linux/
S: Maintained
+CIRRUS LOGIC GENERIC FBDEV DRIVER
+P: Jeff Garzik
+M: jgarzik@pobox.com
+L: linux-fbdev@vuser.vu.union.edu
+S: Maintained
+
CONFIGURE, MENUCONFIG, XCONFIG
P: Michael Elizabeth Chastain
M: mec@shout.net
VERSION = 2
PATCHLEVEL = 2
-SUBLEVEL = 10
+SUBLEVEL = 11
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
#include <asm/system.h>
#include <asm/pci.h>
#include <asm/hwrpb.h>
-#include <asm/mmu_context.h>
#define __EXTERN_INLINE inline
#include <asm/io.h>
#define DO_EV4_MMU \
max_asn: EV4_MAX_ASN, \
- mmu_context_mask: ~0UL, \
mv_get_mmu_context: ev4_get_mmu_context, \
mv_flush_tlb_current: ev4_flush_tlb_current, \
mv_flush_tlb_other: ev4_flush_tlb_other, \
#define DO_EV5_MMU \
max_asn: EV5_MAX_ASN, \
- mmu_context_mask: ~0UL, \
mv_get_mmu_context: ev5_get_mmu_context, \
mv_flush_tlb_current: ev5_flush_tlb_current, \
mv_flush_tlb_other: ev5_flush_tlb_other, \
#define DO_EV6_MMU \
max_asn: EV6_MAX_ASN, \
- mmu_context_mask: 0xfffffffffful, \
mv_get_mmu_context: ev5_get_mmu_context, \
mv_flush_tlb_current: ev5_flush_tlb_current, \
mv_flush_tlb_other: ev5_flush_tlb_other, \
smp_store_cpu_info(int cpuid)
{
cpu_data[cpuid].loops_per_sec = loops_per_sec;
+ cpu_data[cpuid].last_asn
+ = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
}
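For illustration, a minimal stand-alone sketch (not part of the patch) of the per-CPU seeding above, using the widths from asm/mmu_context.h (WIDTH_HARDWARE_ASN == 8, WIDTH_THIS_PROCESSOR == 5, hence ASN_FIRST_VERSION == 1UL << 13): each CPU's ASN counter starts in its own disjoint 256-entry window.

    #include <stdio.h>

    #define WIDTH_HARDWARE_ASN   8
    #define WIDTH_THIS_PROCESSOR 5
    #define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))

    int main(void)
    {
            int cpuid;

            /* mirrors: cpu_data[cpuid].last_asn
                        = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION */
            for (cpuid = 0; cpuid < 4; cpuid++)
                    printf("cpu %d: last_asn = %#lx\n", cpuid,
                           (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION);
            return 0;       /* prints 0x2000, 0x2100, 0x2200, 0x2300 */
    }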
/*
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <asm/io.h>
#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
-#ifdef __SMP__
-unsigned long last_asn[NR_CPUS] = { /* gag */
- ASN_FIRST_VERSION + (0 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (1 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (2 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (3 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (4 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (5 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (6 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (7 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (8 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (9 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (10 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (11 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (12 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (13 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (14 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (15 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (16 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (17 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (18 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (19 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (20 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (21 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (22 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (23 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (24 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (25 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (26 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (27 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (28 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (29 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (30 << WIDTH_HARDWARE_ASN),
- ASN_FIRST_VERSION + (31 << WIDTH_HARDWARE_ASN)
-};
-#else
-unsigned long asn_cache = ASN_FIRST_VERSION;
-#endif /* __SMP__ */
-
/*
- * Select a new ASN for a task.
+ * Force a new ASN for a task.
*/
+#ifndef __SMP__
+int last_asn = ASN_FIRST_VERSION;
+#endif
+
void
get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
- unsigned long asn = asn_cache;
-
- if ((asn & HARDWARE_ASN_MASK) < MAX_ASN)
- ++asn;
- else {
- tbiap();
- imb();
- asn = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
- }
- asn_cache = asn;
- mm->context = asn; /* full version + asn */
- p->tss.asn = asn & HARDWARE_ASN_MASK; /* just asn */
+ p->tss.asn = HARDWARE_ASN_MASK & __get_new_mmu_context(p, mm);
}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to handle_mm_fault().
extern unsigned long free_area_init(unsigned long, unsigned long);
-static struct thread_struct *
+static inline struct thread_struct *
load_PCB(struct thread_struct * pcb)
{
register unsigned long sp __asm__("$30");
#define RS_STROBE_TIME (10*HZ)
#define RS_ISR_PASS_LIMIT 256
-#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
+#define IRQ_T(state) \
+ ((state->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
#define SERIAL_INLINE
} else
handler = rs_interrupt_single;
- retval = request_irq(state->irq, handler, IRQ_T(info),
+ retval = request_irq(state->irq, handler, IRQ_T(state),
"serial", NULL);
if (retval) {
if (capable(CAP_SYS_ADMIN)) {
if (IRQ_ports[state->irq]) {
free_irq(state->irq, NULL);
retval = request_irq(state->irq, rs_interrupt_single,
- IRQ_T(info), "serial", NULL);
+ IRQ_T(state), "serial", NULL);
if (retval)
printk("serial shutdown: request_irq: error %d"
else
handler = rs_interrupt;
- retval = request_irq(state->irq, handler, IRQ_T(info),
+ retval = request_irq(state->irq, handler, IRQ_T(state),
"serial", NULL);
if (retval) {
printk("Couldn't reallocate serial interrupt "
by <tymm@computer.org>
0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
0.5 14-Nov-98 Re-spin for 2.1.x kernels.
+ 0.51 27-Jun-99 Correct received packet length for CRC, from a
+ report by <worm@dkik.dk>
=========================================================================
*/
-static const char *version = "depca.c:v0.5 1998/11/14 davies@maniac.ultranet.com\n";
+static const char *version = "depca.c:v0.51 1999/6/27 davies@maniac.ultranet.com\n";
#include <linux/config.h>
#include <linux/module.h>
if (status & R_CRC) lp->stats.rx_crc_errors++;
if (status & R_BUFF) lp->stats.rx_fifo_errors++;
} else {
- short len, pkt_len = readw(&lp->rx_ring[entry].msg_length);
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
struct sk_buff *skb;
skb = dev_alloc_skb(pkt_len+2);
SCpnt->transfersize = rscsi_disks[dev].sector_size;
SCpnt->underflow = this_count << 9;
+ SCpnt->cmd_len = 0;
scsi_do_cmd (SCpnt, (void *) cmd, buff,
this_count * rscsi_disks[dev].sector_size,
rw_intr,
* (micz). From Kim.Berts@fisub.mail.abb.com
* 11.05.99 0.22 Implemented the IMIX call to mute recording monitor.
* Guenter Geiger <geiger@epy.co.at>
+ * 15.06.99 0.23 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
*
* some important things missing in Ensoniq documentation:
*
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
db->ready = db->mapped = 0;
- for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
- db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order);
+ for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+ if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order)))
+ break;
if (!db->rawbuf)
return -ENOMEM;
db->buforder = order;
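The "bad allocation bug" fixed here is an off-by-one: in the old loop the final order-- ran after a successful __get_free_pages(), so db->buforder recorded one order less than was actually allocated, and the buffer was later freed at the wrong order. A minimal stand-alone demonstration of that loop shape (illustrative values only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            int order, got = 0;

            /* old loop shape: order-- runs once more after success */
            for (order = 5; order >= 1 && !got; order--)
                    got = (order == 4);     /* pretend order 4 succeeds */
            printf("allocated at order 4, recorded order %d\n", order);
            return 0;       /* prints "recorded order 3" */
    }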
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "es1370: version v0.22 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "es1370: version v0.23 time " __TIME__ " " __DATE__ "\n");
while (index < NR_DEVICE &&
(pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1370, pcidev))) {
if (pcidev->base_address[0] == 0 ||
* reported by "Ivan N. Kokshaysky" <ink@jurassic.park.msu.ru>
* Note: joystick address handling might still be wrong on archs
* other than i386
+ * 15.06.99 0.12 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
*
*/
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
db->ready = db->mapped = 0;
- for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
- db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order);
+ for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+ if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL, order)))
+ break;
if (!db->rawbuf)
return -ENOMEM;
db->buforder = order;
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "es1371: version v0.11 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "es1371: version v0.12 time " __TIME__ " " __DATE__ "\n");
while (index < NR_DEVICE &&
(pcidev = pci_find_device(PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1371, pcidev))) {
if (pcidev->base_address[0] == 0 ||
* SOUND_PCM_READ_CHANNELS, SOUND_PCM_READ_BITS;
* Alpha fixes reported by Peter Jones <pjones@redhat.com>
* Note: dmaio hack might still be wrong on archs other than i386
+ * 15.06.99 0.15 Fix bad allocation bug.
+ * Thanks to Deti Fliegl <fliegl@in.tum.de>
*
*/
db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
if (!db->rawbuf) {
db->ready = db->mapped = 0;
- for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER && !db->rawbuf; order--)
- db->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order);
+ for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
+ if ((db->rawbuf = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order)))
+ break;
if (!db->rawbuf)
return -ENOMEM;
db->buforder = order;
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- printk(KERN_INFO "sv: version v0.14 time " __TIME__ " " __DATE__ "\n");
+ printk(KERN_INFO "sv: version v0.15 time " __TIME__ " " __DATE__ "\n");
#if 0
if (!(wavetable_mem = __get_free_pages(GFP_KERNEL, 20-PAGE_SHIFT)))
printk(KERN_INFO "sv: cannot allocate 1MB of contiguous nonpageable memory for wavetable data\n");
while ( alias != &inode->i_dentry ) {
alias_de = list_entry(alias, struct dentry, d_alias);
coda_flag_children(alias_de, flag);
- shrink_dcache_parent(alias_de);
alias = alias->next;
+ shrink_dcache_parent(alias_de);
}
}
extern int coda_debug;
extern int coda_print_entry;
+inline int coda_fideq(ViceFid *fid1, ViceFid *fid2)
+{
+ if (fid1->Vnode != fid2->Vnode)
+ return 0;
+ if (fid1->Volume != fid2->Volume)
+ return 0;
+ if (fid1->Unique != fid2->Unique)
+ return 0;
+ return 1;
+}
+
/* cnode.c */
static void coda_fill_inode(struct inode *inode, struct coda_vattr *attr)
{
}
cnp = ITOC(*inode);
- if ( cnp->c_magic != 0 ) {
- printk("coda_cnode make on initialized inode %ld, old %s new
+ /* see if we've got it already */
+ if ( cnp->c_magic != 0 && coda_fideq(fid, &cnp->c_fid)) {
+ return 0;
+ }
+
+ /* not fresh: collision */
+ if ( cnp->c_magic != 0 ) {
+ printk("coda_cnode_make on initialized inode %ld, old %s new
%s!\n",
(*inode)->i_ino, coda_f2s(&cnp->c_fid), coda_f2s2(fid));
iput(*inode);
return 0;
}
-inline int coda_fideq(ViceFid *fid1, ViceFid *fid2)
-{
- int eq;
- eq = ( (fid1->Vnode == fid2->Vnode) &&
- (fid1->Volume == fid2->Volume) &&
- (fid1->Unique == fid2->Unique) );
- return eq;
-}
void coda_replace_fid(struct inode *inode, struct ViceFid *oldfid,
struct ViceFid *newfid)
if (attr->va_ctime.tv_sec != -1)
inode->i_ctime = attr->va_ctime.tv_sec;
}
+
+
/*
* BSD sets attributes that need not be modified to -1.
* Linux uses the valid field to indicate what should be
shrink_dcache_parent(de);
+ /* propagate for a flush */
+ if (cii->c_flags & C_FLUSH)
+ coda_flag_inode_children(inode, C_FLUSH);
+
if (de->d_count > 1) {
/* pretend it's valid, but don't change the flags */
CDEBUG(D_DOWNCALL, "BOOM for: ino %ld, %s\n",
return 1;
}
- /* propagate for a flush */
- if (cii->c_flags & C_FLUSH)
- coda_flag_inode_children(inode, C_FLUSH);
-
/* clear the flags. */
cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
if ( sbi->sbi_sb ) {
printk("Already mounted\n");
+ unlock_super(sb);
+ EXIT;
+ MOD_DEC_USE_COUNT;
return NULL;
}
printk("coda_read_super: coda_get_rootfid failed with %d\n",
error);
sb->s_dev = 0;
- unlock_super(sb);
goto error;
}
printk("coda_read_super: rootfid is %s\n", coda_f2s(&fid));
if ( error || !root ) {
printk("Failure of coda_cnode_make for root: error %d\n", error);
sb->s_dev = 0;
- unlock_super(sb);
goto error;
}
return sb;
error:
+ unlock_super(sb);
EXIT;
MOD_DEC_USE_COUNT;
if (sbi) {
sb->s_dev = 0;
coda_cache_clear_all(sb);
sb_info = coda_sbp(sb);
-/* sb_info->sbi_vcomm->vc_inuse = 0; You can not do this: psdev_release would see usagecount == 0 and would refuse to decrease MOD_USE_COUNT --pavel */
coda_super_info.sbi_sb = NULL;
printk("Coda: Bye bye.\n");
memset(sb_info, 0, sizeof(* sb_info));
EXIT;
}
-static int coda_notify_change(struct dentry *de, struct iattr *iattr)
+static int coda_notify_change(struct dentry *de, struct iattr *iattr)
{
struct inode *inode = de->d_inode;
struct coda_inode_info *cii;
return error;
}
-/* we need _something_ for this routine. Let's mimic AFS */
static int coda_statfs(struct super_block *sb, struct statfs *buf,
int bufsiz)
{
struct statfs tmp;
+ int error;
+
+ memset(&tmp, 0, sizeof(struct statfs));
+ error = venus_statfs(sb, &tmp);
+
+ if (error) {
+ /* fake something like AFS does */
+ tmp.f_blocks = 9000000;
+ tmp.f_bfree = 9000000;
+ tmp.f_bavail = 9000000;
+ tmp.f_files = 9000000;
+ tmp.f_ffree = 9000000;
+ }
+
+ /* and fill in the rest */
tmp.f_type = CODA_SUPER_MAGIC;
tmp.f_bsize = 1024;
- tmp.f_blocks = 9000000;
- tmp.f_bfree = 9000000;
- tmp.f_bavail = 9000000 ;
- tmp.f_files = 9000000;
- tmp.f_ffree = 9000000;
- tmp.f_namelen = 0;
+ tmp.f_namelen = CODA_MAXNAMLEN;
+
copy_to_user(buf, &tmp, bufsiz);
+
return 0;
}
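A quick userspace way to see what coda_statfs now reports is a plain statfs(2) call on the mount point; a hypothetical sketch, assuming a Coda volume mounted at /coda:

    #include <stdio.h>
    #include <sys/vfs.h>

    int main(void)
    {
            struct statfs st;

            if (statfs("/coda", &st) < 0) {
                    perror("statfs");
                    return 1;
            }
            /* f_bsize and f_namelen come from the kernel; the block and
               file counts come from Venus, or the AFS-like fallback. */
            printf("bsize %ld blocks %ld bfree %ld namelen %ld\n",
                   (long)st.f_bsize, (long)st.f_blocks,
                   (long)st.f_bfree, (long)st.f_namelen);
            return 0;
    }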
struct coda_cache_inv_stats coda_cache_inv_stat;
struct coda_upcall_stats_entry coda_upcall_stat[CODA_NCALLS];
struct coda_upcallstats coda_callstats;
+int coda_upcall_timestamping = 0;
/* keep this in sync with coda.h! */
char *coda_upcall_names[] = {
"purgeuser ", /* 26 */
"zapfile ", /* 27 */
"zapdir ", /* 28 */
- "zapvnode ", /* 28 */
+ "noop2 ", /* 29 */
"purgefid ", /* 30 */
- "open_by_path" /* 31 */
+ "open_by_path", /* 31 */
+ "resolve ", /* 32 */
+ "reintegrate ", /* 33 */
+ "statfs " /* 34 */
};
void do_time_stats( struct coda_upcall_stats_entry * pentry,
unsigned long runtime )
{
-
- unsigned long time = runtime * 1000 /HZ; /* time in ms */
+ unsigned long time = runtime; /* time in us */
CDEBUG(D_SPECIAL, "time: %ld\n", time);
if ( pentry->count == 0 ) {
{
if ( write ) {
reset_coda_vfs_stats();
+
+ filp->f_pos += *lenp;
+ } else {
+ *lenp = 0;
}
-
- *lenp = 0;
+
return 0;
}
size_t * lenp )
{
if ( write ) {
+ if (*lenp > 0) {
+ char c;
+ if (get_user(c, (char *)buffer))
+ return -EFAULT;
+ coda_upcall_timestamping = (c == '1');
+ }
reset_coda_upcall_stats();
+
+ filp->f_pos += *lenp;
+ } else {
+ *lenp = 0;
}
-
- *lenp = 0;
+
return 0;
}
{
if ( write ) {
reset_coda_permission_stats();
+
+ filp->f_pos += *lenp;
+ } else {
+ *lenp = 0;
}
-
- *lenp = 0;
+
return 0;
}
{
if ( write ) {
reset_coda_cache_inv_stats();
+
+ filp->f_pos += *lenp;
+ } else {
+ *lenp = 0;
}
- *lenp = 0;
return 0;
}
if ( offset < 160)
len += sprintf( buffer + len,"%-79s\n", "======================");
if ( offset < 240)
- len += sprintf( buffer + len,"%-79s\n", "upcall\t\t count\tavg time(ms)\tstd deviation(ms)");
+ len += sprintf( buffer + len,"%-79s\n", "upcall count avg time(us) std deviation(us)");
if ( offset < 320)
- len += sprintf( buffer + len,"%-79s\n", "------\t\t -----\t------------\t-----------------");
+ len += sprintf( buffer + len,"%-79s\n", "------ ----- ------------ -----------------");
pos = 320;
for ( i = 0 ; i < CODA_NCALLS ; i++ ) {
- tmplen += sprintf(tmpbuf,"%s\t%9d\t%10ld\t%10ld",
+ tmplen += sprintf(tmpbuf,"%s %9d %10ld %10ld",
coda_upcall_names[i],
coda_upcall_stat[i].count,
get_time_average(&coda_upcall_stat[i]),
MOD_INC_USE_COUNT;
else
MOD_DEC_USE_COUNT;
+
}
#endif
return error;
}
+int venus_statfs(struct super_block *sb, struct statfs *sfs)
+{
+ union inputArgs *inp;
+ union outputArgs *outp;
+ int insize, outsize, error;
+
+ insize = max(INSIZE(statfs), OUTSIZE(statfs));
+ UPARG(CODA_STATFS);
+
+ error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ if (!error) {
+ sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
+ sfs->f_bfree = outp->coda_statfs.stat.f_bfree;
+ sfs->f_bavail = outp->coda_statfs.stat.f_bavail;
+ sfs->f_files = outp->coda_statfs.stat.f_files;
+ sfs->f_ffree = outp->coda_statfs.stat.f_ffree;
+ } else {
+ printk("coda_statfs: Venus returns: %d\n", error);
+ }
+
+ if (inp) CODA_FREE(inp, insize);
+ CDEBUG(D_INODE, " result %d\n",error);
+ EXIT;
+ return error;
+}
+
/*
* coda_upcall and coda_downcall routines.
*
static inline unsigned long coda_waitfor_upcall(struct upc_req *vmp)
{
struct wait_queue wait = { current, NULL };
- unsigned long posttime;
+ struct timeval begin = { 0, 0 }, end = { 0, 0 };
vmp->uc_posttime = jiffies;
- posttime = jiffies;
+
+ if (coda_upcall_timestamping)
+ do_gettimeofday(&begin);
add_wait_queue(&vmp->uc_sleep, &wait);
for (;;) {
remove_wait_queue(&vmp->uc_sleep, &wait);
current->state = TASK_RUNNING;
- CDEBUG(D_SPECIAL, "posttime: %ld, returned: %ld\n", posttime, jiffies-posttime);
- return (jiffies - posttime);
+ if (coda_upcall_timestamping && begin.tv_sec != 0) {
+ do_gettimeofday(&end);
+
+ if (end.tv_usec < begin.tv_usec) {
+ end.tv_usec += 1000000; end.tv_sec--;
+ }
+ end.tv_sec -= begin.tv_sec;
+ end.tv_usec -= begin.tv_usec;
+ }
+
+ CDEBUG(D_SPECIAL, "begin: %ld.%06ld, elapsed: %ld.%06ld\n",
+ begin.tv_sec, begin.tv_usec, end.tv_sec, end.tv_usec);
+ return ((end.tv_sec * 1000000) + end.tv_usec);
}
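The borrow in the timeval subtraction above can be checked in isolation; a stand-alone sketch with illustrative values, mirroring the kernel code:

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
            struct timeval begin = { 10, 900000 }, end = { 11, 100000 };

            /* borrow a second when the microsecond field underflows */
            if (end.tv_usec < begin.tv_usec) {
                    end.tv_usec += 1000000;
                    end.tv_sec--;
            }
            end.tv_sec -= begin.tv_sec;
            end.tv_usec -= begin.tv_usec;
            printf("%ld us\n", end.tv_sec * 1000000 + end.tv_usec);
            return 0;       /* prints "200000 us" */
    }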
* Revised list management to avoid races
* -- Bill Hawes, <whawes@star.net>, 9/98
*
+ * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ * As a consequence, the locking was moved from dquot_decr_...()
+ * and dquot_incr_...() to the calling functions.
+ * invalidate_dquots() now writes modified dquots.
+ * Serialized quota_off() and quota_on() for a mount point.
+ * Fixed a few bugs in grow_dquots(). Fixed a deadlock in write_dquot().
+ * reset_dquot_ptrs() now traverses inodes instead of filps.
+ * add_dquot_ref() restarts after blocking.
+ * Added a check for bogus uids and fixed the group check in quotactl.
+ * Jan Kara, <jack@atrey.karlin.mff.cuni.cz>, 4-6/99
+ *
* (C) Copyright 1994 - 1997 Marco van Wieringen
*/
int nr_dquots = 0, nr_free_dquots = 0;
int max_dquots = NR_DQUOTS;
+/* We need this list for invalidating dquots... */
+extern struct list_head inode_in_use;
+extern spinlock_t inode_lock;
+
static char quotamessage[MAX_QUOTA_MESSAGE];
static char *quotatypes[] = INITQFNAMES;
{
/* sanity check */
if (list_empty(&dquot->dq_free)) {
- printk("remove_free_dquot: dquot not on free list??\n");
+ printk("remove_free_dquot: dquot not on the free list??\n");
+ return; /* J.K. Just don't do anything */
}
list_del(&dquot->dq_free);
INIT_LIST_HEAD(&dquot->dq_free);
static void write_dquot(struct dquot *dquot)
{
short type = dquot->dq_type;
- struct file *filp = dquot->dq_mnt->mnt_dquot.files[type];
+ struct file *filp;
mm_segment_t fs;
loff_t offset;
ssize_t ret;
+ struct dqblk data;
+ struct semaphore *sem = &dquot->dq_mnt->mnt_dquot.dqio_sem;
- lock_dquot(dquot);
- down(&dquot->dq_mnt->mnt_dquot.semaphore);
+ /*
+ * We copy the data into a local buffer to keep it consistent while
+ * the dquot is not locked. We can't lock it here because that can
+ * cause deadlocks - think about growing the quota file...
+ */
+ memcpy(&data, &dquot->dq_dqb, sizeof(struct dqblk));
+ down(sem);
+ if (!dquot->dq_mnt) { /* Invalidated quota? */
+ up(sem);
+ return;
+ }
+ filp = dquot->dq_mnt->mnt_dquot.files[type];
offset = dqoff(dquot->dq_id);
fs = get_fs();
set_fs(KERNEL_DS);
dquot->dq_flags &= ~DQ_MOD;
ret = 0;
if (filp)
- ret = filp->f_op->write(filp, (char *)&dquot->dq_dqb,
+ ret = filp->f_op->write(filp, (char *)&data,
sizeof(struct dqblk), &offset);
if (ret != sizeof(struct dqblk))
printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
kdevname(dquot->dq_dev));
- up(&dquot->dq_mnt->mnt_dquot.semaphore);
+ /*
+ * Make sure that nobody invalidated the dquot in the meantime...
+ * Nasty, but I don't see another choice when we don't want to lock the dquot.
+ */
+ up(sem);
set_fs(fs);
- unlock_dquot(dquot);
dqstats.writes++;
}
static void read_dquot(struct dquot *dquot)
{
- short type;
+ short type = dquot->dq_type;
struct file *filp;
mm_segment_t fs;
loff_t offset;
- type = dquot->dq_type;
filp = dquot->dq_mnt->mnt_dquot.files[type];
-
if (filp == (struct file *)NULL)
return;
lock_dquot(dquot);
- down(&dquot->dq_mnt->mnt_dquot.semaphore);
+ if (!dquot->dq_mnt) /* Invalidated quota? */
+ goto out_lock;
+ /* Now we are sure filp is valid - the dquot isn't invalidated */
+ down(&dquot->dq_mnt->mnt_dquot.dqio_sem);
offset = dqoff(dquot->dq_id);
fs = get_fs();
set_fs(KERNEL_DS);
filp->f_op->read(filp, (char *)&dquot->dq_dqb, sizeof(struct dqblk), &offset);
- up(&dquot->dq_mnt->mnt_dquot.semaphore);
+ up(&dquot->dq_mnt->mnt_dquot.dqio_sem);
set_fs(fs);
if (dquot->dq_bhardlimit == 0 && dquot->dq_bsoftlimit == 0 &&
dquot->dq_ihardlimit == 0 && dquot->dq_isoftlimit == 0)
dquot->dq_flags |= DQ_FAKE;
- unlock_dquot(dquot);
dqstats.reads++;
+out_lock:
+ unlock_dquot(dquot);
}
/*
void invalidate_dquots(kdev_t dev, short type)
{
- struct dquot *dquot, *next = inuse_list;
+ struct dquot *dquot, *next;
int need_restart;
restart:
+ next = inuse_list; /* Reinitialize here - otherwise restarting wouldn't make any sense ;-) */
need_restart = 0;
while ((dquot = next) != NULL) {
next = dquot->dq_next;
continue;
if (dquot->dq_type != type)
continue;
+ if (!dquot->dq_mnt) /* Already invalidated entry? */
+ continue;
if (dquot->dq_flags & DQ_LOCKED) {
__wait_on_dquot(dquot);
continue;
if (dquot->dq_type != type)
continue;
+ if (!dquot->dq_mnt)
+ continue;
+ }
+ /*
+ * Because inodes need not be the only holders of a dquot, the
+ * quota need not have been written to disk yet. So we write it
+ * ourselves before discarding the data, just to be sure...
+ */
+ if (dquot->dq_flags & DQ_MOD && dquot->dq_mnt)
+ {
+ write_dquot(dquot);
+ need_restart = 1; /* We slept on IO */
}
clear_dquot(dquot);
}
int sync_dquots(kdev_t dev, short type)
{
- struct dquot *dquot, *next = inuse_list;
+ struct dquot *dquot, *next;
int need_restart;
restart:
+ next = inuse_list;
need_restart = 0;
while ((dquot = next) != NULL) {
next = dquot->dq_next;
continue;
if (type != -1 && dquot->dq_type != type)
continue;
+ if (!dquot->dq_mnt) /* Invalidated? */
+ continue;
if (!(dquot->dq_flags & (DQ_LOCKED | DQ_MOD)))
continue;
* checking and doesn't need to be written. It's just an empty
* dquot that is put back on to the freelist.
*/
- if (dquot->dq_mnt != (struct vfsmount *)NULL) {
+ if (dquot->dq_mnt)
dqstats.drops++;
we_slept:
- wait_on_dquot(dquot);
+ wait_on_dquot(dquot);
+ if (dquot->dq_mnt) {
if (dquot->dq_count > 1) {
dquot->dq_count--;
return;
/* sanity check */
if (!list_empty(&dquot->dq_free)) {
printk("dqput: dquot already on free list??\n");
+ dquot->dq_count--; /* J.K. Just decrementing the use count seems safer... */
+ return;
}
if (--dquot->dq_count == 0) {
+ /* Sanity check. A locked quota without an owner isn't a good idea... */
+ if (dquot->dq_flags & DQ_LOCKED) {
+ printk(KERN_ERR "VFS: Locked quota to be put on the free list.\n");
+ dquot->dq_flags &= ~DQ_LOCKED;
+ }
+ dquot->dq_flags &= ~DQ_MOD; /* The modified flag makes no sense on the free list */
/* Place at end of LRU free queue */
put_dquot_last(dquot);
wake_up(&dquot_wait);
return;
}
-static void grow_dquots(void)
+static int grow_dquots(void)
{
struct dquot *dquot;
- int cnt = 32;
+ int cnt = 0;
- while (cnt > 0) {
+ while (cnt < 32) {
dquot = kmem_cache_alloc(dquot_cachep, SLAB_KERNEL);
if(!dquot)
- return;
+ return cnt;
nr_dquots++;
memset((caddr_t)dquot, 0, sizeof(struct dquot));
+ init_waitqueue(&dquot->dq_wait);
/* all dquots go on the inuse_list */
put_inuse(dquot);
put_dquot_head(dquot);
- cnt--;
+ cnt++;
}
+ return cnt;
}
static struct dquot *find_best_candidate_weighted(void)
while ((tmp = tmp->next) != &free_dquots && --limit) {
dquot = list_entry(tmp, struct dquot, dq_free);
+ /* This should never happen... */
if (dquot->dq_flags & (DQ_LOCKED | DQ_MOD))
continue;
myscore = dquot->dq_referenced;
if (!dquot)
goto pressure;
got_it:
- if (dquot->dq_flags & (DQ_LOCKED | DQ_MOD)) {
- wait_on_dquot(dquot);
- if (dquot->dq_flags & DQ_MOD)
- {
- if(dquot->dq_mnt != (struct vfsmount *)NULL)
- write_dquot(dquot);
- }
- /*
- * The dquot may be back in use now, so we
- * must recheck the free list.
- */
- goto repeat;
- }
- /* sanity check ... */
+ /* Sanity checks */
+ if (dquot->dq_flags & DQ_LOCKED)
+ printk(KERN_ERR "VFS: Locked dquot on the free list\n");
if (dquot->dq_count != 0)
printk(KERN_ERR "VFS: free dquot count=%d\n", dquot->dq_count);
return dquot;
pressure:
- if (nr_dquots < max_dquots) {
- grow_dquots();
- goto repeat;
- }
+ if (nr_dquots < max_dquots)
+ if (grow_dquots())
+ goto repeat;
dquot = find_best_candidate_weighted();
if (dquot)
struct dquot *dquot, *empty = NULL;
struct vfsmount *vfsmnt;
- if ((vfsmnt = lookup_vfsmnt(dev)) == (struct vfsmount *)NULL || is_enabled(vfsmnt, type) == 0)
+ if ((vfsmnt = lookup_vfsmnt(dev)) == (struct vfsmount *)NULL || !is_enabled(vfsmnt, type))
return(NODQUOT);
we_slept:
while (dquot_updating[hashent])
sleep_on(&update_wait);
+ if (!dquot->dq_mnt) { /* Has somebody invalidated the entry under us? */
+ /*
+ * Behave as if the quota had been invalidated before we started
+ */
+ dqput(dquot);
+ return NODQUOT;
+ }
dquot->dq_referenced++;
dqstats.lookups++;
return dquot;
}
+static struct dquot *dqduplicate(struct dquot *dquot)
+{
+ if (dquot == NODQUOT || !dquot->dq_mnt)
+ return NODQUOT;
+ dquot->dq_count++;
+ wait_on_dquot(dquot);
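+ /* wait_on_dquot() may block; recheck that the entry wasn't invalidated meanwhile */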
+ if (!dquot->dq_mnt) {
+ dquot->dq_count--;
+ return NODQUOT;
+ }
+ return dquot;
+}
+
+static int dqinit_needed(struct inode *inode, short type)
+{
+ int cnt;
+
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+ return 0;
+ if (type != -1)
+ return inode->i_dquot[type] == NODQUOT;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt] == NODQUOT)
+ return 1;
+ return 0;
+}
+
static void add_dquot_ref(kdev_t dev, short type)
{
struct super_block *sb = get_super(dev);
if (!sb || !sb->dq_op)
return; /* nothing to do */
+restart:
for (filp = inuse_filps; filp; filp = filp->f_next) {
if (!filp->f_dentry)
continue;
inode = filp->f_dentry->d_inode;
if (!inode)
continue;
- /* N.B. race problem -- filp could become unused */
- if (filp->f_mode & FMODE_WRITE) {
+ /* Have we already initialized this inode? */
+ if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
sb->dq_op->initialize(inode, type);
inode->i_flags |= S_QUOTA;
+ /* as we may have blocked, we had better restart */
+ goto restart;
}
}
}
+static int reset_inode_dquot_ptrs(struct inode *inode, short type)
+{
+ struct dquot *dquot = inode->i_dquot[type];
+ int cnt;
+
+ inode->i_dquot[type] = NODQUOT;
+ /* any other quota in use? */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt] != NODQUOT)
+ goto put_it;
+ }
+ inode->i_flags &= ~S_QUOTA;
+put_it:
+ if (dquot != NODQUOT) {
+ spin_unlock(&inode_lock); /* We may block, so drop the lock... */
+ dqput(dquot);
+ spin_lock(&inode_lock); /* ... and grab it again */
+ /* we may have blocked ... */
+ return 1;
+ }
+ return 0;
+}
+
static void reset_dquot_ptrs(kdev_t dev, short type)
{
struct super_block *sb = get_super(dev);
- struct file *filp;
struct inode *inode;
- struct dquot *dquot;
- int cnt;
+ struct list_head *act_head;
+ int need_list = 3;
if (!sb || !sb->dq_op)
return; /* nothing to do */
-restart:
- /* free any quota for unused dentries */
- shrink_dcache_sb(sb);
+ /* We have to be protected against other CPUs */
+ spin_lock(&inode_lock);
- for (filp = inuse_filps; filp; filp = filp->f_next) {
- if (!filp->f_dentry)
- continue;
- if (filp->f_dentry->d_sb != sb)
- continue;
- inode = filp->f_dentry->d_inode;
- if (!inode)
- continue;
- /*
- * Note: we restart after each blocking operation,
- * as the inuse_filps list may have changed.
- */
- if (IS_QUOTAINIT(inode)) {
- dquot = inode->i_dquot[type];
- inode->i_dquot[type] = NODQUOT;
- /* any other quota in use? */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] != NODQUOT)
- goto put_it;
+ do {
+ if (need_list & 1) {
+ need_list &= ~1;
+ restart_in_use:
+ for (act_head = inode_in_use.next; act_head != &inode_in_use; act_head = act_head->next) {
+ inode = list_entry(act_head, struct inode, i_list);
+ if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
+ continue;
+ if (reset_inode_dquot_ptrs(inode, type)) {
+ need_list |= 2;
+ goto restart_in_use;
+ }
}
- inode->i_flags &= ~S_QUOTA;
- put_it:
- if (dquot != NODQUOT) {
- dqput(dquot);
- /* we may have blocked ... */
- goto restart;
+ }
+ if (need_list & 2) {
+ need_list &= ~2;
+ restart_dirty:
+ for (act_head = sb->s_dirty.next; act_head != &sb->s_dirty; act_head = act_head->next) {
+ inode = list_entry(act_head, struct inode, i_list);
+ if (IS_QUOTAINIT(inode) && reset_inode_dquot_ptrs(inode, type)) {
+ need_list |= 1;
+ goto restart_dirty;
+ }
}
}
}
+ while (need_list);
+
+ spin_unlock(&inode_lock);
}
static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
{
- lock_dquot(dquot);
dquot->dq_curinodes += number;
dquot->dq_flags |= DQ_MOD;
- unlock_dquot(dquot);
}
static inline void dquot_incr_blocks(struct dquot *dquot, unsigned long number)
{
- lock_dquot(dquot);
dquot->dq_curblocks += number;
dquot->dq_flags |= DQ_MOD;
- unlock_dquot(dquot);
}
static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
{
- lock_dquot(dquot);
if (dquot->dq_curinodes > number)
dquot->dq_curinodes -= number;
else
dquot->dq_itime = (time_t) 0;
dquot->dq_flags &= ~DQ_INODES;
dquot->dq_flags |= DQ_MOD;
- unlock_dquot(dquot);
}
static inline void dquot_decr_blocks(struct dquot *dquot, unsigned long number)
{
- lock_dquot(dquot);
if (dquot->dq_curblocks > number)
dquot->dq_curblocks -= number;
else
dquot->dq_btime = (time_t) 0;
dquot->dq_flags &= ~DQ_BLKS;
dquot->dq_flags |= DQ_MOD;
- unlock_dquot(dquot);
}
static inline char need_print_warning(short type, uid_t initiator, struct dquot *dquot)
return(initiator == 0 && dquot->dq_mnt->mnt_dquot.rsquash[dquot->dq_type] == 0);
}
-static int check_idq(struct dquot *dquot, short type, u_long short inodes, uid_t initiator,
+static int check_idq(struct dquot *dquot, short type, u_long inodes, uid_t initiator,
struct tty_struct *tty)
{
if (inodes <= 0 || dquot->dq_flags & DQ_FAKE)
if (dquot == NODQUOT)
goto out;
+ lock_dquot(dquot); /* We must protect against the quota being invalidated */
error = -EFAULT;
if (dqblk && !copy_to_user(dqblk, &dquot->dq_dqb, sizeof(struct dqblk)))
error = 0;
+ unlock_dquot(dquot);
dqput(dquot);
out:
return error;
break;
}
dquot = dqget(inode->i_dev, id, cnt);
+ if (dquot == NODQUOT)
+ continue;
if (inode->i_dquot[cnt] != NODQUOT) {
dqput(dquot);
continue;
int dquot_alloc_block(const struct inode *inode, unsigned long number, uid_t initiator,
char warn)
{
- unsigned short cnt;
+ int cnt;
struct tty_struct *tty = current->tty;
+ struct dquot *dquot[MAXQUOTAS];
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ dquot[cnt] = dqduplicate(inode->i_dquot[cnt]);
+ if (dquot[cnt] == NODQUOT)
continue;
- if (check_bdq(inode->i_dquot[cnt], cnt, number, initiator, tty, warn))
- return(NO_QUOTA);
+ lock_dquot(dquot[cnt]);
+ if (check_bdq(dquot[cnt], cnt, number, initiator, tty, warn))
+ goto put_all;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (dquot[cnt] == NODQUOT)
continue;
- dquot_incr_blocks(inode->i_dquot[cnt], number);
+ dquot_incr_blocks(dquot[cnt], number);
+ unlock_dquot(dquot[cnt]);
+ dqput(dquot[cnt]);
}
- return(QUOTA_OK);
+ return QUOTA_OK;
+put_all:
+ for (; cnt >= 0; cnt--) {
+ if (dquot[cnt] == NODQUOT)
+ continue;
+ unlock_dquot(dquot[cnt]);
+ dqput(dquot[cnt]);
+ }
+ return NO_QUOTA;
}
/*
*/
int dquot_alloc_inode(const struct inode *inode, unsigned long number, uid_t initiator)
{
- unsigned short cnt;
+ int cnt;
struct tty_struct *tty = current->tty;
+ struct dquot *dquot[MAXQUOTAS];
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ dquot[cnt] = dqduplicate(inode->i_dquot[cnt]);
+ if (dquot[cnt] == NODQUOT)
continue;
- if (check_idq(inode->i_dquot[cnt], cnt, number, initiator, tty))
- return(NO_QUOTA);
+ lock_dquot(dquot[cnt]);
+ if (check_idq(dquot[cnt], cnt, number, initiator, tty))
+ goto put_all;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ if (dquot[cnt] == NODQUOT)
continue;
- dquot_incr_inodes(inode->i_dquot[cnt], number);
+ dquot_incr_inodes(dquot[cnt], number);
+ unlock_dquot(dquot[cnt]);
+ dqput(dquot[cnt]);
}
- return(QUOTA_OK);
+ return QUOTA_OK;
+put_all:
+ for (; cnt >= 0; cnt--) {
+ if (dquot[cnt] == NODQUOT)
+ continue;
+ unlock_dquot(dquot[cnt]);
+ dqput(dquot[cnt]);
+ }
+ return NO_QUOTA;
}
/*
void dquot_free_block(const struct inode *inode, unsigned long number)
{
unsigned short cnt;
+ struct dquot *dquot;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ dquot = inode->i_dquot[cnt];
+ if (dquot == NODQUOT)
continue;
- dquot_decr_blocks(inode->i_dquot[cnt], number);
+ wait_on_dquot(dquot);
+ dquot_decr_blocks(dquot, number);
}
}
void dquot_free_inode(const struct inode *inode, unsigned long number)
{
unsigned short cnt;
+ struct dquot *dquot;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
+ dquot = inode->i_dquot[cnt];
+ if (dquot == NODQUOT)
continue;
- dquot_decr_inodes(inode->i_dquot[cnt], number);
+ wait_on_dquot(dquot);
+ dquot_decr_inodes(dquot, number);
}
}
*
* Note: this is a blocking operation.
*/
-int dquot_transfer(struct inode *inode, struct iattr *iattr, char direction, uid_t initiator)
+int dquot_transfer(struct dentry *dentry, struct iattr *iattr, uid_t initiator)
{
+ struct inode *inode = dentry->d_inode;
unsigned long blocks;
struct dquot *transfer_from[MAXQUOTAS];
struct dquot *transfer_to[MAXQUOTAS];
struct tty_struct *tty = current->tty;
short cnt, disc;
+ int error = -EDQUOT;
+ if (!inode)
+ return -ENOENT;
/*
* Find out if this filesystem uses i_blocks.
*/
case USRQUOTA:
if (inode->i_uid == iattr->ia_uid)
continue;
- transfer_from[cnt] = dqget(inode->i_dev, (direction) ? iattr->ia_uid : inode->i_uid, cnt);
- transfer_to[cnt] = dqget(inode->i_dev, (direction) ? inode->i_uid : iattr->ia_uid, cnt);
+ /* We can get transfer_from from inode, can't we? */
+ transfer_from[cnt] = dqget(inode->i_dev, inode->i_uid, cnt);
+ transfer_to[cnt] = dqget(inode->i_dev, iattr->ia_uid, cnt);
break;
case GRPQUOTA:
if (inode->i_gid == iattr->ia_gid)
continue;
- transfer_from[cnt] = dqget(inode->i_dev, (direction) ? iattr->ia_gid : inode->i_gid, cnt);
- transfer_to[cnt] = dqget(inode->i_dev, (direction) ? inode->i_gid : iattr->ia_gid, cnt);
+ transfer_from[cnt] = dqget(inode->i_dev, inode->i_gid, cnt);
+ transfer_to[cnt] = dqget(inode->i_dev, iattr->ia_gid, cnt);
break;
}
- if (check_idq(transfer_to[cnt], cnt, 1, initiator, tty) == NO_QUOTA ||
- check_bdq(transfer_to[cnt], cnt, blocks, initiator, tty, 0) == NO_QUOTA) {
- for (disc = 0; disc <= cnt; disc++) {
- dqput(transfer_from[disc]);
- dqput(transfer_to[disc]);
+ /* Something bad (e.g. quotaoff) happened while we were sleeping? */
+ if (transfer_from[cnt] == NODQUOT || transfer_to[cnt] == NODQUOT)
+ {
+ if (transfer_from[cnt] != NODQUOT) {
+ dqput(transfer_from[cnt]);
+ transfer_from[cnt] = NODQUOT;
+ }
+ if (transfer_to[cnt] != NODQUOT) {
+ dqput(transfer_to[cnt]);
+ transfer_to[cnt] = NODQUOT;
}
- return(NO_QUOTA);
+ continue;
+ }
+ /*
+ * We have to lock both quotas to prevent races; taking the locks
+ * in address order ensures two concurrent transfers can't deadlock.
+ */
+ if (transfer_from[cnt] < transfer_to[cnt])
+ {
+ lock_dquot(transfer_from[cnt]);
+ lock_dquot(transfer_to[cnt]);
+ }
+ else
+ {
+ lock_dquot(transfer_to[cnt]);
+ lock_dquot(transfer_from[cnt]);
+ }
+
+ /*
+ * The entries might have been invalidated while we were locking
+ * them. The second dqget() could block, and meanwhile the first
+ * structure might have been invalidated or locked...
+ */
+ if (!transfer_to[cnt]->dq_mnt || !transfer_from[cnt]->dq_mnt ||
+ check_idq(transfer_to[cnt], cnt, 1, initiator, tty) == NO_QUOTA ||
+ check_bdq(transfer_to[cnt], cnt, blocks, initiator, tty, 0) == NO_QUOTA) {
+ cnt++;
+ goto put_all;
}
}
+ if ((error = notify_change(dentry, iattr)))
+ goto put_all;
/*
* Finally perform the needed transfer from transfer_from to transfer_to,
* and release any pointers to dquots not needed anymore.
if (transfer_from[cnt] == NODQUOT && transfer_to[cnt] == NODQUOT)
continue;
- if (transfer_from[cnt] != NODQUOT) {
- dquot_decr_inodes(transfer_from[cnt], 1);
- dquot_decr_blocks(transfer_from[cnt], blocks);
- }
+ dquot_decr_inodes(transfer_from[cnt], 1);
+ dquot_decr_blocks(transfer_from[cnt], blocks);
- if (transfer_to[cnt] != NODQUOT) {
- dquot_incr_inodes(transfer_to[cnt], 1);
- dquot_incr_blocks(transfer_to[cnt], blocks);
- }
+ dquot_incr_inodes(transfer_to[cnt], 1);
+ dquot_incr_blocks(transfer_to[cnt], blocks);
+ unlock_dquot(transfer_from[cnt]);
+ dqput(transfer_from[cnt]);
if (inode->i_dquot[cnt] != NODQUOT) {
struct dquot *temp = inode->i_dquot[cnt];
inode->i_dquot[cnt] = transfer_to[cnt];
+ unlock_dquot(transfer_to[cnt]);
dqput(temp);
- dqput(transfer_from[cnt]);
} else {
- dqput(transfer_from[cnt]);
+ unlock_dquot(transfer_to[cnt]);
dqput(transfer_to[cnt]);
}
}
- return(QUOTA_OK);
+ return 0;
+put_all:
+ for (disc = 0; disc < cnt; disc++) {
+ /* Either both pointers should be set or neither, but... */
+ if (transfer_to[disc] != NODQUOT) {
+ unlock_dquot(transfer_to[disc]);
+ dqput(transfer_to[disc]);
+ }
+ if (transfer_from[disc] != NODQUOT) {
+ unlock_dquot(transfer_from[disc]);
+ dqput(transfer_from[disc]);
+ }
+ }
+ return error;
}
struct vfsmount *vfsmnt;
struct file *filp;
short cnt;
+ int enabled = 0;
+ /* We don't need to search for vfsmnt each time - umount has to wait for us */
+ vfsmnt = lookup_vfsmnt(dev);
+ if (!vfsmnt || !vfsmnt->mnt_sb)
+ goto out;
+
+ /* We need to serialize quota_off() for device */
+ down(&vfsmnt->mnt_dquot.dqoff_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
-
- vfsmnt = lookup_vfsmnt(dev);
- if (!vfsmnt)
- goto out;
- if (!vfsmnt->mnt_sb)
- goto out;
if (!is_enabled(vfsmnt, cnt))
continue;
reset_enable_flags(vfsmnt, cnt);
reset_dquot_ptrs(dev, cnt);
invalidate_dquots(dev, cnt);
+ /* Wait for any pending IO - remove me as soon as invalidate is more polite */
+ down(&vfsmnt->mnt_dquot.dqio_sem);
filp = vfsmnt->mnt_dquot.files[cnt];
vfsmnt->mnt_dquot.files[cnt] = (struct file *)NULL;
vfsmnt->mnt_dquot.inode_expire[cnt] = 0;
vfsmnt->mnt_dquot.block_expire[cnt] = 0;
+ up(&vfsmnt->mnt_dquot.dqio_sem);
fput(filp);
- }
+ }
/*
* Check whether any quota is still enabled,
* and if not clear the dq_op pointer.
*/
- vfsmnt = lookup_vfsmnt(dev);
- if (vfsmnt && vfsmnt->mnt_sb) {
- int enabled = 0;
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- enabled |= is_enabled(vfsmnt, cnt);
- if (!enabled)
- vfsmnt->mnt_sb->dq_op = NULL;
- }
-
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ enabled |= is_enabled(vfsmnt, cnt);
+ if (!enabled)
+ vfsmnt->mnt_sb->dq_op = NULL;
+ up(&vfsmnt->mnt_dquot.dqoff_sem);
out:
return(0);
}
if (is_enabled(vfsmnt, type))
return -EBUSY;
- mnt_dquot = &vfsmnt->mnt_dquot;
+ mnt_dquot = &vfsmnt->mnt_dquot;
+ down(&mnt_dquot->dqoff_sem);
tmp = getname(path);
error = PTR_ERR(tmp);
if (IS_ERR(tmp))
- return error;
+ goto out_lock;
f = filp_open(tmp, O_RDWR, 0600);
putname(tmp);
- if (IS_ERR(f))
- return PTR_ERR(f);
- /* sanity checks */
+ error = PTR_ERR(f);
+ if (IS_ERR(f))
+ goto out_lock;
error = -EIO;
if (!f->f_op->read && !f->f_op->write)
- goto cleanup;
+ goto out_f;
inode = f->f_dentry->d_inode;
error = -EACCES;
if (!S_ISREG(inode->i_mode))
- goto cleanup;
+ goto out_f;
error = -EINVAL;
if (inode->i_size == 0 || (inode->i_size % sizeof(struct dqblk)) != 0)
- goto cleanup;
+ goto out_f;
- /* OK, there we go */
set_enable_flags(vfsmnt, type);
mnt_dquot->files[type] = f;
dquot = dqget(dev, 0, type);
- mnt_dquot->inode_expire[type] = (dquot) ? dquot->dq_itime : MAX_IQ_TIME;
- mnt_dquot->block_expire[type] = (dquot) ? dquot->dq_btime : MAX_DQ_TIME;
+ mnt_dquot->inode_expire[type] = (dquot != NODQUOT) ? dquot->dq_itime : MAX_IQ_TIME;
+ mnt_dquot->block_expire[type] = (dquot != NODQUOT) ? dquot->dq_btime : MAX_DQ_TIME;
dqput(dquot);
vfsmnt->mnt_sb->dq_op = &dquot_operations;
add_dquot_ref(dev, type);
- return(0);
+ up(&mnt_dquot->dqoff_sem);
+ return 0;
-cleanup:
- fput(f);
- return error;
+out_f:
+ filp_close(f, NULL);
+out_lock:
+ up(&mnt_dquot->dqoff_sem);
+
+ return error;
}
/*
if ((u_int) type >= MAXQUOTAS)
goto out;
+ if (id & ~0xFFFF)
+ goto out;
+
ret = -EPERM;
switch (cmds) {
case Q_SYNC:
break;
case Q_GETQUOTA:
if (((type == USRQUOTA && current->euid != id) ||
- (type == GRPQUOTA && current->egid != id)) &&
+ (type == GRPQUOTA && !in_group_p(id))) &&
!capable(CAP_SYS_RESOURCE))
goto out;
break;
}
ret = -EINVAL;
- dev = 0;
+ dev = NODEV;
if (special != NULL || (cmds != Q_SYNC && cmds != Q_GETSTATS)) {
mode_t mode;
struct dentry * dentry;
* allowing for low-overhead inode sync() operations.
*/
-static LIST_HEAD(inode_in_use);
+LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];
while ((fl = *before) != NULL) {
if ((fl->fl_flags & FL_FLOCK) && fl->fl_file == filp) {
int (*lock)(struct file *, int, struct file_lock *);
- lock = filp->f_op->lock;
+ lock = NULL;
+ if (filp->f_op)
+ lock = filp->f_op->lock;
if (lock) {
file_lock = *fl;
file_lock.fl_type = F_UNLCK;
lptr->mnt_dev = sb->s_dev;
lptr->mnt_flags = sb->s_flags;
- sema_init(&lptr->mnt_dquot.semaphore, 1);
+ sema_init(&lptr->mnt_dquot.dqio_sem, 1);
+ sema_init(&lptr->mnt_dquot.dqoff_sem, 1);
lptr->mnt_dquot.flags = 0;
/* N.B. Is it really OK to have a vfsmount without names? */
int rtc_port;
int max_asn;
unsigned long max_dma_address;
- unsigned long mmu_context_mask;
unsigned long irq_probe_mask;
unsigned long iack_sc;
# endif
#endif
-#ifdef __SMP__
-#define WIDTH_THIS_PROCESSOR 5
/*
- * last_asn[processor]:
+ * cpu_last_asn(processor):
* 63 0
* +-------------+----------------+--------------+
* | asn version | this processor | hardware asn |
* +-------------+----------------+--------------+
*/
-extern unsigned long last_asn[];
-#define asn_cache last_asn[p->processor]
+#ifdef __SMP__
+#include <asm/smp.h>
+#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
#else
-#define WIDTH_THIS_PROCESSOR 0
-/*
- * asn_cache:
- * 63 0
- * +------------------------------+--------------+
- * | asn version | hardware asn |
- * +------------------------------+--------------+
- */
-extern unsigned long asn_cache;
+extern int last_asn;
+#define cpu_last_asn(cpuid) last_asn
#endif /* __SMP__ */
#define WIDTH_HARDWARE_ASN 8
+#define WIDTH_THIS_PROCESSOR 5
#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
extern void get_new_mmu_context(struct task_struct *p, struct mm_struct *mm);
-__EXTERN_INLINE void ev4_get_mmu_context(struct task_struct *p)
+static inline unsigned long
+__get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
- /* As described, ASN's are broken. */
+ long asn = cpu_last_asn(smp_processor_id());
+ long next = asn + 1;
+
+ if ((next ^ asn) & ~MAX_ASN) {
+ tbiap();
+ next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+ }
+ cpu_last_asn(smp_processor_id()) = next;
+ mm->context = next; /* full version + asn */
+ return next;
}
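To see the wraparound test concretely, a hypothetical trace (assuming MAX_ASN == 127; the actual limit comes from the per-machine EV4/EV5/EV6 max_asn constants):

    /*
     * asn  = 0x207f   (version 0x2000, hardware asn 127 == MAX_ASN)
     * next = 0x2080
     * (next ^ asn) & ~MAX_ASN == 0x80, nonzero: the hardware ASNs are
     * exhausted, so flush the TLB with tbiap() and start a new version:
     * next = (0x207f & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION = 0x4000
     */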
-__EXTERN_INLINE void ev5_get_mmu_context(struct task_struct *p)
+__EXTERN_INLINE void
+ev4_get_mmu_context(struct task_struct *p)
+{
+ /* As described, ASN's are broken. But we can optimize for
+ switching between threads -- if the mm is unchanged from
+ current we needn't flush. */
+ if (current->mm != p->mm)
+ tbiap();
+}
+
+__EXTERN_INLINE void
+ev5_get_mmu_context(struct task_struct *p)
{
- struct mm_struct * mm = p->mm;
-
- if (mm) {
- unsigned long asn = asn_cache;
- /* Check if our ASN is of an older version,
- or on a different CPU, and thus invalid. */
- if ((mm->context ^ asn) & ~HARDWARE_ASN_MASK)
- get_new_mmu_context(p, mm);
+ /* Check if our ASN is of an older version, or on a different CPU,
+ and thus invalid. */
+
+ long asn = cpu_last_asn(smp_processor_id());
+ struct mm_struct *mm = p->mm;
+ long mmc = mm->context;
+
+ if (((int)p->tss.asn ^ asn) & ~HARDWARE_ASN_MASK) {
+ if ((mmc ^ asn) & ~HARDWARE_ASN_MASK)
+ mmc = __get_new_mmu_context(p, mm);
+ p->tss.asn = mmc & HARDWARE_ASN_MASK;
}
}
# endif
#endif
-extern inline void init_new_context(struct mm_struct *mm)
+extern inline void
+init_new_context(struct mm_struct *mm)
{
mm->context = 0;
}
-extern inline void destroy_context(struct mm_struct *mm)
+extern inline void
+destroy_context(struct mm_struct *mm)
{
/* Nothing to do. */
}
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
/*
* Force a context reload. This is needed when we change the page
* table pointer or when we update the ASN of the current process.
*/
-#if defined(CONFIG_ALPHA_GENERIC)
-#define MASK_CONTEXT(tss) \
- ((struct thread_struct *)((unsigned long)(tss) & alpha_mv.mmu_context_mask))
-#elif defined(CONFIG_ALPHA_DP264)
-#define MASK_CONTEXT(tss) \
- ((struct thread_struct *)((unsigned long)(tss) & 0xfffffffffful))
-#else
-#define MASK_CONTEXT(tss) (tss)
+/* Don't get into trouble with dueling __EXTERN_INLINEs. */
+#ifndef __EXTERN_INLINE
+#include <asm/io.h>
#endif
-__EXTERN_INLINE struct thread_struct *
+extern inline unsigned long
__reload_tss(struct thread_struct *tss)
{
- register struct thread_struct *a0 __asm__("$16");
- register struct thread_struct *v0 __asm__("$0");
-
- a0 = MASK_CONTEXT(tss);
+ register unsigned long a0 __asm__("$16");
+ register unsigned long v0 __asm__("$0");
+ a0 = virt_to_phys(tss);
__asm__ __volatile__(
"call_pal %2 #__reload_tss"
: "=r"(v0), "=r"(a0)
return v0;
}
-__EXTERN_INLINE void
+extern inline void
reload_context(struct task_struct *task)
{
__reload_tss(&task->tss);
}
/*
- * After we have set current->mm to a new value, this activates the
- * context for the new mm so we see the new mappings.
+ * After setting current->mm to a new value, activate the context for the
+ * new mm so we see the new mappings.
*/
-__EXTERN_INLINE void
+extern inline void
activate_context(struct task_struct *task)
{
- get_mmu_context(task);
+ get_new_mmu_context(task, task->mm);
reload_context(task);
}
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
-
#endif /* __ALPHA_MMU_CONTEXT_H */
unsigned long ipi_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
+ int last_asn;
} __cacheline_aligned;
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
#endif
#ifdef __CYGWIN32__
-typedef unsigned char u_int8_t;
struct timespec {
time_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
#endif
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+typedef signed char int8_t;
+typedef unsigned char u_int8_t;
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+typedef int int32_t;
+typedef unsigned int u_int32_t;
+#endif
+
/*
* Cfs constants
struct venus_dirent {
unsigned long d_fileno; /* file number of entry */
unsigned short d_reclen; /* length of this record */
- char d_type; /* file type, see below */
- char d_namlen; /* length of string in d_name */
+ unsigned char d_type; /* file type, see below */
+ unsigned char d_namlen; /* length of string in d_name */
char d_name[CODA_MAXNAMLEN + 1];/* name must be no longer than this */
};
#undef DIRSIZ
#endif
-#ifndef __BIT_TYPES_DEFINED__
-#define u_int32_t unsigned int
-#endif
-
-
#ifndef _VUID_T_
#define _VUID_T_
typedef u_int32_t vuid_t;
#ifndef _CODACRED_T_
#define _CODACRED_T_
struct coda_cred {
- vuid_t cr_uid, cr_euid, cr_suid, cr_fsuid;
- vgid_t cr_groupid, cr_egid, cr_sgid, cr_fsgid;
-#if defined(CODA_SUPPORTS_SUPPLEMENTARY_GROUPS)
- int cr_nsupgps;
- vgid_t cr_supgps[NGROUPS];
-#endif /* defined(CODA_SUPPORTS_SUPPLEMENTARY_GROUPS) */
+ vuid_t cr_uid, cr_euid, cr_suid, cr_fsuid; /* Real, effective, set, fs uid */
+ vgid_t cr_groupid, cr_egid, cr_sgid, cr_fsgid; /* same for groups */
};
#endif
enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD };
struct coda_vattr {
- int va_type; /* vnode type (for create) */
+ long va_type; /* vnode type (for create) */
u_short va_mode; /* files access mode and type */
short va_nlink; /* number of references to file */
vuid_t va_uid; /* owner user id */
#endif
+/* structure used by CODA_STATFS for getting cache information from venus */
+struct coda_statfs {
+ int32_t f_blocks;
+ int32_t f_bfree;
+ int32_t f_bavail;
+ int32_t f_files;
+ int32_t f_ffree;
+};
+
/*
* Kernel <--> Venus communications.
*/
#define CODA_OPEN_BY_PATH 31
#define CODA_RESOLVE 32
#define CODA_REINTEGRATE 33
-#define CODA_NCALLS 34
+#define CODA_STATFS 34
+#define CODA_NCALLS 35
#define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID)
int path;
};
+/* coda_statfs: NO_IN */
+struct coda_statfs_in {
+ struct coda_in_hdr in;
+};
+
+struct coda_statfs_out {
+ struct coda_out_hdr oh;
+ struct coda_statfs stat;
+};
+
/*
* Occasionally, we don't cache the fid returned by CODA_LOOKUP.
* For instance, if the fid is inconsistent.
struct coda_inactive_in coda_inactive;
struct coda_vget_in coda_vget;
struct coda_rdwr_in coda_rdwr;
- struct coda_open_by_path_in coda_open_by_path;
+ struct coda_open_by_path_in coda_open_by_path;
+ struct coda_statfs_in coda_statfs;
};
union outputArgs {
struct coda_purgefid_out coda_purgefid;
struct coda_rdwr_out coda_rdwr;
struct coda_replace_out coda_replace;
- struct coda_open_by_path_out coda_open_by_path;
+ struct coda_open_by_path_out coda_open_by_path;
+ struct coda_statfs_out coda_statfs;
};
union coda_downcalls {
short out_size; /* Maximum size of output buffer, <= 2K */
};
-#if defined(__CYGWIN32__) || defined(DJGPP)
struct PioctlData {
- unsigned long cmd;
const char *path;
int follow;
struct ViceIoctl vi;
};
-#else
-struct PioctlData {
- const char *path;
- int follow;
- struct ViceIoctl vi;
-};
-#endif
#define CODA_CONTROL ".CONTROL"
#define CODA_CONTROLLEN 8
extern struct coda_vfs_stats coda_vfs_stat;
extern struct coda_permission_stats coda_permission_stat;
extern struct coda_cache_inv_stats coda_cache_inv_stat;
+extern int coda_upcall_timestamping;
/* reset statistics to 0 */
void reset_coda_vfs_stats( void );
unsigned int cmd, struct PioctlData *data);
int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
int venus_fsync(struct super_block *sb, struct ViceFid *fid);
+int venus_statfs(struct super_block *sb, struct statfs *sfs);
/* messages between coda filesystem in kernel and Venus */
int (*alloc_inode) (const struct inode *, unsigned long, uid_t);
void (*free_block) (const struct inode *, unsigned long);
void (*free_inode) (const struct inode *, unsigned long);
- int (*transfer) (struct inode *, struct iattr *, char, uid_t);
+ int (*transfer) (struct dentry *, struct iattr *, uid_t);
};
struct file_system_type {
struct quota_mount_options
{
unsigned int flags; /* Flags for diskquotas on this device */
- struct semaphore semaphore; /* lock device while I/O in progress */
+ struct semaphore dqio_sem; /* lock device while I/O in progress */
+ struct semaphore dqoff_sem; /* serialize quota_off() and quota_on() on device */
struct file *files[MAXQUOTAS]; /* fp's to quotafiles */
time_t inode_expire[MAXQUOTAS]; /* expiretime for inode-quota */
time_t block_expire[MAXQUOTAS]; /* expiretime for block-quota */
- char rsquash[MAXQUOTAS]; /* for quotas threath root as any other user */
+ char rsquash[MAXQUOTAS]; /* for quotas, treat root as any other user */
};
struct vfsmount
extern void dquot_free_block(const struct inode *inode, unsigned long number);
extern void dquot_free_inode(const struct inode *inode, unsigned long number);
-extern int dquot_transfer(struct inode *inode, struct iattr *iattr,
- char direction, uid_t initiator);
+extern int dquot_transfer(struct dentry *dentry, struct iattr *iattr,
+ uid_t initiator);
/*
* Operations supported for diskquotas.
if (dentry->d_inode->i_sb->dq_op) {
if (IS_QUOTAINIT(dentry->d_inode) == 0)
dentry->d_inode->i_sb->dq_op->initialize(dentry->d_inode, -1);
- if (dentry->d_inode->i_sb->dq_op->transfer(dentry->d_inode, iattr, 0, current->fsuid))
- goto out;
- error = notify_change(dentry, iattr);
- if (error)
- dentry->d_inode->i_sb->dq_op->transfer(dentry->d_inode, iattr, 1, current->fsuid);
+ error = dentry->d_inode->i_sb->dq_op->transfer(dentry, iattr, current->fsuid);
} else {
error = notify_change(dentry, iattr);
}
-out:
return error;
}