NETWORKING [IPv4/IPv6]
P: David S. Miller
-M: davem@dm.cobaltmicro.com
+M: davem@redhat.com
P: Andi Kleen
M: ak@muc.de
P: Alexey Kuznetsov
static int nr_buffers = 0;
static int nr_buffers_type[NR_LIST] = {0,};
-static int size_buffers_type[NR_LIST] = {0,};
static int nr_buffer_heads = 0;
static int nr_unused_buffer_heads = 0;
each time we call refill */
int nref_dirt; /* Dirty buffer threshold for activating bdflush
when trying to refill buffers. */
- int pct_dirt; /* Max %age of mem for dirty buffers before
- activating bdflush */
+ int dummy1; /* unused */
int age_buffer; /* Time for normal buffer to age before
we flush it */
int age_super; /* Time for superblock to age before we
return;
}
nr_buffers_type[bh->b_list]--;
- size_buffers_type[bh->b_list] -= bh->b_size;
remove_from_hash_queue(bh);
remove_from_lru_list(bh);
}
(*bhp)->b_prev_free = bh;
nr_buffers_type[bh->b_list]++;
- size_buffers_type[bh->b_list] += bh->b_size;
/* Put the buffer in new hash-queue if it has a device. */
bh->b_next = NULL;
file_buffer(buf, dispose);
if(dispose == BUF_DIRTY) {
int too_many = (nr_buffers * bdf_prm.b_un.nfract/100);
- int too_large = (num_physpages * bdf_prm.b_un.pct_dirt/100);
/* This buffer is dirty, maybe we need to start flushing.
* If too high a percentage of the buffers are dirty...
*/
- if (nr_buffers_type[BUF_DIRTY] > too_many ||
- size_buffers_type[BUF_DIRTY]/PAGE_SIZE > too_large) {
- if (nr_buffers_type[BUF_LOCKED] > 3 * bdf_prm.b_un.ndirty)
- wakeup_bdflush(1);
- else
- wakeup_bdflush(0);
- }
-
+ if (nr_buffers_type[BUF_DIRTY] > too_many)
+ wakeup_bdflush(1);
+
/* If this is a loop device, and
* more than half of the buffers are dirty...
* (Prevents no-free-buffers deadlock with loop device.)
return error;
}
-/*
- * To avoid retaining a stale inode reference, we check the dentry
- * use count prior to the operation, and return EBUSY if it has
- * multiple users.
- *
- * We update inode->i_nlink and free the inode prior to the operation
- * to avoid possible races if the server reuses the inode.
- *
- * FIXME! We don't do it anymore (2.1.131) - it interacts badly with
- * new rmdir(). -- AV
- */
static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error;
if (dentry->d_name.len > NFS_MAXNAMLEN)
goto out;
- error = -EBUSY;
- if (!list_empty(&dentry->d_hash))
- goto out;
-
#ifdef NFS_PARANOIA
if (dentry->d_inode->i_count > 1)
printk("nfs_rmdir: %s/%s inode busy?? i_count=%d, i_nlink=%d\n",
dentry->d_inode->i_count, dentry->d_inode->i_nlink);
#endif
- /*
- * Update i_nlink and free the inode before unlinking.
- */
- if (dentry->d_inode->i_nlink)
- dentry->d_inode->i_nlink --;
nfs_invalidate_dircache(dir);
error = nfs_proc_rmdir(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name);
+
+ /* Update i_nlink and invalidate dentry. */
+ if (!error) {
+ d_drop(dentry);
+ if (dentry->d_inode->i_nlink)
+ dentry->d_inode->i_nlink --;
+ }
+
out:
return error;
}
{ NFSERR_FBIG, EFBIG },
{ NFSERR_NOSPC, ENOSPC },
{ NFSERR_ROFS, EROFS },
+ { NFSERR_OPNOTSUPP, EOPNOTSUPP },
{ NFSERR_NAMETOOLONG, ENAMETOOLONG },
{ NFSERR_NOTEMPTY, ENOTEMPTY },
{ NFSERR_DQUOT, EDQUOT },
(test_and_clear_bit(PG_swap_cache, &(page)->flags))
/*
- * page->reserved denotes a page which must never be accessed (which
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for a page which must never be accessed (which
* may not even be present).
*
- * page->dma is set for those pages which lie in the range of
+ * PG_DMA is set for those pages which lie in the range of
* physical addresses capable of carrying DMA transfers.
*
* Multiple processes may "see" the same page. E.g. for untouched
* The following discussion applies only to them.
*
* A page may belong to an inode's memory mapping. In this case,
- * page->inode is the inode, and page->offset is the file offset
- * of the page (not necessarily a multiple of PAGE_SIZE).
+ * page->inode is the pointer to the inode, and page->offset is the
+ * file offset of the page (not necessarily a multiple of PAGE_SIZE).
*
* A page may have buffers allocated to it. In this case,
* page->buffers is a circular list of these buffer heads. Else,
* fields are also used for freelist management when page->count==0.)
* There is also a hash table mapping (inode,offset) to the page
* in memory if present. The lists for this hash table use the fields
- * page->next_hash and page->prev_hash.
+ * page->next_hash and page->pprev_hash.
*
* All process pages can do I/O:
* - inode pages may need to be read from disk,
* to be written to disk,
* - private pages which have been modified may need to be swapped out
* to swap space and (later) to be read back into memory.
- * During disk I/O, page->locked is true. This bit is set before I/O
+ * During disk I/O, PG_locked is used. This bit is set before I/O
* and reset when I/O completes. page->wait is a wait queue of all
* tasks waiting for the I/O on this page to complete.
- * page->uptodate tells whether the page's contents is valid.
+ * PG_uptodate tells whether the page's contents are valid.
* When a read completes, the page becomes uptodate, unless a disk I/O
* error happened.
- * When a write completes, and page->free_after is true, the page is
+ * When a write completes, and PG_free_after is set, the page is
* freed without any further delay.
*
* For choosing which pages to swap out, inode pages carry a
- * page->referenced bit, which is set any time the system accesses
+ * PG_referenced bit, which is set any time the system accesses
* that page through the (inode,offset) hash table.
+ *
+ * PG_skip is used on sparc/sparc64 architectures to "skip" certain
+ * parts of the address space.
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
*/
extern mem_map_t * mem_map;
#define GFP_DMA __GFP_DMA
-/*
- * Decide if we should try to do some swapout..
- */
-extern int free_memory_available(void);
-
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
NFSERR_FBIG = 27,
NFSERR_NOSPC = 28,
NFSERR_ROFS = 30,
+ NFSERR_OPNOTSUPP = 45,
NFSERR_NAMETOOLONG = 63,
NFSERR_NOTEMPTY = 66,
NFSERR_DQUOT = 69,
write_unlock_irqrestore(&waitqueue_lock, flags);
}
+#define __wait_event(wq, condition) \
+do { \
+ struct wait_queue __wait; \
+ \
+ __wait.task = current; \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ current->state = TASK_UNINTERRUPTIBLE; \
+ if (condition) \
+ break; \
+ schedule(); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event(wq, condition); \
+} while (0)
+
+#define __wait_event_interruptible(wq, condition, ret) \
+do { \
+ struct wait_queue __wait; \
+ \
+ __wait.task = current; \
+ add_wait_queue(&wq, &__wait); \
+ for (;;) { \
+ current->state = TASK_INTERRUPTIBLE; \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ schedule(); \
+ continue; \
+ } \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+#define wait_event_interruptible(wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __wait_event_interruptible(wq, condition, __ret); \
+ __ret; \
+})
+
#define REMOVE_LINKS(p) do { \
(p)->next_task->prev_task = (p)->prev_task; \
(p)->prev_task->next_task = (p)->next_task; \
error = verify_area(VERIFY_WRITE,buf,len);
if (error)
goto out;
- cli();
- error = -ERESTARTSYS;
- while (!log_size) {
- if (signal_pending(current)) {
- sti();
- goto out;
- }
- interruptible_sleep_on(&log_wait);
- }
+ error = wait_event_interruptible(log_wait, log_size);
+ if (error)
+ goto out;
i = 0;
while (log_size && i < len) {
c = *((char *) log_buf+log_start);