- me: fix GFB_BUFFER thinkos. Make buffer syncing more efficient.
Make sure we don't leave buffers on the LOCKED list forever
- David Miller: networking and sparc updates
IP-Aliasing:
============
+IP-aliases are additional IP-addresses/masks hooked up to a base
+interface by adding a colon and a string when running ifconfig.
+This string is usually numeric, but this is not a must.
+
+IP-Aliases are available if CONFIG_INET (`standard' IPv4 networking)
+is configured in the kernel.
-o For IP aliasing you must have IP_ALIAS support included by static
- linking.
o Alias creation.
- Alias creation is done by 'magic' iface naming: eg. to create a
+ Alias creation is done by 'magic' interface naming: eg. to create a
200.1.1.1 alias for eth0 ...
# ifconfig eth0:0 200.1.1.1 etc,etc....
~~ -> request alias #0 creation (if not yet exists) for eth0
- and routing stuff also ...
- # route add -host 200.1.1.1 dev eth0:0 (if same IP network as
- main device)
-
- # route add -net 200.1.1.0 dev eth0:0 (if completely new network wanted
- for eth0:0)
+
+ The corresponding route is also set up by this command.
+ Please note: The route always points to the base interface.
+
o Alias deletion.
- Also done by shutting the interface down:
+ The alias is removed by shutting the alias down:
# ifconfig eth0:0 down
~~~~~~~~~~ -> will delete alias
-Alias (re-)configuring
+o Alias (re-)configuring
- Aliases are not real devices, but programs` should be able to configure and
+ Aliases are not real devices, but programs should be able to configure and
refer to them as usual (ifconfig, route, etc).
-Relationship with main device
------------------------------
- - the main device is an alias itself like additional aliases and can
- be shut down without deleting other aliases.
+o Relationship with main device
+
+ If the base device is shut down the added aliases will be deleted
+ too.
+
Contact
-------
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 6
-EXTRAVERSION =-pre7
+EXTRAVERSION =-pre8
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
-static void __init mcheck_disable(char *str, int *unused)
+/* Boot-option handler for "nomce": disable machine-check exceptions.
+ * __setup() handlers must be int (*)(char *) and return 1 when the
+ * option has been consumed, so the old void/(char *, int *) signature
+ * has to change along with the added return.
+ */
+static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
+	return 1;
}
__setup("nomce", mcheck_disable);
CONFIG_USB_IBMCAM=m
CONFIG_USB_OV511=m
CONFIG_USB_PWC=m
+CONFIG_USB_SE401=m
CONFIG_USB_DSBR=m
CONFIG_USB_DABUSB=m
#
CONFIG_USB_PLUSB=m
CONFIG_USB_PEGASUS=m
+CONFIG_USB_CATC=m
CONFIG_USB_NET1080=m
#
# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_OMNINET=m
#
-/* $Id: pci_common.c,v 1.25 2001/06/14 16:57:41 davem Exp $
+/* $Id: pci_common.c,v 1.26 2001/06/28 01:32:18 davem Exp $
* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
line = ((pci_irq_line - 1) & 3);
}
- /* Now figure out the slot. */
+ /* Now figure out the slot.
+ *
+ * Basically, device number zero on the top-level bus is
+ * always the PCI host controller. Slot 0 is then device 1.
+ * PBM A supports two external slots (0 and 1), and PBM B
+ * supports 4 external slots (0, 1, 2, and 3). On-board PCI
+ * devices are wired to device numbers outside of these
+ * ranges. -DaveM
+ */
if (pdev->bus->number == pbm->pci_first_busno) {
- if (pbm == &pbm->parent->pbm_A)
- slot = (pdev->devfn >> 3) - 1;
- else
- slot = (pdev->devfn >> 3) - 2;
+ slot = (pdev->devfn >> 3) - 1;
} else {
- if (pbm == &pbm->parent->pbm_A)
- slot = (pdev->bus->self->devfn >> 3) - 1;
- else
- slot = (pdev->bus->self->devfn >> 3) - 2;
+ /* Underneath a bridge, use slot number of parent
+ * bridge.
+ */
+ slot = (pdev->bus->self->devfn >> 3) - 1;
}
slot = slot << 2;
struct buffer_head *bh;
do {
- bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
+ bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
if (bh)
break;
* so can we :-)
*/
do {
- bh->b_page = alloc_page(GFP_BUFFER);
+ bh->b_page = alloc_page(GFP_NOIO);
if (bh->b_page)
break;
lo->ioctl = NULL;
figure_loop_size(lo);
lo->old_gfp_mask = inode->i_mapping->gfp_mask;
- inode->i_mapping->gfp_mask = GFP_BUFFER;
+ inode->i_mapping->gfp_mask = GFP_NOIO;
bs = 0;
if (blksize_size[MAJOR(lo_device)])
do {
- sock->sk->allocation = GFP_BUFFER;
+ sock->sk->allocation = GFP_NOIO;
iov.iov_base = buf;
iov.iov_len = size;
msg.msg_name = NULL;
md_spin_unlock_irq(&conf->device_lock);
if (cnt == 0)
break;
- t = (struct buffer_head *)kmalloc(sizeof(struct buffer_head), GFP_BUFFER);
+ t = (struct buffer_head *)kmalloc(sizeof(struct buffer_head), GFP_NOIO);
if (t) {
memset(t, 0, sizeof(*t));
t->b_next = bh;
md_spin_unlock_irq(&conf->device_lock);
if (r1_bh)
return r1_bh;
- r1_bh = (struct raid1_bh *) kmalloc(sizeof(struct raid1_bh),
- GFP_BUFFER);
+ r1_bh = (struct raid1_bh *) kmalloc(sizeof(struct raid1_bh), GFP_NOIO);
if (r1_bh) {
memset(r1_bh, 0, sizeof(*r1_bh));
return r1_bh;
for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
{
- struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_BUFFER);
+ struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_NOFS); /* Why not GFP_KERNEL? */
if (!newskb)
return -ENOMEM;
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
-/* $Id: sunhme.c,v 1.119 2001/05/17 04:12:16 davem Exp $
+/* $Id: sunhme.c,v 1.120 2001/06/14 17:37:23 jgarzik Exp $
* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
* auto carrier detecting ethernet driver. Also known as the
* "Happy Meal Ethernet" found on SunSwift SBUS cards.
-/* $Id: sab82532.c,v 1.62 2001/06/10 06:48:47 davem Exp $
+/* $Id: sab82532.c,v 1.63 2001/06/29 21:23:44 davem Exp $
* sab82532.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
static int set_modem_info(struct sab82532 * info, unsigned int cmd,
unsigned int *value)
{
- int error;
unsigned int arg;
- error = get_user(arg, value);
- if (error)
- return error;
+ if (get_user(arg, value))
+ return -EFAULT;
switch (cmd) {
case TIOCMBIS:
if (arg & TIOCM_RTS) {
static int sab82532_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
- int error;
struct sab82532 * info = (struct sab82532 *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
struct serial_icounter_struct *p_cuser; /* user space */
case TIOCGSOFTCAR:
return put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg);
case TIOCSSOFTCAR:
- error = get_user(arg, (unsigned int *) arg);
- if (error)
- return error;
+ if (get_user(arg, (unsigned int *) arg))
+ return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
cnow = info->icount;
sti();
p_cuser = (struct serial_icounter_struct *) arg;
- error = put_user(cnow.cts, &p_cuser->cts);
- if (error) return error;
- error = put_user(cnow.dsr, &p_cuser->dsr);
- if (error) return error;
- error = put_user(cnow.rng, &p_cuser->rng);
- if (error) return error;
- error = put_user(cnow.dcd, &p_cuser->dcd);
- if (error) return error;
+ if (put_user(cnow.cts, &p_cuser->cts) ||
+ put_user(cnow.dsr, &p_cuser->dsr) ||
+ put_user(cnow.rng, &p_cuser->rng) ||
+ put_user(cnow.dcd, &p_cuser->dcd))
+ return -EFAULT;
return 0;
default:
static inline void __init show_serial_version(void)
{
- char *revision = "$Revision: 1.62 $";
+ char *revision = "$Revision: 1.63 $";
char *version, *p;
version = strchr(revision, ' ');
-/* $Id: su.c,v 1.50 2001/05/16 08:37:03 davem Exp $
+/* $Id: su.c,v 1.52 2001/06/29 21:54:32 davem Exp $
* su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
kdev_t device, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
+ static const char *badmagic = KERN_WARNING
"Warning: bad magic number for serial struct (%s) in %s\n";
- static const char *badinfo =
+ static const char *badinfo = KERN_WARNING
"Warning: null su_struct for (%s) in %s\n";
if (!info) {
static int
set_modem_info(struct su_struct * info, unsigned int cmd, unsigned int *value)
{
- int error;
unsigned int arg;
unsigned long flags;
- error = get_user(arg, value);
- if (error)
- return error;
+ if (get_user(arg, value))
+ return -EFAULT;
switch (cmd) {
case TIOCMBIS:
if (arg & TIOCM_RTS)
su_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
- int error;
struct su_struct * info = (struct su_struct *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
struct serial_icounter_struct *p_cuser; /* user space */
cnow = info->icount;
sti();
p_cuser = (struct serial_icounter_struct *) arg;
- error = put_user(cnow.cts, &p_cuser->cts);
- if (error) return error;
- error = put_user(cnow.dsr, &p_cuser->dsr);
- if (error) return error;
- error = put_user(cnow.rng, &p_cuser->rng);
- if (error) return error;
- error = put_user(cnow.dcd, &p_cuser->dcd);
- if (error) return error;
+ if (put_user(cnow.cts, &p_cuser->cts) ||
+ put_user(cnow.dsr, &p_cuser->dsr) ||
+ put_user(cnow.rng, &p_cuser->rng) ||
+ put_user(cnow.dcd, &p_cuser->dcd))
+ return -EFAULT;
return 0;
default:
*/
static __inline__ void __init show_su_version(void)
{
- char *revision = "$Revision: 1.50 $";
+ char *revision = "$Revision: 1.52 $";
char *version, *p;
version = strchr(revision, ' ');
-/* $Id: zs.c,v 1.65 2001/05/09 07:00:10 davem Exp $
+/* $Id: zs.c,v 1.66 2001/06/29 21:33:22 davem Exp $
* zs.c: Zilog serial port driver for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define REGCTRL 0xfd
};
struct zs_logent zslog[32];
-int zs_curlog = 0;
+int zs_curlog;
#define ZSLOG(__reg, __val, __write) \
do{ int index = zs_curlog; \
zslog[index].reg = (__reg); \
static void show_serial_version(void)
{
- char *revision = "$Revision: 1.65 $";
+ char *revision = "$Revision: 1.66 $";
char *version, *p;
version = strchr(revision, ' ');
int i, j, key;
if (!AFFS_INODE->i_lc) {
- char *ptr = (char *)get_zeroed_page(GFP_BUFFER);
+ char *ptr = (char *)get_zeroed_page(GFP_NOFS);
if (!ptr)
return -ENOMEM;
AFFS_INODE->i_lc = (u32 *)ptr;
atomic_dec(&bh->b_count);
}
-/* Call sync_buffers with wait!=0 to ensure that the call does not
- * return until all buffer writes have completed. Sync() may return
- * before the writes have finished; fsync() may not.
- */
-
-/* Godamity-damn. Some buffers (bitmaps for filesystems)
- * spontaneously dirty themselves without ever brelse being called.
- * We will ultimately want to put these in a separate list, but for
- * now we search all of the lists for dirty buffers.
- */
-static int sync_buffers(kdev_t dev, int wait)
+/* End-of-write handler.. Just mark it up-to-date and unlock the buffer. */
+static void end_buffer_write(struct buffer_head *bh, int uptodate)
{
- int i, retry, pass = 0, err = 0;
- struct buffer_head * bh, *next;
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+}
- /* One pass for no-wait, three for wait:
- * 0) write out all dirty, unlocked buffers;
- * 1) write out all dirty buffers, waiting if locked;
- * 2) wait for completion by waiting for all buffers to unlock.
- */
+/* The buffers have been marked clean and locked. Just submit the dang things.. */
+static void write_locked_buffers(struct buffer_head **array, unsigned int count)
+{
do {
- retry = 0;
-
- /* We search all lists as a failsafe mechanism, not because we expect
- * there to be dirty buffers on any of the other lists.
- */
-repeat:
- spin_lock(&lru_list_lock);
- bh = lru_list[BUF_DIRTY];
- if (!bh)
- goto repeat2;
-
- for (i = nr_buffers_type[BUF_DIRTY]*2 ; i-- > 0 ; bh = next) {
- next = bh->b_next_free;
+ struct buffer_head * bh = *array++;
+ bh->b_end_io = end_buffer_write;
+ submit_bh(WRITE, bh);
+ } while (--count);
+}
- if (!lru_list[BUF_DIRTY])
- break;
- if (dev && bh->b_dev != dev)
- continue;
- if (buffer_locked(bh)) {
- /* Buffer is locked; skip it unless wait is
- * requested AND pass > 0.
- */
- if (!wait || !pass) {
- retry = 1;
- continue;
- }
- atomic_inc(&bh->b_count);
- spin_unlock(&lru_list_lock);
- wait_on_buffer (bh);
- atomic_dec(&bh->b_count);
- goto repeat;
- }
+#define NRSYNC (32)
+static void write_unlocked_buffers(kdev_t dev)
+{
+ struct buffer_head *next;
+ struct buffer_head *array[NRSYNC];
+ unsigned int count;
+ int nr;
- /* If an unlocked buffer is not uptodate, there has
- * been an IO error. Skip it.
- */
- if (wait && buffer_req(bh) && !buffer_locked(bh) &&
- !buffer_dirty(bh) && !buffer_uptodate(bh)) {
- err = -EIO;
- continue;
- }
+repeat:
+ spin_lock(&lru_list_lock);
+ next = lru_list[BUF_DIRTY];
+ nr = nr_buffers_type[BUF_DIRTY] * 2;
+ count = 0;
+ while (next && --nr >= 0) {
+ struct buffer_head * bh = next;
+ next = bh->b_next_free;
- /* Don't write clean buffers. Don't write ANY buffers
- * on the third pass.
- */
- if (!buffer_dirty(bh) || pass >= 2)
+ if (dev && bh->b_dev != dev)
+ continue;
+ if (test_and_set_bit(BH_Lock, &bh->b_state))
+ continue;
+ if (atomic_set_buffer_clean(bh)) {
+ __refile_buffer(bh);
+ array[count++] = bh;
+ if (count < NRSYNC)
continue;
- atomic_inc(&bh->b_count);
spin_unlock(&lru_list_lock);
- ll_rw_block(WRITE, 1, &bh);
- atomic_dec(&bh->b_count);
- retry = 1;
+ write_locked_buffers(array, count);
goto repeat;
}
+ unlock_buffer(bh);
+ }
+ spin_unlock(&lru_list_lock);
- repeat2:
- bh = lru_list[BUF_LOCKED];
- if (!bh) {
- spin_unlock(&lru_list_lock);
- break;
- }
- for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
- next = bh->b_next_free;
+ if (count)
+ write_locked_buffers(array, count);
+}
- if (!lru_list[BUF_LOCKED])
- break;
- if (dev && bh->b_dev != dev)
- continue;
- if (buffer_locked(bh)) {
- /* Buffer is locked; skip it unless wait is
- * requested AND pass > 0.
- */
- if (!wait || !pass) {
- retry = 1;
- continue;
- }
- atomic_inc(&bh->b_count);
- spin_unlock(&lru_list_lock);
- wait_on_buffer (bh);
- spin_lock(&lru_list_lock);
- atomic_dec(&bh->b_count);
- goto repeat2;
- }
+static int wait_for_locked_buffers(kdev_t dev, int index, int refile)
+{
+ struct buffer_head * next;
+ int nr;
+
+repeat:
+ spin_lock(&lru_list_lock);
+ next = lru_list[index];
+ nr = nr_buffers_type[index] * 2;
+ while (next && --nr >= 0) {
+ struct buffer_head *bh = next;
+ next = bh->b_next_free;
+
+ if (!buffer_locked(bh)) {
+ if (refile)
+ __refile_buffer(bh);
+ continue;
}
- spin_unlock(&lru_list_lock);
+ if (dev && bh->b_dev != dev)
+ continue;
- /* If we are waiting for the sync to succeed, and if any dirty
- * blocks were written, then repeat; on the second pass, only
- * wait for buffers being written (do not pass to write any
- * more buffers on the second pass).
- */
- } while (wait && retry && ++pass<=2);
- return err;
+ atomic_inc(&bh->b_count);
+ spin_unlock(&lru_list_lock);
+ wait_on_buffer (bh);
+ atomic_dec(&bh->b_count);
+ goto repeat;
+ }
+ spin_unlock(&lru_list_lock);
+ return 0;
}
-void sync_dev(kdev_t dev)
+/* Call sync_buffers with wait!=0 to ensure that the call does not
+ * return until all buffer writes have completed. Sync() may return
+ * before the writes have finished; fsync() may not.
+ */
+
+/* Godamity-damn. Some buffers (bitmaps for filesystems)
+ * spontaneously dirty themselves without ever brelse being called.
+ * We will ultimately want to put these in a separate list, but for
+ * now we search all of the lists for dirty buffers.
+ */
+static int sync_buffers(kdev_t dev, int wait)
{
- sync_supers(dev);
- sync_inodes(dev);
- DQUOT_SYNC(dev);
- /* sync all the dirty buffers out to disk only _after_ all the
- high level layers finished generated buffer dirty data
- (or we'll return with some buffer still dirty on the blockdevice
- so breaking the semantics of this call) */
- sync_buffers(dev, 0);
- /*
- * FIXME(eric) we need to sync the physical devices here.
- * This is because some (scsi) controllers have huge amounts of
- * cache onboard (hundreds of Mb), and we need to instruct
- * them to commit all of the dirty memory to disk, and we should
- * not return until this has happened.
- *
- * This would need to get implemented by going through the assorted
- * layers so that each block major number can be synced, and this
- * would call down into the upper and mid-layer scsi.
+ int err = 0;
+
+	/* One pass for no-wait, four for wait:
+	 * 0) write out all dirty, unlocked buffers;
+	 * 1) wait for all dirty locked buffers;
+	 * 2) write out all dirty, unlocked buffers;
+	 * 3) wait for completion by waiting for all buffers to unlock.
*/
+ write_unlocked_buffers(dev);
+ if (wait) {
+ err = wait_for_locked_buffers(dev, BUF_DIRTY, 0);
+ write_unlocked_buffers(dev);
+ err |= wait_for_locked_buffers(dev, BUF_LOCKED, 1);
+ }
+ return err;
}
int fsync_super(struct super_block *sb)
return sync_buffers(dev, 1);
}
+/*
+ * There's no real reason to pretend we should
+ * ever do anything differently
+ */
+void sync_dev(kdev_t dev)
+{
+ fsync_dev(dev);
+}
+
asmlinkage long sys_sync(void)
{
fsync_dev(0);
{
balance_dirty(NODEV);
if (free_shortage())
- page_launder(GFP_BUFFER, 0);
+ page_launder(GFP_NOFS, 0);
if (!grow_buffers(size)) {
wakeup_bdflush(1);
current->policy |= SCHED_YIELD;
}
spin_unlock(&unused_list_lock);
- /* This is critical. We can't swap out pages to get
- * more buffer heads, because the swap-out may need
- * more buffer-heads itself. Thus SLAB_BUFFER.
+ /* This is critical. We can't call out to the FS
+ * to get more buffer heads, because the FS may need
+ * more buffer-heads itself. Thus SLAB_NOFS.
*/
- if((bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER)) != NULL) {
+ if((bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS)) != NULL) {
bh->b_blocknr = -1;
bh->b_this_page = NULL;
return bh;
return 0;
}
- page = alloc_page(GFP_BUFFER);
+ page = alloc_page(GFP_NOFS);
if (!page)
goto out;
LockPage(page);
* 1 - start IO for dirty buffers
* 2 - wait for completion of locked buffers
*/
-static void sync_page_buffers(struct buffer_head *bh, int wait)
+static void sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
{
struct buffer_head * tmp = bh;
struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
if (buffer_locked(p)) {
- if (wait > 1)
+ if (gfp_mask & __GFP_WAIT)
__wait_on_buffer(p);
} else if (buffer_dirty(p))
ll_rw_block(WRITE, 1, &p);
* obtain a reference to a buffer head within a page. So we must
* lock out all of these paths to cleanly toss the page.
*/
-int try_to_free_buffers(struct page * page, int wait)
+int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
{
struct buffer_head * tmp, * bh = page->buffers;
int index = BUFSIZE_INDEX(bh->b_size);
spin_unlock(&free_list[index].lock);
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
- if (wait) {
- sync_page_buffers(bh, wait);
+ if (gfp_mask & __GFP_IO) {
+ sync_page_buffers(bh, gfp_mask);
/* We waited synchronously, so we can free the buffers. */
- if (wait > 1 && !loop) {
+ if ((gfp_mask & __GFP_WAIT) && !loop) {
loop = 1;
goto cleaned_buffers_try_again;
}
static int mem_open(struct inode* inode, struct file* file)
{
- file->private_data = (void*)(current->self_exec_id);
+ file->private_data = (void*)((long)current->self_exec_id);
return 0;
}
atomic_inc(&mm->mm_users);
task_unlock(task);
- if (file->private_data != (void*)(current->self_exec_id) ) {
+ if (file->private_data != (void*)((long)current->self_exec_id) ) {
mmput(mm);
return -EIO;
}
return 0;
}
-static unsigned char proc_alloc_map[PROC_NDYNAMIC / 8];
+static unsigned long proc_alloc_map[PROC_NDYNAMIC / 8];
spinlock_t proc_alloc_map_lock = SPIN_LOCK_UNLOCKED;
{
int i;
spin_lock(&proc_alloc_map_lock);
- i = find_first_zero_bit((void *) proc_alloc_map, PROC_NDYNAMIC);
- if (i<0 || i>=PROC_NDYNAMIC) {
+ i = find_first_zero_bit(proc_alloc_map, PROC_NDYNAMIC);
+ if (i < 0 || i >= PROC_NDYNAMIC) {
i = -1;
goto out;
}
- set_bit(i, (void *) proc_alloc_map);
+ set_bit(i, proc_alloc_map);
i += PROC_DYNAMIC_FIRST;
out:
spin_unlock(&proc_alloc_map_lock);
de->next = NULL;
if (S_ISDIR(de->mode))
parent->nlink--;
- clear_bit(de->low_ino-PROC_DYNAMIC_FIRST,
- (void *) proc_alloc_map);
+ clear_bit(de->low_ino - PROC_DYNAMIC_FIRST,
+ proc_alloc_map);
proc_kill_inodes(de);
de->nlink = 0;
if (!atomic_read(&de->count))
if (d_reclen <= 32) {
local_buf = small_buf ;
} else {
- local_buf = kmalloc(d_reclen, GFP_BUFFER) ;
+ local_buf = kmalloc(d_reclen, GFP_NOFS) ;
if (!local_buf) {
pathrelse (&path_to_entry);
return -ENOMEM ;
dcache shrinking). So, release path and collected
resourses here */
free_buffers_in_tb (tb);
- buf = reiserfs_kmalloc(size, GFP_BUFFER, tb->tb_sb);
+ buf = reiserfs_kmalloc(size, GFP_NOFS, tb->tb_sb);
if ( !buf ) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning ("vs-8345: get_mem_for_virtual_node: "
struct reiserfs_bitmap_node *bn ;
static int id = 0 ;
- bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_BUFFER) ;
+ bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS) ;
if (!bn) {
return NULL ;
}
- bn->data = kmalloc(p_s_sb->s_blocksize, GFP_BUFFER) ;
+ bn->data = kmalloc(p_s_sb->s_blocksize, GFP_NOFS) ;
if (!bn->data) {
kfree(bn) ;
return NULL ;
}
trans_id = le32_to_cpu(desc->j_trans_id) ;
/* now we know we've got a good transaction, and it was inside the valid time ranges */
- log_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
- real_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
+ log_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS) ;
+ real_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS) ;
if (!log_blocks || !real_blocks) {
brelse(c_bh) ;
brelse(d_bh) ;
static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
struct reiserfs_journal_commit_task *ct ;
- /* using GFP_BUFFER, GFP_KERNEL could try to flush inodes, which will try
+ /* using GFP_NOFS, GFP_KERNEL could try to flush inodes, which will try
** to start/join a transaction, which will deadlock
*/
- ct = kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_BUFFER) ;
+ ct = kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_NOFS) ;
if (ct) {
setup_commit_task_arg(ct, p_s_sb, jindex) ;
queue_task(&(ct->task), &reiserfs_commit_thread_tq);
#endif
get_page(bh->b_page) ;
- new_pl = reiserfs_kmalloc(sizeof(struct reiserfs_page_list), GFP_BUFFER,
+ new_pl = reiserfs_kmalloc(sizeof(struct reiserfs_page_list), GFP_NOFS,
inode->i_sb) ;
if (!new_pl) {
put_page(bh->b_page) ;
/* get memory for composing the entry */
buflen = DEH_SIZE + ROUND_UP (namelen);
if (buflen > sizeof (small_buf)) {
- buffer = reiserfs_kmalloc (buflen, GFP_BUFFER, dir->i_sb);
+ buffer = reiserfs_kmalloc (buflen, GFP_NOFS, dir->i_sb);
if (buffer == 0)
return -ENOMEM;
} else
return -ENAMETOOLONG;
}
- name = kmalloc (item_len, GFP_BUFFER);
+ name = kmalloc (item_len, GFP_NOFS);
if (!name) {
iput(inode) ;
return -ENOMEM;
int i, bmp, dl ;
struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK(s);
- SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_BUFFER, s);
+ SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_NOFS, s);
if (SB_AP_BITMAP (s) == 0)
return 1;
memset (SB_AP_BITMAP (s), 0, sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr));
int bmp1 = (REISERFS_OLD_DISK_OFFSET_IN_BYTES / s->s_blocksize) + 1; /* first of bitmap blocks */
/* read true bitmap */
- SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_BUFFER, s);
+ SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_NOFS, s);
if (SB_AP_BITMAP (s) == 0)
return 1;
extern int fs_may_remount_ro(struct super_block *);
-extern int try_to_free_buffers(struct page *, int);
+extern int try_to_free_buffers(struct page *, unsigned int);
extern void refile_buffer(struct buffer_head * buf);
/* reiserfs_writepage needs this */
#define __GFP_HIGHMEM 0x02
/* Action modifiers - doesn't change the zoning */
-#define __GFP_WAIT 0x10
-#define __GFP_HIGH 0x20
-#define __GFP_IO 0x40
-#define __GFP_BUFFER 0x80
+#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
+#define __GFP_HIGH 0x20 /* Should access emergency pools? */
+#define __GFP_IO 0x40 /* Can start physical IO? */
+#define __GFP_FS 0x80 /* Can call down to low-level FS? */
-#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT | __GFP_BUFFER)
+#define GFP_NOIO (__GFP_HIGH | __GFP_WAIT)
+#define GFP_NOFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_USER ( __GFP_WAIT | __GFP_IO)
-#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
-#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
-#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
-#define GFP_KSWAPD ( __GFP_IO)
+#define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_KSWAPD ( __GFP_IO | __GFP_FS)
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
#include <linux/cache.h>
/* flags for kmem_cache_alloc() */
-#define SLAB_BUFFER GFP_BUFFER
+#define SLAB_NOFS GFP_NOFS
+#define SLAB_NOIO GFP_NOIO
#define SLAB_ATOMIC GFP_ATOMIC
#define SLAB_USER GFP_USER
#define SLAB_KERNEL GFP_KERNEL
#define SLAB_NFS GFP_NFS
#define SLAB_DMA GFP_DMA
-#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_BUFFER)
+#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS)
#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
/* flags to pass to kmem_cache_create().
{
int i;
- spin_lock(&__br_write_locks[idx].lock);
again:
+ spin_lock(&__br_write_locks[idx].lock);
for (i = 0; i < smp_num_cpus; i++)
- if (__brlock_array[cpu_logical_map(i)][idx] != 0)
+ if (__brlock_array[cpu_logical_map(i)][idx] != 0) {
+ spin_unlock(&__br_write_locks[idx].lock);
+ barrier();
goto again;
+ }
}
void __br_write_unlock (enum brlock_indices idx)
struct page *page;
repeat_alloc:
- page = alloc_page(GFP_BUFFER);
+ page = alloc_page(GFP_NOIO);
if (page)
return page;
/*
struct buffer_head *bh;
repeat_alloc:
- bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
+ bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
if (bh)
return bh;
/*
* in that case we bail out to prevent infinite loops and
* hanging device drivers ...
*
- * Another issue are GFP_BUFFER allocations; because they
- * do not have __GFP_IO set it's possible we cannot make
+ * Another issue are GFP_NOFS allocations; because they
+ * do not have __GFP_FS set it's possible we cannot make
* any progress freeing pages, in that case it's better
* to give up than to deadlock the kernel looping here.
*/
if (gfp_mask & __GFP_WAIT) {
if (!order || free_shortage()) {
int progress = try_to_free_pages(gfp_mask);
- if (progress || (gfp_mask & __GFP_IO))
+ if (progress || (gfp_mask & __GFP_FS))
goto try_again;
/*
* Fail in case no progress was made and the
* go out to Matthew Dillon.
*/
#define MAX_LAUNDER (4 * (1 << page_cluster))
+#define CAN_DO_FS (gfp_mask & __GFP_FS)
#define CAN_DO_IO (gfp_mask & __GFP_IO)
-#define CAN_DO_BUFFERS (gfp_mask & __GFP_BUFFER)
int page_launder(int gfp_mask, int sync)
{
int launder_loop, maxscan, cleaned_pages, maxlaunder;
goto page_active;
/* First time through? Move it to the back of the list */
- if (!launder_loop || !CAN_DO_IO) {
+ if (!launder_loop || !CAN_DO_FS) {
list_del(page_lru);
list_add(page_lru, &inactive_dirty_list);
UnlockPage(page);
* buffer pages
*/
if (page->buffers) {
- int wait, clearedbuf;
+ unsigned int buffer_mask;
+ int clearedbuf;
int freed_page = 0;
/*
* Since we might be doing disk IO, we have to
spin_unlock(&pagemap_lru_lock);
/* Will we do (asynchronous) IO? */
- wait = 0; /* No IO */
- if (launder_loop) {
- if (maxlaunder == 0 && sync)
- wait = 2; /* Synchrounous IO */
- else if (maxlaunder-- > 0)
- wait = 1; /* Async IO */
- }
+ if (launder_loop && maxlaunder == 0 && sync)
+ buffer_mask = gfp_mask; /* Do as much as we can */
+ else if (launder_loop && maxlaunder-- > 0)
+ buffer_mask = gfp_mask & ~__GFP_WAIT; /* Don't wait, async write-out */
+ else
+ buffer_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); /* Don't even start IO */
/* Try to free the page buffers. */
- clearedbuf = try_to_free_buffers(page, wait);
+ clearedbuf = try_to_free_buffers(page, buffer_mask);
/*
* Re-take the spinlock. Note that we cannot
* loads, flush out the dirty pages before we have to wait on
* IO.
*/
- if ((CAN_DO_IO || CAN_DO_BUFFERS) && !launder_loop && free_shortage()) {
+ if ((CAN_DO_IO || CAN_DO_FS) && !launder_loop && free_shortage()) {
launder_loop = 1;
/* If we cleaned pages, never do synchronous IO. */
if (cleaned_pages)
*/
static void __aarp_expire(struct aarp_entry *a)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&a->packet_queue)) != NULL)
- kfree_skb(skb);
-
+ skb_queue_purge(&a->packet_queue);
kfree(a);
}
}
static struct notifier_block aarp_notifier = {
- aarp_device_event,
- NULL,
- 0
+ notifier_call: aarp_device_event,
};
static char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
int len, ct;
len = sprintf(buffer,
- "%-10.10s ""%-10.10s""%-18.18s""%12.12s""%12.12s"" xmit_count status\n",
- "address","device","hw addr","last_sent", "expires");
+ "%-10.10s %-10.10s%-18.18s%12.12s%12.12s xmit_count status\n",
+ "address", "device", "hw addr", "last_sent", "expires");
spin_lock_bh(&aarp_lock);
/* Last fragment received ? */
if (ax25->fragno == 0) {
- if ((skbn = alloc_skb(AX25_MAX_HEADER_LEN + ax25->fraglen, GFP_ATOMIC)) == NULL) {
- while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL)
- kfree_skb(skbo);
+ skbn = alloc_skb(AX25_MAX_HEADER_LEN +
+ ax25->fraglen,
+ GFP_ATOMIC);
+ if (!skbn) {
+ skb_queue_purge(&ax25->frag_queue);
return 1;
}
} else {
/* First fragment received */
if (*skb->data & AX25_SEG_FIRST) {
- while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL)
- kfree_skb(skbo);
+ skb_queue_purge(&ax25->frag_queue);
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen = skb->len;
/*
* Sort out any digipeated paths.
*/
- if (dp.ndigi != 0 && ax25->digipeat == NULL && (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+ if (dp.ndigi && !ax25->digipeat &&
+ (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
ax25_destroy_socket(ax25);
return 0;
/*
* Receive an AX.25 frame via a SLIP interface.
*/
-int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
+int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype)
{
skb->sk = NULL; /* Initially we don't know who it's for */
skb->destructor = NULL; /* Who initializes this, dammit?! */
*/
void ax25_clear_queues(ax25_cb *ax25)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&ax25->write_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&ax25->write_queue);
+ skb_queue_purge(&ax25->ack_queue);
+ skb_queue_purge(&ax25->reseq_queue);
+ skb_queue_purge(&ax25->frag_queue);
}
/*
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.110 2001/04/20 20:46:19 davem Exp $
+ * Version: $Id: sock.c,v 1.111 2001/06/26 23:29:17 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
* Andi Kleen : Fix write_space callback
* Chris Evans : Security fixes - signedness again
+ * Arnaldo C. Melo : cleanups, use skb_queue_purge
*
* To Fix:
*
#endif
int val;
int valbool;
- int err;
struct linger ling;
int ret = 0;
if(optlen<sizeof(int))
return(-EINVAL);
- err = get_user(val, (int *)optval);
- if (err)
- return err;
+ if (get_user(val, (int *)optval))
+ return -EFAULT;
valbool = val?1:0;
void sklist_destroy_socket(struct sock **list,struct sock *sk)
{
- struct sk_buff *skb;
if(list)
sklist_remove_socket(list, sk);
- while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
- {
- kfree_skb(skb);
- }
+ skb_queue_purge(&sk->receive_queue);
if(atomic_read(&sk->wmem_alloc) == 0 &&
atomic_read(&sk->rmem_alloc) == 0 &&
unsigned long handle;
};
-static unsigned long aun_seq = 0;
+static unsigned long aun_seq;
/* Queue of packets waiting to be transmitted. */
static struct sk_buff_head aun_queue;
* Check legality
*/
- if (addr_len < sizeof(struct sockaddr_ec))
- return -EINVAL;
- if (sec->sec_family != AF_ECONET)
+ if (addr_len < sizeof(struct sockaddr_ec) ||
+ sec->sec_family != AF_ECONET)
return -EINVAL;
sk->protinfo.af_econet->cb = sec->cb;
static int econet_release(struct socket *sock)
{
- struct sk_buff *skb;
struct sock *sk = sock->sk;
if (!sk)
/* Purge queues */
- while ((skb=skb_dequeue(&sk->receive_queue))!=NULL)
- kfree_skb(skb);
+ skb_queue_purge(&sk->receive_queue);
if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
sk->timer.data=(unsigned long)sk;
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*
- * Version: $Id: inetpeer.c,v 1.3 2000/10/03 07:29:00 anton Exp $
+ * Version: $Id: inetpeer.c,v 1.6 2001/06/21 20:30:14 davem Exp $
*
* Authors: Andrey V. Savochkin <saw@msu.ru>
*/
* <kuznet@ms2.inr.ac.ru>. I don't have any opinion about the values
* myself. --SAW
*/
- if (si.totalram <= 32768*1024)
+ if (si.totalram <= (32768*1024)/PAGE_SIZE)
inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
- if (si.totalram <= 16384*1024)
+ if (si.totalram <= (16384*1024)/PAGE_SIZE)
inet_peer_threshold >>= 1; /* about 512KB */
- if (si.totalram <= 8192*1024)
+ if (si.totalram <= (8192*1024)/PAGE_SIZE)
inet_peer_threshold >>= 2; /* about 128KB */
peer_cachep = kmem_cache_create("inet_peer_cache",
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.59 2001/02/23 06:32:11 davem Exp $
+ * Version: $Id: ipmr.c,v 1.60 2001/06/29 21:33:22 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
-int mroute_do_assert = 0; /* Set in PIM assert */
-int mroute_do_pim = 0;
+int mroute_do_assert; /* Set in PIM assert */
+int mroute_do_pim;
static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
if (skb->ip_summed != CHECKSUM_HW) {
memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
- skb->csum = csum_block_add(skb->csum, next_skb->csum, skb->len);
+ skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
}
/* Update sequence range on original skb. */
pdata->retransmit.expires = jiffies + spx_calc_rtt(0);
add_timer(&pdata->retransmit);
- skb2 = skb_clone(skb, GFP_BUFFER);
+ skb2 = skb_clone(skb, GFP_NOFS); /* Why? Why not GFP_KERNEL? */
if(skb2 == NULL)
return -ENOBUFS;
skb_queue_tail(&pdata->retransmit_queue, skb2);
*/
void lapb_clear_queues(lapb_cb *lapb)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&lapb->write_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&lapb->write_queue);
+ skb_queue_purge(&lapb->ack_queue);
}
/*
static void __exit netlink_proto_exit(void)
{
- sock_unregister(PF_NETLINK);
- remove_proc_entry("net/netlink", NULL);
+ sock_unregister(PF_NETLINK);
+ remove_proc_entry("net/netlink", NULL);
}
module_init(netlink_proto_init);
*
* PACKET - implements raw packet sockets.
*
- * Version: $Id: af_packet.c,v 1.54 2001/03/03 01:20:11 davem Exp $
+ * Version: $Id: af_packet.c,v 1.55 2001/06/28 01:34:29 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
}
-static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int packet_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
struct sock *sk = sock->sk;
- int err;
- int pid;
switch(cmd)
{
return put_user(amount, (int *)arg);
}
case FIOSETOWN:
- case SIOCSPGRP:
- err = get_user(pid, (int *) arg);
- if (err)
- return err;
+ case SIOCSPGRP: {
+ int pid;
+ if (get_user(pid, (int *) arg))
+ return -EFAULT;
if (current->pid != pid && current->pgrp != -pid &&
!capable(CAP_NET_ADMIN))
return -EPERM;
sk->proc = pid;
- return(0);
+ break;
+ }
case FIOGETOWN:
case SIOCGPGRP:
return put_user(sk->proc, (int *)arg);
case SIOCGSTAMP:
if(sk->stamp.tv_sec==0)
return -ENOENT;
- err = -EFAULT;
- if (!copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
- err = 0;
- return err;
+ if (copy_to_user((void *)arg, &sk->stamp,
+ sizeof(struct timeval)))
+ return -EFAULT;
+ break;
case SIOCGIFFLAGS:
#ifndef CONFIG_INET
case SIOCSIFFLAGS:
case SIOCSIFDIVERT:
#ifdef CONFIG_NET_DIVERT
return(divert_ioctl(cmd, (struct divert_cf *) arg));
-#else
- return -ENOPKG;
#endif /* CONFIG_NET_DIVERT */
return -ENOPKG;
#endif
return -EOPNOTSUPP;
}
- /*NOTREACHED*/
- return(0);
+ return 0;
}
#ifndef CONFIG_PACKET_MMAP
{
struct rose_neigh *s;
unsigned long flags;
- struct sk_buff *skb;
rose_stop_ftimer(rose_neigh);
rose_stop_t0timer(rose_neigh);
- while ((skb = skb_dequeue(&rose_neigh->queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&rose_neigh->queue);
save_flags(flags); cli();
static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
struct rose_route *rose_route, *s;
- struct sk_buff *skb;
rose_neigh->restarted = 0;
rose_stop_t0timer(rose_neigh);
rose_start_ftimer(rose_neigh);
- while ((skb = skb_dequeue(&rose_neigh->queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&rose_neigh->queue);
rose_route = rose_route_list;
*/
void rose_clear_queues(struct sock *sk)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sk->write_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&sk->protinfo.rose->ack_queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->protinfo.rose->ack_queue);
}
/*
static void gred_reset(struct Qdisc* sch)
{
- struct sk_buff *skb;
int i;
-
struct gred_sched_data *q;
struct gred_sched *t= (struct gred_sched *)sch->data;
- while((skb=__skb_dequeue(&sch->q))!=NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
static void red_reset(struct Qdisc* sch)
{
struct red_sched_data *q = (struct red_sched_data *)sch->data;
- struct sk_buff *skb;
- while((skb=__skb_dequeue(&sch->q))!=NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
PSCHED_SET_PASTPERFECT(q->qidlestart);
q->qave = 0;
/*
* If you pass two values to the sock_alloc_send_skb
- * it tries to grab the large buffer with GFP_BUFFER
+ * it tries to grab the large buffer with GFP_NOFS
* (which can fail easily), and if it fails grab the
* fallback size buffer which is under a page and will
* succeed. [Alan]
* Here we are. Hitlist is filled. Die.
*/
- while ((skb=__skb_dequeue(&hitlist))!=NULL) {
- kfree_skb(skb);
- }
-
+ __skb_queue_purge(&hitlist);
up(&unix_gc_sem);
}
{
struct x25_neigh *s;
unsigned long flags;
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&x25_neigh->queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&x25_neigh->queue);
x25_stop_t20timer(x25_neigh);
* X.25 002 Jonathan Naylor Centralised disconnection processing.
* mar/20/00 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
+ * jun/24/01 Arnaldo C. Melo use skb_queue_purge, cleanups
*/
#include <linux/errno.h>
*/
void x25_clear_queues(struct sock *sk)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sk->write_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&sk->protinfo.x25->ack_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&sk->protinfo.x25->interrupt_in_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&sk->protinfo.x25->interrupt_out_queue)) != NULL)
- kfree_skb(skb);
-
- while ((skb = skb_dequeue(&sk->protinfo.x25->fragment_queue)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->protinfo.x25->ack_queue);
+ skb_queue_purge(&sk->protinfo.x25->interrupt_in_queue);
+ skb_queue_purge(&sk->protinfo.x25->interrupt_out_queue);
+ skb_queue_purge(&sk->protinfo.x25->fragment_queue);
}
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
- int modulus;
-
- modulus = (sk->protinfo.x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
+ int modulus = sk->protinfo.x25->neighbour->extended ? X25_EMODULUS :
+ X25_SMODULUS;
/*
* Remove all the ack-ed frames from the ack queue.
*/
- if (sk->protinfo.x25->va != nr) {
- while (skb_peek(&sk->protinfo.x25->ack_queue) != NULL && sk->protinfo.x25->va != nr) {
+ if (sk->protinfo.x25->va != nr)
+ while (skb_peek(&sk->protinfo.x25->ack_queue) != NULL &&
+ sk->protinfo.x25->va != nr) {
skb = skb_dequeue(&sk->protinfo.x25->ack_queue);
kfree_skb(skb);
- sk->protinfo.x25->va = (sk->protinfo.x25->va + 1) % modulus;
+ sk->protinfo.x25->va = (sk->protinfo.x25->va + 1) %
+ modulus;
}
- }
}
void x25_requeue_frames(struct sock *sk)
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
unsigned short vc = sk->protinfo.x25->va;
- int modulus;
-
- modulus = (sk->protinfo.x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
+ int modulus = sk->protinfo.x25->neighbour->extended ? X25_EMODULUS :
+ X25_SMODULUS;
while (vc != sk->protinfo.x25->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % modulus;
}
- if (nr == sk->protinfo.x25->vs) return 1;
-
- return 0;
+ return nr == sk->protinfo.x25->vs ? 1 : 0;
}
/*
*/
switch (frametype) {
case X25_CALL_REQUEST:
- len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+ len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
+ X25_MAX_CUD_LEN;
break;
case X25_CALL_ACCEPTED:
len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
case X25_RESET_CONFIRMATION:
break;
default:
- printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
+ printk(KERN_ERR "X.25: invalid frame type %02X\n",
+ frametype);
return;
}
/*
* Unpick the contents of the passed X.25 Packet Layer frame.
*/
-int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
+int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+ int *d, int *m)
{
- unsigned char *frame;
-
- frame = skb->data;
+ unsigned char *frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
}
}
- printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n", frame[0], frame[1], frame[2]);
+ printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n",
+ frame[0], frame[1], frame[2]);
return X25_ILLEGAL;
}
-void x25_disconnect(struct sock *sk, int reason, unsigned char cause, unsigned char diagnostic)
+void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
+ unsigned char diagnostic)
{
x25_clear_queues(sk);
x25_stop_timer(sk);