]> git.neil.brown.name Git - history.git/commitdiff
v2.4.5.7 -> v2.4.5.8
authorLinus Torvalds <torvalds@athlon.transmeta.com>
Tue, 5 Feb 2002 02:48:25 +0000 (18:48 -0800)
committerLinus Torvalds <torvalds@athlon.transmeta.com>
Tue, 5 Feb 2002 02:48:25 +0000 (18:48 -0800)
  - me: fix GFP_BUFFER thinkos. Make buffer syncing more efficient.
  Make sure we don't leave buffers on the LOCKED list forever
  - David Miller: networking and sparc updates

49 files changed:
Documentation/networking/alias.txt
Makefile
arch/i386/kernel/bluesmoke.c
arch/sparc64/defconfig
arch/sparc64/kernel/pci_common.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/md/raid1.c
drivers/net/defxx.c
drivers/net/sunhme.c
drivers/sbus/char/sab82532.c
drivers/sbus/char/su.c
drivers/sbus/char/zs.c
fs/affs/file.c
fs/buffer.c
fs/proc/base.c
fs/proc/generic.c
fs/reiserfs/dir.c
fs/reiserfs/fix_node.c
fs/reiserfs/journal.c
fs/reiserfs/namei.c
fs/reiserfs/super.c
include/linux/fs.h
include/linux/mm.h
include/linux/slab.h
lib/brlock.c
mm/highmem.c
mm/page_alloc.c
mm/vmscan.c
net/appletalk/aarp.c
net/ax25/ax25_in.c
net/ax25/ax25_subr.c
net/core/sock.c
net/econet/af_econet.c
net/ipv4/inetpeer.c
net/ipv4/ipmr.c
net/ipv4/tcp_output.c
net/ipx/af_spx.c
net/lapb/lapb_subr.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/rose/rose_route.c
net/rose/rose_subr.c
net/sched/sch_gred.c
net/sched/sch_red.c
net/unix/af_unix.c
net/unix/garbage.c
net/x25/x25_link.c
net/x25/x25_subr.c

index c6c0b0cf3a35fbfe3369551be125ef94e2fa34d8..19025b19815d95dd3f50fd9f277ac6b68243cb48 100644 (file)
@@ -2,40 +2,43 @@
 IP-Aliasing:
 ============
 
+IP-aliases are additional IP-adresses/masks hooked up to a base 
+interface by adding a colon and a string when running ifconfig. 
+This string is usually numeric, but this is not a must.
+
+IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking) 
+is configured in the kernel.
 
-o For IP aliasing you must have IP_ALIAS support included by static
-  linking.
 
 o Alias creation.
-  Alias creation is done by 'magic' iface naming: eg. to create a
+  Alias creation is done by 'magic' interface naming: eg. to create a
   200.1.1.1 alias for eth0 ...
   
     # ifconfig eth0:0 200.1.1.1  etc,etc....
                    ~~ -> request alias #0 creation (if not yet exists) for eth0
-    and routing stuff also ...
-    # route add -host 200.1.1.1 dev eth0:0  (if same IP network as
-                                           main device)
-   
-    # route add -net 200.1.1.0 dev eth0:0   (if completely new network wanted
-                                           for eth0:0)
+
+    The corresponding route is also set up by this command. 
+    Please note: The route always points to the base interface.
+       
 
 o Alias deletion.
-  Also done by shutting the interface down:
+  The alias is removed by shutting the alias down:
 
     # ifconfig eth0:0 down
                  ~~~~~~~~~~ -> will delete alias
 
                                   
-Alias (re-)configuring
+Alias (re-)configuring
 
-  Aliases are not real devices, but programs` should be able to configure and
+  Aliases are not real devices, but programs should be able to configure and
   refer to them as usual (ifconfig, route, etc).
 
-Relationship with main device
------------------------------
 
-  - the main device is an alias itself like additional aliases and can
-    be shut down without deleting other aliases.
+o Relationship with main device
+
+  If the base device is shut down the added aliases will be deleted 
+  too.
+
 
 Contact
 -------
index fd6487a1c34b66c56479d8bf7842364eea2f6705..2686eefa7eb51986c9d4830b09dec8f678d362ae 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 6
-EXTRAVERSION =-pre7
+EXTRAVERSION =-pre8
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
index 48dd60f4cb4a5df788fa2acfac61ed0d935745ea..022b4e5cdbbfbee4d59b7aca60caf30d5de77745 100644 (file)
@@ -236,5 +236,6 @@ void __init mcheck_init(struct cpuinfo_x86 *c)
 static void __init mcheck_disable(char *str, int *unused)
 {
        mce_disabled = 1;
+       return 1;
 }
 __setup("nomce", mcheck_disable);
index a8ae55a1232bbcdea1c7161f754d2c883f1a6e1a..acdb9243a93fd4985aa91f79c968bda3fbaf15c4 100644 (file)
@@ -696,6 +696,7 @@ CONFIG_USB_MICROTEK=m
 CONFIG_USB_IBMCAM=m
 CONFIG_USB_OV511=m
 CONFIG_USB_PWC=m
+CONFIG_USB_SE401=m
 CONFIG_USB_DSBR=m
 CONFIG_USB_DABUSB=m
 
@@ -704,6 +705,7 @@ CONFIG_USB_DABUSB=m
 #
 CONFIG_USB_PLUSB=m
 CONFIG_USB_PEGASUS=m
+CONFIG_USB_CATC=m
 CONFIG_USB_NET1080=m
 
 #
@@ -732,6 +734,8 @@ CONFIG_USB_SERIAL_KEYSPAN=m
 # CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
 # CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
 CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_OMNINET=m
 
 #
index 5924998af9c471187b2ec9d21c7d04de4e92b17d..02dfa9b7904b464872c8f3ebe72679e6a2a51724 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: pci_common.c,v 1.25 2001/06/14 16:57:41 davem Exp $
+/* $Id: pci_common.c,v 1.26 2001/06/28 01:32:18 davem Exp $
  * pci_common.c: PCI controller common support.
  *
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -713,17 +713,22 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev)
                        line = ((pci_irq_line - 1) & 3);
                }
 
-               /* Now figure out the slot. */
+               /* Now figure out the slot.
+                *
+                * Basically, device number zero on the top-level bus is
+                * always the PCI host controller.  Slot 0 is then device 1.
+                * PBM A supports two external slots (0 and 1), and PBM B
+                * supports 4 external slots (0, 1, 2, and 3).  On-board PCI
+                * devices are wired to device numbers outside of these
+                * ranges. -DaveM
+                */
                if (pdev->bus->number == pbm->pci_first_busno) {
-                       if (pbm == &pbm->parent->pbm_A)
-                               slot = (pdev->devfn >> 3) - 1;
-                       else
-                               slot = (pdev->devfn >> 3) - 2;
+                       slot = (pdev->devfn >> 3) - 1;
                } else {
-                       if (pbm == &pbm->parent->pbm_A)
-                               slot = (pdev->bus->self->devfn >> 3) - 1;
-                       else
-                               slot = (pdev->bus->self->devfn >> 3) - 2;
+                       /* Underneath a bridge, use slot number of parent
+                        * bridge.
+                        */
+                       slot = (pdev->bus->self->devfn >> 3) - 1;
                }
                slot = slot << 2;
 
index 6169ae76c7ec01b0d12b7e341fc241b7f0524e49..26d06cd8177a246a4ba625bd8f0adacff9365050 100644 (file)
@@ -386,7 +386,7 @@ static struct buffer_head *loop_get_buffer(struct loop_device *lo,
        struct buffer_head *bh;
 
        do {
-               bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
+               bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
                if (bh)
                        break;
 
@@ -408,7 +408,7 @@ static struct buffer_head *loop_get_buffer(struct loop_device *lo,
         * so can we :-)
         */
        do {
-               bh->b_page = alloc_page(GFP_BUFFER);
+               bh->b_page = alloc_page(GFP_NOIO);
                if (bh->b_page)
                        break;
 
@@ -648,7 +648,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file, kdev_t dev,
        lo->ioctl = NULL;
        figure_loop_size(lo);
        lo->old_gfp_mask = inode->i_mapping->gfp_mask;
-       inode->i_mapping->gfp_mask = GFP_BUFFER;
+       inode->i_mapping->gfp_mask = GFP_NOIO;
 
        bs = 0;
        if (blksize_size[MAJOR(lo_device)])
index 940a2c19c0c5bf57cae9e6a950b2011a2830227a..f37c95d299c79fdaa0de7320c9aa500e2c0cc19c 100644 (file)
@@ -109,7 +109,7 @@ static int nbd_xmit(int send, struct socket *sock, char *buf, int size, int msg_
 
 
        do {
-               sock->sk->allocation = GFP_BUFFER;
+               sock->sk->allocation = GFP_NOIO;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
index e8005f009436d4ef32d3991e0e6bc2d7a33690ad..8742149139176f925ec305816a0db12490572dbd 100644 (file)
@@ -75,7 +75,7 @@ static struct buffer_head *raid1_alloc_bh(raid1_conf_t *conf, int cnt)
                md_spin_unlock_irq(&conf->device_lock);
                if (cnt == 0)
                        break;
-               t = (struct buffer_head *)kmalloc(sizeof(struct buffer_head), GFP_BUFFER);
+               t = (struct buffer_head *)kmalloc(sizeof(struct buffer_head), GFP_NOIO);
                if (t) {
                        memset(t, 0, sizeof(*t));
                        t->b_next = bh;
@@ -165,8 +165,7 @@ static struct raid1_bh *raid1_alloc_r1bh(raid1_conf_t *conf)
                md_spin_unlock_irq(&conf->device_lock);
                if (r1_bh)
                        return r1_bh;
-               r1_bh = (struct raid1_bh *) kmalloc(sizeof(struct raid1_bh),
-                                       GFP_BUFFER);
+               r1_bh = (struct raid1_bh *) kmalloc(sizeof(struct raid1_bh), GFP_NOIO);
                if (r1_bh) {
                        memset(r1_bh, 0, sizeof(*r1_bh));
                        return r1_bh;
index 06a1cff39fd3ce4f572da5bb09dfd321600a4800..15398e5b0745aef318674f27265a7349a3121d66 100644 (file)
@@ -2732,7 +2732,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
        for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
                for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
                {
-                       struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_BUFFER);
+                       struct sk_buff *newskb = __dev_alloc_skb(NEW_SKB_SIZE, GFP_NOFS);       /* Why not GFP_KERNEL? */
                        if (!newskb)
                                return -ENOMEM;
                        bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
index 70214f0772121e27f0b5b6a6d026ef8e8cc45fe5..54e8dfb746f5171f7468bdcae1ee2dd65984a913 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: sunhme.c,v 1.119 2001/05/17 04:12:16 davem Exp $
+/* $Id: sunhme.c,v 1.120 2001/06/14 17:37:23 jgarzik Exp $
  * sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
  *           auto carrier detecting ethernet driver.  Also known as the
  *           "Happy Meal Ethernet" found on SunSwift SBUS cards.
index 995ddc51b9caa969bd69fda857f0e34772078f0c..4b6668bed13d9aaa74bc7a56bb7ab11f36b66e7a 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: sab82532.c,v 1.62 2001/06/10 06:48:47 davem Exp $
+/* $Id: sab82532.c,v 1.63 2001/06/29 21:23:44 davem Exp $
  * sab82532.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
  *
  * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
@@ -1368,12 +1368,10 @@ static int get_modem_info(struct sab82532 * info, unsigned int *value)
 static int set_modem_info(struct sab82532 * info, unsigned int cmd,
                          unsigned int *value)
 {
-       int error;
        unsigned int arg;
 
-       error = get_user(arg, value);
-       if (error)
-               return error;
+       if (get_user(arg, value))
+               return -EFAULT;
        switch (cmd) {
        case TIOCMBIS: 
                if (arg & TIOCM_RTS) {
@@ -1442,7 +1440,6 @@ static void sab82532_break(struct tty_struct *tty, int break_state)
 static int sab82532_ioctl(struct tty_struct *tty, struct file * file,
                    unsigned int cmd, unsigned long arg)
 {
-       int error;
        struct sab82532 * info = (struct sab82532 *)tty->driver_data;
        struct async_icount cprev, cnow;        /* kernel counter temps */
        struct serial_icounter_struct *p_cuser; /* user space */
@@ -1462,9 +1459,8 @@ static int sab82532_ioctl(struct tty_struct *tty, struct file * file,
                case TIOCGSOFTCAR:
                        return put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg);
                case TIOCSSOFTCAR:
-                       error = get_user(arg, (unsigned int *) arg);
-                       if (error)
-                               return error;
+                       if (get_user(arg, (unsigned int *) arg))
+                               return -EFAULT;
                        tty->termios->c_cflag =
                                ((tty->termios->c_cflag & ~CLOCAL) |
                                 (arg ? CLOCAL : 0));
@@ -1534,14 +1530,11 @@ static int sab82532_ioctl(struct tty_struct *tty, struct file * file,
                        cnow = info->icount;
                        sti();
                        p_cuser = (struct serial_icounter_struct *) arg;
-                       error = put_user(cnow.cts, &p_cuser->cts);
-                       if (error) return error;
-                       error = put_user(cnow.dsr, &p_cuser->dsr);
-                       if (error) return error;
-                       error = put_user(cnow.rng, &p_cuser->rng);
-                       if (error) return error;
-                       error = put_user(cnow.dcd, &p_cuser->dcd);
-                       if (error) return error;
+                       if (put_user(cnow.cts, &p_cuser->cts) ||
+                           put_user(cnow.dsr, &p_cuser->dsr) ||
+                           put_user(cnow.rng, &p_cuser->rng) ||
+                           put_user(cnow.dcd, &p_cuser->dcd))
+                               return -EFAULT;
                        return 0;
 
                default:
@@ -2212,7 +2205,7 @@ static void __init sab82532_kgdb_hook(int line)
 
 static inline void __init show_serial_version(void)
 {
-       char *revision = "$Revision: 1.62 $";
+       char *revision = "$Revision: 1.63 $";
        char *version, *p;
 
        version = strchr(revision, ' ');
index 1429a6dfe3ae6c64b219f7169d0eca1ccb42118b..912a17ce1fc2fa606d3c34784bc33d0502ae3a85 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: su.c,v 1.50 2001/05/16 08:37:03 davem Exp $
+/* $Id: su.c,v 1.52 2001/06/29 21:54:32 davem Exp $
  * su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
  *
  * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
@@ -228,9 +228,9 @@ static inline int serial_paranoia_check(struct su_struct *info,
                                        kdev_t device, const char *routine)
 {
 #ifdef SERIAL_PARANOIA_CHECK
-       static const char *badmagic =
+       static const char *badmagic = KERN_WARNING
                "Warning: bad magic number for serial struct (%s) in %s\n";
-       static const char *badinfo =
+       static const char *badinfo = KERN_WARNING
                "Warning: null su_struct for (%s) in %s\n";
 
        if (!info) {
@@ -1500,13 +1500,11 @@ get_modem_info(struct su_struct * info, unsigned int *value)
 static int
 set_modem_info(struct su_struct * info, unsigned int cmd, unsigned int *value)
 {
-       int error;
        unsigned int arg;
        unsigned long flags;
 
-       error = get_user(arg, value);
-       if (error)
-               return error;
+       if (get_user(arg, value))
+               return -EFAULT;
        switch (cmd) {
        case TIOCMBIS: 
                if (arg & TIOCM_RTS)
@@ -1583,7 +1581,6 @@ static int
 su_ioctl(struct tty_struct *tty, struct file * file,
                    unsigned int cmd, unsigned long arg)
 {
-       int error;
        struct su_struct * info = (struct su_struct *)tty->driver_data;
        struct async_icount cprev, cnow;        /* kernel counter temps */
        struct serial_icounter_struct *p_cuser; /* user space */
@@ -1663,14 +1660,11 @@ su_ioctl(struct tty_struct *tty, struct file * file,
                        cnow = info->icount;
                        sti();
                        p_cuser = (struct serial_icounter_struct *) arg;
-                       error = put_user(cnow.cts, &p_cuser->cts);
-                       if (error) return error;
-                       error = put_user(cnow.dsr, &p_cuser->dsr);
-                       if (error) return error;
-                       error = put_user(cnow.rng, &p_cuser->rng);
-                       if (error) return error;
-                       error = put_user(cnow.dcd, &p_cuser->dcd);
-                       if (error) return error;
+                       if (put_user(cnow.cts, &p_cuser->cts) ||
+                           put_user(cnow.dsr, &p_cuser->dsr) ||
+                           put_user(cnow.rng, &p_cuser->rng) ||
+                           put_user(cnow.dcd, &p_cuser->dcd))
+                               return -EFAULT;
                        return 0;
 
                default:
@@ -2261,7 +2255,7 @@ done:
  */
 static __inline__ void __init show_su_version(void)
 {
-       char *revision = "$Revision: 1.50 $";
+       char *revision = "$Revision: 1.52 $";
        char *version, *p;
 
        version = strchr(revision, ' ');
index 29df2aa97ee17b69f572f0b5f74c98275d08051f..c4d4bc50dcebc82dd265b4eac79075790a8a86d3 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: zs.c,v 1.65 2001/05/09 07:00:10 davem Exp $
+/* $Id: zs.c,v 1.66 2001/06/29 21:33:22 davem Exp $
  * zs.c: Zilog serial port driver for the Sparc.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -177,7 +177,7 @@ struct zs_logent {
 #define REGCTRL        0xfd
 };
 struct zs_logent zslog[32];
-int zs_curlog = 0;
+int zs_curlog;
 #define ZSLOG(__reg, __val, __write) \
 do{    int index = zs_curlog; \
        zslog[index].reg = (__reg); \
@@ -1945,7 +1945,7 @@ int zs_open(struct tty_struct *tty, struct file * filp)
 
 static void show_serial_version(void)
 {
-       char *revision = "$Revision: 1.65 $";
+       char *revision = "$Revision: 1.66 $";
        char *version, *p;
 
        version = strchr(revision, ' ');
index 7a85afd03acf5aae1382d58ac074b0437cd0ad6b..3fbf70afc7d14db4492fc945026362c237256e13 100644 (file)
@@ -90,7 +90,7 @@ affs_grow_extcache(struct inode *inode, u32 lc_idx)
        int i, j, key;
 
        if (!AFFS_INODE->i_lc) {
-               char *ptr = (char *)get_zeroed_page(GFP_BUFFER);
+               char *ptr = (char *)get_zeroed_page(GFP_NOFS);
                if (!ptr)
                        return -ENOMEM;
                AFFS_INODE->i_lc = (u32 *)ptr;
index e052447f4e447dc6a529dbb166ed048f95836149..48090340578962c3bce0e62175d1b9168d9acac8 100644 (file)
@@ -161,144 +161,120 @@ void __wait_on_buffer(struct buffer_head * bh)
        atomic_dec(&bh->b_count);
 }
 
-/* Call sync_buffers with wait!=0 to ensure that the call does not
- * return until all buffer writes have completed.  Sync() may return
- * before the writes have finished; fsync() may not.
- */
-
-/* Godamity-damn.  Some buffers (bitmaps for filesystems)
- * spontaneously dirty themselves without ever brelse being called.
- * We will ultimately want to put these in a separate list, but for
- * now we search all of the lists for dirty buffers.
- */
-static int sync_buffers(kdev_t dev, int wait)
+/* End-of-write handler.. Just mark it up-to-date and unlock the buffer. */
+static void end_buffer_write(struct buffer_head *bh, int uptodate)
 {
-       int i, retry, pass = 0, err = 0;
-       struct buffer_head * bh, *next;
+       mark_buffer_uptodate(bh, uptodate);
+       unlock_buffer(bh);
+}
 
-       /* One pass for no-wait, three for wait:
-        * 0) write out all dirty, unlocked buffers;
-        * 1) write out all dirty buffers, waiting if locked;
-        * 2) wait for completion by waiting for all buffers to unlock.
-        */
+/* The buffers have been marked clean and locked. Just submit the dang things.. */
+static void write_locked_buffers(struct buffer_head **array, unsigned int count)
+{
        do {
-               retry = 0;
-
-               /* We search all lists as a failsafe mechanism, not because we expect
-                * there to be dirty buffers on any of the other lists.
-                */
-repeat:
-               spin_lock(&lru_list_lock);
-               bh = lru_list[BUF_DIRTY];
-               if (!bh)
-                       goto repeat2;
-
-               for (i = nr_buffers_type[BUF_DIRTY]*2 ; i-- > 0 ; bh = next) {
-                       next = bh->b_next_free;
+               struct buffer_head * bh = *array++;
+               bh->b_end_io = end_buffer_write;
+               submit_bh(WRITE, bh);
+       } while (--count);
+}
 
-                       if (!lru_list[BUF_DIRTY])
-                               break;
-                       if (dev && bh->b_dev != dev)
-                               continue;
-                       if (buffer_locked(bh)) {
-                               /* Buffer is locked; skip it unless wait is
-                                * requested AND pass > 0.
-                                */
-                               if (!wait || !pass) {
-                                       retry = 1;
-                                       continue;
-                               }
-                               atomic_inc(&bh->b_count);
-                               spin_unlock(&lru_list_lock);
-                               wait_on_buffer (bh);
-                               atomic_dec(&bh->b_count);
-                               goto repeat;
-                       }
+#define NRSYNC (32)
+static void write_unlocked_buffers(kdev_t dev)
+{
+       struct buffer_head *next;
+       struct buffer_head *array[NRSYNC];
+       unsigned int count;
+       int nr;
 
-                       /* If an unlocked buffer is not uptodate, there has
-                        * been an IO error. Skip it.
-                        */
-                       if (wait && buffer_req(bh) && !buffer_locked(bh) &&
-                           !buffer_dirty(bh) && !buffer_uptodate(bh)) {
-                               err = -EIO;
-                               continue;
-                       }
+repeat:
+       spin_lock(&lru_list_lock);
+       next = lru_list[BUF_DIRTY];
+       nr = nr_buffers_type[BUF_DIRTY] * 2;
+       count = 0;
+       while (next && --nr >= 0) {
+               struct buffer_head * bh = next;
+               next = bh->b_next_free;
 
-                       /* Don't write clean buffers.  Don't write ANY buffers
-                        * on the third pass.
-                        */
-                       if (!buffer_dirty(bh) || pass >= 2)
+               if (dev && bh->b_dev != dev)
+                       continue;
+               if (test_and_set_bit(BH_Lock, &bh->b_state))
+                       continue;
+               if (atomic_set_buffer_clean(bh)) {
+                       __refile_buffer(bh);
+                       array[count++] = bh;
+                       if (count < NRSYNC)
                                continue;
 
-                       atomic_inc(&bh->b_count);
                        spin_unlock(&lru_list_lock);
-                       ll_rw_block(WRITE, 1, &bh);
-                       atomic_dec(&bh->b_count);
-                       retry = 1;
+                       write_locked_buffers(array, count);
                        goto repeat;
                }
+               unlock_buffer(bh);
+       }
+       spin_unlock(&lru_list_lock);
 
-    repeat2:
-               bh = lru_list[BUF_LOCKED];
-               if (!bh) {
-                       spin_unlock(&lru_list_lock);
-                       break;
-               }
-               for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
-                       next = bh->b_next_free;
+       if (count)
+               write_locked_buffers(array, count);
+}
 
-                       if (!lru_list[BUF_LOCKED])
-                               break;
-                       if (dev && bh->b_dev != dev)
-                               continue;
-                       if (buffer_locked(bh)) {
-                               /* Buffer is locked; skip it unless wait is
-                                * requested AND pass > 0.
-                                */
-                               if (!wait || !pass) {
-                                       retry = 1;
-                                       continue;
-                               }
-                               atomic_inc(&bh->b_count);
-                               spin_unlock(&lru_list_lock);
-                               wait_on_buffer (bh);
-                               spin_lock(&lru_list_lock);
-                               atomic_dec(&bh->b_count);
-                               goto repeat2;
-                       }
+static int wait_for_locked_buffers(kdev_t dev, int index, int refile)
+{
+       struct buffer_head * next;
+       int nr;
+
+repeat:
+       spin_lock(&lru_list_lock);
+       next = lru_list[index];
+       nr = nr_buffers_type[index] * 2;
+       while (next && --nr >= 0) {
+               struct buffer_head *bh = next;
+               next = bh->b_next_free;
+
+               if (!buffer_locked(bh)) {
+                       if (refile)
+                               __refile_buffer(bh);
+                       continue;
                }
-               spin_unlock(&lru_list_lock);
+               if (dev && bh->b_dev != dev)
+                       continue;
 
-               /* If we are waiting for the sync to succeed, and if any dirty
-                * blocks were written, then repeat; on the second pass, only
-                * wait for buffers being written (do not pass to write any
-                * more buffers on the second pass).
-                */
-       } while (wait && retry && ++pass<=2);
-       return err;
+               atomic_inc(&bh->b_count);
+               spin_unlock(&lru_list_lock);
+               wait_on_buffer (bh);
+               atomic_dec(&bh->b_count);
+               goto repeat;
+       }
+       spin_unlock(&lru_list_lock);
+       return 0;
 }
 
-void sync_dev(kdev_t dev)
+/* Call sync_buffers with wait!=0 to ensure that the call does not
+ * return until all buffer writes have completed.  Sync() may return
+ * before the writes have finished; fsync() may not.
+ */
+
+/* Godamity-damn.  Some buffers (bitmaps for filesystems)
+ * spontaneously dirty themselves without ever brelse being called.
+ * We will ultimately want to put these in a separate list, but for
+ * now we search all of the lists for dirty buffers.
+ */
+static int sync_buffers(kdev_t dev, int wait)
 {
-       sync_supers(dev);
-       sync_inodes(dev);
-       DQUOT_SYNC(dev);
-       /* sync all the dirty buffers out to disk only _after_ all the
-          high level layers finished generated buffer dirty data
-          (or we'll return with some buffer still dirty on the blockdevice
-          so breaking the semantics of this call) */
-       sync_buffers(dev, 0);
-       /*
-        * FIXME(eric) we need to sync the physical devices here.
-        * This is because some (scsi) controllers have huge amounts of
-        * cache onboard (hundreds of Mb), and we need to instruct
-        * them to commit all of the dirty memory to disk, and we should
-        * not return until this has happened.
-        *
-        * This would need to get implemented by going through the assorted
-        * layers so that each block major number can be synced, and this
-        * would call down into the upper and mid-layer scsi.
+       int err = 0;
+
+       /* One pass for no-wait, three for wait:
+        * 0) write out all dirty, unlocked buffers;
+        * 1) wait for all dirty locked buffers;
+        * 2) write out all dirty, unlocked buffers;
+        * 2) wait for completion by waiting for all buffers to unlock.
         */
+       write_unlocked_buffers(dev);
+       if (wait) {
+               err = wait_for_locked_buffers(dev, BUF_DIRTY, 0);
+               write_unlocked_buffers(dev);
+               err |= wait_for_locked_buffers(dev, BUF_LOCKED, 1);
+       }
+       return err;
 }
 
 int fsync_super(struct super_block *sb)
@@ -331,6 +307,15 @@ int fsync_dev(kdev_t dev)
        return sync_buffers(dev, 1);
 }
 
+/*
+ * There's no real reason to pretend we should
+ * ever do anything differently
+ */
+void sync_dev(kdev_t dev)
+{
+       fsync_dev(dev);
+}
+
 asmlinkage long sys_sync(void)
 {
        fsync_dev(0);
@@ -762,7 +747,7 @@ static void refill_freelist(int size)
 {
        balance_dirty(NODEV);
        if (free_shortage())
-               page_launder(GFP_BUFFER, 0);
+               page_launder(GFP_NOFS, 0);
        if (!grow_buffers(size)) {
                wakeup_bdflush(1);
                current->policy |= SCHED_YIELD;
@@ -1223,11 +1208,11 @@ static struct buffer_head * get_unused_buffer_head(int async)
        }
        spin_unlock(&unused_list_lock);
 
-       /* This is critical.  We can't swap out pages to get
-        * more buffer heads, because the swap-out may need
-        * more buffer-heads itself.  Thus SLAB_BUFFER.
+       /* This is critical.  We can't call out to the FS
+        * to get more buffer heads, because the FS may need
+        * more buffer-heads itself.  Thus SLAB_NOFS.
         */
-       if((bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER)) != NULL) {
+       if((bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS)) != NULL) {
                bh->b_blocknr = -1;
                bh->b_this_page = NULL;
                return bh;
@@ -2243,7 +2228,7 @@ static int grow_buffers(int size)
                return 0;
        }
 
-       page = alloc_page(GFP_BUFFER);
+       page = alloc_page(GFP_NOFS);
        if (!page)
                goto out;
        LockPage(page);
@@ -2305,7 +2290,7 @@ out:
  *     1 - start IO for dirty buffers
  *     2 - wait for completion of locked buffers
  */
-static void sync_page_buffers(struct buffer_head *bh, int wait)
+static void sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
 {
        struct buffer_head * tmp = bh;
 
@@ -2313,7 +2298,7 @@ static void sync_page_buffers(struct buffer_head *bh, int wait)
                struct buffer_head *p = tmp;
                tmp = tmp->b_this_page;
                if (buffer_locked(p)) {
-                       if (wait > 1)
+                       if (gfp_mask & __GFP_WAIT)
                                __wait_on_buffer(p);
                } else if (buffer_dirty(p))
                        ll_rw_block(WRITE, 1, &p);
@@ -2337,7 +2322,7 @@ static void sync_page_buffers(struct buffer_head *bh, int wait)
  *       obtain a reference to a buffer head within a page.  So we must
  *      lock out all of these paths to cleanly toss the page.
  */
-int try_to_free_buffers(struct page * page, int wait)
+int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
 {
        struct buffer_head * tmp, * bh = page->buffers;
        int index = BUFSIZE_INDEX(bh->b_size);
@@ -2388,10 +2373,10 @@ busy_buffer_page:
        spin_unlock(&free_list[index].lock);
        write_unlock(&hash_table_lock);
        spin_unlock(&lru_list_lock);
-       if (wait) {
-               sync_page_buffers(bh, wait);
+       if (gfp_mask & __GFP_IO) {
+               sync_page_buffers(bh, gfp_mask);
                /* We waited synchronously, so we can free the buffers. */
-               if (wait > 1 && !loop) {
+               if ((gfp_mask & __GFP_WAIT) && !loop) {
                        loop = 1;
                        goto cleaned_buffers_try_again;
                }
index 94bdc4f54fcdb80b35a04966888012ba7a6ab90b..1be054c3286b5ce3f0de4b469a3b0ddd610daa88 100644 (file)
@@ -315,7 +315,7 @@ static struct file_operations proc_info_file_operations = {
 
 static int mem_open(struct inode* inode, struct file* file)
 {
-       file->private_data = (void*)(current->self_exec_id);
+       file->private_data = (void*)((long)current->self_exec_id);
        return 0;
 }
 
@@ -342,7 +342,7 @@ static ssize_t mem_read(struct file * file, char * buf,
                atomic_inc(&mm->mm_users);
        task_unlock(task);
 
-       if (file->private_data != (void*)(current->self_exec_id) ) {
+       if (file->private_data != (void*)((long)current->self_exec_id) ) {
                mmput(mm);
                return -EIO;
        }
index 8fe752c075e7fa430a48ec69715bc135b1989bda..e31efb0510199a89659e98e42faa2c0e45321dd7 100644 (file)
@@ -190,7 +190,7 @@ static int xlate_proc_name(const char *name,
        return 0;
 }
 
-static unsigned char proc_alloc_map[PROC_NDYNAMIC / 8];
+static unsigned long proc_alloc_map[PROC_NDYNAMIC / 8];
 
 spinlock_t proc_alloc_map_lock = SPIN_LOCK_UNLOCKED;
 
@@ -198,12 +198,12 @@ static int make_inode_number(void)
 {
        int i;
        spin_lock(&proc_alloc_map_lock);
-       i = find_first_zero_bit((void *) proc_alloc_map, PROC_NDYNAMIC);
-       if (i<0 || i>=PROC_NDYNAMIC) {
+       i = find_first_zero_bit(proc_alloc_map, PROC_NDYNAMIC);
+       if (i < 0 || i >= PROC_NDYNAMIC) {
                i = -1;
                goto out;
        }
-       set_bit(i, (void *) proc_alloc_map);
+       set_bit(i, proc_alloc_map);
        i += PROC_DYNAMIC_FIRST;
 out:
        spin_unlock(&proc_alloc_map_lock);
@@ -555,8 +555,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
                de->next = NULL;
                if (S_ISDIR(de->mode))
                        parent->nlink--;
-               clear_bit(de->low_ino-PROC_DYNAMIC_FIRST,
-                               (void *) proc_alloc_map);
+               clear_bit(de->low_ino - PROC_DYNAMIC_FIRST,
+                         proc_alloc_map);
                proc_kill_inodes(de);
                de->nlink = 0;
                if (!atomic_read(&de->count))
index ad184a145e7d4dc761acf3def252b1b0866f1916..806677990cd74f5ecb210f1a231a2d37753896dd 100644 (file)
@@ -157,7 +157,7 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi
                if (d_reclen <= 32) {
                  local_buf = small_buf ;
                } else {
-                   local_buf = kmalloc(d_reclen, GFP_BUFFER) ;
+                   local_buf = kmalloc(d_reclen, GFP_NOFS) ;
                    if (!local_buf) {
                        pathrelse (&path_to_entry);
                        return -ENOMEM ;
index 37354b0e1f76593e0f10a8c99b4fa720c305400c..bc9c0520925bd9b6488c1096de98c61da4f26098 100644 (file)
@@ -2250,7 +2250,7 @@ static int get_mem_for_virtual_node (struct tree_balance * tb)
                dcache shrinking). So, release path and collected
                resourses here */
            free_buffers_in_tb (tb);
-           buf = reiserfs_kmalloc(size, GFP_BUFFER, tb->tb_sb);
+           buf = reiserfs_kmalloc(size, GFP_NOFS, tb->tb_sb);
            if ( !buf ) {
 #ifdef CONFIG_REISERFS_CHECK
                reiserfs_warning ("vs-8345: get_mem_for_virtual_node: "
index 396beb24a2d4bacc0bb6824296cdcbeadba53358..37fb1edc7ddc692351a6d2cac01e7665acc230f4 100644 (file)
@@ -128,11 +128,11 @@ allocate_bitmap_node(struct super_block *p_s_sb) {
   struct reiserfs_bitmap_node *bn ;
   static int id = 0 ;
 
-  bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_BUFFER) ;
+  bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS) ;
   if (!bn) {
     return NULL ;
   }
-  bn->data = kmalloc(p_s_sb->s_blocksize, GFP_BUFFER) ;
+  bn->data = kmalloc(p_s_sb->s_blocksize, GFP_NOFS) ;
   if (!bn->data) {
     kfree(bn) ;
     return NULL ;
@@ -1492,8 +1492,8 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
   }
   trans_id = le32_to_cpu(desc->j_trans_id) ;
   /* now we know we've got a good transaction, and it was inside the valid time ranges */
-  log_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
-  real_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_BUFFER) ;
+  log_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS) ;
+  real_blocks = kmalloc(le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), GFP_NOFS) ;
   if (!log_blocks  || !real_blocks) {
     brelse(c_bh) ;
     brelse(d_bh) ;
@@ -1786,10 +1786,10 @@ static void setup_commit_task_arg(struct reiserfs_journal_commit_task *ct,
 
 static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
   struct reiserfs_journal_commit_task *ct ;
-  /* using GFP_BUFFER, GFP_KERNEL could try to flush inodes, which will try
+  /* using GFP_NOFS, GFP_KERNEL could try to flush inodes, which will try
   ** to start/join a transaction, which will deadlock
   */
-  ct = kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_BUFFER) ;
+  ct = kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_NOFS) ;
   if (ct) {
     setup_commit_task_arg(ct, p_s_sb, jindex) ;
     queue_task(&(ct->task), &reiserfs_commit_thread_tq);
@@ -2886,7 +2886,7 @@ int reiserfs_add_page_to_flush_list(struct reiserfs_transaction_handle *th,
 #endif
 
   get_page(bh->b_page) ;
-  new_pl = reiserfs_kmalloc(sizeof(struct reiserfs_page_list), GFP_BUFFER,
+  new_pl = reiserfs_kmalloc(sizeof(struct reiserfs_page_list), GFP_NOFS,
                             inode->i_sb) ;
   if (!new_pl) {
     put_page(bh->b_page) ;
index ed782d90938298354b0557b4382a848746c48e29..b7c612986f0b89200cdfd22e03e8b08f9e89290f 100644 (file)
@@ -437,7 +437,7 @@ static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct in
     /* get memory for composing the entry */
     buflen = DEH_SIZE + ROUND_UP (namelen);
     if (buflen > sizeof (small_buf)) {
-       buffer = reiserfs_kmalloc (buflen, GFP_BUFFER, dir->i_sb);
+       buffer = reiserfs_kmalloc (buflen, GFP_NOFS, dir->i_sb);
        if (buffer == 0)
            return -ENOMEM;
     } else
@@ -878,7 +878,7 @@ int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const char * s
        return -ENAMETOOLONG;
     }
   
-    name = kmalloc (item_len, GFP_BUFFER);
+    name = kmalloc (item_len, GFP_NOFS);
     if (!name) {
        iput(inode) ;
        return -ENOMEM;
index cf590d6f44c2617fc361eaeb57e1670b28dc8e95..ca96e53eeaf111a61a4dc8b54441967e9a9898ac 100644 (file)
@@ -305,7 +305,7 @@ static int read_bitmaps (struct super_block * s)
     int i, bmp, dl ;
     struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK(s);
 
-    SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_BUFFER, s);
+    SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_NOFS, s);
     if (SB_AP_BITMAP (s) == 0)
        return 1;
     memset (SB_AP_BITMAP (s), 0, sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr));
@@ -334,7 +334,7 @@ static int read_old_bitmaps (struct super_block * s)
   int bmp1 = (REISERFS_OLD_DISK_OFFSET_IN_BYTES / s->s_blocksize) + 1;  /* first of bitmap blocks */
 
   /* read true bitmap */
-  SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_BUFFER, s);
+  SB_AP_BITMAP (s) = reiserfs_kmalloc (sizeof (struct buffer_head *) * le16_to_cpu (rs->s_bmap_nr), GFP_NOFS, s);
   if (SB_AP_BITMAP (s) == 0)
     return 1;
 
index 29deb173f565ada049998d2903b92bd7b3b947fa..f6866ccf6abb499b0c212fcdd02f6a8d9febe7e2 100644 (file)
@@ -1056,7 +1056,7 @@ extern struct file_operations rdwr_pipe_fops;
 
 extern int fs_may_remount_ro(struct super_block *);
 
-extern int try_to_free_buffers(struct page *, int);
+extern int try_to_free_buffers(struct page *, unsigned int);
 extern void refile_buffer(struct buffer_head * buf);
 
 /* reiserfs_writepage needs this */
index 31ebd2ba14b2a758700ac4031f2b4c9e7762f3db..6e5d72757d05375634faa112d4ffe6a8339ba329 100644 (file)
@@ -533,18 +533,19 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
 #define __GFP_HIGHMEM  0x02
 
 /* Action modifiers - doesn't change the zoning */
-#define __GFP_WAIT     0x10
-#define __GFP_HIGH     0x20
-#define __GFP_IO       0x40
-#define __GFP_BUFFER   0x80
+#define __GFP_WAIT     0x10    /* Can wait and reschedule? */
+#define __GFP_HIGH     0x20    /* Should access emergency pools? */
+#define __GFP_IO       0x40    /* Can start physical IO? */
+#define __GFP_FS       0x80    /* Can call down to low-level FS? */
 
-#define GFP_BUFFER     (__GFP_HIGH | __GFP_WAIT | __GFP_BUFFER)
+#define GFP_NOIO       (__GFP_HIGH | __GFP_WAIT)
+#define GFP_NOFS       (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
 #define GFP_ATOMIC     (__GFP_HIGH)
-#define GFP_USER       (             __GFP_WAIT | __GFP_IO)
-#define GFP_HIGHUSER   (             __GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
-#define GFP_KERNEL     (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
-#define GFP_NFS                (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
-#define GFP_KSWAPD     (                          __GFP_IO)
+#define GFP_USER       (             __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER   (             __GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL     (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_NFS                (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_KSWAPD     (                          __GFP_IO | __GFP_FS)
 
 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */
index 32895d79186bf6c3fae2d09f8518945669d98da7..e70bf400632c84f6106506b7b41d69c6e80d1b49 100644 (file)
@@ -15,14 +15,15 @@ typedef struct kmem_cache_s kmem_cache_t;
 #include       <linux/cache.h>
 
 /* flags for kmem_cache_alloc() */
-#define        SLAB_BUFFER             GFP_BUFFER
+#define        SLAB_NOFS               GFP_NOFS
+#define        SLAB_NOIO               GFP_NOIO
 #define        SLAB_ATOMIC             GFP_ATOMIC
 #define        SLAB_USER               GFP_USER
 #define        SLAB_KERNEL             GFP_KERNEL
 #define        SLAB_NFS                GFP_NFS
 #define        SLAB_DMA                GFP_DMA
 
-#define SLAB_LEVEL_MASK                (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_BUFFER)
+#define SLAB_LEVEL_MASK                (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS)
 #define        SLAB_NO_GROW            0x00001000UL    /* don't grow a cache */
 
 /* flags to pass to kmem_cache_create().
index cec2e51016764f10b61d3ee734ea12e5cf18f543..d9e739830fd7bab56f2001af4cecf7b494747ac1 100644 (file)
@@ -48,11 +48,14 @@ void __br_write_lock (enum brlock_indices idx)
 {
        int i;
 
-       spin_lock(&__br_write_locks[idx].lock);
 again:
+       spin_lock(&__br_write_locks[idx].lock);
        for (i = 0; i < smp_num_cpus; i++)
-               if (__brlock_array[cpu_logical_map(i)][idx] != 0)
+               if (__brlock_array[cpu_logical_map(i)][idx] != 0) {
+                       spin_unlock(&__br_write_locks[idx].lock);
+                       barrier();
                        goto again;
+               }
 }
 
 void __br_write_unlock (enum brlock_indices idx)
index 8bbb0c597b77830bebcfbe0e6fb929e276627fff..70bc76731aeba089ad3e738d45d23af2a6d1c860 100644 (file)
@@ -302,7 +302,7 @@ struct page *alloc_bounce_page (void)
        struct page *page;
 
 repeat_alloc:
-       page = alloc_page(GFP_BUFFER);
+       page = alloc_page(GFP_NOIO);
        if (page)
                return page;
        /*
@@ -340,7 +340,7 @@ struct buffer_head *alloc_bounce_bh (void)
        struct buffer_head *bh;
 
 repeat_alloc:
-       bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
+       bh = kmem_cache_alloc(bh_cachep, SLAB_NOIO);
        if (bh)
                return bh;
        /*
index 7163bdace41953beeb764c4338872388a2cb29d6..1e30dbf28cdb2425de8ce43c73bf00aec846c9c8 100644 (file)
@@ -442,15 +442,15 @@ try_again:
                 * in that case we bail out to prevent infinite loops and
                 * hanging device drivers ...
                 *
-                * Another issue are GFP_BUFFER allocations; because they
-                * do not have __GFP_IO set it's possible we cannot make
+                * Another issue are GFP_NOFS allocations; because they
+                * do not have __GFP_FS set it's possible we cannot make
                 * any progress freeing pages, in that case it's better
                 * to give up than to deadlock the kernel looping here.
                 */
                if (gfp_mask & __GFP_WAIT) {
                        if (!order || free_shortage()) {
                                int progress = try_to_free_pages(gfp_mask);
-                               if (progress || (gfp_mask & __GFP_IO))
+                               if (progress || (gfp_mask & __GFP_FS))
                                        goto try_again;
                                /*
                                 * Fail in case no progress was made and the
index 4eb2902b2ca57c9b4b821a1072f8851684eabce9..01e10608716f84a29e2b5dee08b0519b83013e64 100644 (file)
@@ -424,8 +424,8 @@ out:
  * go out to Matthew Dillon.
  */
 #define MAX_LAUNDER            (4 * (1 << page_cluster))
+#define CAN_DO_FS              (gfp_mask & __GFP_FS)
 #define CAN_DO_IO              (gfp_mask & __GFP_IO)
-#define CAN_DO_BUFFERS         (gfp_mask & __GFP_BUFFER)
 int page_launder(int gfp_mask, int sync)
 {
        int launder_loop, maxscan, cleaned_pages, maxlaunder;
@@ -482,7 +482,7 @@ dirty_page_rescan:
                                goto page_active;
 
                        /* First time through? Move it to the back of the list */
-                       if (!launder_loop || !CAN_DO_IO) {
+                       if (!launder_loop || !CAN_DO_FS) {
                                list_del(page_lru);
                                list_add(page_lru, &inactive_dirty_list);
                                UnlockPage(page);
@@ -512,7 +512,8 @@ dirty_page_rescan:
                 * buffer pages
                 */
                if (page->buffers) {
-                       int wait, clearedbuf;
+                       unsigned int buffer_mask;
+                       int clearedbuf;
                        int freed_page = 0;
                        /*
                         * Since we might be doing disk IO, we have to
@@ -524,16 +525,15 @@ dirty_page_rescan:
                        spin_unlock(&pagemap_lru_lock);
 
                        /* Will we do (asynchronous) IO? */
-                       wait = 0;               /* No IO */
-                       if (launder_loop) {
-                               if (maxlaunder == 0 && sync)
-                                       wait = 2;       /* Synchrounous IO */
-                               else if (maxlaunder-- > 0)
-                                       wait = 1;       /* Async IO */
-                       }
+                       if (launder_loop && maxlaunder == 0 && sync)
+                               buffer_mask = gfp_mask;                         /* Do as much as we can */
+                       else if (launder_loop && maxlaunder-- > 0)
+                               buffer_mask = gfp_mask & ~__GFP_WAIT;                   /* Don't wait, async write-out */
+                       else
+                               buffer_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);      /* Don't even start IO */
 
                        /* Try to free the page buffers. */
-                       clearedbuf = try_to_free_buffers(page, wait);
+                       clearedbuf = try_to_free_buffers(page, buffer_mask);
 
                        /*
                         * Re-take the spinlock. Note that we cannot
@@ -613,7 +613,7 @@ page_active:
         * loads, flush out the dirty pages before we have to wait on
         * IO.
         */
-       if ((CAN_DO_IO || CAN_DO_BUFFERS) && !launder_loop && free_shortage()) {
+       if ((CAN_DO_IO || CAN_DO_FS) && !launder_loop && free_shortage()) {
                launder_loop = 1;
                /* If we cleaned pages, never do synchronous IO. */
                if (cleaned_pages)
index ce5fc6d0dc0bd0f27b686bcbf49f03348d0d6cab..3c399c826b5152ab47b9ac0212e4464bec255385 100644 (file)
@@ -97,11 +97,7 @@ static struct timer_list aarp_timer;
  */
 static void __aarp_expire(struct aarp_entry *a)
 {
-       struct sk_buff *skb;
-       
-       while ((skb = skb_dequeue(&a->packet_queue)) != NULL)
-               kfree_skb(skb);
-
+       skb_queue_purge(&a->packet_queue);
        kfree(a);
 }
 
@@ -844,9 +840,7 @@ out0:       kfree_skb(skb);
 }
 
 static struct notifier_block aarp_notifier = {
-       aarp_device_event,
-       NULL,
-       0
+       notifier_call:  aarp_device_event,
 };
 
 static char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
@@ -888,8 +882,8 @@ static int aarp_get_info(char *buffer, char **start, off_t offset, int length)
        int len, ct;
 
        len = sprintf(buffer,
-               "%-10.10s  ""%-10.10s""%-18.18s""%12.12s""%12.12s"" xmit_count  status\n",
-               "address","device","hw addr","last_sent", "expires");
+               "%-10.10s  %-10.10s%-18.18s%12.12s%12.12s xmit_count  status\n",
+               "address", "device", "hw addr", "last_sent", "expires");
 
        spin_lock_bh(&aarp_lock);
 
index 7c1484d6e9122d346531f2cdf499dfb010f13f6a..1c40efac0afd94571f227f125a153c3a055a7dd4 100644 (file)
@@ -83,9 +83,11 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
 
                                /* Last fragment received ? */
                                if (ax25->fragno == 0) {
-                                       if ((skbn = alloc_skb(AX25_MAX_HEADER_LEN + ax25->fraglen, GFP_ATOMIC)) == NULL) {
-                                               while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL)
-                                                       kfree_skb(skbo);
+                                       skbn = alloc_skb(AX25_MAX_HEADER_LEN +
+                                                        ax25->fraglen,
+                                                        GFP_ATOMIC);
+                                       if (!skbn) {
+                                               skb_queue_purge(&ax25->frag_queue);
                                                return 1;
                                        }
 
@@ -113,8 +115,7 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
        } else {
                /* First fragment received */
                if (*skb->data & AX25_SEG_FIRST) {
-                       while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL)
-                               kfree_skb(skbo);
+                       skb_queue_purge(&ax25->frag_queue);
                        ax25->fragno = *skb->data & AX25_SEG_REM;
                        skb_pull(skb, 1);               /* skip fragno */
                        ax25->fraglen = skb->len;
@@ -416,7 +417,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ax25_address *d
        /*
         *      Sort out any digipeated paths.
         */
-       if (dp.ndigi != 0 && ax25->digipeat == NULL && (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+       if (dp.ndigi && !ax25->digipeat &&
+           (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
                kfree_skb(skb);
                ax25_destroy_socket(ax25);
                return 0;
@@ -468,7 +470,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ax25_address *d
 /*
  *     Receive an AX.25 frame via a SLIP interface.
  */
-int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
+int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
+                 struct packet_type *ptype)
 {
        skb->sk = NULL;         /* Initially we don't know who it's for */
        skb->destructor = NULL; /* Who initializes this, dammit?! */
index 66978964628a9153fd8c751f587c9f660db8f955..3ac22ea070d55317b9be484edc4890a43329e8a4 100644 (file)
  */
 void ax25_clear_queues(ax25_cb *ax25)
 {
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&ax25->write_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&ax25->write_queue);
+       skb_queue_purge(&ax25->ack_queue);
+       skb_queue_purge(&ax25->reseq_queue);
+       skb_queue_purge(&ax25->frag_queue);
 }
 
 /*
index 1841ce5079e55fa4c815bd88e6a1cce6f24fda9e..3b836e33296202243b9ff9e6caca2261080e4980 100644 (file)
@@ -7,7 +7,7 @@
  *             handler for protocols to use and generic option handler.
  *
  *
- * Version:    $Id: sock.c,v 1.110 2001/04/20 20:46:19 davem Exp $
+ * Version:    $Id: sock.c,v 1.111 2001/06/26 23:29:17 davem Exp $
  *
  * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -80,6 +80,7 @@
  *             Andi Kleen      :       Add sock_kmalloc()/sock_kfree_s()
  *             Andi Kleen      :       Fix write_space callback
  *             Chris Evans     :       Security fixes - signedness again
+ *             Arnaldo C. Melo :       cleanups, use skb_queue_purge
  *
  * To Fix:
  *
@@ -172,7 +173,6 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 #endif
        int val;
        int valbool;
-       int err;
        struct linger ling;
        int ret = 0;
        
@@ -192,9 +192,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
        if(optlen<sizeof(int))
                return(-EINVAL);
        
-       err = get_user(val, (int *)optval);
-       if (err)
-               return err;
+       if (get_user(val, (int *)optval))
+               return -EFAULT;
        
        valbool = val?1:0;
 
@@ -918,14 +917,10 @@ static void sklist_destroy_timer(unsigned long data)
  
 void sklist_destroy_socket(struct sock **list,struct sock *sk)
 {
-       struct sk_buff *skb;
        if(list)
                sklist_remove_socket(list, sk);
 
-       while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
-       {
-               kfree_skb(skb);
-       }
+       skb_queue_purge(&sk->receive_queue);
 
        if(atomic_read(&sk->wmem_alloc) == 0 &&
           atomic_read(&sk->rmem_alloc) == 0 &&
index 076e56768c42f5f55c065dfc7fa11737036c5a0f..d54391b27903ee709836377607a09d6c753715e2 100644 (file)
@@ -68,7 +68,7 @@ struct aunhdr
        unsigned long handle;
 };
 
-static unsigned long aun_seq = 0;
+static unsigned long aun_seq;
 
 /* Queue of packets waiting to be transmitted. */
 static struct sk_buff_head aun_queue;
@@ -172,9 +172,8 @@ static int econet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
         *      Check legality
         */
         
-       if (addr_len < sizeof(struct sockaddr_ec))
-               return -EINVAL;
-       if (sec->sec_family != AF_ECONET)
+       if (addr_len < sizeof(struct sockaddr_ec) ||
+           sec->sec_family != AF_ECONET)
                return -EINVAL;
        
        sk->protinfo.af_econet->cb = sec->cb;
@@ -485,7 +484,6 @@ static void econet_destroy_timer(unsigned long data)
 
 static int econet_release(struct socket *sock)
 {
-       struct sk_buff  *skb;
        struct sock *sk = sock->sk;
 
        if (!sk)
@@ -505,8 +503,7 @@ static int econet_release(struct socket *sock)
 
        /* Purge queues */
 
-       while ((skb=skb_dequeue(&sk->receive_queue))!=NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&sk->receive_queue);
 
        if (atomic_read(&sk->rmem_alloc) || atomic_read(&sk->wmem_alloc)) {
                sk->timer.data=(unsigned long)sk;
index 8c103857c45bc151edf31000398dff8623acfa00..514418f051eb2f74e969a1b8544ebfae923c8d26 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  This source is covered by the GNU GPL, the same as all kernel sources.
  *
- *  Version:   $Id: inetpeer.c,v 1.3 2000/10/03 07:29:00 anton Exp $
+ *  Version:   $Id: inetpeer.c,v 1.6 2001/06/21 20:30:14 davem Exp $
  *
  *  Authors:   Andrey V. Savochkin <saw@msu.ru>
  */
@@ -109,11 +109,11 @@ void __init inet_initpeers(void)
         * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
         * myself.  --SAW
         */
-       if (si.totalram <= 32768*1024)
+       if (si.totalram <= (32768*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
-       if (si.totalram <= 16384*1024)
+       if (si.totalram <= (16384*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* about 512KB */
-       if (si.totalram <= 8192*1024)
+       if (si.totalram <= (8192*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 2; /* about 128KB */
 
        peer_cachep = kmem_cache_create("inet_peer_cache",
index 2bf972fdabf9956f2a0b1e8ff4a4dbfb409385ca..6a696b74cf24fe2988ea90553d0feea03b67c4c6 100644 (file)
@@ -9,7 +9,7 @@
  *     as published by the Free Software Foundation; either version
  *     2 of the License, or (at your option) any later version.
  *
- *     Version: $Id: ipmr.c,v 1.59 2001/02/23 06:32:11 davem Exp $
+ *     Version: $Id: ipmr.c,v 1.60 2001/06/29 21:33:22 davem Exp $
  *
  *     Fixes:
  *     Michael Chastain        :       Incorrect size of copying.
@@ -83,8 +83,8 @@ static int maxvif;
 
 #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
 
-int mroute_do_assert = 0;                              /* Set in PIM assert    */
-int mroute_do_pim = 0;
+int mroute_do_assert;                                  /* Set in PIM assert    */
+int mroute_do_pim;
 
 static struct mfc_cache *mfc_cache_array[MFC_LINES];   /* Forwarding cache     */
 
index 4d624206cdd198a69e90637f03655029b6b6ad5d..2f4929242533afa04f56eefdf12b74a3067d7122 100644 (file)
@@ -722,7 +722,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 
                if (skb->ip_summed != CHECKSUM_HW) {
                        memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
-                       skb->csum = csum_block_add(skb->csum, next_skb->csum, skb->len);
+                       skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
                }
 
                /* Update sequence range on original skb. */
index 575e874b83159c2dea0a59e3a4b971afdf2d3fd3..0a6cdd225548933ec9a2f79f312b1c5815855330 100644 (file)
@@ -398,7 +398,7 @@ static int spx_route_skb(struct spx_opt *pdata, struct sk_buff *skb, int type)
                        pdata->retransmit.expires = jiffies + spx_calc_rtt(0);
                        add_timer(&pdata->retransmit);
 
-                       skb2 = skb_clone(skb, GFP_BUFFER);
+                       skb2 = skb_clone(skb, GFP_NOFS);        /* Why? Why not GFP_KERNEL? */
                        if(skb2 == NULL)
                                return -ENOBUFS;
                        skb_queue_tail(&pdata->retransmit_queue, skb2);
index aa701ad2148f1799815e87c0385ef3af951e418c..439a6dd8a8b86888eabb25b4f1ae93623941dc75 100644 (file)
  */
 void lapb_clear_queues(lapb_cb *lapb)
 {
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&lapb->write_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&lapb->write_queue);
+       skb_queue_purge(&lapb->ack_queue);
 }
 
 /*
index 0ad59327f32b6a7b52552ab4a06fa6e730b53a67..613e233fd886ecbd0bd73016da60c7d87554cc6a 100644 (file)
@@ -990,8 +990,8 @@ static int __init netlink_proto_init(void)
 
 static void __exit netlink_proto_exit(void)
 {
-       sock_unregister(PF_NETLINK);
-       remove_proc_entry("net/netlink", NULL);
+       sock_unregister(PF_NETLINK);
+       remove_proc_entry("net/netlink", NULL);
 }
 
 module_init(netlink_proto_init);
index 13d14e30f8e9c6eb8df95700b976c620bd052816..3249ac79828b4671fb23a7d6ddb8d5ee97d2ad18 100644 (file)
@@ -5,7 +5,7 @@
  *
  *             PACKET - implements raw packet sockets.
  *
- * Version:    $Id: af_packet.c,v 1.54 2001/03/03 01:20:11 davem Exp $
+ * Version:    $Id: af_packet.c,v 1.55 2001/06/28 01:34:29 davem Exp $
  *
  * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -1407,11 +1407,10 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
 }
 
 
-static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int packet_ioctl(struct socket *sock, unsigned int cmd,
+                       unsigned long arg)
 {
        struct sock *sk = sock->sk;
-       int err;
-       int pid;
 
        switch(cmd) 
        {
@@ -1433,25 +1432,26 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
                        return put_user(amount, (int *)arg);
                }
                case FIOSETOWN:
-               case SIOCSPGRP:
-                       err = get_user(pid, (int *) arg);
-                       if (err)
-                               return err
+               case SIOCSPGRP: {
+                       int pid;
+                       if (get_user(pid, (int *) arg))
+                               return -EFAULT
                        if (current->pid != pid && current->pgrp != -pid && 
                            !capable(CAP_NET_ADMIN))
                                return -EPERM;
                        sk->proc = pid;
-                       return(0);
+                       break;
+               }
                case FIOGETOWN:
                case SIOCGPGRP:
                        return put_user(sk->proc, (int *)arg);
                case SIOCGSTAMP:
                        if(sk->stamp.tv_sec==0)
                                return -ENOENT;
-                       err = -EFAULT;
-                       if (!copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
-                               err = 0;
-                       return err;
+                       if (copy_to_user((void *)arg, &sk->stamp,
+                                        sizeof(struct timeval)))
+                               return -EFAULT;
+                       break;
                case SIOCGIFFLAGS:
 #ifndef CONFIG_INET
                case SIOCSIFFLAGS:
@@ -1493,8 +1493,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
                case SIOCSIFDIVERT:
 #ifdef CONFIG_NET_DIVERT
                        return(divert_ioctl(cmd, (struct divert_cf *) arg));
-#else
-                       return -ENOPKG;
 #endif /* CONFIG_NET_DIVERT */
 
                        return -ENOPKG;
@@ -1530,8 +1528,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
 #endif
                        return -EOPNOTSUPP;
        }
-       /*NOTREACHED*/
-       return(0);
+       return 0;
 }
 
 #ifndef CONFIG_PACKET_MMAP
index 1bedfdf0d3b492002eb8f56de1e54164096e6ebc..387e6ac7c1c0e25d112977649faf356c90d302bc 100644 (file)
@@ -221,13 +221,11 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
 {
        struct rose_neigh *s;
        unsigned long flags;
-       struct sk_buff *skb;
 
        rose_stop_ftimer(rose_neigh);
        rose_stop_t0timer(rose_neigh);
 
-       while ((skb = skb_dequeue(&rose_neigh->queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&rose_neigh->queue);
 
        save_flags(flags); cli();
 
@@ -684,15 +682,13 @@ int rose_rt_ioctl(unsigned int cmd, void *arg)
 static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
 {
        struct rose_route *rose_route, *s;
-       struct sk_buff    *skb;
 
        rose_neigh->restarted = 0;
 
        rose_stop_t0timer(rose_neigh);
        rose_start_ftimer(rose_neigh);
 
-       while ((skb = skb_dequeue(&rose_neigh->queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&rose_neigh->queue);
 
        rose_route = rose_route_list;
 
index 384347a0e94bad03d74cc3b5504608c16633961a..98658b4392a3207a8c5dc26b5bf3f43d4816d400 100644 (file)
  */
 void rose_clear_queues(struct sock *sk)
 {
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&sk->write_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&sk->protinfo.rose->ack_queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&sk->write_queue);
+       skb_queue_purge(&sk->protinfo.rose->ack_queue);
 }
 
 /*
index 3a48c3ebf3bd5c138f1e07e2f8dc0584eeefe958..0f03ba4e1ed8c0d6e1d93e7749ff605562b8a9fb 100644 (file)
@@ -298,14 +298,11 @@ gred_drop(struct Qdisc* sch)
 
 static void gred_reset(struct Qdisc* sch)
 {
-       struct sk_buff *skb;
        int i;
-
        struct gred_sched_data *q;
        struct gred_sched *t= (struct gred_sched *)sch->data;
 
-       while((skb=__skb_dequeue(&sch->q))!=NULL)
-               kfree_skb(skb);
+       __skb_queue_purge(&sch->q);
 
        sch->stats.backlog = 0;
 
index ae948ba9219bfa1757b2aeba00cc7b26a5cc70aa..e377e0e0f29f5c46817a72b0d82da530cb6e19be 100644 (file)
@@ -363,10 +363,8 @@ red_drop(struct Qdisc* sch)
 static void red_reset(struct Qdisc* sch)
 {
        struct red_sched_data *q = (struct red_sched_data *)sch->data;
-       struct sk_buff *skb;
 
-       while((skb=__skb_dequeue(&sch->q))!=NULL)
-               kfree_skb(skb);
+       __skb_queue_purge(&sch->q);
        sch->stats.backlog = 0;
        PSCHED_SET_PASTPERFECT(q->qidlestart);
        q->qave = 0;
index 29769fe6105f33b1072ffd047ed5cbd5084a2de2..f224e03b1388aeabf1610230c621b04b9e3eb7da 100644 (file)
@@ -1332,7 +1332,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, int len,
 
                /*
                 *      If you pass two values to the sock_alloc_send_skb
-                *      it tries to grab the large buffer with GFP_BUFFER
+                *      it tries to grab the large buffer with GFP_NOFS
                 *      (which can fail easily), and if it fails grab the
                 *      fallback size buffer which is under a page and will
                 *      succeed. [Alan]
index bc89eef4ec5285b94a48127498a38139d0695fac..5abf21e7a8dcb8b1879b91ea8c53a636598ac491 100644 (file)
@@ -296,9 +296,6 @@ void unix_gc(void)
         *      Here we are. Hitlist is filled. Die.
         */
 
-       while ((skb=__skb_dequeue(&hitlist))!=NULL) {
-               kfree_skb(skb);
-       }
-
+       __skb_queue_purge(&hitlist);
        up(&unix_gc_sem);
 }
index 841499325f7e1072e98c7559b580ee2b2452e85d..37c96d681403b04f26a181850b0de3edf9473398 100644 (file)
@@ -309,10 +309,8 @@ static void x25_remove_neigh(struct x25_neigh *x25_neigh)
 {
        struct x25_neigh *s;
        unsigned long flags;
-       struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&x25_neigh->queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&x25_neigh->queue);
 
        x25_stop_t20timer(x25_neigh);
 
index 635c872e557debd288ce62511529af1499bb90fe..a5804fa308666a9062848d7f73f3cab4fa40e133 100644 (file)
@@ -17,6 +17,7 @@
  *     X.25 002        Jonathan Naylor   Centralised disconnection processing.
  *     mar/20/00       Daniela Squassoni Disabling/enabling of facilities 
  *                                       negotiation.
+ *     jun/24/01       Arnaldo C. Melo   use skb_queue_purge, cleanups
  */
 
 #include <linux/errno.h>
  */
 void x25_clear_queues(struct sock *sk)
 {
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&sk->write_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&sk->protinfo.x25->ack_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&sk->protinfo.x25->interrupt_in_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&sk->protinfo.x25->interrupt_out_queue)) != NULL)
-               kfree_skb(skb);
-
-       while ((skb = skb_dequeue(&sk->protinfo.x25->fragment_queue)) != NULL)
-               kfree_skb(skb);
+       skb_queue_purge(&sk->write_queue);
+       skb_queue_purge(&sk->protinfo.x25->ack_queue);
+       skb_queue_purge(&sk->protinfo.x25->interrupt_in_queue);
+       skb_queue_purge(&sk->protinfo.x25->interrupt_out_queue);
+       skb_queue_purge(&sk->protinfo.x25->fragment_queue);
 }
 
 
@@ -72,20 +62,20 @@ void x25_clear_queues(struct sock *sk)
 void x25_frames_acked(struct sock *sk, unsigned short nr)
 {
        struct sk_buff *skb;
-       int modulus;
-
-       modulus = (sk->protinfo.x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
+       int modulus = sk->protinfo.x25->neighbour->extended ? X25_EMODULUS :
+                                                             X25_SMODULUS;
 
        /*
         * Remove all the ack-ed frames from the ack queue.
         */
-       if (sk->protinfo.x25->va != nr) {
-               while (skb_peek(&sk->protinfo.x25->ack_queue) != NULL && sk->protinfo.x25->va != nr) {
+       if (sk->protinfo.x25->va != nr)
+               while (skb_peek(&sk->protinfo.x25->ack_queue) != NULL &&
+                      sk->protinfo.x25->va != nr) {
                        skb = skb_dequeue(&sk->protinfo.x25->ack_queue);
                        kfree_skb(skb);
-                       sk->protinfo.x25->va = (sk->protinfo.x25->va + 1) % modulus;
+                       sk->protinfo.x25->va = (sk->protinfo.x25->va + 1) %
+                                               modulus;
                }
-       }
 }
 
 void x25_requeue_frames(struct sock *sk)
@@ -113,18 +103,15 @@ void x25_requeue_frames(struct sock *sk)
 int x25_validate_nr(struct sock *sk, unsigned short nr)
 {
        unsigned short vc = sk->protinfo.x25->va;
-       int modulus;
-
-       modulus = (sk->protinfo.x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
+       int modulus = sk->protinfo.x25->neighbour->extended ? X25_EMODULUS :
+                                                             X25_SMODULUS;
 
        while (vc != sk->protinfo.x25->vs) {
                if (nr == vc) return 1;
                vc = (vc + 1) % modulus;
        }
 
-       if (nr == sk->protinfo.x25->vs) return 1;
-
-       return 0;
+       return nr == sk->protinfo.x25->vs ? 1 : 0;
 }
 
 /* 
@@ -150,7 +137,8 @@ void x25_write_internal(struct sock *sk, int frametype)
         */
        switch (frametype) {
                case X25_CALL_REQUEST:
-                       len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+                       len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
+                              X25_MAX_CUD_LEN;
                        break;
                case X25_CALL_ACCEPTED:
                        len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
@@ -167,7 +155,8 @@ void x25_write_internal(struct sock *sk, int frametype)
                case X25_RESET_CONFIRMATION:
                        break;
                default:
-                       printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
+                       printk(KERN_ERR "X.25: invalid frame type %02X\n",
+                              frametype);
                        return;
        }
 
@@ -262,11 +251,10 @@ void x25_write_internal(struct sock *sk, int frametype)
 /*
  *     Unpick the contents of the passed X.25 Packet Layer frame.
  */
-int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
+int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+              int *d, int *m)
 {
-       unsigned char *frame;
-
-       frame = skb->data;
+       unsigned char *frame = skb->data;
 
        *ns = *nr = *q = *d = *m = 0;
 
@@ -323,12 +311,14 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, i
                }
        }
 
-       printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n", frame[0], frame[1], frame[2]);
+       printk(KERN_DEBUG "X.25: invalid PLP frame %02X %02X %02X\n",
+              frame[0], frame[1], frame[2]);
 
        return X25_ILLEGAL;
 }
 
-void x25_disconnect(struct sock *sk, int reason, unsigned char cause, unsigned char diagnostic)
+void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
+                   unsigned char diagnostic)
 {
        x25_clear_queues(sk);
        x25_stop_timer(sk);