git.neil.brown.name Git - history.git/commitdiff
Import 2.3.99pre7-4 2.3.99pre7-4
authorLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:34:26 +0000 (15:34 -0500)
committerLinus Torvalds <torvalds@linuxfoundation.org>
Fri, 23 Nov 2007 20:34:26 +0000 (15:34 -0500)
13 files changed:
arch/i386/kernel/acpi.c
arch/i386/kernel/pci-irq.c
drivers/net/pcnet32.c
drivers/pci/quirks.c
fs/super.c
include/asm-i386/spinlock.h
include/linux/pagemap.h
ipc/shm.c
kernel/exit.c
mm/filemap.c
mm/highmem.c
mm/swap_state.c
mm/vmscan.c

index c505bd6c23e24f9cae4f833427009205ddd3e090..d9dabac2f417f3f3c89d33817476ee276b410db5 100644 (file)
@@ -700,11 +700,7 @@ static int __init acpi_init_piix4(struct pci_dev *dev)
        if (!(pmregmisc & ACPI_PIIX4_PMIOSE))
                return -ENODEV;
        
-       pci_read_config_dword(dev, 0x40, &base);
-       if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
-               return -ENODEV;
-       
-       base &= PCI_BASE_ADDRESS_IO_MASK;
+       base = dev->resource[PCI_BRIDGE_RESOURCES].start & PCI_BASE_ADDRESS_IO_MASK;
        if (!base)
                return -ENODEV;
 
@@ -757,16 +753,13 @@ static int __init acpi_init_via(struct pci_dev *dev)
        if (!(tmp & 0x80))
                return -ENODEV;
 
-       pci_read_config_byte(dev, PCI_CLASS_REVISION, &tmp);
-       tmp = (tmp & 0x10 ? 0x48 : 0x20);
-
-       pci_read_config_dword(dev, tmp, &base);
-       if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
-               return -ENODEV;
-
+       base = pci_resource_start(dev, PCI_BRIDGE_RESOURCES);
+       if (!base) {
+               base = pci_resource_start(dev, PCI_BASE_ADDRESS_4);
+               if (!base)
+                       return -ENODEV;
+       }
        base &= PCI_BASE_ADDRESS_IO_MASK;
-       if (!base)
-               return -ENODEV;
 
        pci_read_config_byte(dev, 0x42, &irq);
 
index fa3f70e4159cecf769435f4dd349044407c273dc..d678801b69e9b3b8544c73b813ab59c31ac82f05 100644 (file)
@@ -433,7 +433,6 @@ void __init pcibios_fixup_irqs(void)
                                        dev->irq = irq;
                                }
                        }
-                       pirq_table = NULL;      /* Avoid automatic IRQ assignment */
                }
 #endif
                /*
index 662135b6165e3de939392bf7076848dcfa56b10a..7b1f7644808afc03f0348b5887bec5b841df8c3b 100644 (file)
@@ -15,7 +15,6 @@
 
 static const char *version = "pcnet32.c:v1.25kf 26.9.1999 tsbogend@alpha.franken.de\n";
 
-#include <linux/config.h>
 #include <linux/module.h>
 
 #include <linux/kernel.h>
index 8029b19824f80349c32d8cf26d307d2c2af1f02c..3e14858376990522a5ee2b39aeff8f563d04c905 100644 (file)
@@ -163,6 +163,23 @@ static void __init quirk_piix4acpi(struct pci_dev *dev)
        quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
 }
 
+/*
+ * VIA ACPI: One IO region pointed to by longword at
+ *     0x48 or 0x20 (256 bytes of ACPI registers)
+ */
+static void __init quirk_via_acpi(struct pci_dev *dev)
+{
+       u8 rev;
+       u32 region;
+
+       pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+       if (rev & 0x10) {
+               pci_read_config_dword(dev, 0x48, &region);
+               region &= PCI_BASE_ADDRESS_IO_MASK;
+               quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES);
+       }
+}
+
 /*
  *  The main table of quirks.
  */
@@ -192,6 +209,8 @@ static struct pci_fixup pci_fixups[] __initdata = {
        { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82443BX_2,  quirk_natoma },
        { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_SI,       PCI_DEVICE_ID_SI_5597,          quirk_nopcipci },
        { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_SI,       PCI_DEVICE_ID_SI_496,           quirk_nopcipci },
+       { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_VIA,      PCI_DEVICE_ID_VIA_82C586_3,     quirk_via_acpi },
+       { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_VIA,      PCI_DEVICE_ID_VIA_82C686_4,     quirk_via_acpi },
        { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82371AB_3,  quirk_piix4acpi },
        { PCI_FIXUP_FINAL,      PCI_VENDOR_ID_AL,       PCI_DEVICE_ID_AL_M7101,         quirk_ali7101 },
        { 0 }
index a6670c47470bd5fcf08aeefaf7b67277c6fc6b44..939ca9f368886e4a6d015bef2a47a8105aa8e9f7 100644 (file)
@@ -4,9 +4,11 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  super.c contains code to handle: - mount structures
- *                                   - super-block tables.
+ *                                   - super-block tables
+ *                                   - filesystem drivers list
  *                                   - mount system call
  *                                   - umount system call
+ *                                   - ustat system call
  *
  *  Added options to /proc/mounts
  *  Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
@@ -288,7 +290,7 @@ static struct vfsmount *add_vfsmnt(struct super_block *sb,
        struct vfsmount *mnt;
        char *name;
 
-       mnt = (struct vfsmount *)kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
+       mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
        if (!mnt)
                goto out;
        memset(mnt, 0, sizeof(struct vfsmount));
@@ -302,18 +304,16 @@ static struct vfsmount *add_vfsmnt(struct super_block *sb,
 
        /* N.B. Is it really OK to have a vfsmount without names? */
        if (dev_name) {
-               name = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+               name = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
                if (name) {
                        strcpy(name, dev_name);
                        mnt->mnt_devname = name;
                }
        }
-       if (dir_name) {
-               name = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
-               if (name) {
-                       strcpy(name, dir_name);
-                       mnt->mnt_dirname = name;
-               }
+       name = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+       if (name) {
+               strcpy(name, dir_name);
+               mnt->mnt_dirname = name;
        }
 
        list_add(&mnt->mnt_instances, &sb->s_mounts);
@@ -336,12 +336,12 @@ static void move_vfsmnt(struct vfsmount *mnt,
        char *new_devname = NULL, *new_dirname = NULL;
 
        if (dev_name) {
-               new_devname = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+               new_devname = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
                if (new_devname)
                        strcpy(new_devname, dev_name);
        }
        if (dir_name) {
-               new_dirname = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+               new_dirname = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
                if (new_dirname)
                        strcpy(new_dirname, dir_name);
        }
index 234e7927506efb7f57d621b9a0691cb6889a2218..ff0524fceae8211bdacce0c30f012b6f9af91fe5 100644 (file)
@@ -12,7 +12,7 @@ extern int printk(const char * fmt, ...)
  * initialize their spinlocks properly, tsk tsk.
  * Remember to turn this off in 2.4. -ben
  */
-#define SPINLOCK_DEBUG 1
+#define SPINLOCK_DEBUG 0
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
index 813dd78ea25b0734d3b091a41fe07dac113f65f2..a88cbc9b89129a8ca183cc09eab35ca4ef6d53f0 100644 (file)
@@ -80,6 +80,7 @@ extern void lock_page(struct page *page);
 extern void __add_page_to_hash_queue(struct page * page, struct page **p);
 
 extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
+extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
 
 extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
 {
index 17ebedb828276f1bdf758bee6ec7af391d4f8633..9a31dfa148c6f44ed9d468f246e5bdeefcc119dd 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1455,6 +1455,7 @@ static int shm_swap_core(struct shmid_kernel *shp, unsigned long idx, swp_entry_
           reading a not yet uptodate block from disk.
           NOTE: we just accounted the swap space reference for this
           swap cache page at __get_swap_page() time. */
+       lock_page(page_map);
        add_to_swap_cache(*outpage = page_map, swap_entry);
        return OKAY;
 }
index d72741a7655e17f56741305a80f1812f234712a2..c7fd6814d55e70e823cfb694a936a1b94c647f91 100644 (file)
@@ -429,12 +429,12 @@ fake_volatile:
 #ifdef CONFIG_BSD_PROCESS_ACCT
        acct_process(code);
 #endif
+       task_lock(tsk);
        sem_exit();
        __exit_mm(tsk);
        __exit_files(tsk);
        __exit_fs(tsk);
        __exit_sighand(tsk);
-       task_lock(tsk);
        exit_thread();
        tsk->state = TASK_ZOMBIE;
        tsk->exit_code = code;
index 5d507d6654b0c9cd4c814fe17b91666ae8c86286..1d353bf60348a7a4cf098434b6ff67f3c6a41168 100644 (file)
@@ -46,7 +46,7 @@ unsigned int page_hash_bits;
 struct page **page_hash_table;
 struct list_head lru_cache;
 
-spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
 /*
  * NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
  *       the pagemap_lru_lock held.
@@ -485,6 +485,26 @@ int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsig
        return retval;
 }
 
+/*
+ * Add a page to the inode page cache.
+ *
+ * The caller must have locked the page and 
+ * set all the page flags correctly..
+ */
+void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
+{
+       if (!PageLocked(page))
+               BUG();
+
+       get_page(page);
+       spin_lock(&pagecache_lock);
+       page->index = index;
+       add_page_to_inode_queue(mapping, page);
+       __add_page_to_hash_queue(page, page_hash(mapping, index));
+       lru_cache_add(page);
+       spin_unlock(&pagecache_lock);
+}
+
 /*
  * This adds a page to the page cache, starting out as locked,
  * owned by us, referenced, but not uptodate and with no errors.
@@ -1514,12 +1534,8 @@ static int filemap_write_page(struct file *file,
                              struct page * page,
                              int wait)
 {
-       int result;
-       struct dentry * dentry;
-       struct inode * inode;
-
-       dentry = file->f_dentry;
-       inode = dentry->d_inode;
+       struct dentry * dentry = file->f_dentry;
+       struct inode * inode = dentry->d_inode;
 
        /*
         * If a task terminates while we're swapping the page, the vma and
@@ -1527,10 +1543,7 @@ static int filemap_write_page(struct file *file,
         * vma/file is guaranteed to exist in the unmap/sync cases because
         * mmap_sem is held.
         */
-       lock_page(page);
-       result = inode->i_mapping->a_ops->writepage(file, dentry, page);
-       UnlockPage(page);
-       return result;
+       return inode->i_mapping->a_ops->writepage(file, dentry, page);
 }
 
 
@@ -1588,7 +1601,9 @@ static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
                printk("weirdness: pgoff=%lu index=%lu address=%lu vm_start=%lu vm_pgoff=%lu\n",
                        pgoff, page->index, address, vma->vm_start, vma->vm_pgoff);
        }
+       lock_page(page);
        error = filemap_write_page(vma->vm_file, pgoff, page, 1);
+       UnlockPage(page);
        page_cache_free(page);
        return error;
 }
index 691e3df1f7f3bbd41221a6bb0361bb21147aac89..3e028dcedc39af98f2b23c650382c24f7c82d8f6 100644 (file)
 
 unsigned long highmem_mapnr;
 
+/*
+ * Take one locked page, return another low-memory locked page.
+ */
 struct page * prepare_highmem_swapout(struct page * page)
 {
+       struct page *new_page;
        unsigned long regular_page;
        unsigned long vaddr;
        /*
@@ -36,6 +40,14 @@ struct page * prepare_highmem_swapout(struct page * page)
        if (!PageHighMem(page))
                return page;
 
+       /*
+        * Here we break the page lock, and we split the
+        * dirty page into two. We can unlock the old page,
+        * and we'll now have two of them. Too bad, it would
+        * have been nice to continue to potentially share
+        * across a fork().
+        */
+       UnlockPage(page);
        regular_page = __get_free_page(GFP_ATOMIC);
        if (!regular_page)
                return NULL;
@@ -49,8 +61,9 @@ struct page * prepare_highmem_swapout(struct page * page)
         * we stored its data into the new regular_page.
         */
        __free_page(page);
-
-       return mem_map + MAP_NR(regular_page);
+       new_page = mem_map + MAP_NR(regular_page);
+       LockPage(new_page);
+       return new_page;
 }
 
 struct page * replace_with_highmem(struct page * page)
index 26dcf6feaaa0cf9c215f4a73ca330817efa1e5a8..ad686e4c386a1340c47a20d676303d361ca46e9a 100644 (file)
@@ -47,14 +47,20 @@ void show_swap_cache_info(void)
 
 void add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
+       unsigned long flags;
+
 #ifdef SWAP_CACHE_INFO
        swap_cache_add_total++;
 #endif
+       if (!PageLocked(page))
+               BUG();
        if (PageTestandSetSwapCache(page))
                BUG();
        if (page->mapping)
                BUG();
-       add_to_page_cache(page, &swapper_space, entry.val);
+       flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty));
+       page->flags = flags | (1 << PG_referenced) | (1 << PG_uptodate);
+       add_to_page_cache_locked(page, &swapper_space, entry.val);
 }
 
 static inline void remove_from_swap_cache(struct page *page)
@@ -225,6 +231,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
        /* 
         * Add it to the swap cache and read its contents.
         */
+       lock_page(new_page);
        add_to_swap_cache(new_page, entry);
        rw_swap_page(READ, new_page, wait);
        return new_page;
index 911b464ae234f637f657377b545ef1b7a5249cc9..0c2a983ae17659fcb84b39849d569048a4a3f7a2 100644 (file)
@@ -60,8 +60,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
                goto out_failed;
        }
 
-#error Do not let this one slip through..
-       if (PageLocked(page))
+       if (TryLockPage(page))
                goto out_failed;
 
        /*
@@ -77,6 +76,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
                swap_duplicate(entry);
                set_pte(page_table, swp_entry_to_pte(entry));
 drop_pte:
+               UnlockPage(page);
                vma->vm_mm->rss--;
                flush_tlb_page(vma, address);
                __free_page(page);
@@ -108,7 +108,7 @@ drop_pte:
         * locks etc.
         */
        if (!(gfp_mask & __GFP_IO))
-               goto out_failed;
+               goto out_unlock;
 
        /*
         * Ok, it's really dirty. That means that
@@ -139,6 +139,7 @@ drop_pte:
                flush_tlb_page(vma, address);
                vmlist_access_unlock(vma->vm_mm);
                error = swapout(page, file);
+               UnlockPage(page);
                if (file) fput(file);
                if (!error)
                        goto out_free_success;
@@ -154,15 +155,16 @@ drop_pte:
         */
        entry = get_swap_page();
        if (!entry.val)
-               goto out_failed; /* No swap space left */
-               
+               goto out_unlock; /* No swap space left */
+
        if (!(page = prepare_highmem_swapout(page)))
                goto out_swap_free;
 
        swap_duplicate(entry);  /* One for the process, one for the swap cache */
 
-       /* This will also lock the page */
+       /* Add it to the swap cache */
        add_to_swap_cache(page, entry);
+
        /* Put the swap entry into the pte after the page is in swapcache */
        vma->vm_mm->rss--;
        set_pte(page_table, swp_entry_to_pte(entry));
@@ -179,7 +181,9 @@ out_swap_free:
        swap_free(entry);
 out_failed:
        return 0;
-
+out_unlock:
+       UnlockPage(page);
+       return 0;
 }
 
 /*
@@ -528,15 +532,15 @@ int kswapd(void *unused)
                pgdat = pgdat_list;
                while (pgdat) {
                        for (i = 0; i < MAX_NR_ZONES; i++) {
-                           int count = SWAP_CLUSTER_MAX;
-                           zone = pgdat->node_zones + i;
-                           do {
-                               if (tsk->need_resched)
-                                       schedule();
+                               int count = SWAP_CLUSTER_MAX;
+                               zone = pgdat->node_zones + i;
                                if ((!zone->size) || (!zone->zone_wake_kswapd))
                                        continue;
-                               do_try_to_free_pages(GFP_KSWAPD, zone);
-                          } while (zone->free_pages < zone->pages_low &&
+                               do {
+                                       if (tsk->need_resched)
+                                               schedule();
+                                       do_try_to_free_pages(GFP_KSWAPD, zone);
+                               } while (zone->free_pages < zone->pages_low &&
                                           --count);
                        }
                        pgdat = pgdat->node_next;