if (!(pmregmisc & ACPI_PIIX4_PMIOSE))
return -ENODEV;
- pci_read_config_dword(dev, 0x40, &base);
- if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
- return -ENODEV;
-
- base &= PCI_BASE_ADDRESS_IO_MASK;
+ base = dev->resource[PCI_BRIDGE_RESOURCES].start & PCI_BASE_ADDRESS_IO_MASK;
if (!base)
return -ENODEV;
if (!(tmp & 0x80))
return -ENODEV;
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &tmp);
- tmp = (tmp & 0x10 ? 0x48 : 0x20);
-
- pci_read_config_dword(dev, tmp, &base);
- if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
- return -ENODEV;
-
+ base = pci_resource_start(dev, PCI_BRIDGE_RESOURCES);
+ if (!base) {
+ base = pci_resource_start(dev, PCI_BASE_ADDRESS_4);
+ if (!base)
+ return -ENODEV;
+ }
base &= PCI_BASE_ADDRESS_IO_MASK;
- if (!base)
- return -ENODEV;
pci_read_config_byte(dev, 0x42, &irq);
dev->irq = irq;
}
}
- pirq_table = NULL; /* Avoid automatic IRQ assignment */
}
#endif
/*
static const char *version = "pcnet32.c:v1.25kf 26.9.1999 tsbogend@alpha.franken.de\n";
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
}
+/*
+ * VIA ACPI: One IO region pointed to by longword at
+ * 0x48 or 0x20 (256 bytes of ACPI registers)
+ */
+static void __init quirk_via_acpi(struct pci_dev *dev)
+{
+ u8 rev;
+ u32 region;
+
+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+ if (rev & 0x10) {
+ /* rev >= 0x10 parts keep the ACPI region pointer at config offset 0x48 */
+ pci_read_config_dword(dev, 0x48, &region);
+ region &= PCI_BASE_ADDRESS_IO_MASK;
+ quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES);
+ }
+}
+
/*
* The main table of quirks.
*/
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma },
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci },
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci },
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi },
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi },
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4acpi },
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101 },
{ 0 }
* Copyright (C) 1991, 1992 Linus Torvalds
*
* super.c contains code to handle: - mount structures
- * - super-block tables.
+ * - super-block tables
+ * - filesystem drivers list
* - mount system call
* - umount system call
+ * - ustat system call
*
* Added options to /proc/mounts
* Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
struct vfsmount *mnt;
char *name;
- mnt = (struct vfsmount *)kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
+ mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
if (!mnt)
goto out;
memset(mnt, 0, sizeof(struct vfsmount));
/* N.B. Is it really OK to have a vfsmount without names? */
if (dev_name) {
- name = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+ name = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
if (name) {
strcpy(name, dev_name);
mnt->mnt_devname = name;
}
}
- if (dir_name) {
- name = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
- if (name) {
- strcpy(name, dir_name);
- mnt->mnt_dirname = name;
- }
+ name = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+ if (name) {
+ strcpy(name, dir_name);
+ mnt->mnt_dirname = name;
}
list_add(&mnt->mnt_instances, &sb->s_mounts);
char *new_devname = NULL, *new_dirname = NULL;
if (dev_name) {
- new_devname = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+ new_devname = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
if (new_devname)
strcpy(new_devname, dev_name);
}
if (dir_name) {
- new_dirname = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+ new_dirname = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
if (new_dirname)
strcpy(new_dirname, dir_name);
}
* initialize their spinlocks properly, tsk tsk.
* Remember to turn this off in 2.4. -ben
*/
-#define SPINLOCK_DEBUG 1
+#define SPINLOCK_DEBUG 0
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
extern void __add_page_to_hash_queue(struct page * page, struct page **p);
extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
+extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
{
reading a not yet uptodate block from disk.
NOTE: we just accounted the swap space reference for this
swap cache page at __get_swap_page() time. */
+ lock_page(page_map);
add_to_swap_cache(*outpage = page_map, swap_entry);
return OKAY;
}
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
+ task_lock(tsk);
sem_exit();
__exit_mm(tsk);
__exit_files(tsk);
__exit_fs(tsk);
__exit_sighand(tsk);
- task_lock(tsk);
exit_thread();
tsk->state = TASK_ZOMBIE;
tsk->exit_code = code;
struct page **page_hash_table;
struct list_head lru_cache;
-spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
/*
* NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
* the pagemap_lru_lock held.
return retval;
}
+/*
+ * Add a page to the inode page cache.
+ *
+ * The caller must have locked the page and
+ * set all the page flags correctly..
+ */
+void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
+{
+ /* enforce the stated contract: the page must already be locked */
+ if (!PageLocked(page))
+ BUG();
+
+ /* take a reference on behalf of the page cache before publishing it */
+ get_page(page);
+ spin_lock(&pagecache_lock);
+ page->index = index;
+ /* insert into the mapping's list, the page hash, and the LRU,
+ all atomically under pagecache_lock */
+ add_page_to_inode_queue(mapping, page);
+ __add_page_to_hash_queue(page, page_hash(mapping, index));
+ lru_cache_add(page);
+ spin_unlock(&pagecache_lock);
+}
+
/*
* This adds a page to the page cache, starting out as locked,
* owned by us, referenced, but not uptodate and with no errors.
struct page * page,
int wait)
{
- int result;
- struct dentry * dentry;
- struct inode * inode;
-
- dentry = file->f_dentry;
- inode = dentry->d_inode;
+ struct dentry * dentry = file->f_dentry;
+ struct inode * inode = dentry->d_inode;
/*
* If a task terminates while we're swapping the page, the vma and
* vma/file is guaranteed to exist in the unmap/sync cases because
* mmap_sem is held.
*/
- lock_page(page);
- result = inode->i_mapping->a_ops->writepage(file, dentry, page);
- UnlockPage(page);
- return result;
+ return inode->i_mapping->a_ops->writepage(file, dentry, page);
}
printk("weirdness: pgoff=%lu index=%lu address=%lu vm_start=%lu vm_pgoff=%lu\n",
pgoff, page->index, address, vma->vm_start, vma->vm_pgoff);
}
+ lock_page(page);
error = filemap_write_page(vma->vm_file, pgoff, page, 1);
+ UnlockPage(page);
page_cache_free(page);
return error;
}
unsigned long highmem_mapnr;
+/*
+ * Take one locked page, return another low-memory locked page.
+ */
struct page * prepare_highmem_swapout(struct page * page)
{
+ struct page *new_page;
unsigned long regular_page;
unsigned long vaddr;
/*
if (!PageHighMem(page))
return page;
+ /*
+ * Here we break the page lock, and we split the
+ * dirty page into two. We can unlock the old page,
+ * and we'll now have two of them. Too bad, it would
+ * have been nice to continue to potentially share
+ * across a fork().
+ */
+ UnlockPage(page);
regular_page = __get_free_page(GFP_ATOMIC);
if (!regular_page)
return NULL;
* we stored its data into the new regular_page.
*/
__free_page(page);
-
- return mem_map + MAP_NR(regular_page);
+ new_page = mem_map + MAP_NR(regular_page);
+ LockPage(new_page);
+ return new_page;
}
struct page * replace_with_highmem(struct page * page)
void add_to_swap_cache(struct page *page, swp_entry_t entry)
{
+ unsigned long flags;
+
#ifdef SWAP_CACHE_INFO
swap_cache_add_total++;
#endif
+ if (!PageLocked(page))
+ BUG();
if (PageTestandSetSwapCache(page))
BUG();
if (page->mapping)
BUG();
- add_to_page_cache(page, &swapper_space, entry.val);
+ flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty));
+ page->flags = flags | (1 << PG_referenced) | (1 << PG_uptodate);
+ add_to_page_cache_locked(page, &swapper_space, entry.val);
}
static inline void remove_from_swap_cache(struct page *page)
/*
* Add it to the swap cache and read its contents.
*/
+ lock_page(new_page);
add_to_swap_cache(new_page, entry);
rw_swap_page(READ, new_page, wait);
return new_page;
goto out_failed;
}
-#error Do not let this one slip through..
- if (PageLocked(page))
+ if (TryLockPage(page))
goto out_failed;
/*
swap_duplicate(entry);
set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
+ UnlockPage(page);
vma->vm_mm->rss--;
flush_tlb_page(vma, address);
__free_page(page);
* locks etc.
*/
if (!(gfp_mask & __GFP_IO))
- goto out_failed;
+ goto out_unlock;
/*
* Ok, it's really dirty. That means that
flush_tlb_page(vma, address);
vmlist_access_unlock(vma->vm_mm);
error = swapout(page, file);
+ UnlockPage(page);
if (file) fput(file);
if (!error)
goto out_free_success;
*/
entry = get_swap_page();
if (!entry.val)
- goto out_failed; /* No swap space left */
-
+ goto out_unlock; /* No swap space left */
+
if (!(page = prepare_highmem_swapout(page)))
goto out_swap_free;
swap_duplicate(entry); /* One for the process, one for the swap cache */
- /* This will also lock the page */
+ /* Add it to the swap cache */
add_to_swap_cache(page, entry);
+
/* Put the swap entry into the pte after the page is in swapcache */
vma->vm_mm->rss--;
set_pte(page_table, swp_entry_to_pte(entry));
swap_free(entry);
out_failed:
return 0;
-
+out_unlock:
+ UnlockPage(page);
+ return 0;
}
/*
pgdat = pgdat_list;
while (pgdat) {
for (i = 0; i < MAX_NR_ZONES; i++) {
- int count = SWAP_CLUSTER_MAX;
- zone = pgdat->node_zones + i;
- do {
- if (tsk->need_resched)
- schedule();
+ int count = SWAP_CLUSTER_MAX;
+ zone = pgdat->node_zones + i;
if ((!zone->size) || (!zone->zone_wake_kswapd))
continue;
- do_try_to_free_pages(GFP_KSWAPD, zone);
- } while (zone->free_pages < zone->pages_low &&
+ do {
+ if (tsk->need_resched)
+ schedule();
+ do_try_to_free_pages(GFP_KSWAPD, zone);
+ } while (zone->free_pages < zone->pages_low &&
--count);
}
pgdat = pgdat->node_next;