VERSION = 2
PATCHLEVEL = 1
-SUBLEVEL = 109
+SUBLEVEL = 110
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
return 0;
}
+/*
+ * Hand out an IDT vector for an IO-APIC irq, caching the result in
+ * irq_vector[] (via the IO_APIC_VECTOR macro) so a second call for
+ * the same irq returns the same vector.
+ *
+ * Vectors start at IRQ0_TRAP_VECTOR (0x51) and advance in steps of
+ * 8, so no assigned vector ever collides with 0x80 (the system call
+ * gate).  NOTE(review): once current_vector passes 0xFE the
+ * allocator restarts at IRQ0_TRAP_VECTOR+offset and may eventually
+ * re-issue a vector that is still in use -- the printk below flags
+ * exactly that situation.
+ */
+__initfunc(static int assign_irq_vector(int irq))
+{
+ static int current_vector = IRQ0_TRAP_VECTOR, offset = 0;
+ /* already assigned?  return the cached vector */
+ if (IO_APIC_VECTOR(irq) > 0)
+ return IO_APIC_VECTOR(irq);
+ current_vector += 8;
+ if (current_vector > 0xFE) {
+ /* vector space exhausted: start a new, shifted pass */
+ offset++;
+ current_vector = IRQ0_TRAP_VECTOR + offset;
+ printk("WARNING: ASSIGN_IRQ_VECTOR wrapped back to %02X\n",
+ current_vector);
+ }
+ IO_APIC_VECTOR(irq) = current_vector;
+ return current_vector;
+}
+
__initfunc(void setup_IO_APIC_irqs (void))
{
struct IO_APIC_route_entry entry;
if (!IO_APIC_IRQ(irq))
continue;
- entry.vector = IO_APIC_VECTOR(irq);
+ entry.vector = assign_irq_vector(irq);
bus = mp_irqs[idx].mpc_srcbus;
entry.mask = 0; /* unmask IRQ now */
entry.dest.logical.logical_dest = 0xff; /* all CPUs */
- entry.vector = IO_APIC_VECTOR(irq);
+ entry.vector = assign_irq_vector(irq);
entry.polarity=0;
entry.trigger=0;
entry.mask = 0; /* unmask IRQ now */
entry.dest.logical.logical_dest = 0x01; /* all CPUs */
- entry.vector = IO_APIC_VECTOR(pin); /* it's ignored */
+ entry.vector = 0; /* it's ignored */
entry.polarity=0;
entry.trigger=0;
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
for (i = 0; i < NR_IRQS ; i++) {
- if ((IO_APIC_VECTOR(i) <= 0xfe) /* HACK */ &&
- (IO_APIC_IRQ(i))) {
+ if (IO_APIC_IRQ(i)) {
if (IO_APIC_irq_trigger(i))
irq_desc[i].handler = &ioapic_level_irq_type;
else
* mptable:
*/
setup_IO_APIC_irqs ();
+ init_IRQ_SMP();
check_timer();
print_IO_APIC();
*
* (0x0000ffff for NR_IRQS==16, 0x00ffffff for NR_IRQS=24)
*/
-unsigned int cached_irq_mask = (1<<NR_IRQS)-1;
+#if NR_IRQS == 64
+unsigned long long cached_irq_mask = -1;
+#else
+unsigned long long cached_irq_mask = (((unsigned long long) 1)<<NR_IRQS)-1;
+#endif
#define cached_21 ((cached_irq_mask | io_apic_irqs) & 0xff)
#define cached_A1 (((cached_irq_mask | io_apic_irqs) >> 8) & 0xff)
* To get IO-APIC interrupts we turn some of them into IO-APIC
* interrupts during boot.
*/
-unsigned int io_apic_irqs = 0;
+unsigned long long io_apic_irqs = 0;
static void do_8259A_IRQ (unsigned int irq, int cpu, struct pt_regs * regs);
static void enable_8259A_irq (unsigned int irq);
irq_desc_t irq_desc[NR_IRQS] = {
[0 ... 15] = { 0, 0, 0, &i8259A_irq_type, }, /* default to standard ISA IRQs */
- [16 ... 23] = { 0, 0, 0, &no_irq_type, }, /* 'high' PCI IRQs filled in on demand */
+ [16 ... 63] = { 0, 0, 0, &no_irq_type, }, /* 'high' PCI IRQs filled in on demand */
};
+int irq_vector[NR_IRQS] = { IRQ0_TRAP_VECTOR , 0 };
+
/*
* These have to be protected by the irq controller spinlock
*/
BUILD_IRQ(23)
+BUILD_IRQ(24)
+BUILD_IRQ(25) BUILD_IRQ(26) BUILD_IRQ(27) BUILD_IRQ(28) BUILD_IRQ(29)
+BUILD_IRQ(30) BUILD_IRQ(31) BUILD_IRQ(32) BUILD_IRQ(33) BUILD_IRQ(34)
+BUILD_IRQ(35) BUILD_IRQ(36) BUILD_IRQ(37) BUILD_IRQ(38) BUILD_IRQ(39)
+BUILD_IRQ(40) BUILD_IRQ(41) BUILD_IRQ(42) BUILD_IRQ(43) BUILD_IRQ(44)
+BUILD_IRQ(45) BUILD_IRQ(46) BUILD_IRQ(47) BUILD_IRQ(48) BUILD_IRQ(49)
+BUILD_IRQ(50) BUILD_IRQ(51) BUILD_IRQ(52) BUILD_IRQ(53) BUILD_IRQ(54)
+BUILD_IRQ(55) BUILD_IRQ(56) BUILD_IRQ(57) BUILD_IRQ(58) BUILD_IRQ(59)
+BUILD_IRQ(60) BUILD_IRQ(61) BUILD_IRQ(62) BUILD_IRQ(63)
+
/*
* The following vectors are part of the Linux architecture, there
* is no hardware IRQ pin equivalent for them, they are triggered
IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
#ifdef __SMP__
,IRQ16_interrupt, IRQ17_interrupt, IRQ18_interrupt, IRQ19_interrupt,
- IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt
+ IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt,
+ IRQ24_interrupt, IRQ25_interrupt, IRQ26_interrupt, IRQ27_interrupt,
+ IRQ28_interrupt, IRQ29_interrupt,
+ IRQ30_interrupt, IRQ31_interrupt, IRQ32_interrupt, IRQ33_interrupt,
+ IRQ34_interrupt, IRQ35_interrupt, IRQ36_interrupt, IRQ37_interrupt,
+ IRQ38_interrupt, IRQ39_interrupt,
+ IRQ40_interrupt, IRQ41_interrupt, IRQ42_interrupt, IRQ43_interrupt,
+ IRQ44_interrupt, IRQ45_interrupt, IRQ46_interrupt, IRQ47_interrupt,
+ IRQ48_interrupt, IRQ49_interrupt,
+ IRQ50_interrupt, IRQ51_interrupt, IRQ52_interrupt, IRQ53_interrupt,
+ IRQ54_interrupt, IRQ55_interrupt, IRQ56_interrupt, IRQ57_interrupt,
+ IRQ58_interrupt, IRQ59_interrupt,
+ IRQ60_interrupt, IRQ61_interrupt, IRQ62_interrupt, IRQ63_interrupt
#endif
};
if (!shared) {
#ifdef __SMP__
if (IO_APIC_IRQ(irq)) {
- if (IO_APIC_VECTOR(irq) > 0xfe)
- /*
- * break visibly for now, FIXME
- */
- panic("ayiee, tell mingo");
-
/*
* First disable it in the 8259A:
*/
#ifdef __SMP__
- for (i = 0; i < NR_IRQS ; i++)
- if (IO_APIC_VECTOR(i) <= 0xfe) /* hack -- mingo */
- set_intr_gate(IO_APIC_VECTOR(i),interrupt[i]);
+ /*
+ IRQ0 must be given a fixed assignment and initialized
+ before init_IRQ_SMP.
+ */
+ set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);
/*
* The reschedule interrupt slowly changes it's functionality,
setup_x86_irq(13, &irq13);
}
+#ifdef __SMP__
+
+/*
+ * Install an IDT gate for every irq that assign_irq_vector() gave a
+ * vector to (irq_vector[i] > 0).  Called after setup_IO_APIC_irqs()
+ * has populated irq_vector[]; IRQ0's gate at IRQ0_TRAP_VECTOR is
+ * set up separately beforehand (see the init_IRQ hunk above).
+ */
+__initfunc(void init_IRQ_SMP(void))
+{
+ int i;
+ for (i = 0; i < NR_IRQS ; i++)
+ if (IO_APIC_VECTOR(i) > 0)
+ set_intr_gate(IO_APIC_VECTOR(i), interrupt[i]);
+}
+
+#endif
unsigned int unused[3];
} irq_desc_t;
+#define IRQ0_TRAP_VECTOR 0x51
+
extern irq_desc_t irq_desc[NR_IRQS];
+extern int irq_vector[NR_IRQS];
+extern void init_IRQ_SMP(void);
extern int handle_IRQ_event(unsigned int, struct pt_regs *);
/*
void init_pic_mode (void);
void print_IO_APIC (void);
-extern unsigned int io_apic_irqs;
-extern unsigned int cached_irq_mask;
+extern unsigned long long io_apic_irqs;
+extern unsigned long long cached_irq_mask;
-#define IO_APIC_VECTOR(irq) (0x51+((irq)<<3))
+#define IO_APIC_VECTOR(irq) irq_vector[irq]
#define MAX_IRQ_SOURCES 128
#define MAX_MP_BUSSES 32
memory_end = memory_alt_end;
}
#endif
+ if (memory_end > (1024-64)*1024*1024)
+ memory_end = (1024-64)*1024*1024;
memory_end &= PAGE_MASK;
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
NULL, "Pentium II (Deschutes)", NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL }},
{ X86_VENDOR_AMD, 4,
- { NULL, NULL, NULL, "DX/2", NULL, NULL, NULL, "DX/2-WB", "DX/4",
- "DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT", "Am5x86-WB" }},
+ { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
+ "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
+ "Am5x86-WB" }},
{ X86_VENDOR_AMD, 5,
{ "K5/SSA5 (PR75, PR90, PR100)", "K5 (PR120, PR133)",
"K5 (PR166)", "K5 (PR200)", NULL, NULL,
volatile unsigned long kstack_ptr; /* Stack vector for booting CPUs */
struct cpuinfo_x86 cpu_data[NR_CPUS]; /* Per CPU bogomips and other parameters */
static unsigned int num_processors = 1; /* Internal processor count */
-static unsigned long io_apic_addr = 0xFEC00000; /* Address of the I/O apic (not yet used) */
+unsigned long mp_ioapic_addr = 0xFEC00000; /* Address of the I/O apic (not yet used) */
unsigned char boot_cpu_id = 0; /* Processor that is doing the boot up */
static int smp_activated = 0; /* Tripped once we need to start cross invalidating */
int apic_version[NR_CPUS]; /* APIC version number */
printk("I/O APIC #%d Version %d at 0x%lX.\n",
m->mpc_apicid,m->mpc_apicver,
m->mpc_apicaddr);
- io_apic_addr = m->mpc_apicaddr;
+ mp_ioapic_addr = m->mpc_apicaddr;
}
mpt+=sizeof(*m);
count+=sizeof(*m);
#ifdef __SMP__
{
extern unsigned long mp_lapic_addr;
+ extern unsigned long mp_ioapic_addr;
pte_t pte;
unsigned long apic_area = (unsigned long)APIC_BASE;
*/
apic_area = 0xFEC00000; /*(unsigned long)IO_APIC_BASE;*/
pg_table = pte_offset((pmd_t *)pg_dir, apic_area);
- pte = mk_pte_phys(apic_area, PAGE_KERNEL);
+ pte = mk_pte_phys(mp_ioapic_addr, PAGE_KERNEL);
set_pte(pg_table, pte);
} else {
/*
}
}
if(do_insert)
- ntfs_memcpy(ino->attrs+i+1,ino->attrs+i,(ino->attr_count-i)*
+ ntfs_memmove(ino->attrs+i+1,ino->attrs+i,(ino->attr_count-i)*
sizeof(ntfs_attribute));
ino->attr_count++;
ino->attrs[i].type=type;
}
}
if(attr->d.r.len>cnum)
- ntfs_memcpy(attr->d.r.runlist+cnum+1,attr->d.r.runlist+cnum,
+ ntfs_memmove(attr->d.r.runlist+cnum+1,attr->d.r.runlist+cnum,
(attr->d.r.len-cnum)*sizeof(ntfs_runlist));
attr->d.r.runlist[cnum].cluster=cluster;
attr->d.r.runlist[cnum].len=len;
/* Inform the kernel that a device block is a NTFS cluster */
sb->s_blocksize=vol->clustersize;
- for(i=sb->s_blocksize,sb->s_blocksize_bits=0;i;i>>=1)
+ for(i=sb->s_blocksize,sb->s_blocksize_bits=0;i != 1;i>>=1)
sb->s_blocksize_bits++;
set_blocksize(sb->s_dev,sb->s_blocksize);
ntfs_debug(DEBUG_OTHER, "set_blocksize\n");
}
/*
- * perform the Cyrix 5/2 test (!0 means it's a Cyrix)
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
*/
static inline int test_cyrix_52div(void)
{
- int test;
-
- __asm__ __volatile__("xor %%eax,%%eax\n\t"
- "sahf\n\t"
- "movb $5,%%al\n\t"
- "movb $2,%%bl\n\t"
- "div %%bl\n\t"
- "lahf\n\t"
- "andl $0x200,%%eax": "=a" (test) : : "bx", "cc");
-
- return test;
+ unsigned int test;
+
+ /*
+ * Constraint "0" (5) preloads %eax with 0x0005 before the sahf
+ * (so AH=0 clears the flags); "q" (2) puts the divisor in a
+ * byte-addressable register for the 8-bit div.
+ */
+ __asm__ __volatile__(
+ "sahf\n\t" /* clear flags (%eax = 0x0005) */
+ "div %b2\n\t" /* divide 5 by 2 */
+ "lahf" /* store flags into %ah */
+ : "=a" (test)
+ : "0" (5), "q" (2)
+ : "cc");
+
+ /* AH is 0x02 on Cyrix after the divide.. */
+ return (unsigned char) (test >> 8) == 0x02;
}
/*
__initfunc(static void check_cyrix_cpu(void))
{
- if (boot_cpu_data.cpuid_level == -1 && boot_cpu_data.x86 == 4
+ if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
&& test_cyrix_52div()) {
- /* default to an unknown Cx486, (we will diferentiate later) */
+ /* default to an unknown Cx486, (we will differentiate later) */
/* NOTE: using 0xff since 0x00 is a valid DIR0 value */
strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
boot_cpu_data.x86_model = 0xff;
}
/*
- * Fix two problems with the Cyrix 686 and 686L:
+ * Fix two problems with the Cyrix 6x86 and 6x86L:
* -- the cpuid is disabled on power up, enable it, use it.
* -- the SLOP bit needs resetting on some motherboards due to old BIOS,
* so that the udelay loop calibration works well. Recalibrate.
__initfunc(static void check_cx686_cpuid_slop(void))
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX &&
- (boot_cpu_data.x86_model & 0xf0) == 0x30) { /* 686(L) */
+ (boot_cpu_data.x86_model & 0xf0) == 0x30) { /* 6x86(L) */
int dummy;
unsigned char ccr3, ccr5;
* Right now the APIC is only used for SMP, but this
* may change.
*/
-#define NR_IRQS 24
+#define NR_IRQS 64
static __inline__ int irq_cannonicalize(int irq)
{
extern int do_munmap(unsigned long, size_t);
/* filemap.c */
+extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern int shrink_mmap(int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);
}
}
+/*
+ * Remove a page from the page cache and free it: unlink it from the
+ * page-hash and per-inode queues, then drop the reference with
+ * __free_page().  Replaces the identical open-coded triple that
+ * previously appeared in both shrink_mmap() and page_unuse().
+ */
+void remove_inode_page(struct page *page)
+{
+ remove_page_from_hash_queue(page);
+ remove_page_from_inode_queue(page);
+ __free_page(page);
+}
+
+/*
+ * Check whether we can free this page.
+ *
+ * Returns 1 if the page was actually released (swap-cache,
+ * page-cache or buffer-cache page freed), 0 if it must be kept.
+ * Factored out of the old shrink_mmap() loop body; the logic is
+ * the same except that remove_inode_page() now replaces the
+ * open-coded unlink+free sequence.
+ */
+static inline int shrink_one_page(struct page *page, int gfp_mask)
+{
+ struct buffer_head *tmp, *bh;
+
+ /* locked pages and non-DMA pages (when DMA was asked for) stay */
+ if (PageLocked(page))
+ goto next;
+ if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
+ goto next;
+ /* First of all, regenerate the page's referenced bit
+ * from any buffers in the page
+ */
+ bh = page->buffers;
+ if (bh) {
+ tmp = bh;
+ do {
+ if (buffer_touched(tmp)) {
+ clear_bit(BH_Touched, &tmp->b_state);
+ set_bit(PG_referenced, &page->flags);
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+
+ /* Refuse to swap out all buffer pages */
+ if ((buffermem >> PAGE_SHIFT) * 100 < (buffer_mem.min_percent * num_physpages))
+ goto next;
+ }
+
+ /* We can't throw away shared pages, but we do mark
+ them as referenced. This relies on the fact that
+ no page is currently in both the page cache and the
+ buffer cache; we'd have to modify the following
+ test to allow for that case. */
+
+ switch (atomic_read(&page->count)) {
+ case 1:
+ /* is it a swap-cache or page-cache page? */
+ if (page->inode) {
+ if (test_and_clear_bit(PG_referenced, &page->flags)) {
+ touch_page(page);
+ break;
+ }
+ age_page(page);
+ if (page->age)
+ break;
+ if (page_cache_size * 100 < (page_cache.min_percent * num_physpages))
+ break;
+ if (PageSwapCache(page)) {
+ delete_from_swap_cache(page);
+ return 1;
+ }
+ remove_inode_page(page);
+ return 1;
+ }
+ /* It's not a cache page, so we don't do aging.
+ * If it has been referenced recently, don't free it */
+ if (test_and_clear_bit(PG_referenced, &page->flags))
+ break;
+
+ /* is it a buffer cache page? */
+ if ((gfp_mask & __GFP_IO) && bh && try_to_free_buffer(bh, &bh, 6))
+ return 1;
+ break;
+
+ default:
+ /* more than one user: we can't throw it away */
+ set_bit(PG_referenced, &page->flags);
+ /* fall through */
+ case 0:
+ /* nothing */
+ }
+next:
+ return 0;
+}
+
int shrink_mmap(int priority, int gfp_mask)
{
static unsigned long clock = 0;
- struct page * page;
unsigned long limit = num_physpages;
- struct buffer_head *tmp, *bh;
+ struct page * page;
int count_max, count_min;
count_max = (limit<<2) >> (priority>>1);
page = mem_map + clock;
do {
+ if (shrink_one_page(page, gfp_mask))
+ return 1;
count_max--;
if (page->inode || page->buffers)
count_min--;
-
- if (PageLocked(page))
- goto next;
- if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
- goto next;
- /* First of all, regenerate the page's referenced bit
- from any buffers in the page */
- bh = page->buffers;
- if (bh) {
- tmp = bh;
- do {
- if (buffer_touched(tmp)) {
- clear_bit(BH_Touched, &tmp->b_state);
- set_bit(PG_referenced, &page->flags);
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
-
- /* Refuse to swap out all buffer pages */
- if ((buffermem >> PAGE_SHIFT) * 100 < (buffer_mem.min_percent * num_physpages))
- goto next;
- }
-
- /* We can't throw away shared pages, but we do mark
- them as referenced. This relies on the fact that
- no page is currently in both the page cache and the
- buffer cache; we'd have to modify the following
- test to allow for that case. */
-
- switch (atomic_read(&page->count)) {
- case 1:
- /* is it a swap-cache or page-cache page? */
- if (page->inode) {
- if (test_and_clear_bit(PG_referenced, &page->flags)) {
- touch_page(page);
- break;
- }
- age_page(page);
- if (page->age || page_cache_size * 100 < (page_cache.min_percent * num_physpages))
- break;
- if (PageSwapCache(page)) {
- delete_from_swap_cache(page);
- return 1;
- }
- remove_page_from_hash_queue(page);
- remove_page_from_inode_queue(page);
- __free_page(page);
- return 1;
- }
- /* It's not a cache page, so we don't do aging.
- * If it has been referenced recently, don't free it */
- if (test_and_clear_bit(PG_referenced, &page->flags))
- break;
-
- /* is it a buffer cache page? */
- if ((gfp_mask & __GFP_IO) && bh && try_to_free_buffer(bh, &bh, 6))
- return 1;
- break;
-
- default:
- /* more than one users: we can't throw it away */
- set_bit(PG_referenced, &page->flags);
- /* fall through */
- case 0:
- /* nothing */
- }
-next:
page++;
clock++;
if (clock >= limit) {
return count;
if (PageSwapCache(page))
panic ("Doing a normal page_unuse of a swap cache page");
- remove_page_from_hash_queue(page);
- remove_page_from_inode_queue(page);
- __free_page(page);
+ remove_inode_page(page);
return 1;
}
return;
}
if (p->swap_map && !p->swap_map[offset]) {
- printk("Hmm.. Trying to %s unallocated swap (%08lx)\n",
- (rw == READ) ? "read" : "write",
- entry);
+ printk(KERN_ERR "rw_swap_page: "
+ "Trying to %s unallocated swap (%08lx)\n",
+ (rw == READ) ? "read" : "write", entry);
return;
}
if (!(p->flags & SWP_USED)) {
- printk("Trying to swap to unused swap-device\n");
+ printk(KERN_ERR "rw_swap_page: "
+ "Trying to swap to unused swap-device\n");
return;
}
if (!PageLocked(page)) {
- printk("VM: swap page is unlocked\n");
+ printk(KERN_ERR "VM: swap page is unlocked\n");
return;
}
* hashing for locked pages.
*/
if (!PageSwapCache(page)) {
- printk("VM: swap page is not in swap cache\n");
+ printk(KERN_ERR "VM: swap page is not in swap cache\n");
return;
}
if (page->offset != entry) {
- printk ("swap entry mismatch");
+ printk (KERN_ERR "VM: swap entry mismatch\n");
return;
}
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
} else
- printk("rw_swap_page: no swap file or device\n");
+ printk(KERN_ERR "rw_swap_page: no swap file or device\n");
+ /* This shouldn't happen, but check to be sure. */
+ if (atomic_read(&page->count) == 1)
+ printk(KERN_ERR "rw_swap_page: page unused while waiting!\n");
atomic_dec(&page->count);
if (offset && !test_and_clear_bit(offset,p->swap_lockmap))
- printk("rw_swap_page: lock already cleared\n");
+ printk(KERN_ERR "rw_swap_page: lock already cleared\n");
wake_up(&lock_queue);
#ifdef DEBUG_SWAP
printk ("DebugVM: %s_swap_page finished on page %p (count %d)\n",
add_wait_queue(&kswapd_wait, &wait);
while (1) {
int tries;
- int tried = 0;
current->state = TASK_INTERRUPTIBLE;
flush_signals(current);
/* Always wake the user up when an error occurred */
if (sock_wspace(sk) >= space || sk->err)
mask |= POLLOUT | POLLWRNORM;
- if (tp->urg_data)
+ if (tp->urg_data & URG_VALID)
mask |= POLLPRI;
}
return mask;