#define ELF_PLAT_INIT(_r) ia64_elf32_init(_r)
#define setup_arg_pages(bprm) ia32_setup_arg_pages(bprm)
#define elf_map elf32_map
+
+#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
/* Ugly but avoids duplication */
ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
{
unsigned long bundle[2];
- unsigned long opcode, btype, qp, offset;
+ unsigned long opcode, btype, qp, offset, cpl;
unsigned long next_ip;
struct siginfo siginfo;
struct illegal_op_return rv;
* AR[PFS].pec = AR[EC]
* AR[PFS].ppl = PSR.cpl
*/
+ cpl = ia64_psr(regs)->cpl;
regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
- | (ar_ec << 52)
- | ((unsigned long) ia64_psr(regs)->cpl << 62));
+ | (ar_ec << 52) | (cpl << 62));
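For reference, the magic constants assembled above follow the AR[PFS] field layout; a short sketch of the packing (field names per the IA-64 architecture manual):

	/* AR[PFS] as built by the expression above:
	 *   bits  0..37  pfm -- previous frame marker, copied from CR[IFS]
	 *   bits 52..57  pec -- previous epilog count, from AR[EC]
	 *   bits 62..63  ppl -- previous privilege level, from PSR.cpl
	 */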
/*
* CFM.sof -= CFM.sol
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
+#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
data8 sys_fremovexattr
data8 sys_tkill
data8 sys_futex // 1230
- data8 ia64_ni_syscall
- data8 ia64_ni_syscall
+ data8 sys_sched_setaffinity
+ data8 sys_sched_getaffinity
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1235
EXPORT_SYMBOL(last_cli_ip);
#endif
-#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
EXPORT_SYMBOL(flush_tlb_range);
verify_guid (efi_guid_t *test, efi_guid_t *target)
{
int rc;
+#ifdef IA64_MCA_DEBUG_INFO
char out[40];
+#endif
if ((rc = efi_guidcmp(*test, *target))) {
IA64_MCA_DEBUG(KERN_DEBUG
/* Not needed, since we enable all devices at startup. */
- printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq,
- dev->slot_name);
-
+ printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
return 0;
}
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/perfmon.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/uaccess.h>
/*
* We use this if we don't have any better idle routine..
*/
-static void
+void
default_idle (void)
{
/* may want to do PAL_LIGHT_HALT here... */
{
struct task_struct *tsk;
- tsk = __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
+ tsk = (void *) __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
if (!tsk)
return NULL;
/*
* IA-64 semaphore implementation (derived from x86 version).
*
- * Copyright (C) 1999-2000 Hewlett-Packard Co
- * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
*/
#include <linux/sched.h>
+#include <asm/errno.h>
#include <asm/semaphore.h>
/*
extern char _end;
+#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
+#endif
+
struct cpuinfo_ia64 cpu_info __per_cpu_data;
unsigned long ia64_phys_stacked_size_p8;
void
cpu_init (void)
{
- extern char __per_cpu_start[], __phys_per_cpu_start[], __per_cpu_end[];
+ extern char __per_cpu_start[], __phys_per_cpu_start[];
extern void __init ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
+
+#ifdef CONFIG_SMP
+ extern char __per_cpu_end[];
int cpu = smp_processor_id();
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
-
+#else
+ my_cpu_data = __phys_per_cpu_start;
+#endif
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
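The arithmetic above is the whole per-CPU relocation scheme: each CPU gets a private copy of the per-CPU data section, and __per_cpu_offset[cpu] records the delta from the section's link-time start to that copy. A minimal sketch of how a per-CPU variable is then resolved, mirroring the per_cpu() definition this patch drops from processor.h in favor of <linux/percpu.h>:

	/* Sketch only: relocate var's link-time address into cpu's copy. */
	#define per_cpu_sketch(var, cpu) \
		(*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))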
/*
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (rgn_offset(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) /* no risk of overflow here... */
return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start)
return addr;
}
/* Check against unimplemented/unmapped addresses: */
- if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
+ if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
goto out;
/* Check against rlimit.. */
* or across a region boundary. Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
* (for some integer n <= 61) and len > 0.
*/
- roff = rgn_offset(addr);
+ roff = REGION_OFFSET(addr);
if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) {
addr = -EINVAL;
goto out;
if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
- if (rgn_index(address) != rgn_index(vma->vm_start)
- || rgn_offset(address) >= RGN_MAP_LIMIT)
+ if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+ || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
} else {
vma = prev_vma;
- if (rgn_index(address) != rgn_index(vma->vm_start)
- || rgn_offset(address) >= RGN_MAP_LIMIT)
+ if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
+ || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
goto bad_area;
if (expand_backing_store(vma, address))
goto bad_area;
static int pgt_cache_water[2] = { 25, 50 };
-int
+void
check_pgt_cache (void)
{
- int low, high, freed = 0;
+ int low, high;
low = pgt_cache_water[0];
high = pgt_cache_water[1];
if (pgtable_cache_size > high) {
do {
if (pgd_quicklist)
- free_page((unsigned long)pgd_alloc_one_fast(0)), ++freed;
+ free_page((unsigned long)pgd_alloc_one_fast(0));
if (pmd_quicklist)
- free_page((unsigned long)pmd_alloc_one_fast(0, 0)), ++freed;
+ free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > low);
}
- return freed;
}
/*
#include <linux/smp.h>
#include <linux/mm.h>
+#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
-#include <asm/delay.h>
+#include <asm/tlbflush.h>
#define SUPPORTED_PGBITS ( \
1 << _PAGE_SIZE_256M | \
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/mm.h>
-
#include <asm/bitops.h>
#include <asm/page.h>
extern void flush_icache_range (unsigned long start, unsigned long end);
-#define flush_icache_user_range(vma, page, user_addr, len) \
-do { \
- unsigned long _addr = page_address(page) + ((user_addr) & ~PAGE_MASK); \
- flush_icache_range(_addr, _addr + (len)); \
+#define flush_icache_user_range(vma, page, user_addr, len) \
+do { \
+ unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
+ flush_icache_range(_addr, _addr + (len)); \
} while (0)
#endif /* _ASM_IA64_CACHEFLUSH_H */
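(The added (unsigned long) cast matters because page_address() yields a pointer while flush_icache_range(), per its prototype above, takes the bounds as unsigned longs; without the cast the sub-page offset arithmetic in the macro doesn't type-check cleanly.)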
ide_default_irq (ide_ioreg_t base)
{
switch (base) {
- case 0x1f0: return isa_irq_to_vector(14);
- case 0x170: return isa_irq_to_vector(15);
- case 0x1e8: return isa_irq_to_vector(11);
- case 0x168: return isa_irq_to_vector(10);
- case 0x1e0: return isa_irq_to_vector(8);
- case 0x160: return isa_irq_to_vector(12);
- default:
- return 0;
+ case 0x1f0: return isa_irq_to_vector(14);
+ case 0x170: return isa_irq_to_vector(15);
+ case 0x1e8: return isa_irq_to_vector(11);
+ case 0x168: return isa_irq_to_vector(10);
+ case 0x1e0: return isa_irq_to_vector(8);
+ case 0x160: return isa_irq_to_vector(12);
+ default:
+ return 0;
}
}
ide_default_io_base (int index)
{
switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
- default:
- return 0;
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ case 4: return 0x1e0;
+ case 5: return 0x160;
+ default:
+ return 0;
}
}
free_page((unsigned long) pte);
}
-extern int do_check_pgt_cache (int, int);
+extern void check_pgt_cache (void);
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information. However, we use this macro to take care of any (delayed) i-cache flushing
+ * that may be necessary.
+ */
+static inline void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
+{
+ unsigned long addr;
+ struct page *page;
+
+ if (!pte_exec(pte))
+ return; /* not an executable page... */
+
+ page = pte_page(pte);
+ /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
+ addr = (unsigned long) page_address(page);
+
+ if (test_bit(PG_arch_1, &page->flags))
+ return; /* i-cache is already coherent with d-cache */
+
+ flush_icache_range(addr, addr + PAGE_SIZE);
+ set_bit(PG_arch_1, &page->flags); /* mark page as clean */
+}
#endif /* _ASM_IA64_PGALLOC_H */
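update_mmu_cache() above relies on PG_arch_1 being cleared again whenever a page's contents change behind the i-cache's back. The counterpart is flush_dcache_page(); a sketch of the expected arrangement (an assumption about cacheflush.h, not part of this hunk):

	/* Dirtying a page drops the "i-cache clean" mark so that the next
	 * executable mapping re-flushes it in update_mmu_cache(). */
	#define flush_dcache_page(page)	clear_bit(PG_arch_1, &(page)->flags)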
#include <linux/config.h>
-#include <asm/cacheflush.h>
#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
# ifndef __ASSEMBLY__
#include <asm/bitops.h>
+#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information. However, we use this macro to take care of any (delayed) i-cache flushing
- * that may be necessary.
- */
-static inline void
-update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
-{
- unsigned long addr;
- struct page *page;
-
- if (!pte_exec(pte))
- return; /* not an executable page... */
-
- page = pte_page(pte);
- /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
- addr = (unsigned long) page_address(page);
-
- if (test_bit(PG_arch_1, &page->flags))
- return; /* i-cache is already coherent with d-cache */
-
- flush_icache_range(addr, addr + PAGE_SIZE);
- set_bit(PG_arch_1, &page->flags); /* mark page as clean */
-}
-
#define SWP_TYPE(entry) (((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry) (((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
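The encoding keeps bit 0 (the present bit) free, stores the type in bits 1..8 and the offset in bits 9..62; the << 1 in SWP_OFFSET() discards bit 63 before shifting back down. A quick round-trip check of the arithmetic:

	swp_entry_t e = SWP_ENTRY(5, 1234);	/* e.val = (5 << 1) | (1234L << 9) */
	/* SWP_TYPE(e)   == ((e.val >> 1) & 0xff) == 5    */
	/* SWP_OFFSET(e) == ((e.val << 1) >> 10)  == 1234 */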
#include <linux/config.h>
-#include <linux/compiler.h>
+#include <linux/percpu.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
*/
#define IA64_USEC_PER_CYC_SHIFT 41
-#define __HAVE_ARCH_PER_CPU
-
-#define THIS_CPU(var) (var)
-
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
-#define this_cpu(var) (var)
-
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
#define TI_ADDR_LIMIT 0x10
#define TI_PRE_COUNT 0x18
+#define PREEMPT_ACTIVE 0x4000000
+
#ifndef __ASSEMBLY__
/*
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
-#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
- && ((segment).seg == KERNEL_DS.seg || rgn_offset((unsigned long) (addr)) < RGN_MAP_LIMIT))
+#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
+ && ((segment).seg == KERNEL_DS.seg \
+ || REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
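The new REGION_OFFSET() clause also rejects user addresses that fall into the virtually mapped linear page table at the top of a region. Callers are unchanged; a typical check keeps its usual shape (illustration only):

	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;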
static inline int
#define __NR_fremovexattr 1228
#define __NR_tkill 1229
#define __NR_futex 1230
+#define __NR_sched_setaffinity 1231
+#define __NR_sched_getaffinity 1232
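These numbers must stay in step with the syscall table hunk above: sys_futex occupies slot 1230, so sys_sched_setaffinity and sys_sched_getaffinity land at 1231 and 1232.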
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
extern void setup_serial_acpi(void *);
+#define ACPI_SIG_LEN 4
+
/* ACPI table signatures */
#define ACPI_SPCRT_SIGNATURE "SPCR"
#define ACPI_DBGPT_SIGNATURE "DBGP"