- Loadlin 1.6a
- Sh-utils 1.16 ; basename --v
- Autofs 3.1.1 ; automount --version
-- NFS 2.2beta40 ; showmount --version
+- NFS (client) 2.2beta40 ; showmount --version
+- nfs-utils (server) 0.1.4
- Bash 1.14.7 ; bash -version
- Ncpfs 2.2.0 ; ncpmount -v
- Pcmcia-cs 3.1.2 ; cardmgr -V
ftp://ftp.mathematik.th-darmstadt.de/pub/linux/okir/dontuse/nfs-server-2.2beta40.tar.gz
ftp://linux.nrao.edu/mirrors/fb0429.mathematik.th-darmstadt.de/pub/linux/okir/dontuse/nfs-server-2.2beta40.tar.gz
-The kernel-level 12/04/98 release:
-ftp://ftp.yggdrasil.com/private/hjl/knfsd-981204.tar.gz
-ftp://ftp.kernel.org/pub/linux/devel/gcc/knfsd-981204.tar.gz
+The kernel-level nfs-utils-0.1.4 release:
+ftp://nfs.sourceforge.net/pub/nfs/nfs-utils-0.1.4.tar.gz
Net-tools
=========
Most people say N here.
-NFS server support (EXPERIMENTAL)
+NFS server support
CONFIG_NFSD
- If you want your Linux box to act as a NFS *server*, so that other
+ If you want your Linux box to act as an NFS *server*, so that other
computers on your local network which support NFS can access certain
directories on your box transparently, you have two options: you can
use the self-contained user space program nfsd, in which case you
- should say N here, or you can say Y and use this new experimental
- kernel based NFS server. The advantage of the kernel based solution
- is that it is faster; it might not be completely stable yet, though.
+ should say N here, or you can say Y and use the kernel based NFS
+ server. The advantage of the kernel based solution is that it is
+ faster.
In either case, you will need support software; the respective
locations are given in the file Documentation/Changes in the NFS
section.
Please read the NFS-HOWTO, available from
- http://metalab.unc.edu/mdw/linux.html#howto .
+ http://www.linuxdoc.org/HOWTO/NFS-HOWTO.html .
+
The NFS server is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
* - (0x48) enable all memory requests from ISA to be channeled to PCI
* - (0x42) disable ping-pong (as per errata)
* - (0x40) enable PCI packet retry
+ * - (0x44) Route INTA to IRQ11
* - (0x83) don't use CPU park enable, park on last master, disable GAT bit
* - (0x80) default rotating priorities
* - (0x81) rotate bank 4
pci_write_config_byte(dev, 0x48, 0xff);
pci_write_config_byte(dev, 0x42, 0x00);
pci_write_config_byte(dev, 0x40, 0x22);
+ pci_write_config_word(dev, 0x44, 0xb000);
pci_write_config_byte(dev, 0x83, 0x02);
pci_write_config_byte(dev, 0x80, 0xe0);
pci_write_config_byte(dev, 0x81, 0x01);
void __init dc21285_init(void)
{
+ static struct resource csrmem, csrio;
unsigned int mem_size;
unsigned long cntl;
*CSR_PCIADDR_EXTN = 0;
#ifdef CONFIG_HOST_FOOTBRIDGE
+
+ csrio.flags = IORESOURCE_IO;
+ csrmem.flags = IORESOURCE_MEM;
+
+ allocate_resource(&ioport_resource, &csrio, 128,
+ 0xff00, 0xffff, 128, NULL, NULL);
+ allocate_resource(&iomem_resource, &csrmem, 128,
+ 0xf4000000, 0xf8000000, 128, NULL, NULL);
+
/*
* Map our SDRAM at a known address in PCI space, just in case
* the firmware had other ideas. Using a nonzero base is
* in the range 0x000a0000 to 0x000c0000. (eg, S3 cards).
*/
*CSR_PCICACHELINESIZE = 0x00002008;
- *CSR_PCICSRBASE = 0;
- *CSR_PCICSRIOBASE = 0;
+ *CSR_PCICSRBASE = csrmem.start;
+ *CSR_PCICSRIOBASE = csrio.start;
*CSR_PCISDRAMBASE = virt_to_bus((void *)PAGE_OFFSET);
*CSR_PCIROMBASE = 0;
*CSR_PCICMD = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
*/
if (machine_is_netwinder()) {
unsigned long flags;
+ extern int isapnp_disable;
wb977_init();
cpld_init();
spin_lock_irqsave(&gpio_lock, flags);
gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
spin_unlock_irqrestore(&gpio_lock, flags);
+
+#ifdef CONFIG_ISAPNP
+ /*
+	 * We must not use the kernel's ISAPnP code
+ * on the NetWinder - it will reset the settings
+ * for the WaveArtist chip and render it inoperable.
+ */
+ isapnp_disable = 1;
+#endif
}
#endif
#ifdef CONFIG_CATS
#include <asm/pgtable.h>
#include <asm/uaccess.h>
-unsigned long
-resource_fixup(struct pci_dev * dev, struct resource * res,
- unsigned long start, unsigned long size)
-{
- return start;
-}
-
#ifdef CONFIG_CPU_32
asmlinkage int sys_iopl(unsigned long turn_on)
{
#endif
}
+#define O_PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define P_PFN_DOWN(x) O_PFN_DOWN((x) - PHYS_OFFSET)
+#define V_PFN_DOWN(x) O_PFN_DOWN(__pa(x))
+
+#define O_PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT)
+#define P_PFN_UP(x) O_PFN_UP((x) - PHYS_OFFSET)
+#define V_PFN_UP(x) O_PFN_UP(__pa(x))
+
+#define PFN_SIZE(x) ((x) >> PAGE_SHIFT)
+#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
+ (((unsigned long)(s)) & PAGE_MASK))
+
+#define free_bootmem(s,sz) free_bootmem((s)<<PAGE_SHIFT, (sz)<<PAGE_SHIFT)
+#define reserve_bootmem(s,sz) reserve_bootmem((s)<<PAGE_SHIFT, (sz)<<PAGE_SHIFT)
+
+static unsigned int __init find_bootmap_pfn(unsigned int bootmap_pages)
+{
+ unsigned int start_pfn, bank, bootmap_pfn;
+
+ start_pfn = V_PFN_UP(&_end);
+ bootmap_pfn = 0;
+
+ /*
+ * FIXME: We really want to avoid allocating the bootmap
+ * over the top of the initrd.
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ if (__pa(initrd_end) > (meminfo.end + PHYS_OFFSET)) {
+ printk ("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx) - disabling initrd\n",
+ __pa(initrd_end), meminfo.end + PHYS_OFFSET);
+ initrd_start = 0;
+ initrd_end = 0;
+ }
+ }
+#endif
+
+ for (bank = 0; bank < meminfo.nr_banks; bank ++) {
+ unsigned int start, end;
+
+ if (meminfo.bank[bank].size == 0)
+ continue;
+
+ start = O_PFN_UP(meminfo.bank[bank].start);
+ end = O_PFN_DOWN(meminfo.bank[bank].size +
+ meminfo.bank[bank].start);
+
+ if (end < start_pfn)
+ continue;
+
+ if (start < start_pfn)
+ start = start_pfn;
+
+ if (end <= start)
+ continue;
+
+ if (end - start >= bootmap_pages) {
+ bootmap_pfn = start;
+ break;
+ }
+ }
+
+ if (bootmap_pfn == 0)
+ BUG();
+
+ return bootmap_pfn;
+}
+
/*
- * Work out our memory regions. Note that "pfn" is the physical page number
- * relative to the first physical page, not the physical address itself.
+ * Initialise the bootmem allocator.
*/
static void __init setup_bootmem(void)
{
- unsigned int end_pfn, bootmem_end;
- int bank;
+ unsigned int end_pfn, start_pfn, bootmap_pages, bootmap_pfn;
+ unsigned int i;
/*
- * Calculate the end of memory.
+ * Calculate the physical address of the top of memory.
*/
- for (bank = 0; bank < meminfo.nr_banks; bank++) {
- if (meminfo.bank[bank].size) {
- unsigned long end;
+ meminfo.end = 0;
+ for (i = 0; i < meminfo.nr_banks; i++) {
+ unsigned long end;
- end = meminfo.bank[bank].start +
- meminfo.bank[bank].size;
+ if (meminfo.bank[i].size != 0) {
+ end = meminfo.bank[i].start + meminfo.bank[i].size;
if (meminfo.end < end)
meminfo.end = end;
}
}
- bootmem_end = __pa(PAGE_ALIGN((unsigned long)&_end));
- end_pfn = meminfo.end >> PAGE_SHIFT;
+ start_pfn = O_PFN_UP(PHYS_OFFSET);
+ end_pfn = O_PFN_DOWN(meminfo.end);
+ bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+ bootmap_pfn = find_bootmap_pfn(bootmap_pages);
/*
* Initialise the boot-time allocator
*/
- bootmem_end += init_bootmem(bootmem_end >> PAGE_SHIFT, end_pfn, PHYS_OFFSET);
+ init_bootmem_start(bootmap_pfn, start_pfn, end_pfn);
/*
* Register all available RAM with the bootmem allocator.
- * The address is relative to the start of physical memory.
*/
- for (bank = 0; bank < meminfo.nr_banks; bank ++)
- free_bootmem(meminfo.bank[bank].start, meminfo.bank[bank].size);
+ for (i = 0; i < meminfo.nr_banks; i++)
+ if (meminfo.bank[i].size)
+ free_bootmem(O_PFN_UP(meminfo.bank[i].start),
+ PFN_SIZE(meminfo.bank[i].size));
/*
- * reserve the following regions:
- * physical page 0 - it contains the exception vectors
- * kernel and the bootmem structure
- * swapper page directory (if any)
- * initrd (if any)
+ * Register the reserved regions with bootmem
*/
- reserve_bootmem(0, PAGE_SIZE);
+ reserve_bootmem(bootmap_pfn, bootmap_pages);
+ reserve_bootmem(V_PFN_DOWN(&_stext), PFN_RANGE(&_stext, &_end));
+
#ifdef CONFIG_CPU_32
- reserve_bootmem(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(void *));
+ /*
+ * Reserve the page tables. These are already in use.
+ */
+ reserve_bootmem(V_PFN_DOWN(swapper_pg_dir),
+ PFN_SIZE(PTRS_PER_PGD * sizeof(void *)));
#endif
- reserve_bootmem(__pa(&_stext), bootmem_end - __pa(&_stext));
#ifdef CONFIG_BLK_DEV_INITRD
- if (__pa(initrd_end) > (end_pfn << PAGE_SHIFT)) {
- printk ("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08x) - disabling initrd\n",
- __pa(initrd_end), end_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
-
if (initrd_start)
- reserve_bootmem(__pa(initrd_start),
- initrd_end - initrd_start);
+ reserve_bootmem(O_PFN_DOWN(initrd_start),
+ PFN_RANGE(initrd_start, initrd_end));
#endif
}
virt_start = __phys_to_virt(meminfo.bank[i].start);
virt_end = virt_start + meminfo.bank[i].size - 1;
- res = alloc_bootmem(sizeof(*res));
+ res = alloc_bootmem_low(sizeof(*res));
res->name = "System RAM";
res->start = __virt_to_bus(virt_start);
res->end = __virt_to_bus(virt_end);
}
for (i = 0; i < 4; i++) {
- meminfo.bank[i].start = i << 26;
+ meminfo.bank[i].start = PHYS_OFFSET + (i << 26);
meminfo.bank[i].size =
params->u1.s.pages_in_bank[i] *
params->u1.s.page_size;
if (meminfo.nr_banks == 0) {
meminfo.nr_banks = 1;
- meminfo.bank[0].start = 0;
+ meminfo.bank[0].start = PHYS_OFFSET;
if (params)
meminfo.bank[0].size = params->u1.s.nr_pages << PAGE_SHIFT;
else
printk ("pc not in code space\n");
}
-spinlock_t die_lock;
+spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
/*
* This function is protected against re-entrancy.
initpages = &__init_end - &__init_begin;
max_mapnr = max_low_pfn;
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+ high_memory = (void *)__va(PHYS_OFFSET + max_low_pfn * PAGE_SIZE);
/*
* We may have non-contiguous memory. Setup the PageSkip stuff,
printk(" %ldMB", meminfo.bank[i].size >> 20);
}
- printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk("Memory: %luKB available (%dK code, %dK data, %dK init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
codepages >> 10, datapages >> 10, initpages >> 10);
- if (PAGE_SIZE >= 16384 && max_mapnr <= 128) {
+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
extern int sysctl_overcommit_memory;
/*
* On a machine this small we won't get
cr_alignment &= ~4;
cr_no_alignment &= ~4;
set_cr(cr_alignment);
+ return 1;
}
static int __init nowrite_setup(char *__unused)
cr_alignment &= ~(8|4);
cr_no_alignment &= ~(8|4);
set_cr(cr_alignment);
+ return 1;
}
__setup("nocache", nocache_setup);
if (meminfo.bank[i].size) {
unsigned int end;
- end = (meminfo.bank[i].start +
+ end = (meminfo.bank[i].start - PHYS_OFFSET +
meminfo.bank[i].size) >> PAGE_SHIFT;
if (end > zone_size[0])
zone_size[0] = end;
/*
* Setup the above mappings
*/
- init_map[0].physical = PHYS_OFFSET;
+ init_map[0].physical = virt_to_phys(alloc_bootmem_low_pages(PAGE_SIZE));
init_map[5].physical = FLUSH_BASE_PHYS;
init_map[5].virtual = FLUSH_BASE;
#ifdef FLUSH_BASE_MINICACHE
#endif
for (i = 0; i < meminfo.nr_banks; i++) {
- init_map[i+1].physical = PHYS_OFFSET + meminfo.bank[i].start;
- init_map[i+1].virtual = PAGE_OFFSET + meminfo.bank[i].start;
+ init_map[i+1].physical = meminfo.bank[i].start;
+ init_map[i+1].virtual = meminfo.bank[i].start +
+ PAGE_OFFSET - PHYS_OFFSET;
init_map[i+1].length = meminfo.bank[i].size;
}
struct page *pg = NULL;
unsigned int i;
+#define PFN(x) (((x) - PHYS_OFFSET) >> PAGE_SHIFT)
+
for (i = 0; i < meminfo.nr_banks; i++) {
if (meminfo.bank[i].size == 0)
continue;
- start_pfn = meminfo.bank[i].start >> PAGE_SHIFT;
+ start_pfn = PFN(meminfo.bank[i].start);
/*
* subtle here - if we have a full bank, then
set_bit(PG_skip, &pg->flags);
pg->next_hash = mem_map + start_pfn;
- start_pfn = PAGE_ALIGN(__pa(pg + 1));
- end_pfn = __pa(pg->next_hash) & PAGE_MASK;
+ start_pfn = PFN(PAGE_ALIGN(__pa(pg + 1)));
+ end_pfn = PFN(__pa(pg->next_hash) & PAGE_MASK);
if (end_pfn != start_pfn)
free_bootmem(start_pfn, end_pfn - start_pfn);
pg = NULL;
}
- end_pfn = (meminfo.bank[i].start +
- meminfo.bank[i].size) >> PAGE_SHIFT;
+ end_pfn = PFN(meminfo.bank[i].start + meminfo.bank[i].size);
if (end_pfn != meminfo.end >> PAGE_SHIFT)
pg = mem_map + end_pfn;
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
+CONFIG_SCSI_DEBUG_QUEUES=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
*c = boot_cpu_data;
c->pte_quick = 0;
c->pmd_quick = 0;
+ c->pgd_quick = 0;
c->pgtable_cache_sz = 0;
identify_cpu(c);
/*
alert_counter[cpu]++;
if (alert_counter[cpu] == 5*HZ) {
spin_lock(&nmi_print_lock);
- spin_unlock(&console_lock); // we are in trouble anyway
+ console_lock.lock = 0; // we are in trouble anyway
printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
show_registers(regs);
printk("console shuts up ...\n");
#include <asm/e820.h>
unsigned long highstart_pfn, highend_pfn;
-unsigned long *pgd_quicklist = (unsigned long *)0;
static unsigned long totalram_pages = 0;
static unsigned long totalhigh_pages = 0;
int freed = 0;
if(pgtable_cache_size > high) {
do {
- if(pgd_quicklist) {
- mmlist_modify_lock();
- free_pgd_slow(get_pgd_fast());
- mmlist_modify_unlock();
- freed++;
- }
+ if(pgd_quicklist)
+ free_pgd_slow(get_pgd_fast()), freed++;
if(pmd_quicklist)
free_pmd_slow(get_pmd_fast()), freed++;
if(pte_quicklist)
# parent makes..
#
-L_TARGET := acorn-char.a
+O_TARGET := acorn-char.o
M_OBJS :=
-L_OBJS :=
+O_OBJS :=
-L_OBJS_arc := keyb_arc.o
-L_OBJS_a5k := keyb_arc.o
-L_OBJS_rpc := keyb_ps2.o
+O_OBJS_arc := keyb_arc.o
+O_OBJS_a5k := keyb_arc.o
+O_OBJS_rpc := keyb_ps2.o
ifeq ($(MACHINE),rpc)
- ifeq ($(CONFIG_MOUSE),y)
- LX_OBJS += mouse_rpc.o
+ ifeq ($(CONFIG_BUSMOUSE),y)
+ OX_OBJS += mouse_rpc.o
else
- ifeq ($(CONFIG_MOUSE),m)
+ ifeq ($(CONFIG_BUSMOUSE),m)
MX_OBJS += mouse_rpc.o
endif
endif
endif
ifeq ($(CONFIG_ATOMWIDE_SERIAL),y)
- L_OBJS += serial-atomwide.o
+ O_OBJS += serial-atomwide.o
else
ifeq ($(CONFIG_ATOMWIDE_SERIAL),m)
- M_OBJS += serial-atomwide.o
+ O_OBJS += serial-atomwide.o
endif
endif
ifeq ($(CONFIG_DUALSP_SERIAL),y)
- L_OBJS += serial-dualsp.o
+ O_OBJS += serial-dualsp.o
else
ifeq ($(CONFIG_DUALSP_SERIAL),m)
M_OBJS += serial-dualsp.o
endif
endif
-L_OBJS += $(L_OBJS_$(MACHINE))
+O_OBJS += $(O_OBJS_$(MACHINE))
include $(TOPDIR)/Rules.make
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/irq.h>
6, "arcmouse", NULL, NULL, 7
};
-int
-mouse_rpc_init(void)
+static int __init mouse_rpc_init(void)
{
mousedev = register_busmouse(&rpcmouse);
return mousedev >= 0 ? 0 : -ENODEV;
}
-#ifdef MODULE
-int
-init_module(void)
-{
- return mouse_rpc_init();
-}
-
-int
-cleanup_module(void)
+static void __exit mouse_rpc_exit(void)
{
if (mousedev >= 0) {
unregister_busmouse(mousedev);
free_irq(IRQ_VSYNCPULSE, &mousedev);
}
}
-#endif
+
+module_init(mouse_rpc_init);
+module_exit(mouse_rpc_exit);
printk("%s%02X", i & 31 ? " " : "\n ", message[i]);
printk("\n");
-reject_message:
/*
* Something strange seems to be happening here -
* I can't use SETATN since the chip gives me an
case READ_CAPACITY:
case TEST_UNIT_READY:
case MODE_SENSE:
+ case REQUEST_SENSE:
break;
default:
host->dma_channel = ecs[count]->dma;
info = (PowerTecScsi_Info *)host->hostdata;
+ if (host->dma_channel != NO_DMA)
+ set_dma_speed(host->dma_channel, 180);
+
info->control.term_port = host->io_port + POWERTEC_TERM_CONTROL;
info->control.terms = term[count] ? POWERTEC_TERM_ENABLE : 0;
powertecscsi_terminator_ctl(host, info->control.terms);
info->info.ifcfg.select_timeout = 255;
info->info.ifcfg.asyncperiod = POWERTEC_ASYNC_PERIOD;
info->info.ifcfg.sync_max_depth = POWERTEC_SYNC_DEPTH;
- info->info.ifcfg.cntl3 = /*CNTL3_BS8 |*/ CNTL3_FASTSCSI | CNTL3_FASTCLK;
+ info->info.ifcfg.cntl3 = CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK;
info->info.ifcfg.disconnect_ok = 1;
info->info.ifcfg.wide_max_size = 0;
info->info.dma.setup = powertecscsi_dma_setup;
/*
* SG-DMA support.
*
- * Similar to the BM-DMA, but we use the RiscPCs IOMD
- * DMA controllers. There is only one DMA controller
- * per card, which means that only one drive can be
- * accessed at one time. NOTE! We do not inforce that
- * here, but we rely on the main IDE driver spotting
- * that both interfaces use the same IRQ, which should
- * guarantee this.
- *
- * We are limited by the drives IOR/IOW pulse time.
- * The closest that we can get to the requirements is
- * a type C cycle for both mode 1 and mode 2. However,
- * this does give a burst of 8MB/s.
- *
- * This has been tested with a couple of Conner
- * Peripherals 1080MB CFS1081A drives, one on each
- * interface, which deliver about 2MB/s each. I
- * believe that this is limited by the lack of
- * on-board drive cache.
+ * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
+ * There is only one DMA controller per card, which means that only
+ * one drive can be accessed at one time. NOTE! We do not enforce that
+ * here, but we rely on the main IDE driver spotting that both
+ * interfaces use the same IRQ, which should guarantee this.
*/
#define TABLE_SIZE 2048
}
static int
-icside_config_drive(ide_drive_t *drive, int mode)
+icside_config_if(ide_drive_t *drive, int xfer_mode)
{
- int speed, err;
-
- if (mode == 2) {
- speed = XFER_MW_DMA_2;
+ int func = ide_dma_off;
+
+ switch (xfer_mode) {
+ case XFER_MW_DMA_2:
+ /*
+ * The cycle time is limited to 250ns by the r/w
+ * pulse width (90ns), however we should still
+ * have a maximum burst transfer rate of 8MB/s.
+ */
drive->drive_data = 250;
- } else {
- speed = XFER_MW_DMA_1;
+ break;
+
+ case XFER_MW_DMA_1:
drive->drive_data = 250;
- }
+ break;
- err = ide_config_drive_speed(drive, (byte) speed);
+ case XFER_MW_DMA_0:
+ drive->drive_data = 480;
+ break;
- if (err == 0) {
- drive->id->dma_mword &= 0x00ff;
- drive->id->dma_mword |= 256 << mode;
- } else
+ default:
drive->drive_data = 0;
+ break;
+ }
+
+ if (drive->drive_data &&
+ ide_config_drive_speed(drive, (byte) xfer_mode) == 0)
+ func = ide_dma_on;
+ else
+ drive->drive_data = 480;
+
+ printk("%s: %s selected (peak %dMB/s)\n", drive->name,
+ ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
- return err;
+ return func;
}
static int
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = HWIF(drive);
int autodma = hwif->autodma;
+ int xfer_mode = XFER_PIO_2;
+ int func = ide_dma_off_quietly;
- if (id && (id->capability & 1) && autodma) {
- int dma_mode = 0;
+ if (!id || !(id->capability & 1) || !autodma)
+ goto out;
- /* Consult the list of known "bad" drives */
- if (ide_dmaproc(ide_dma_bad_drive, drive))
- return hwif->dmaproc(ide_dma_off, drive);
-
- /* Enable DMA on any drive that has
- * UltraDMA (mode 0/1/2) enabled
- */
- if (id->field_valid & 4 && id->dma_ultra & 7)
- dma_mode = 2;
-
- /* Enable DMA on any drive that has mode1
- * or mode2 multiword DMA enabled
- */
- if (id->field_valid & 2 && id->dma_mword & 6)
- dma_mode = id->dma_mword & 4 ? 2 : 1;
+ /*
+ * Consult the list of known "bad" drives
+ */
+ if (ide_dmaproc(ide_dma_bad_drive, drive)) {
+ func = ide_dma_off;
+ goto out;
+ }
- /* Consult the list of known "good" drives */
- if (ide_dmaproc(ide_dma_good_drive, drive))
- dma_mode = 1;
+ /*
+ * Enable DMA on any drive that has multiword DMA
+ */
+ if (id->field_valid & 2) {
+ if (id->dma_mword & 4) {
+ xfer_mode = XFER_MW_DMA_2;
+ func = ide_dma_on;
+ } else if (id->dma_mword & 2) {
+ xfer_mode = XFER_MW_DMA_1;
+ func = ide_dma_on;
+ } else if (id->dma_mword & 1) {
+ xfer_mode = XFER_MW_DMA_0;
+ func = ide_dma_on;
+ }
+ goto out;
+ }
- if (dma_mode && icside_config_drive(drive, dma_mode) == 0)
- return hwif->dmaproc(ide_dma_on, drive);
+ /*
+ * Consult the list of known "good" drives
+ */
+ if (ide_dmaproc(ide_dma_good_drive, drive)) {
+ if (id->eide_dma_time > 150)
+ goto out;
+ xfer_mode = XFER_MW_DMA_1;
+ func = ide_dma_on;
}
- return hwif->dmaproc(ide_dma_off_quietly, drive);
+
+out:
+ func = icside_config_if(drive, xfer_mode);
+
+ return hwif->dmaproc(func, drive);
}
static int
/*#define BROKEN_MOUSE*/
extern int sun_mouse_init(void);
-extern void mouse_rpc_init (void);
struct busmouse_data {
struct miscdevice miscdev;
{
#ifdef CONFIG_SUN_MOUSE
sun_mouse_init();
-#endif
-#ifdef CONFIG_RPCMOUSE
- mouse_rpc_init();
#endif
return 0;
}
extern int pc110pad_init(void);
extern int pmu_device_init(void);
extern int qpmouse_init(void);
-extern int ds1620_init(void);
-extern int nwbutton_init(void);
-extern int nwflash_init(void);
static int misc_read_proc(char *buf, char **start, off_t offset,
int len, int *eof, void *private)
#ifdef CONFIG_SGI
streamable_init ();
#endif
-#ifdef CONFIG_DS1620
- ds1620_init();
-#endif
-#ifdef CONFIG_NWBUTTON
- nwbutton_init();
-#endif
-#ifdef CONFIG_NWFLASH
- nwflash_init();
-#endif
#ifdef CONFIG_SGI_NEWPORT_GFX
gfx_register ();
#endif
for (port = portlist; port; port = port->next)
drv->attach (port);
+ /* For compatibility with 2.2, check the (obsolete) parport_lowlevel
+ * alias in case some people haven't changed to post-install rules
+ * yet. parport_enumerate (itself deprecated) will printk a
+ * friendly reminder. */
+ if (!portlist)
+ parport_enumerate ();
+
return 0;
}
comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
bool 'Enable extra checks in new queueing code' CONFIG_SCSI_DEBUG_QUEUES
-fi
+#fi
bool ' Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
if (res == 0x55aa)
break;
}
- } while (timeout--);
+ } while (--timeout);
if (timeout == 0) {
printk(KERN_WARNING "WaveArtist: reset timeout ");
char rev[3], dev_name[64];
int my_dev;
- waveartist_reset(devc);
+ if (waveartist_reset(devc))
+ return -ENODEV;
sprintf(dev_name, "%s (%s", devc->hw.name, devc->chip_name);
MODULE_PARM(dma, "i"); /* DMA */
MODULE_PARM(dma2, "i"); /* DMA2 */
-int io = CONFIG_WAVEARTIST_BASE;
-int irq = CONFIG_WAVEARTIST_IRQ;
-int dma = CONFIG_WAVEARTIST_DMA;
-int dma2 = CONFIG_WAVEARTIST_DMA2;
+static int io = CONFIG_WAVEARTIST_BASE;
+static int irq = CONFIG_WAVEARTIST_IRQ;
+static int dma = CONFIG_WAVEARTIST_DMA;
+static int dma2 = CONFIG_WAVEARTIST_DMA2;
static int attached;
-struct address_info hw_config;
+static struct address_info hw_config;
int init_module(void)
{
unsigned char crtc[19];
unsigned int width;
unsigned int pitch;
+ unsigned int fetch;
/*
* Other
static void cyber2000fb_set_timing(struct par_info *hw)
{
- unsigned int fetchrow, i;
+ unsigned int i;
/*
* Blank palette
/* PLL registers */
cyber2000_grphw(0xb0, hw->clock_mult);
cyber2000_grphw(0xb1, hw->clock_div);
+
cyber2000_grphw(0xb2, 0xdb);
cyber2000_grphw(0xb3, 0x54); /* MCLK: 75MHz */
cyber2000_grphw(0x90, 0x01);
cyber2000_outb(0x20, 0x3c0);
cyber2000_outb(0xff, 0x3c6);
- fetchrow = hw->pitch + 1;
- cyber2000_grphw(0x14, fetchrow);
- /* FIXME: is this the right way round? */
- cyber2000_grphw(0x15, ((fetchrow >> 4) & 0xf0) | ((hw->pitch >> 8) & 0x0f));
+ cyber2000_grphw(0x14, hw->fetch);
+ cyber2000_grphw(0x15, ((hw->fetch >> 8) & 0x03) | ((hw->pitch >> 4) & 0x30));
cyber2000_grphw(0x77, hw->visualid);
- cyber2000_grphw(0x33, 0x1c);
+ cyber2000_grphw(0x33, 0x0c);
/*
* Set up accelerator registers
* mult = reg0xb0.7:0
* div1 = (reg0xb1.5:0 + 1)
* div2 = 2^(reg0xb1.7:6)
- * fpll should be between 150 and 220 MHz
- * (6667ps and 4545ps)
+ * fpll should be between 115 and 257 MHz
+ * (8696ps and 3891ps)
*/
static int
cyber2000fb_decode_clock(struct par_info *hw, struct fb_var_screeninfo *var)
break;
}
#else
+ /*
+ * 1600x1200 1280x1024 1152x864 1024x768 800x600 640x480
+ * 5051 5051 yes 76*
+ * 5814 5814 no 66
+ * 6411 6411 no 60
+ * 7408 7408 yes 75*
+ * 74*
+ * 7937 7937 yes 70*
+ * 9091 4545 yes 80*
+ * 75* 100*
+ * 9260 4630 yes 60*
+ * 10000 5000 no 70 90
+ * 12500 6250 yes 47-lace* 60*
+ * 43-lace*
+ * 12699 6349 yes 75*
+ * 13334 6667 no 72
+ * 70
+ * 14815 7407 yes 100*
+ * 15385 7692 yes 47-lace* 60*
+ * 43-lace*
+ * 17656 4414 no 90
+ * 20000 5000 no 72
+ * 20203 5050 yes 75*
+ * 22272 5568 yes 43-lace* 70* 100*
+ * 25000 6250 yes 60*
+ * 25057 6264 no 90
+ * 27778 6944 yes 56*
+ * 48-lace*
+ * 31747 7936 yes 75*
+ * 32052 8013 no 72
+ * 39722 /6 6620 no
+ * 39722 /8 4965 yes 60*
+ */
/* /1 /2 /4 /6 /8 */
/* (2010) (2000) */
- if (pll_ps == 4630) { /* 216.0, 108.0, 54.00, 36.000 27.000 */
- mult = 181; /* 4630 9260 18520 27780 37040 */
- div1 = 12;
- } else if (pll_ps == 4965) { /* 201.0, 100.5, 50.25, 33.500 25.125 */
- mult = 211; /* 4965 9930 19860 29790 39720 */
- div1 = 15;
- } else if (pll_ps == 5050) { /* 198.0, 99.0, 49.50, 33.000 24.750 */
- mult = 83; /* 5050 10100 20200 30300 40400 */
- div1 = 6;
- } else if (pll_ps == 6349) { /* 158.0, 79.0, 39.50, 26.333 19.750 */
- mult = 209; /* 6349 12698 25396 38094 50792 */
- div1 = 19;
- } else if (pll_ps == 6422) { /* 156.0, 78.0, 39.00, 26.000 19.500 */
- mult = 190; /* 6422 12844 25688 38532 51376 */
- div1 = 17;
+ if (pll_ps >= 4543 && pll_ps <= 4549) {
+ mult = 169; /*u220.0 110.0 54.99 36.663 27.497 */
+ div1 = 11; /* 4546 9092 18184 27276 36367 */
+ } else if (pll_ps >= 4596 && pll_ps <= 4602) {
+ mult = 243; /* 217.5 108.7 54.36 36.243 27.181 */
+ div1 = 16; /* 4599 9197 18395 27592 36789 */
+ } else if (pll_ps >= 4627 && pll_ps <= 4633) {
+ mult = 181; /*u216.0, 108.0, 54.00, 36.000 27.000 */
+ div1 = 12; /* 4630 9260 18520 27780 37040 */
+ } else if (pll_ps >= 4962 && pll_ps <= 4968) {
+ mult = 211; /*u201.0, 100.5, 50.25, 33.500 25.125 */
+ div1 = 15; /* 4965 9930 19860 29790 39720 */
+ } else if (pll_ps >= 5005 && pll_ps <= 5011) {
+ mult = 251; /* 200.0 99.8 49.92 33.280 24.960 */
+ div1 = 18; /* 5008 10016 20032 30048 40064 */
+ } else if (pll_ps >= 5047 && pll_ps <= 5053) {
+ mult = 83; /*u198.0, 99.0, 49.50, 33.000 24.750 */
+ div1 = 6; /* 5050 10100 20200 30300 40400 */
+ } else if (pll_ps >= 5490 && pll_ps <= 5496) {
+ mult = 89; /* 182.0 91.0 45.51 30.342 22.756 */
+ div1 = 7; /* 5493 10986 21972 32958 43944 */
+ } else if (pll_ps >= 5567 && pll_ps <= 5573) {
+ mult = 163; /*u179.5 89.8 44.88 29.921 22.441 */
+ div1 = 13; /* 5570 11140 22281 33421 44562 */
+ } else if (pll_ps >= 6246 && pll_ps <= 6252) {
+ mult = 190; /*u160.0, 80.0, 40.00, 26.671 20.003 */
+ div1 = 17; /* 6249 12498 24996 37494 49992 */
+ } else if (pll_ps >= 6346 && pll_ps <= 6352) {
+ mult = 209; /*u158.0, 79.0, 39.50, 26.333 19.750 */
+ div1 = 19; /* 6349 12698 25396 38094 50792 */
+ } else if (pll_ps >= 6648 && pll_ps <= 6655) {
+ mult = 210; /*u150.3 75.2 37.58 25.057 18.792 */
+ div1 = 20; /* 6652 13303 26606 39909 53213 */
+ } else if (pll_ps >= 6943 && pll_ps <= 6949) {
+ mult = 181; /*u144.0 72.0 36.00 23.996 17.997 */
+ div1 = 18; /* 6946 13891 27782 41674 55565 */
+ } else if (pll_ps >= 7404 && pll_ps <= 7410) {
+ mult = 198; /*u134.0 67.5 33.75 22.500 16.875 */
+ div1 = 21; /* 7407 14815 29630 44445 59260 */
+ } else if (pll_ps >= 7689 && pll_ps <= 7695) {
+ mult = 227; /*u130.0 65.0 32.50 21.667 16.251 */
+ div1 = 25; /* 7692 15384 30768 46152 61536 */
+ } else if (pll_ps >= 7808 && pll_ps <= 7814) {
+ mult = 152; /* 128.0 64.0 32.00 21.337 16.003 */
+ div1 = 17; /* 7811 15623 31245 46868 62490 */
+ } else if (pll_ps >= 7934 && pll_ps <= 7940) {
+ mult = 44; /*u126.0 63.0 31.498 20.999 15.749 */
+ div1 = 5; /* 7937 15874 31748 47622 63494 */
} else
return -EINVAL;
+ /* 187 13 -> 4855 */
+ /* 181 18 -> 6946 */
+ /* 163 13 -> 5570 */
+ /* 169 11 -> 4545 */
#endif
/*
* Step 3:
debug_printf("%02X ", hw->crtc[i]);
debug_printf("%02X\n", hw->crtc_ofl);
}
- hw->width -= 1;
+ hw->width -= 1;
+ hw->fetch = hw->pitch;
+ if (current_par.bus_64bit == 0)
+ hw->fetch <<= 1;
+ hw->fetch += 1;
return 0;
}
static char igs_regs[] __initdata = {
0x10, 0x10, 0x12, 0x00, 0x13, 0x00,
-/* 0x30, 0x21,*/ 0x31, 0x00, 0x32, 0x00, 0x33, 0x01,
+ 0x31, 0x00, 0x32, 0x00, 0x33, 0x01,
0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x01,
0x58, 0x00, 0x59, 0x00, 0x5a, 0x00,
- 0x70, 0x0b,/* 0x71, 0x10, 0x72, 0x45,*/ 0x73, 0x30,
- 0x74, 0x1b, 0x75, 0x1e, 0x76, 0x00, 0x7a, 0xc8
+ 0x70, 0x0b, 0x73, 0x30,
+ 0x74, 0x0b, 0x75, 0x17, 0x76, 0x00, 0x7a, 0xc8
};
static void __init cyber2000fb_hw_init(void)
cyber2000_grphw(igs_regs[i], igs_regs[i+1]);
}
+static unsigned short device_ids[] __initdata = {
+ PCI_DEVICE_ID_INTERG_2000,
+ PCI_DEVICE_ID_INTERG_2010,
+ PCI_DEVICE_ID_INTERG_5000
+};
+
/*
* Initialization
*/
struct pci_dev *dev;
u_int h_sync, v_sync;
u_long mmio_base, smem_base, smem_size;
- int err = 0;
-
- dev = pci_find_device(PCI_VENDOR_ID_INTERG,
- PCI_DEVICE_ID_INTERG_2000, NULL);
+ int err = 0, i;
- if (!dev)
+ for (i = 0; i < sizeof(device_ids) / sizeof(device_ids[0]); i++) {
dev = pci_find_device(PCI_VENDOR_ID_INTERG,
- PCI_DEVICE_ID_INTERG_2010, NULL);
+ device_ids[i], NULL);
+ if (dev)
+ break;
+ }
if (!dev)
return -ENXIO;
cyber2000_outb(0x08, 0x46e8);
/*
- * get the video RAM size from the VGA register.
+ * get the video RAM size and width from the VGA register.
* This should have been already initialised by the BIOS,
* but if it's garbage, claim default 1MB VRAM (woody)
*/
cyber2000_outb(0x72, 0x3ce);
- switch (cyber2000_inb(0x3cf) & 3) {
+ i = cyber2000_inb(0x3cf);
+ current_par.bus_64bit = i & 4;
+
+ switch (i & 3) {
case 2: smem_size = 0x00400000; break;
case 1: smem_size = 0x00200000; break;
default: smem_size = 0x00100000; break;
err = -ENOMEM;
goto release_smem_resource;
}
-current_par.screen_base += IO_FUDGE_FACTOR;
+
current_par.screen_size = smem_size;
current_par.screen_base_p = smem_base + 0x80000000;
current_par.regs_base_p = mmio_base + 0x80000000;
/* Not reached because the usecount will never be
decremented to zero */
unregister_framebuffer(&fb_info);
- /* TODO: clean up ... */
iounmap(current_par.screen_base);
iounmap(CyberRegs);
char dev_name[32];
unsigned int initialised;
unsigned int dev_id;
+ unsigned int bus_64bit:1;
/*
* palette
#define VFAC_CTL2_INVERT_BLANK 0x40 /* invert blank output */
#define VFAC_CTL2_INVERT_OVSYNC 0x80 /* invert other vsync input */
+#define VFAC_CTL3 0xea
+#define VFAC_CTL3_CAP_IRQ 0x40 /* enable capture interrupt */
+
+#define CAP_MEM_START 0xeb /* 18 bits */
+#define CAP_MAP_WIDTH 0xed /* high 6 bits */
+#define CAP_PITCH 0xee /* 8 bits */
+
+#define CAP_CTL_MISC 0xef
+#define CAP_CTL_MISC_HDIV 0x01
+#define CAP_CTL_MISC_HDIV4 0x02
+#define CAP_CTL_MISC_ODDEVEN 0x04
+#define CAP_CTL_MISC_HSYNCDIV2 0x08
+#define CAP_CTL_MISC_SYNCTZHIGH 0x10
+#define CAP_CTL_MISC_SYNCTZOR 0x20
+#define CAP_CTL_MISC_DISPUSED 0x80
+
+#define REG_BANK 0xfa
+#define REG_BANK_Y 0x01
+#define REG_BANK_K 0x05
+
+#define K_CAP_X2_CTL1 0x49
+
+#define CAP_X_START 0x60
+#define CAP_X_END 0x62
+#define CAP_Y_START 0x64
+#define CAP_Y_END 0x66
+#define CAP_DDA_X_INIT 0x68
+#define CAP_DDA_X_INC 0x6a
+#define CAP_DDA_Y_INIT 0x6c
+#define CAP_DDA_Y_INC 0x6e
+
+#define EXT_FIFO_CTL 0x74
+
+#define CAP_PIP_X_START 0x80
+#define CAP_PIP_X_END 0x82
+#define CAP_PIP_Y_START 0x84
+#define CAP_PIP_Y_END 0x86
+
+#define CAP_NEW_CTL1 0x88
+
+#define CAP_NEW_CTL2 0x89
+
+#define CAP_MODE1 0xa4
+#define CAP_MODE1_8BIT 0x01 /* enable 8bit capture mode */
+#define CAP_MODE1_CCIR656 0x02 /* CCIR656 mode */
+#define CAP_MODE1_IGNOREVGT 0x04 /* ignore VGT */
+#define CAP_MODE1_ALTFIFO 0x10 /* use alternate FIFO for capture */
+#define CAP_MODE1_SWAPUV 0x20 /* swap UV bytes */
+#define CAP_MODE1_MIRRORY 0x40 /* mirror vertically */
+#define CAP_MODE1_MIRRORX 0x80 /* mirror horizontally */
+
+#define CAP_MODE2 0xa5
+
+#define Y_TV_CTL 0xae
+
+#define EXT_MEM_START 0xc0 /* ext start address 21 bits */
+#define HOR_PHASE_SHIFT 0xc2 /* high 3 bits */
+#define EXT_SRC_WIDTH 0xc3 /* ext offset phase 10 bits */
+#define EXT_SRC_HEIGHT 0xc4 /* high 6 bits */
+#define EXT_X_START 0xc5 /* ext->screen, 16 bits */
+#define EXT_X_END 0xc7 /* ext->screen, 16 bits */
+#define EXT_Y_START 0xc9 /* ext->screen, 16 bits */
+#define EXT_Y_END 0xcb /* ext->screen, 16 bits */
+#define EXT_SRC_WIN_WIDTH 0xcd /* 8 bits */
+#define EXT_COLOUR_COMPARE 0xce /* 24 bits */
+#define EXT_DDA_X_INIT 0xd1 /* ext->screen 16 bits */
+#define EXT_DDA_X_INC 0xd3 /* ext->screen 16 bits */
+#define EXT_DDA_Y_INIT 0xd5 /* ext->screen 16 bits */
+#define EXT_DDA_Y_INC 0xd7 /* ext->screen 16 bits */
+
+#define VID_FIFO_CTL 0xd9
+
+#define VID_CAP_VFC 0xdb
+#define VID_CAP_VFC_YUV422 0x00 /* formats - does this cause conversion? */
+#define VID_CAP_VFC_RGB555 0x01
+#define VID_CAP_VFC_RGB565 0x02
+#define VID_CAP_VFC_RGB888_24 0x03
+#define VID_CAP_VFC_RGB888_32 0x04
+#define VID_CAP_VFC_DUP_PIX_ZOON 0x08 /* duplicate pixel zoom */
+#define VID_CAP_VFC_MOD_3RD_PIX 0x20 /* modify 3rd duplicated pixel */
+#define VID_CAP_VFC_DBL_H_PIX 0x40 /* double horiz pixels */
+#define VID_CAP_VFC_UV128 0x80 /* UV data offset by 128 */
+
+#define VID_DISP_CTL1 0xdc
+#define VID_DISP_CTL1_INTRAM 0x01 /* video pixels go to internal RAM */
+#define VID_DISP_CTL1_IGNORE_CCOMP 0x02 /* ignore colour compare registers */
+#define VID_DISP_CTL1_NOCLIP 0x04 /* do not clip to 16235,16240 */
+#define VID_DISP_CTL1_UV_AVG 0x08 /* U/V data is averaged */
+#define VID_DISP_CTL1_Y128 0x10 /* Y data offset by 128 */
+#define VID_DISP_CTL1_VINTERPOL_OFF 0x20 /* vertical interpolation off */
+#define VID_DISP_CTL1_VID_OUT_WIN_FULL 0x40 /* video out window full */
+#define VID_DISP_CTL1_ENABLE_VID_WINDOW 0x80 /* enable video window */
+
+#define VID_FIFO_CTL1 0xdd
+
+#define VFAC_CTL1 0xe8
+#define VFAC_CTL1_CAPTURE 0x01 /* capture enable */
+#define VFAC_CTL1_VFAC_ENABLE 0x02 /* vfac enable */
+#define VFAC_CTL1_FREEZE_CAPTURE 0x04 /* freeze capture */
+#define VFAC_CTL1_FREEZE_CAPTURE_SYNC 0x08 /* sync freeze capture */
+#define VFAC_CTL1_VALIDFRAME_SRC 0x10 /* select valid frame source */
+#define VFAC_CTL1_PHILIPS 0x40 /* select Philips mode */
+#define VFAC_CTL1_MODVINTERPOLCLK	0x80	/* modify vertical interpolation clock */
+
+#define VFAC_CTL2 0xe9
+#define VFAC_CTL2_INVERT_VIDDATAVALID 0x01 /* invert video data valid */
+#define VFAC_CTL2_INVERT_GRAPHREADY 0x02 /* invert graphic ready output sig */
+#define VFAC_CTL2_INVERT_DATACLK 0x04 /* invert data clock signal */
+#define VFAC_CTL2_INVERT_HSYNC 0x08 /* invert hsync input */
+#define VFAC_CTL2_INVERT_VSYNC 0x10 /* invert vsync input */
+#define VFAC_CTL2_INVERT_FRAME 0x20 /* invert frame odd/even input */
+#define VFAC_CTL2_INVERT_BLANK 0x40 /* invert blank output */
+#define VFAC_CTL2_INVERT_OVSYNC 0x80 /* invert other vsync input */
+
#define VFAC_CTL3 0xea
#define CAP_MEM_START 0xeb /* 18 bits */
}
link[i] = '\0';
affs_brelse(bh);
+ SetPageUptodate(page);
kunmap(page);
UnlockPage(page);
return 0;
brelse(bh);
}
link[size] = '\0';
+ SetPageUptodate(page);
kunmap(page);
UnlockPage(page);
return 0;
if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL)
inode->i_flags |= MS_SYNCHRONOUS;
insert_inode_hash(inode);
- inode->i_generation++;
+ inode->i_generation = event++;
mark_inode_dirty(inode);
unlock_super (sb);
case F_SETLKW:
err = fcntl_setlk(fd, cmd, (struct flock *) arg);
break;
-#if BIT_PER_LONG == 32 /* LFS only on 32 bit platforms */
- case F_GETLK64:
- err = fcntl_getlk64(fd, (struct flock64 *) arg);
- break;
- case F_SETLK64:
- err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
- break;
- case F_SETLKW64:
- err = fcntl_setlk64(fd, cmd, (struct flock64 *) arg);
- break;
-#endif
case F_GETOWN:
/*
* XXX If f_owner is a process group, the
struct nlm_block **head, *block;
struct file_lock *fl;
- dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
+ dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %ld-%ld ty=%d\n",
file, lock->fl.fl_pid, lock->fl.fl_start,
lock->fl.fl_end, lock->fl.fl_type);
for (head = &nlm_blocked; (block = *head); head = &block->b_next) {
fl = &block->b_call.a_args.lock.fl;
- dprintk(" check f=%p pd=%d %Ld-%Ld ty=%d\n",
+ dprintk(" check f=%p pd=%d %ld-%ld ty=%d\n",
block->b_file, fl->fl_pid, fl->fl_start,
fl->fl_end, fl->fl_type);
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
struct nlm_block *block;
int error;
- dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
+ dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %ld-%ld, bl=%d)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
{
struct file_lock *fl;
- dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %Ld-%Ld)\n",
+ dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %ld-%ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_type,
lock->fl.fl_end);
if ((fl = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
- dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
+ dprintk("lockd: conflicting lock(ty=%d, %ld-%ld)\n",
fl->fl_type, fl->fl_start, fl->fl_end);
conflock->caller = "somehost"; /* FIXME */
conflock->oh.len = 0; /* don't return OH info */
{
int error;
- dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %Ld-%Ld)\n",
+ dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %ld-%ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_pid,
{
struct nlm_block *block;
- dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %Ld-%Ld)\n",
+ dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %ld-%ld)\n",
file->f_file.f_dentry->d_inode->i_dev,
file->f_file.f_dentry->d_inode->i_ino,
lock->fl.fl_pid,
fl->fl_pid = ntohl(*p++);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
- fl->fl_start = (u_long)ntohl(*p++); // Up to 4G-1
+ fl->fl_start = ntohl(*p++);
len = ntohl(*p++);
if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
fl->fl_end = NLM_OFFSET_MAX;
return NULL;
*p++ = htonl(fl->fl_pid);
- *p++ = htonl((u_long)lock->fl.fl_start);
+ *p++ = htonl(lock->fl.fl_start);
if (lock->fl.fl_end == NLM_OFFSET_MAX)
*p++ = xdr_zero;
else
- *p++ = htonl((u_long)(lock->fl.fl_end - lock->fl.fl_start + 1));
+ *p++ = htonl(lock->fl.fl_end - lock->fl.fl_start + 1);
return p;
}
if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
return 0;
- *p++ = htonl((u_long)fl->fl_start);
+ *p++ = htonl(fl->fl_start);
if (fl->fl_end == NLM_OFFSET_MAX)
*p++ = xdr_zero;
else
- *p++ = htonl((u_long)(fl->fl_end - fl->fl_start + 1));
+ *p++ = htonl(fl->fl_end - fl->fl_start + 1);
}
return p;
fl->fl_flags = FL_POSIX;
fl->fl_type = excl? F_WRLCK : F_RDLCK;
- fl->fl_start = (u_long)ntohl(*p++);
+ fl->fl_start = ntohl(*p++);
len = ntohl(*p++);
if (len == 0 || (fl->fl_end = fl->fl_start + len - 1) < 0)
fl->fl_end = NLM_OFFSET_MAX;
#include <asm/uaccess.h>
-#define OFFSET_MAX (~(loff_t)0ULL >> 1) /* FIXME: move elsewhere? */
+#define OFFSET_MAX ((off_t)LONG_MAX) /* FIXME: move elsewhere? */
static int flock_make_lock(struct file *filp, struct file_lock *fl,
unsigned int cmd);
static int posix_make_lock(struct file *filp, struct file_lock *fl,
- struct flock64 *l);
+ struct flock *l);
static int flock_locks_conflict(struct file_lock *caller_fl,
struct file_lock *sys_fl);
static int posix_locks_conflict(struct file_lock *caller_fl,
if (waiter->fl_prevblock) {
printk(KERN_ERR "locks_insert_block: remove duplicated lock "
- "(pid=%d %Ld-%Ld type=%d)\n",
+ "(pid=%d %ld-%ld type=%d)\n",
waiter->fl_pid, waiter->fl_start,
waiter->fl_end, waiter->fl_type);
locks_delete_block(waiter->fl_prevblock, waiter);
/* Report the first existing lock that would conflict with l.
* This implements the F_GETLK command of fcntl().
*/
-static int do_fcntl_getlk(unsigned int fd, struct flock64 *flock)
+int fcntl_getlk(unsigned int fd, struct flock *l)
{
struct file *filp;
struct file_lock *fl,file_lock;
+ struct flock flock;
int error;
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
error = -EINVAL;
- if ((flock->l_type != F_RDLCK) && (flock->l_type != F_WRLCK))
+ if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
goto out;
error = -EBADF;
if (!filp->f_dentry || !filp->f_dentry->d_inode)
goto out_putf;
- if (!posix_make_lock(filp, &file_lock, flock))
+ if (!posix_make_lock(filp, &file_lock, &flock))
goto out_putf;
if (filp->f_op->lock) {
fl = posix_test_lock(filp, &file_lock);
}
- flock->l_type = F_UNLCK;
+ flock.l_type = F_UNLCK;
if (fl != NULL) {
- flock->l_pid = fl->fl_pid;
- flock->l_start = fl->fl_start;
- flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
+ flock.l_pid = fl->fl_pid;
+ flock.l_start = fl->fl_start;
+ flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
fl->fl_end - fl->fl_start + 1;
- flock->l_whence = 0;
- flock->l_type = fl->fl_type;
+ flock.l_whence = 0;
+ flock.l_type = fl->fl_type;
}
+ error = -EFAULT;
+ if (!copy_to_user(l, &flock, sizeof(flock)))
+ error = 0;
out_putf:
fput(filp);
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
-static int do_fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock64 *flock)
+int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
struct file *filp;
struct file_lock file_lock;
+ struct flock flock;
struct dentry * dentry;
struct inode *inode;
int error;
+ /*
+ * This might block, so we do it before checking the inode.
+ */
+ error = -EFAULT;
+ if (copy_from_user(&flock, l, sizeof(flock)))
+ goto out;
+
/* Get arguments and validate them ...
*/
}
error = -EINVAL;
- if (!posix_make_lock(filp, &file_lock, flock))
+ if (!posix_make_lock(filp, &file_lock, &flock))
goto out_putf;
error = -EBADF;
- switch (flock->l_type) {
+ switch (flock.l_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
goto out_putf;
return error;
}
-int fcntl_getlk(unsigned int fd, struct flock *l)
-{
- struct flock flock;
- struct flock64 fl64;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&flock, l, sizeof(flock)))
- goto out;
-
- /* Convert to 64-bit offsets for internal use */
- fl64.l_type = flock.l_type;
- fl64.l_whence = flock.l_whence;
- fl64.l_start = (unsigned long)flock.l_start;
- fl64.l_len = (unsigned long)flock.l_len;
- fl64.l_pid = flock.l_pid;
-
- error = do_fcntl_getlk(fd, &fl64);
- if (error)
- goto out;
-
- /* and back again... */
- flock.l_type = fl64.l_type;
- flock.l_whence = fl64.l_whence;
- flock.l_start = (unsigned long)fl64.l_start;
- flock.l_len = (unsigned long)fl64.l_len;
- flock.l_pid = fl64.l_pid;
-
- if (copy_to_user(l, &flock, sizeof(flock)))
- error = -EFAULT;
-out:
- return error;
-}
-
-int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
-{
- struct flock flock;
- struct flock64 fl64;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&flock, l, sizeof(flock)))
- goto out;
-
- /* Convert to 64-bit offsets for internal use */
- fl64.l_type = flock.l_type;
- fl64.l_whence = flock.l_whence;
- fl64.l_start = (unsigned long)flock.l_start;
- fl64.l_len = (unsigned long)flock.l_len;
- fl64.l_pid = flock.l_pid;
-
- error = do_fcntl_setlk(fd, cmd, &fl64);
-out:
- return error;
-}
-
-#if BITS_PER_LONG == 32 /* LFS versions for 32 bit platforms */
-int fcntl_getlk64(unsigned int fd, struct flock64 *l)
-{
- struct flock64 fl64;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&fl64, l, sizeof(fl64)))
- goto out;
-
- error = do_fcntl_getlk(fd, &fl64);
-
- if (!error && copy_to_user(l, &fl64, sizeof(fl64)))
- error = -EFAULT;
-out:
- return error;
-}
-
-int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l)
-{
- struct flock64 fl64;
- int error;
-
- error = -EFAULT;
- if (copy_from_user(&fl64, l, sizeof(fl64)))
- goto out;
-
- error = do_fcntl_setlk(fd, cmd, &fl64);
-out:
- return error;
-}
-#endif
-
/*
* This function is called when the file is being removed
* from the task's fd array.
* style lock.
*/
static int posix_make_lock(struct file *filp, struct file_lock *fl,
- struct flock64 *l)
+ struct flock *l)
{
off_t start;
p += sprintf(p, "FLOCK ADVISORY ");
}
p += sprintf(p, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
- p += sprintf(p, "%d %s:%ld %Ld %Ld ",
+ p += sprintf(p, "%d %s:%ld %ld %ld ",
fl->fl_pid,
kdevname(inode->i_dev), inode->i_ino, fl->fl_start,
fl->fl_end);
kfree(link);
if (error)
goto fail;
+ SetPageUptodate(page);
kunmap(page);
UnlockPage(page);
return 0;
dprintk("NFS: nfs_lock(f=%4x/%ld, t=%x, fl=%x, r=%ld:%ld)\n",
inode->i_dev, inode->i_ino,
fl->fl_type, fl->fl_flags,
- (unsigned long) fl->fl_start, (unsigned long) fl->fl_end);
+ fl->fl_start, fl->fl_end);
if (!inode)
return -EINVAL;
goto out_unlock;
attr->ia_valid |= ATTR_MODE;
- attr->ia_mode = type | mode;
+ attr->ia_mode = mode;
/* Special treatment for non-regular files according to the
* gospel of sun micro
rdev = 0;
} else if (type == S_IFCHR && !(attr->ia_valid & ATTR_SIZE)) {
/* If you think you've seen the worst, grok this. */
- attr->ia_mode = S_IFIFO | mode;
type = S_IFIFO;
} else if (size != rdev) {
/* dev got truncated because of 16bit Linux dev_t */
break;
case S_IFDIR:
opfunc = (nfsd_dirop_t) dirp->i_op->mkdir;
- /* Odd, indeed, but filesystems did it anyway */
- iap->ia_mode &= (S_IRWXUGO|S_ISVTX) & ~current->fs->umask;
break;
case S_IFCHR:
case S_IFBLK:
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
+ iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
/*
* Call the dir op function to create the object.
__kernel_pid_t l_pid;
};
-#ifdef __KERNEL__
-#define flock64 flock
-#endif
-
#endif
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- pgd_val(ret[PTRS_PER_PGD]) =
- pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ pgd_val(ret[PTRS_PER_PGD])
+ = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
}
return ret;
}
-extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
-{
- pgd_t *init;
-
- init = pgd_offset(&init_mm, 0UL);
- memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
- }
+ } else
+ ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
+ struct task_struct * p;
pgd_t *pgd;
- mmlist_access_lock();
- mmlist_set_pgdir(address, entry);
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
- mmlist_access_unlock();
}
#endif /* _ALPHA_PGALLOC_H */
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
/*
* We use two different types of addressing - PC style addresses, and ARM
addr; \
})
+#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
+#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
+#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
+#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
+#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
+#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
+#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
+
/*
* Translated address IO functions
*
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
/*
* We use two different types of addressing - PC style addresses, and ARM
#define __ioaddrc(port) \
(__PORT_PCIO((port)) ? PCIO_BASE + ((port) << 2) : IO_BASE + ((port) << 2))
+#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
+#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
+#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
+#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
+#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
+#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
+#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
+
/*
* Translated address IO functions
*
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
/*
* We use two different types of addressing - PC style addresses, and ARM
addr; \
})
+#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
+#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
+#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
+#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
+#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
+#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
+#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
+
/*
* Translated address IO functions
*
#include <asm/arch/memory.h>
#ifdef CONFIG_HOST_FOOTBRIDGE
-/* Virtual Physical
- * 0xfff00000 0x40000000 X-Bus
- * 0xff000000 0x7c000000 PCI I/O space
+/* Virtual Physical Size
+ * 0xff800000 0x40000000 1MB X-Bus
+ * 0xff000000 0x7c000000 1MB PCI I/O space
*
- * 0xfe000000 0x42000000 CSR
- * 0xfd000000 0x78000000 Outbound write flush
- * 0xfc000000 0x79000000 PCI IACK/special space
+ * 0xfe000000 0x42000000 1MB CSR
+ * 0xfd000000 0x78000000 1MB Outbound write flush (not supported)
+ * 0xfc000000 0x79000000 1MB PCI IACK/special space
*
- * 0xf9000000 0x7a000000 PCI Config type 1
- * 0xf8000000 0x7b000000 PCI Config type 0
- *
+ * 0xfb000000 0x7a000000 16MB PCI Config type 1
+ * 0xfa000000 0x7b000000 16MB PCI Config type 0
+ *
+ * 0xf9000000 0x50000000 1MB Cache flush
+ * 0xf8000000 0x41000000 16MB Flash memory
+ *
+ * 0xe1000000 unmapped (to catch bad ISA/PCI)
+ *
+ * 0xe0000000 0x80000000 16MB ISA memory
*/
#define XBUS_SIZE 0x00100000
-#define XBUS_BASE 0xfff00000
+#define XBUS_BASE 0xff800000
#define PCIO_SIZE 0x00100000
#define PCIO_BASE 0xff000000
-#define ARMCSR_SIZE 0x01000000
+#define ARMCSR_SIZE 0x00100000
#define ARMCSR_BASE 0xfe000000
-#define WFLUSH_SIZE 0x01000000
+#define WFLUSH_SIZE 0x00100000
#define WFLUSH_BASE 0xfd000000
-#define PCIIACK_SIZE 0x01000000
+#define PCIIACK_SIZE 0x00100000
#define PCIIACK_BASE 0xfc000000
#define PCICFG1_SIZE 0x01000000
-#define PCICFG1_BASE 0xf9000000
+#define PCICFG1_BASE 0xfb000000
#define PCICFG0_SIZE 0x01000000
-#define PCICFG0_BASE 0xf8000000
-
-#define PCIMEM_SIZE 0x18000000
-#define PCIMEM_BASE 0xe0000000
+#define PCICFG0_BASE 0xfa000000
#define FLUSH_SIZE 0x00100000
-#define FLUSH_BASE 0xdf000000
+#define FLUSH_BASE 0xf9000000
-#define FLASH_SIZE 0x00400000
-#define FLASH_BASE 0xd8000000
+#define FLASH_SIZE 0x01000000
+#define FLASH_BASE 0xf8000000
+
+#define PCIMEM_SIZE 0x01000000
+#define PCIMEM_BASE 0xe0000000
#elif defined(CONFIG_ARCH_CO285)
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#include <asm/dec21285.h>
+#define IO_SPACE_LIMIT 0xffff
/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
+ * Translation of various region addresses to virtual addresses
*/
-#undef ARCH_IO_DELAY
-#define ARCH_READWRITE
+#define __io_pci(a) (PCIO_BASE + (a))
+#if 0
+#define __mem_pci(a) ((unsigned long)(a))
+#define __mem_isa(a) (PCIMEM_BASE + (unsigned long)(a))
+#else
-#define __pci_io_addr(x) (PCIO_BASE + (unsigned int)(x))
+extern __inline__ unsigned long ___mem_pci(unsigned long a)
+{
+ if (a <= 0xc0000000 || a >= 0xe0000000)
+ BUG();
+ return a;
+}
-#define __inb(p) (*(volatile unsigned char *)__pci_io_addr(p))
-#define __inl(p) (*(volatile unsigned long *)__pci_io_addr(p))
+extern __inline__ unsigned long ___mem_isa(unsigned long a)
+{
+ if (a >= 16*1048576)
+ BUG();
+ return PCIMEM_BASE + a;
+}
+#define __mem_pci(a) ___mem_pci((unsigned long)(a))
+#define __mem_isa(a) ___mem_isa((unsigned long)(a))
+#endif
+
+/* the following macro is deprecated */
+#define __ioaddr(p) __io_pci(p)
-extern __inline__ unsigned int __inw(unsigned int port)
+/*
+ * Generic virtual read/write
+ */
+#define __arch_getb(a) (*(volatile unsigned char *)(a))
+#define __arch_getl(a) (*(volatile unsigned long *)(a))
+
+extern __inline__ unsigned int __arch_getw(unsigned long a)
{
unsigned int value;
- __asm__ __volatile__(
- "ldr%?h %0, [%1, %2] @ inw"
- : "=&r" (value)
- : "r" (PCIO_BASE), "r" (port));
+ __asm__ __volatile__("ldr%?h %0, [%1, #0] @ getw"
+ : "=&r" (value)
+ : "r" (a));
return value;
}
-#define __outb(v,p) (*(volatile unsigned char *)__pci_io_addr(p) = (v))
-#define __outl(v,p) (*(volatile unsigned long *)__pci_io_addr(p) = (v))
+#define __arch_putb(v,a) (*(volatile unsigned char *)(a) = (v))
+#define __arch_putl(v,a) (*(volatile unsigned long *)(a) = (v))
-extern __inline__ void __outw(unsigned int value, unsigned int port)
+extern __inline__ void __arch_putw(unsigned int value, unsigned long a)
{
- __asm__ __volatile__(
- "str%?h %0, [%1, %2] @ outw"
- : : "r" (value), "r" (PCIO_BASE), "r" (port));
+ __asm__ __volatile__("str%?h %0, [%1, #0] @ putw"
+ : : "r" (value), "r" (a));
}
-#define __ioaddr(p) __pci_io_addr(p)
+#define inb(p) __arch_getb(__io_pci(p))
+#define inw(p) __arch_getw(__io_pci(p))
+#define inl(p) __arch_getl(__io_pci(p))
+
+#define outb(v,p) __arch_putb(v,__io_pci(p))
+#define outw(v,p) __arch_putw(v,__io_pci(p))
+#define outl(v,p) __arch_putl(v,__io_pci(p))
+
+#include <asm/dec21285.h>
/*
* ioremap support - validate a PCI memory address,
* and convert a PCI memory address to a physical
* address for the page tables.
*/
-#define valid_ioaddr(iomem,size) ((iomem) < 0x80000000 && (iomem) + (size) <= 0x80000000)
-#define io_to_phys(iomem) ((iomem) + DC21285_PCI_MEM)
-
-/*
- * Fudge up IO addresses by this much. Once we're confident that nobody
- * is using read*() and so on with addresses they didn't get from ioremap
- * this can go away.
- */
-#define IO_FUDGE_FACTOR PCIMEM_BASE
-
-#define __pci_mem_addr(x) ((void *)(IO_FUDGE_FACTOR + (unsigned long)(x)))
+#define valid_ioaddr(off,sz) ((off) < 0x80000000 && (off) + (sz) <= 0x80000000)
+#define io_to_phys(off) ((off) + DC21285_PCI_MEM)
/*
* ioremap takes a PCI memory address, as specified in
* linux/Documentation/IO-mapping.txt
*/
-#define ioremap(iomem_addr,size) \
-({ \
- unsigned long _addr = (iomem_addr), _size = (size); \
- void *_ret = NULL; \
- if (valid_ioaddr(_addr, _size)) { \
- _addr = io_to_phys(_addr); \
- _ret = __ioremap(_addr, _size, 0); \
- if (_ret) \
- _ret = (void *)((int) _ret - IO_FUDGE_FACTOR); \
- } \
- _ret; })
-
-#define ioremap_nocache(iomem_addr,size) ioremap((iomem_addr),(size))
-
-#define iounmap(_addr) do { __iounmap(__pci_mem_addr((_addr))); } while (0)
-
-#define readb(addr) (*(volatile unsigned char *)__pci_mem_addr(addr))
-#define readw(addr) (*(volatile unsigned short *)__pci_mem_addr(addr))
-#define readl(addr) (*(volatile unsigned long *)__pci_mem_addr(addr))
-
-#define writeb(b,addr) (*(volatile unsigned char *)__pci_mem_addr(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *)__pci_mem_addr(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned long *)__pci_mem_addr(addr) = (b))
-
-#define memset_io(a,b,c) memset(__pci_mem_addr(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),__pci_mem_addr(b),(c))
-#define memcpy_toio(a,b,c) memcpy(__pci_mem_addr(a),(b),(c))
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__pci_mem_addr(b),(c),(d))
+#define __arch_ioremap(off,size,nocache) \
+({ \
+ unsigned long _off = (off), _size = (size); \
+ void *_ret = NULL; \
+ if (valid_ioaddr(_off, _size)) \
+ _ret = __ioremap(io_to_phys(_off), _size, 0); \
+ _ret; \
+})
#endif
/*
- * linux/include/asm-arm/arch-ebsa110/io.h
+ * linux/include/asm-arm/arch-nexuspci/io.h
*
* Copyright (C) 1997,1998 Russell King
*
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
/*
* Dynamic IO functions - let the compiler
addr; \
})
+#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
+#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
+#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
+#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
+#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
+#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
+#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
+
/*
* Translated address IO functions
*
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO, and
- * has the constant-optimised IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
/*
* We use two different types of addressing - PC style addresses, and ARM
#define __PORT_PCIO(x) (!((x) & 0x80000000))
/*
- * Dynamic IO functions - let the compiler
- * optimize the expressions
+ * Dynamic IO functions.
*/
extern __inline__ void __outb (unsigned int value, unsigned int port)
{
#define __ioaddrc(port) \
(__PORT_PCIO((port)) ? PCIO_BASE + ((port) << 2) : IO_BASE + ((port) << 2))
+#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
+#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
+#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
+#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
+#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
+#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
+#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
+
/*
* Translated address IO functions
*
/*
- * linux/include/asm-arm/arch-ebsa285/io.h
+ * linux/include/asm-arm/arch-sa1100/io.h
*
* Copyright (C) 1997-1999 Russell King
*
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-/*
- * This architecture does not require any delayed IO
- */
-#undef ARCH_IO_DELAY
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io_pci(a) (PCIO_BASE + (a))
-#define __pci_io_addr(x) (PCIO_BASE + (unsigned int)(x))
+#define __ioaddr(p) __io_pci(p)
-#define __inb(p) (*(volatile unsigned char *)__pci_io_addr(p))
-#define __inl(p) (*(volatile unsigned long *)__pci_io_addr(p))
+/*
+ * Generic virtual read/write
+ */
+#define __arch_getb(a) (*(volatile unsigned char *)(a))
+#define __arch_getl(a) (*(volatile unsigned long *)(a))
-extern __inline__ unsigned int __inw(unsigned int port)
+extern __inline__ unsigned int __arch_getw(unsigned long a)
{
unsigned int value;
- __asm__ __volatile__(
- "ldr%?h %0, [%1, %2] @ inw"
- : "=&r" (value)
- : "r" (PCIO_BASE), "r" (port));
+ __asm__ __volatile__("ldr%?h %0, [%1, #0] @ getw"
+ : "=&r" (value)
+ : "r" (a));
return value;
}
-#define __outb(v,p) (*(volatile unsigned char *)__pci_io_addr(p) = (v))
-#define __outl(v,p) (*(volatile unsigned long *)__pci_io_addr(p) = (v))
+#define __arch_putb(v,a) (*(volatile unsigned char *)(a) = (v))
+#define __arch_putl(v,a) (*(volatile unsigned long *)(a) = (v))
-extern __inline__ void __outw(unsigned int value, unsigned int port)
+extern __inline__ void __arch_putw(unsigned int value, unsigned long a)
{
- __asm__ __volatile__(
- "str%?h %0, [%1, %2] @ outw"
- : : "r" (value), "r" (PCIO_BASE), "r" (port));
+ __asm__ __volatile__("str%?h %0, [%1, #0] @ putw"
+ : : "r" (value), "r" (a));
}
-#define __ioaddr(p) __pci_io_addr(p)
+#define inb(p) __arch_getb(__io_pci(p))
+#define inw(p) __arch_getw(__io_pci(p))
+#define inl(p) __arch_getl(__io_pci(p))
+
+#define outb(v,p) __arch_putb(v,__io_pci(p))
+#define outw(v,p) __arch_putw(v,__io_pci(p))
+#define outl(v,p) __arch_putl(v,__io_pci(p))
#endif
/*
* linux/include/asm-arm/io.h
*
- * Copyright (C) 1996 Russell King
+ * Copyright (C) 1996-1999 Russell King
*
* Modifications:
* 16-Sep-1996 RMK Inlined the inx/outx functions & optimised for both
* specific IO header files.
* 27-Mar-1999 PJB Second parameter of memcpy_toio is const..
* 04-Apr-1999 PJB Added check_signature.
+ * 12-Dec-1999 RMK More cleanups
*/
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H
+#include <asm/arch/hardware.h>
+#include <asm/arch/io.h>
+#include <asm/proc/io.h>
+
+#define outb_p(val,port) outb((val),(port))
+#define outw_p(val,port) outw((val),(port))
+#define outl_p(val,port) outl((val),(port))
+#define inb_p(port) inb((port))
+#define inw_p(port) inw((port))
+#define inl_p(port) inl((port))
+
+extern void outsb(unsigned int port, const void *from, int len);
+extern void outsw(unsigned int port, const void *from, int len);
+extern void outsl(unsigned int port, const void *from, int len);
+extern void insb(unsigned int port, void *from, int len);
+extern void insw(unsigned int port, void *from, int len);
+extern void insl(unsigned int port, void *from, int len);
+
+#define outsb_p(port,from,len) outsb(port,from,len)
+#define outsw_p(port,from,len) outsw(port,from,len)
+#define outsl_p(port,from,len) outsl(port,from,len)
+#define insb_p(port,to,len) insb(port,to,len)
+#define insw_p(port,to,len) insw(port,to,len)
+#define insl_p(port,to,len) insl(port,to,len)
+
#ifdef __KERNEL__
#ifndef NULL
#define NULL ((void *) 0)
#endif
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-extern void __iounmap(void *addr);
-
-#endif
-
-#include <asm/arch/hardware.h>
#include <asm/arch/memory.h>
-#include <asm/arch/io.h>
-#include <asm/proc/io.h>
-/* unsigned long virt_to_phys(void *x) */
-#define virt_to_phys(x) (__virt_to_phys((unsigned long)(x)))
-
-/* void *phys_to_virt(unsigned long x) */
-#define phys_to_virt(x) ((void *)(__phys_to_virt((unsigned long)(x))))
+extern __inline__ unsigned long virt_to_phys(volatile void *x)
+{
+ return __virt_to_phys((unsigned long)(x));
+}
-/*
- * Virtual view <-> DMA view memory address translations
- * virt_to_bus: Used to translate the virtual address to an
- * address suitable to be passed to set_dma_addr
- * bus_to_virt: Used to convert an address for DMA operations
- * to an address that the kernel can use.
- */
-#define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
-#define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))
+extern __inline__ void *phys_to_virt(unsigned long x)
+{
+ return (void *)(__phys_to_virt((unsigned long)(x)));
+}
/*
- * These macros actually build the multi-value IO function prototypes
+ * Virtual <-> DMA view memory address translations
*/
-#define __OUTS(s,i,x) extern void outs##s(unsigned int port, const void *from, int len);
-#define __INS(s,i,x) extern void ins##s(unsigned int port, void *to, int len);
+#define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
+#define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))
-#define __IO(s,i,x) \
- __OUTS(s,i,x) \
- __INS(s,i,x)
-
-__IO(b,"b",char)
-__IO(w,"h",short)
-__IO(l,"",long)
+/* the following macro is deprecated */
+#define ioaddr(port) __ioaddr((port))
/*
- * Note that due to the way __builtin_constant_t() works, you
- * - can't use it inside an inline function (it will never be true)
- * - you don't have to worry about side effects withing the __builtin..
+ * ioremap and friends
*/
-#ifdef __outbc
-#define outb(val,port) \
- (__builtin_constant_p((port)) ? __outbc((val),(port)) : __outb((val),(port)))
-#else
-#define outb(val,port) __outb((val),(port))
-#endif
-
-#ifdef __outwc
-#define outw(val,port) \
- (__builtin_constant_p((port)) ? __outwc((val),(port)) : __outw((val),(port)))
-#else
-#define outw(val,port) __outw((val),(port))
-#endif
-
-#ifdef __outlc
-#define outl(val,port) \
- (__builtin_constant_p((port)) ? __outlc((val),(port)) : __outl((val),(port)))
-#else
-#define outl(val,port) __outl((val),(port))
-#endif
-
-#ifdef __inbc
-#define inb(port) \
- (__builtin_constant_p((port)) ? __inbc((port)) : __inb((port)))
-#else
-#define inb(port) __inb((port))
-#endif
-
-#ifdef __inwc
-#define inw(port) \
- (__builtin_constant_p((port)) ? __inwc((port)) : __inw((port)))
-#else
-#define inw(port) __inw((port))
-#endif
+extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+extern void __iounmap(void *addr);
-#ifdef __inlc
-#define inl(port) \
- (__builtin_constant_p((port)) ? __inlc((port)) : __inl((port)))
-#else
-#define inl(port) __inl((port))
-#endif
+#define ioremap(off,sz) __arch_ioremap((off),(sz),0)
+#define ioremap_nocache(off,sz) __arch_ioremap((off),(sz),1)
+#define iounmap(_addr) __iounmap(_addr)
-/*
- * This macro will give you the translated IO address for this particular
- * architecture, which can be used with the out_t... functions.
- */
-#ifdef __ioaddrc
-#define ioaddr(port) \
- (__builtin_constant_p((port)) ? __ioaddrc((port)) : __ioaddr((port)))
-#else
-#define ioaddr(port) __ioaddr((port))
-#endif
+extern void __readwrite_bug(const char *fn);
-#ifndef ARCH_IO_DELAY
/*
- * This architecture does not require any delayed IO.
- * It is handled in the hardware.
+ * String version of IO memory access ops:
*/
-#define outb_p(val,port) outb((val),(port))
-#define outw_p(val,port) outw((val),(port))
-#define outl_p(val,port) outl((val),(port))
-#define inb_p(port) inb((port))
-#define inw_p(port) inw((port))
-#define inl_p(port) inl((port))
-#define outsb_p(port,from,len) outsb(port,from,len)
-#define outsw_p(port,from,len) outsw(port,from,len)
-#define outsl_p(port,from,len) outsl(port,from,len)
-#define insb_p(port,to,len) insb(port,to,len)
-#define insw_p(port,to,len) insw(port,to,len)
-#define insl_p(port,to,len) insl(port,to,len)
-
-#else
+extern void _memcpy_fromio(void *, unsigned long, unsigned long);
+extern void _memcpy_toio(unsigned long, const void *, unsigned long);
+extern void _memset_io(unsigned long, int, unsigned long);
/*
- * We have to delay the IO...
+ * If this architecture has PCI memory IO, then define the read/write
+ * macros.
*/
-#ifdef __outbc_p
-#define outb_p(val,port) \
- (__builtin_constant_p((port)) ? __outbc_p((val),(port)) : __outb_p((val),(port)))
-#else
-#define outb_p(val,port) __outb_p((val),(port))
-#endif
-
-#ifdef __outwc_p
-#define outw_p(val,port) \
- (__builtin_constant_p((port)) ? __outwc_p((val),(port)) : __outw_p((val),(port)))
-#else
-#define outw_p(val,port) __outw_p((val),(port))
-#endif
-
-#ifdef __outlc_p
-#define outl_p(val,port) \
- (__builtin_constant_p((port)) ? __outlc_p((val),(port)) : __outl_p((val),(port)))
-#else
-#define outl_p(val,port) __outl_p((val),(port))
-#endif
-
-#ifdef __inbc_p
-#define inb_p(port) \
- (__builtin_constant_p((port)) ? __inbc_p((port)) : __inb_p((port)))
-#else
-#define inb_p(port) __inb_p((port))
-#endif
-
-#ifdef __inwc_p
-#define inw_p(port) \
- (__builtin_constant_p((port)) ? __inwc_p((port)) : __inw_p((port)))
-#else
-#define inw_p(port) __inw_p((port))
-#endif
+#ifdef __mem_pci
-#ifdef __inlc_p
-#define inl_p(port) \
- (__builtin_constant_p((port)) ? __inlc_p((port)) : __inl_p((port)))
-#else
-#define inl_p(port) __inl_p((port))
-#endif
+#define readb(addr) __arch_getb(__mem_pci(addr))
+#define readw(addr) __arch_getw(__mem_pci(addr))
+#define readl(addr) __arch_getl(__mem_pci(addr))
+#define writeb(val,addr) __arch_putb(val,__mem_pci(addr))
+#define writew(val,addr) __arch_putw(val,__mem_pci(addr))
+#define writel(val,addr) __arch_putl(val,__mem_pci(addr))
-#endif
+#define memset_io(a,b,c) _memset_io(__mem_pci(a),(b),(c))
+#define memcpy_fromio(a,b,c) _memcpy_fromio((a),__mem_pci(b),(c))
+#define memcpy_toio(a,b,c) _memcpy_toio(__mem_pci(a),(b),(c))
-extern void __readwrite_bug(const char *fn);
+#define eth_io_copy_and_sum(a,b,c,d) \
+ eth_copy_and_sum((a),__mem_pci(b),(c),(d))
-#ifndef ARCH_READWRITE
+static inline int
+check_signature(unsigned long io_addr, const unsigned char *signature,
+ int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
-#define readb(p) (__readwrite_bug("readb"),0)
-#define readw(p) (__readwrite_bug("readw"),0)
-#define readl(p) (__readwrite_bug("readl"),0)
-#define writeb(v,p) __readwrite_bug("writeb")
-#define writew(v,p) __readwrite_bug("writew")
-#define writel(v,p) __readwrite_bug("writel")
+#else /* __mem_pci */
-#endif
+#define readb(addr) (__readwrite_bug("readb"),0)
+#define readw(addr) (__readwrite_bug("readw"),0)
+#define readl(addr) (__readwrite_bug("readl"),0)
+#define writeb(v,addr) __readwrite_bug("writeb")
+#define writew(v,addr) __readwrite_bug("writew")
+#define writel(v,addr) __readwrite_bug("writel")
-#ifndef memcpy_fromio
-/*
- * String version of IO memory access ops:
- */
-extern void _memcpy_fromio(void *, unsigned long, unsigned long);
-extern void _memcpy_toio(unsigned long, const void *, unsigned long);
-extern void _memset_io(unsigned long, int, unsigned long);
+#define eth_io_copy_and_sum(a,b,c,d) __readwrite_bug("eth_io_copy_and_sum")
-#define memcpy_fromio(to,from,len) _memcpy_fromio((to),(unsigned long)(from),(len))
-#define memcpy_toio(to,from,len) _memcpy_toio((unsigned long)(to),(from),(len))
-#define memset_io(addr,c,len) _memset_io((unsigned long)(addr),(c),(len))
-#endif
+#define check_signature(io,sig,len) (0)
-#define IO_SPACE_LIMIT 0xffff
+#endif /* __mem_pci */
/*
- * This isn't especially architecture dependent so it seems like it
- * might as well go here as anywhere.
+ * If this architecture has ISA IO, then define the isa_read/isa_write
+ * macros.
*/
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
+#ifdef __mem_isa
+
+#define isa_readb(addr) __arch_getb(__mem_isa(addr))
+#define isa_readw(addr) __arch_getw(__mem_isa(addr))
+#define isa_readl(addr) __arch_getl(__mem_isa(addr))
+#define isa_writeb(val,addr) __arch_putb(val,__mem_isa(addr))
+#define isa_writew(val,addr) __arch_putw(val,__mem_isa(addr))
+#define isa_writel(val,addr) __arch_putl(val,__mem_isa(addr))
+#define isa_memset_io(a,b,c) _memset_io(__mem_isa(a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) _memcpy_fromio((a),__mem_isa((b)),(c))
+#define isa_memcpy_toio(a,b,c) _memcpy_toio(__mem_isa((a)),(b),(c))
+
+#define isa_eth_io_copy_and_sum(a,b,c,d) \
+	eth_copy_and_sum((a),__mem_isa((b)),(c),(d))
+
+static inline int
+isa_check_signature(unsigned long io_addr, const unsigned char *signature,
+ int length)
{
int retval = 0;
do {
- if (readb(io_addr) != *signature)
+ if (isa_readb(io_addr) != *signature)
goto out;
io_addr++;
signature++;
return retval;
}
-#undef ARCH_READWRITE
-#undef ARCH_IO_DELAY
-#undef ARCH_IO_CONSTANT
+#else /* __mem_isa */
-#endif
+#define isa_readb(addr) (__readwrite_bug("isa_readb"),0)
+#define isa_readw(addr) (__readwrite_bug("isa_readw"),0)
+#define isa_readl(addr) (__readwrite_bug("isa_readl"),0)
+#define isa_writeb(val,addr) __readwrite_bug("isa_writeb")
+#define isa_writew(val,addr) __readwrite_bug("isa_writew")
+#define isa_writel(val,addr) __readwrite_bug("isa_writel")
+#define isa_memset_io(a,b,c) __readwrite_bug("isa_memset_io")
+#define isa_memcpy_fromio(a,b,c) __readwrite_bug("isa_memcpy_fromio")
+#define isa_memcpy_toio(a,b,c) __readwrite_bug("isa_memcpy_toio")
+
+#define isa_eth_io_copy_and_sum(a,b,c,d) \
+ __readwrite_bug("isa_eth_io_copy_and_sum")
+
+#define isa_check_signature(io,sig,len) (0)
+#endif /* __mem_isa */
+#endif /* __KERNEL__ */
+#endif /* __ASM_ARM_IO_H */
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-struct page *empty_zero_page;
+extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
/*
#define VGA_MAP_MEM(x) (PCIMEM_BASE + (x))
-#define vga_readb(x) (*(x))
-#define vga_writeb(x,y) (*(y) = (x))
+#define vga_readb(x) (*((volatile unsigned char *)(x)))
+#define vga_writeb(x,y) (*((volatile unsigned char *)(y)) = (x))
#endif
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
-struct flock64 {
- short l_type;
- short l_whence;
- loff_t l_start;
- loff_t l_len;
- pid_t l_pid;
-};
-
#endif
#include <asm/fixmap.h>
#include <linux/threads.h>
-extern unsigned long *pgd_quicklist;
+#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
#else
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
#endif
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
-extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
-{
- memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- }
+ } else
+ ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
+ struct task_struct * p;
pgd_t *pgd;
-
- mmlist_access_lock();
- mmlist_set_pgdir(address, entry);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned
- long *)pgd)
+#ifdef __SMP__
+ int i;
+#endif
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+#ifndef __SMP__
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
- mmlist_access_unlock();
+#else
+	/* Callers of pgd_alloc/pgd_free hold the master kernel lock, and so does
+	   our callee, so we can modify pgd caches of other CPUs as well. -jj */
+ for (i = 0; i < NR_CPUS; i++)
+ for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+#endif
}
/*
int f00f_bug;
int coma_bug;
unsigned long loops_per_sec;
+ unsigned long *pgd_quick;
unsigned long *pmd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
-struct flock64 {
- short l_type;
- short l_whence;
- loff_t l_start;
- loff_t l_len;
- pid_t l_pid;
-};
-
#endif /* _M68K_FCNTL_H */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
long pad[4]; /* ZZZZZZZZZZZZZZZZZZZZZZZZZZ */
} flock_t;
-typedef struct flock64 {
- short l_type;
- short l_whence;
- loff_t l_start;
- loff_t l_len;
- pid_t l_pid;
-} flock64_t;
-
#endif /* __ASM_MIPS_FCNTL_H */
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
- if (ret)
+ if (ret) {
+ init = pgd_offset(&init_mm, 0);
pgd_init((unsigned long)ret);
- return ret;
-}
-
-extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
-{
- pgd_t *init;
-
- init = pgd_offset(&init_mm, 0);
- memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
- }
+ } else
+ ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
+ struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
-
- mmlist_access_lock();
- mmlist_set_pgdir(address, entry);
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
- mmlist_access_unlock();
}
extern pgd_t swapper_pg_dir[1024];
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
-struct flock64 {
- short l_type;
- short l_whence;
- loff_t l_start;
- loff_t l_len;
- pid_t l_pid;
-};
-
#endif
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
+ struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
-
- mmlist_access_lock();
- mmlist_set_pgdir(address, entry);
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
- mmlist_access_unlock();
}
/* We don't use pmd cache, so this is a dummy routine */
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret)
+ pgd_t *ret, *init;
+ /*if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )*/
+ if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- return ret;
-}
-
-extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
-{
- pgd_t *init;
-
- init = pgd_offset(&init_mm, 0);
- memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ if (ret) {
+ init = pgd_offset(&init_mm, 0);
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- }
+ } else
+ ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
-#define F_GETLK64 12 /* for LFS */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
pid_t l_pid;
};
-struct flock64 {
- short l_type;
- short l_whence;
- loff_t l_start;
- loff_t l_len;
- pid_t l_pid;
-};
-
#endif /* __ASM_SH_FCNTL_H */
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (ret)
+ if (ret) {
+ /* Clear User space */
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- return ret;
-}
-extern __inline__ void get_pgd_uptodate(pgd_t *pgd)
-{
- /* XXX: Copy vmalloc-ed space??? */
- memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ /* XXX: Copy vmalloc-ed space??? */
+ memcpy(ret + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
- }
+ } else
+ ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc() get_pgd_fast()
extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
+ struct task_struct * p;
pgd_t *pgd;
- mmlist_access_lock();
- mmlist_set_pgdir(address, entry);
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
- mmlist_access_unlock();
}
extern pgd_t swapper_pg_dir[1024];
__kernel_pid_t32 l_pid;
short __unused;
};
-
-#define flock64 flock
#endif
#endif /* !(_SPARC64_FCNTL_H) */
pgd_quicklist = (unsigned long *)ret->next_hash;
ret = (struct page *)(page_address(ret) + off);
pgd_cache_size--;
+ } else {
+ ret = (struct page *) __get_free_page(GFP_KERNEL);
+ if(ret) {
+ struct page *page = mem_map + MAP_NR(ret);
+
+ memset(ret, 0, PAGE_SIZE);
+ (unsigned long)page->pprev_hash = 2;
+ (unsigned long *)page->next_hash = pgd_quicklist;
+ pgd_quicklist = (unsigned long *)page;
+ pgd_cache_size++;
+ }
}
return (pgd_t *)ret;
}
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
- pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
-
- if(ret)
- memset(ret, 0, PAGE_SIZE);
- return (pgd_t *)ret;
-}
-
-extern __inline__ pgd_t *get_pgd_uptodate(pgd_t *pgd)
-{
- struct page *page = mem_map + MAP_NR(pgd);
-
- (unsigned long)page->pprev_hash = 2;
- (unsigned long *)page->next_hash = pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- pgd_cache_size++;
-}
#else /* __SMP__ */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
+ } else {
+ ret = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if(ret)
+ memset(ret, 0, PAGE_SIZE);
}
return (pgd_t *)ret;
}
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if(ret)
- memset(ret, 0, PAGE_SIZE);
- return(ret);
-}
-
-extern __inline__ pgd_t *get_pgd_uptodate(pgd_t *pgd)
-{
-}
-
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
struct file *fl_file;
unsigned char fl_flags;
unsigned char fl_type;
- loff_t fl_start;
- loff_t fl_end;
+ off_t fl_start;
+ off_t fl_end;
void (*fl_notify)(struct file_lock *); /* unblock callback */
extern int fcntl_getlk(unsigned int, struct flock *);
extern int fcntl_setlk(unsigned int, unsigned int, struct flock *);
-extern int fcntl_getlk64(unsigned int fd, struct flock64 *l);
-extern int fcntl_setlk64(unsigned int fd, unsigned int cmd, struct flock64 *l);
-
/* fs/locks.c */
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
#define I2C_BUSID_PARPORT 2 /* Bit banging on a parallel port */
#define I2C_BUSID_BUZ 3
#define I2C_BUSID_ZORAN 4
+#define I2C_BUSID_CYBER2000 5 /* I2C bus on a Cyber2000 */
/*
* struct for a driver for a i2c chip (tuner, soundprocessor,
#define vmlist_modify_lock(mm) vmlist_access_lock(mm)
#define vmlist_modify_unlock(mm) vmlist_access_unlock(mm)
-extern spinlock_t mm_lock;
-#define mmlist_access_lock() spin_lock(&mm_lock)
-#define mmlist_access_unlock() spin_unlock(&mm_lock)
-#define mmlist_modify_lock() mmlist_access_lock()
-#define mmlist_modify_unlock() mmlist_access_unlock()
-
-#define for_each_mm(mm) \
- for (mm = list_entry(init_mm.mmlist.next, struct mm_struct, mmlist); \
- (mm != &init_mm); \
- (mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist)))
-
-static inline void mmlist_set_pgdir(unsigned long address, pgd_t entry)
-{
- struct mm_struct *mm;
-
- for_each_mm(mm)
- *pgd_offset(mm,address) = entry;
-}
-
#endif /* __KERNEL__ */
#endif
unsigned long cpu_vm_mask;
unsigned long swap_cnt; /* number of pages to swap on next pass */
unsigned long swap_address;
- struct list_head mmlist; /* active mm list */
/*
* This is an architecture-specific pointer: the portable
* part of Linux does not know about any segments.
0, 0, 0, \
0, 0, 0, 0, \
0, 0, 0, \
- 0, 0, 0, 0, \
- LIST_HEAD_INIT(init_mm.mmlist), NULL }
+ 0, 0, 0, 0, NULL }
struct signal_struct {
atomic_t count;
/* SLAB cache for mm_struct's. */
kmem_cache_t *mm_cachep;
-spinlock_t mm_lock = SPIN_LOCK_UNLOCKED;
/* SLAB cache for files structs */
kmem_cache_t *files_cachep;
atomic_set(&mm->mm_count, 1);
init_MUTEX(&mm->mmap_sem);
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
- mmlist_modify_lock();
- if ((mm->pgd = get_pgd_fast())) {
- list_add_tail(&mm->mmlist, &init_mm.mmlist);
- mmlist_modify_unlock();
+ mm->pgd = pgd_alloc();
+ if (mm->pgd)
return mm;
- }
- mmlist_modify_unlock();
- if ((mm->pgd = get_pgd_slow())) {
- mmlist_modify_lock();
- get_pgd_uptodate(mm->pgd);
- list_add_tail(&mm->mmlist, &init_mm.mmlist);
- mmlist_modify_unlock();
- return mm;
- }
kmem_cache_free(mm_cachep, mm);
}
return NULL;
inline void __mmdrop(struct mm_struct *mm)
{
if (mm == &init_mm) BUG();
- mmlist_modify_lock();
pgd_free(mm->pgd);
destroy_context(mm);
- list_del(&mm->mmlist);
- mmlist_modify_unlock();
kmem_cache_free(mm_cachep, mm);
}
/*
- * linux/kernel/ktimer.c
+ * linux/kernel/timer.c
*
* Kernel internal timers, kernel timekeeping, basic process system calls
*
* Alan Cox : Datagram iovec handling
* Darryl Miles : Fixed non-blocking SOCK_STREAM.
* Alan Cox : POSIXisms
+ * Pete Wyckoff : Unconnected accept() fix.
*
*/
/* Sequenced packets can come disconnected. If so we report the problem */
error = -ENOTCONN;
- if(connection_based(sk) && sk->state!=TCP_ESTABLISHED)
+ if(connection_based(sk) && !(sk->state==TCP_ESTABLISHED || sk->state==TCP_LISTEN))
goto out;
/* handle signals */