D: Initial Mac68K port
D: Video4Linux design, bw-qcam and PMS driver ports.
D: 2.1.x modular sound
-S: c/o I2IT Limited
-S: The Innovation Centre
-S: Singleton Park
-S: Swansea, SA2 8PP
-S: Wales, United Kingdom
+S: c/o Red Hat UK Ltd
+S: Alexandra House
+S: Alexandra Terrace
+S: Guildford, GU1 3DA
+S: United Kingdom
N: Laurence Culhane
E: loz@holmes.demon.co.uk
"What should you avoid when writing PCI drivers"
- by Martin Mares <mj@atrey.karlin.mff.cuni.cz> on 03-Nov-1999
+ by Martin Mares <mj@suse.cz> on 21-Nov-1999
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See Documentation/IO-mapping.txt for how to access device memory.
+ You still need to call request_region() for I/O regions and request_mem_region()
+for memory regions to make sure nobody else is using the same device.
+
+ All interrupt handlers should be registered with SA_SHIRQ and use the devid
+to map IRQs to devices (remember that all PCI interrupts are shared).
+
5. Other interesting functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pci_find_slot() Find pci_dev corresponding to given bus and
PCI SUBSYSTEM
P: Martin Mares
-M: mj@atrey.karlin.mff.cuni.cz
+M: mj@suse.cz
L: linux-kernel@vger.rutgers.edu
S: Maintained
endif
# For TSUNAMI, we must have the assembler not emulate our instructions.
-# The same is true for POLARIS, and now PYXIS.
+# The same is true for IRONGATE, POLARIS, PYXIS.
# BWX is most important, but we don't really want any emulation ever.
+
ifeq ($(old_gas),y)
- ifneq ($(CONFIG_ALPHA_GENERIC)$(CONFIG_ALPHA_TSUNAMI)$(CONFIG_ALPHA_POLARIS)$(CONFIG_ALPHA_PYXIS),)
- # How do we do #error in make?
- CFLAGS := --error-please-upgrade-your-assembler
- endif
-else
- ifeq ($(CONFIG_ALPHA_GENERIC),y)
- CFLAGS := $(CFLAGS) -Wa,-mev6
- endif
- ifeq ($(CONFIG_ALPHA_PYXIS),y)
- CFLAGS := $(CFLAGS) -Wa,-m21164a
- endif
- ifeq ($(CONFIG_ALPHA_POLARIS),y)
- CFLAGS := $(CFLAGS) -Wa,-m21164pc
- endif
- ifeq ($(CONFIG_ALPHA_TSUNAMI),y)
- CFLAGS := $(CFLAGS) -Wa,-mev6
- endif
+ # How do we do #error in make?
+ CFLAGS := --error-please-upgrade-your-assembler
endif
+CFLAGS := $(CFLAGS) -Wa,-mev6
HEAD := arch/alpha/kernel/head.o
EB64+ CONFIG_ALPHA_EB64P \
EB66 CONFIG_ALPHA_EB66 \
EB66+ CONFIG_ALPHA_EB66P \
+ Eiger CONFIG_ALPHA_EIGER \
Jensen CONFIG_ALPHA_JENSEN \
LX164 CONFIG_ALPHA_LX164 \
Miata CONFIG_ALPHA_MIATA \
Mikasa CONFIG_ALPHA_MIKASA \
+ Nautilus CONFIG_ALPHA_NAUTILUS \
Noname CONFIG_ALPHA_NONAME \
Noritake CONFIG_ALPHA_NORITAKE \
PC164 CONFIG_ALPHA_PC164 \
unset CONFIG_ALPHA_LCA CONFIG_ALPHA_APECS CONFIG_ALPHA_CIA
unset CONFIG_ALPHA_T2 CONFIG_ALPHA_PYXIS CONFIG_ALPHA_POLARIS
unset CONFIG_ALPHA_TSUNAMI CONFIG_ALPHA_MCPCIA
+unset CONFIG_ALPHA_IRONGATE
if [ "$CONFIG_ALPHA_GENERIC" = "y" ]
then
define_bool CONFIG_ALPHA_EV5 y
define_bool CONFIG_ALPHA_PYXIS y
fi
-if [ "$CONFIG_ALPHA_DP264" = "y" ]
+if [ "$CONFIG_ALPHA_DP264" = "y" -o "$CONFIG_ALPHA_EIGER" = "y" ]
then
define_bool CONFIG_PCI y
define_bool CONFIG_ALPHA_EV6 y
then
define_bool CONFIG_ALPHA_EV4 y
fi
+if [ "$CONFIG_ALPHA_NAUTILUS" = "y" ]
+then
+ define_bool CONFIG_PCI y
+ define_bool CONFIG_ALPHA_EV6 y
+ define_bool CONFIG_ALPHA_IRONGATE y
+fi
if [ "$CONFIG_ALPHA_CABRIOLET" = "y" -o "$CONFIG_ALPHA_AVANTI" = "y" \
-o "$CONFIG_ALPHA_EB64P" = "y" -o "$CONFIG_ALPHA_JENSEN" = "y" \
-o "$CONFIG_ALPHA_SABLE" = "y" -o "$CONFIG_ALPHA_MIATA" = "y" \
-o "$CONFIG_ALPHA_NORITAKE" = "y" -o "$CONFIG_ALPHA_PC164" = "y" \
-o "$CONFIG_ALPHA_LX164" = "y" -o "$CONFIG_ALPHA_SX164" = "y" \
- -o "$CONFIG_ALPHA_DP264" = "y" -o "$CONFIG_ALPHA_RAWHIDE" = "y" ]
+ -o "$CONFIG_ALPHA_DP264" = "y" -o "$CONFIG_ALPHA_RAWHIDE" = "y" \
+ -o "$CONFIG_ALPHA_EIGER" = "y" ]
then
bool 'Use SRM as bootloader' CONFIG_ALPHA_SRM
fi
mainmenu_option next_comment
comment 'Console drivers'
bool 'VGA text console' CONFIG_VGA_CONSOLE
- bool 'Support for frame buffer devices' CONFIG_FB
+ source drivers/video/Config.in
if [ "$CONFIG_FB" = "y" ]; then
define_bool CONFIG_PCI_CONSOLE y
fi
- source drivers/video/Config.in
endmenu
fi
# CONFIG_ALPHA_LX164 is not set
# CONFIG_ALPHA_MIATA is not set
# CONFIG_ALPHA_MIKASA is not set
+# CONFIG_ALPHA_NAUTILUS is not set
# CONFIG_ALPHA_NONAME is not set
# CONFIG_ALPHA_NORITAKE is not set
# CONFIG_ALPHA_PC164 is not set
.S.o:
$(CC) -D__ASSEMBLY__ $(AFLAGS) -c -o $*.o $<
-all: kernel.o head.o
-
O_TARGET := kernel.o
O_OBJS := entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \
ptrace.o time.o fpreg.o semaphore.o
ifdef CONFIG_ALPHA_GENERIC
-O_OBJS += core_apecs.o core_cia.o core_lca.o core_mcpcia.o core_pyxis.o \
- core_t2.o core_tsunami.o core_polaris.o \
- sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o \
- sys_jensen.o sys_miata.o sys_mikasa.o sys_noritake.o \
- sys_rawhide.o sys_ruffian.o sys_sable.o sys_sio.o \
- sys_sx164.o sys_takara.o sys_rx164.o \
+O_OBJS += core_apecs.o core_cia.o core_irongate.o core_lca.o core_mcpcia.o \
+	   core_polaris.o core_pyxis.o core_t2.o core_tsunami.o \
+	   sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
+	   sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
+	   sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
+	   sys_sable.o sys_sio.o sys_sx164.o sys_takara.o \
es1888.o smc37c669.o smc37c93x.o ns87312.o pci.o
else
ifdef CONFIG_ALPHA_CIA
O_OBJS += core_cia.o
endif
+ifdef CONFIG_ALPHA_IRONGATE
+O_OBJS += core_irongate.o
+endif
ifdef CONFIG_ALPHA_LCA
O_OBJS += core_lca.o
endif
ifneq ($(CONFIG_ALPHA_EB64P)$(CONFIG_ALPHA_EB66),)
O_OBJS += sys_eb64p.o
endif
+ifdef CONFIG_ALPHA_EIGER
+O_OBJS += sys_eiger.o
+endif
ifdef CONFIG_ALPHA_JENSEN
O_OBJS += sys_jensen.o
endif
ifdef CONFIG_ALPHA_MIKASA
O_OBJS += sys_mikasa.o
endif
+ifdef CONFIG_ALPHA_NAUTILUS
+O_OBJS += sys_nautilus.o
+endif
ifdef CONFIG_ALPHA_NORITAKE
O_OBJS += sys_noritake.o
endif
hae_mem->start = 0;
hae_mem->end = CIA_MEM_R1_MASK;
hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
- request_resource(&iomem_resource, hae_mem);
+ if (request_resource(&iomem_resource, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM\n");
}
static inline void
--- /dev/null
+/*
+ * linux/arch/alpha/kernel/core_irongate.c
+ *
+ * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ * (David Daniel, Stig Telfer, Soohoon Lee)
+ *
+ * Code common to all IRONGATE core logic chips.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/pci.h>
+#include <asm/hwrpb.h>
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_irongate.h>
+#undef __EXTERN_INLINE
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the I/O controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address accordingly. It is therefore not safe
+ * to have concurrent invocations to configuration space access
+ * routines, but there really shouldn't be any need for this.
+ *
+ * addr[31:24] reserved
+ *	addr[23:16]		bus number (8 bits = 256 possible buses)
+ * addr[15:11] Device number (5 bits)
+ * addr[10: 8] function number
+ * addr[ 7: 2] register number
+ *
+ * For IRONGATE:
+ * if (bus = addr[23:16]) == 0
+ * then
+ * type 0 config cycle:
+ * addr_on_pci[31:11] = id selection for device = addr[15:11]
+ * addr_on_pci[10: 2] = addr[10: 2] ???
+ * addr_on_pci[ 1: 0] = 00
+ * else
+ * type 1 config cycle (pass on with no decoding):
+ * addr_on_pci[31:24] = 0
+ * addr_on_pci[23: 2] = addr[23: 2]
+ * addr_on_pci[ 1: 0] = 01
+ * fi
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
+ unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = dev->bus->number;
+ u8 device_fn = dev->devfn;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= IRONGATE_CONF;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+irongate_read_config_byte(struct pci_dev *dev, int where, u8 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *value = __kernel_ldbu(*(vucp)addr);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_read_config_word(struct pci_dev *dev, int where, u16 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *value = __kernel_ldwu(*(vusp)addr);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_read_config_dword(struct pci_dev *dev, int where, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *value = *(vuip)addr;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_write_config_byte(struct pci_dev *dev, int where, u8 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_write_config_word(struct pci_dev *dev, int where, u16 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_write_config_dword(struct pci_dev *dev, int where, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(dev, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+struct pci_ops irongate_pci_ops =
+{
+ read_byte: irongate_read_config_byte,
+ read_word: irongate_read_config_word,
+ read_dword: irongate_read_config_dword,
+ write_byte: irongate_write_config_byte,
+ write_word: irongate_write_config_word,
+ write_dword: irongate_write_config_dword
+};
+\f
+#if 0
+static void
+irongate_register_dump(const char *function_name)
+{
+ printk("%s: Irongate registers:\n"
+ "\tdev_vendor\t0x%08x\n"
+ "\tstat_cmd\t0x%08x\n"
+ "\tclass\t\t0x%08x\n"
+ "\tlatency\t\t0x%08x\n"
+ "\tbar0\t\t0x%08x\n"
+ "\tbar1\t\t0x%08x\n"
+ "\tbar2\t\t0x%08x\n"
+ "\trsrvd0[0]\t0x%08x\n"
+ "\trsrvd0[1]\t0x%08x\n"
+ "\trsrvd0[2]\t0x%08x\n"
+ "\trsrvd0[3]\t0x%08x\n"
+ "\trsrvd0[4]\t0x%08x\n"
+ "\trsrvd0[5]\t0x%08x\n"
+ "\tcapptr\t\t0x%08x\n"
+ "\trsrvd1[0]\t0x%08x\n"
+ "\trsrvd1[1]\t0x%08x\n"
+ "\tbacsr10\t\t0x%08x\n"
+ "\tbacsr32\t\t0x%08x\n"
+ "\tbacsr54\t\t0x%08x\n"
+ "\trsrvd2[0]\t0x%08x\n"
+ "\tdrammap\t\t0x%08x\n"
+ "\tdramtm\t\t0x%08x\n"
+ "\tdramms\t\t0x%08x\n"
+ "\trsrvd3[0]\t0x%08x\n"
+ "\tbiu0\t\t0x%08x\n"
+ "\tbiusip\t\t0x%08x\n"
+ "\trsrvd4[0]\t0x%08x\n"
+ "\trsrvd4[1]\t0x%08x\n"
+ "\tmro\t\t0x%08x\n"
+ "\trsrvd5[0]\t0x%08x\n"
+ "\trsrvd5[1]\t0x%08x\n"
+ "\trsrvd5[2]\t0x%08x\n"
+ "\twhami\t\t0x%08x\n"
+ "\tpciarb\t\t0x%08x\n"
+ "\tpcicfg\t\t0x%08x\n"
+ "\trsrvd6[0]\t0x%08x\n"
+ "\trsrvd6[1]\t0x%08x\n"
+ "\trsrvd6[2]\t0x%08x\n"
+ "\trsrvd6[3]\t0x%08x\n"
+ "\trsrvd6[4]\t0x%08x\n"
+ "\tagpcap\t\t0x%08x\n"
+ "\tagpstat\t\t0x%08x\n"
+ "\tagpcmd\t\t0x%08x\n"
+ "\tagpva\t\t0x%08x\n"
+ "\tagpmode\t\t0x%08x\n",
+ function_name,
+ IRONGATE0->dev_vendor,
+ IRONGATE0->stat_cmd,
+ IRONGATE0->class,
+ IRONGATE0->latency,
+ IRONGATE0->bar0,
+ IRONGATE0->bar1,
+ IRONGATE0->bar2,
+ IRONGATE0->rsrvd0[0],
+ IRONGATE0->rsrvd0[1],
+ IRONGATE0->rsrvd0[2],
+ IRONGATE0->rsrvd0[3],
+ IRONGATE0->rsrvd0[4],
+ IRONGATE0->rsrvd0[5],
+ IRONGATE0->capptr,
+ IRONGATE0->rsrvd1[0],
+ IRONGATE0->rsrvd1[1],
+ IRONGATE0->bacsr10,
+ IRONGATE0->bacsr32,
+ IRONGATE0->bacsr54,
+ IRONGATE0->rsrvd2[0],
+ IRONGATE0->drammap,
+ IRONGATE0->dramtm,
+ IRONGATE0->dramms,
+ IRONGATE0->rsrvd3[0],
+ IRONGATE0->biu0,
+ IRONGATE0->biusip,
+ IRONGATE0->rsrvd4[0],
+ IRONGATE0->rsrvd4[1],
+ IRONGATE0->mro,
+ IRONGATE0->rsrvd5[0],
+ IRONGATE0->rsrvd5[1],
+ IRONGATE0->rsrvd5[2],
+ IRONGATE0->whami,
+ IRONGATE0->pciarb,
+ IRONGATE0->pcicfg,
+ IRONGATE0->rsrvd6[0],
+ IRONGATE0->rsrvd6[1],
+ IRONGATE0->rsrvd6[2],
+ IRONGATE0->rsrvd6[3],
+ IRONGATE0->rsrvd6[4],
+ IRONGATE0->agpcap,
+ IRONGATE0->agpstat,
+ IRONGATE0->agpcmd,
+ IRONGATE0->agpva,
+ IRONGATE0->agpmode);
+}
+#else
+#define irongate_register_dump(x)
+#endif
+
+int
+irongate_pci_clr_err(void)
+{
+ unsigned int nmi_ctl=0;
+ unsigned int IRONGATE_jd;
+
+again:
+ IRONGATE_jd = IRONGATE0->stat_cmd;
+ printk("Iron stat_cmd %x\n", IRONGATE_jd);
+ IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */
+
+ IRONGATE_jd = IRONGATE0->dramms;
+ printk("Iron dramms %x\n", IRONGATE_jd);
+ IRONGATE0->dramms = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = IRONGATE0->dramms; /* re-read to force write */
+
+ /* Clear ALI NMI */
+ nmi_ctl = inb(0x61);
+ nmi_ctl |= 0x0c;
+ outb(nmi_ctl, 0x61);
+ nmi_ctl &= ~0x0c;
+ outb(nmi_ctl, 0x61);
+
+ IRONGATE_jd = IRONGATE0->dramms;
+ if (IRONGATE_jd & 0x300) goto again;
+
+ return 0;
+}
+
+void __init
+irongate_init_arch(void)
+{
+ struct pci_controler *hose;
+
+ irongate_pci_clr_err();
+ irongate_register_dump(__FUNCTION__);
+
+ /*
+ * Create our single hose.
+ */
+
+ hose = alloc_pci_controler();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->config_space = IRONGATE_CONF;
+ hose->index = 0;
+}
# define DBG_CFG(args)
#endif
-#define MCPCIA_MAX_HOSES 2
+#define MCPCIA_MAX_HOSES 4
/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. Durango adds
PCI2 and PCI3 at MID 6 and 7 respectively. */
static int
mcpcia_read_config_byte(struct pci_dev *dev, int where, u8 *value)
{
- struct pci_controler *hose = dev->sysdata;
+ struct pci_controler *hose = dev->sysdata ? : probing_hose;
unsigned long addr, w;
unsigned char type1;
static int
mcpcia_read_config_word(struct pci_dev *dev, int where, u16 *value)
{
- struct pci_controler *hose = dev->sysdata;
+ struct pci_controler *hose = dev->sysdata ? : probing_hose;
unsigned long addr, w;
unsigned char type1;
static int
mcpcia_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{
- struct pci_controler *hose = dev->sysdata;
+ struct pci_controler *hose = dev->sysdata ? : probing_hose;
unsigned long addr;
unsigned char type1;
static int
mcpcia_write_config(struct pci_dev *dev, int where, u32 value, long mask)
{
- struct pci_controler *hose = dev->sysdata;
+ struct pci_controler *hose = dev->sysdata ? : probing_hose;
unsigned long addr;
unsigned char type1;
mb();
mb();
draina();
+ wrmces(7);
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
mcheck_extra(cpu) = mid;
io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
io->end = io->start + 0xffff;
io->name = pci_io_names[h];
+ io->flags = IORESOURCE_IO;
mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
mem->end = mem->start + 0xffffffff;
mem->name = pci_mem_names[h];
+ mem->flags = IORESOURCE_MEM;
hae_mem->start = mem->start;
hae_mem->end = mem->start + MCPCIA_MEM_MASK;
hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, io) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", h);
+ if (request_resource(&iomem_resource, mem) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
+ if (request_resource(mem, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
+}
- request_resource(&ioport_resource, io);
- request_resource(&iomem_resource, mem);
- request_resource(mem, hae_mem);
+static void
+mcpcia_pci_clr_err(int mid)
+{
+ *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */
+ mb();
+ *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */
}
static void __init
int mid = hose2mid(hose->index);
unsigned int tmp;
+ mcpcia_pci_clr_err(mid);
+
/*
- * Set up error reporting. Make sure CPU_PE is OFF in the mask.
+ * Set up error reporting.
*/
-#if 0
- tmp = *(vuip)MCPCIA_ERR_MASK(mid);
- tmp &= ~4;
- *(vuip)MCPCIA_ERR_MASK(mid) = tmp;
- mb();
- tmp = *(vuip)MCPCIA_ERR_MASK(mid);
-#endif
-
tmp = *(vuip)MCPCIA_CAP_ERR(mid);
- tmp |= 0x0006; /* master/target abort */
+ tmp |= 0x0006; /* master/target abort */
*(vuip)MCPCIA_CAP_ERR(mid) = tmp;
mb();
tmp = *(vuip)MCPCIA_CAP_ERR(mid);
void __init
mcpcia_init_arch(void)
{
- extern asmlinkage void entInt(void);
- struct pci_controler *hose;
- int h, hose_count = 0;
-
- /* Ho hum.. init_arch is called before init_IRQ, but we need to be
- able to handle machine checks. So install the handler now. */
- wrent(entInt, 0);
-
/* With multiple PCI busses, we play with I/O as physical addrs. */
ioport_resource.end = ~0UL;
iomem_resource.end = ~0UL;
+ /* Allocate hose 0. That's the one that all the ISA junk hangs
+ off of, from which we'll be registering stuff here in a bit.
+ Other hose detection is done in mcpcia_init_hoses, which is
+ called from init_IRQ. */
+
+ mcpcia_new_hose(0);
+}
+
+/* This is called from init_IRQ, since we cannot take interrupts
+ before then. Which means we cannot do this in init_arch. */
+
+void __init
+mcpcia_init_hoses(void)
+{
+ struct pci_controler *hose;
+ int h, hose_count = 0;
+
/* First, find how many hoses we have. */
for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
if (mcpcia_probe_hose(h)) {
- mcpcia_new_hose(h);
+ if (h != 0)
+ mcpcia_new_hose(h);
hose_count++;
}
}
- printk("mcpcia_init_arch: found %d hoses\n", hose_count);
+ printk("mcpcia_init_hoses: found %d hoses\n", hose_count);
/* Now do init for each hose. */
for (hose = hose_head; hose; hose = hose->next)
mcpcia_startup_hose(hose);
}
-static void
-mcpcia_pci_clr_err(int mid)
-{
- *(vuip)MCPCIA_CAP_ERR(mid);
- *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */
- mb();
- *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */
-}
-
static void
mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
{
/* Print PAL fields */
for (i = 0; i < 24; i += 2) {
- printk("\tpal temp[%d-%d]\t\t= %16lx %16lx\n\r",
+ printk(" paltmp[%d-%d] = %16lx %16lx\n",
i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
}
for (i = 0; i < 8; i += 2) {
- printk("\tshadow[%d-%d]\t\t= %16lx %16lx\n\r",
+ printk(" shadow[%d-%d] = %16lx %16lx\n",
i, i+1, frame->shadow[i],
frame->shadow[i+1]);
}
- printk("\tAddr of excepting instruction\t= %16lx\n\r",
+ printk(" Addr of excepting instruction = %16lx\n",
frame->exc_addr);
- printk("\tSummary of arithmetic traps\t= %16lx\n\r",
+ printk(" Summary of arithmetic traps = %16lx\n",
frame->exc_sum);
- printk("\tException mask\t\t\t= %16lx\n\r",
+ printk(" Exception mask = %16lx\n",
frame->exc_mask);
- printk("\tBase address for PALcode\t= %16lx\n\r",
+ printk(" Base address for PALcode = %16lx\n",
frame->pal_base);
- printk("\tInterrupt Status Reg\t\t= %16lx\n\r",
+ printk(" Interrupt Status Reg = %16lx\n",
frame->isr);
- printk("\tCURRENT SETUP OF EV5 IBOX\t= %16lx\n\r",
+ printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n",
frame->icsr);
- printk("\tI-CACHE Reg %s parity error\t= %16lx\n\r",
+ printk(" I-CACHE Reg %s parity error = %16lx\n",
(frame->ic_perr_stat & 0x800L) ?
"Data" : "Tag",
frame->ic_perr_stat);
- printk("\tD-CACHE error Reg\t\t= %16lx\n\r",
+ printk(" D-CACHE error Reg = %16lx\n",
frame->dc_perr_stat);
if (frame->dc_perr_stat & 0x2) {
switch (frame->dc_perr_stat & 0x03c) {
case 8:
- printk("\t\tData error in bank 1\n\r");
+ printk(" Data error in bank 1\n");
break;
case 4:
- printk("\t\tData error in bank 0\n\r");
+ printk(" Data error in bank 0\n");
break;
case 20:
- printk("\t\tTag error in bank 1\n\r");
+ printk(" Tag error in bank 1\n");
break;
case 10:
- printk("\t\tTag error in bank 0\n\r");
+ printk(" Tag error in bank 0\n");
break;
}
}
- printk("\tEffective VA\t\t\t= %16lx\n\r",
+ printk(" Effective VA = %16lx\n",
frame->va);
- printk("\tReason for D-stream\t\t= %16lx\n\r",
+ printk(" Reason for D-stream = %16lx\n",
frame->mm_stat);
- printk("\tEV5 SCache address\t\t= %16lx\n\r",
+ printk(" EV5 SCache address = %16lx\n",
frame->sc_addr);
- printk("\tEV5 SCache TAG/Data parity\t= %16lx\n\r",
+ printk(" EV5 SCache TAG/Data parity = %16lx\n",
frame->sc_stat);
- printk("\tEV5 BC_TAG_ADDR\t\t\t= %16lx\n\r",
+ printk(" EV5 BC_TAG_ADDR = %16lx\n",
frame->bc_tag_addr);
- printk("\tEV5 EI_ADDR: Phys addr of Xfer\t= %16lx\n\r",
+ printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
frame->ei_addr);
- printk("\tFill Syndrome\t\t\t= %16lx\n\r",
+ printk(" Fill Syndrome = %16lx\n",
frame->fill_syndrome);
- printk("\tEI_STAT reg\t\t\t= %16lx\n\r",
+ printk(" EI_STAT reg = %16lx\n",
frame->ei_stat);
- printk("\tLD_LOCK\t\t\t\t= %16lx\n\r",
+ printk(" LD_LOCK = %16lx\n",
frame->ld_lock);
}
if (mcheck_expected(cpu)) {
mcpcia_pci_clr_err(mcheck_extra(cpu));
} else {
- /* FIXME: how do we figure out which hose the error was on? */
+ /* FIXME: how do we figure out which hose the
+ error was on? */
struct pci_controler *hose;
for (hose = hose_head; hose; hose = hose->next)
mcpcia_pci_clr_err(hose2mid(hose->index));
__kernel_stw(value, *(vusp)pci_addr);
mb();
- __kernel_ldbu(*(vusp)pci_addr);
+ __kernel_ldwu(*(vusp)pci_addr);
return PCIBIOS_SUCCESSFUL;
}
static inline void
pyxis_pci_clr_err(void)
{
- *(vuip)PYXIS_ERR;
- *(vuip)PYXIS_ERR = 0x0180;
+ unsigned int tmp;
+
+ tmp = *(vuip)PYXIS_ERR;
+ *(vuip)PYXIS_ERR = tmp;
mb();
*(vuip)PYXIS_ERR; /* re-read to force write */
}
mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
unsigned char *type1)
{
- struct pci_controler *hose = dev->sysdata;
+ struct pci_controler *hose = dev->sysdata ? : probing_hose;
unsigned long addr;
u8 bus = dev->bus->number;
u8 device_fn = dev->devfn;
__kernel_stb(value, *(vucp)addr);
mb();
+ __kernel_ldbu(*(vucp)addr);
return PCIBIOS_SUCCESSFUL;
}
__kernel_stw(value, *(vusp)addr);
mb();
+ __kernel_ldwu(*(vusp)addr);
return PCIBIOS_SUCCESSFUL;
}
*(vuip)addr = value;
mb();
+ *(vuip)addr;
return PCIBIOS_SUCCESSFUL;
}
hose->index = index;
hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS;
- hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE;
+ hose->io_space->end = hose->io_space->start + 0xffff;
hose->io_space->name = pci_io_names[index];
hose->io_space->flags = IORESOURCE_IO;
hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS;
- /* the IOMEM address space is larger than 32bit but most pci
- cars doesn't support 64bit address space so we stick with
- 32bit here (see the TSUNAMI_MEM_SPACE define). */
hose->mem_space->end = hose->mem_space->start + 0xffffffff;
hose->mem_space->name = pci_mem_names[index];
hose->mem_space->flags = IORESOURCE_MEM;
if (request_resource(&ioport_resource, hose->io_space) < 0)
- printk(KERN_ERR "failed to request IO on hose %d", index);
+ printk(KERN_ERR "Failed to request IO on hose %d\n", index);
if (request_resource(&iomem_resource, hose->mem_space) < 0)
- printk(KERN_ERR "failed to request IOMEM on hose %d", index);
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
/*
* Set up the PCI->physical memory translation windows.
int __local_bh_count;
#endif
-#if NR_IRQS > 64
-# error Unable to handle more than 64 irq levels.
+#if NR_IRQS > 128
+# error Unable to handle more than 128 irq levels.
#endif
#ifdef CONFIG_ALPHA_GENERIC
/*
* Shadow-copy of masked interrupts.
*/
-unsigned long alpha_irq_mask = ~0UL;
+
+unsigned long _alpha_irq_masks[2] = { ~0UL, ~0UL };
/*
* The ack_irq routine used by 80% of the systems.
# define IACK_SC TSUNAMI_IACK_SC
#elif defined(CONFIG_ALPHA_POLARIS)
# define IACK_SC POLARIS_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC IRONGATE_IACK_SC
#else
/* This is bogus but necessary to get it to compile on all platforms. */
# define IACK_SC 1L
static inline void
mask_irq(unsigned long irq)
{
- alpha_mv.update_irq_hw(irq, alpha_irq_mask |= 1UL << irq, 0);
+ set_bit(irq, _alpha_irq_masks);
+ alpha_mv.update_irq_hw(irq, alpha_irq_mask, 0);
}
static inline void
unmask_irq(unsigned long irq)
{
- alpha_mv.update_irq_hw(irq, alpha_irq_mask &= ~(1UL << irq), 1);
+ clear_bit(irq, _alpha_irq_masks);
+ alpha_mv.update_irq_hw(irq, alpha_irq_mask, 1);
}
void
get_irqlock(int cpu, void* where)
{
if (!spin_trylock(&global_irq_lock)) {
- /* do we already hold the lock? */
- if (cpu == global_irq_holder) {
-#if 0
- printk("get_irqlock: already held at %08lx\n",
- previous_irqholder);
-#endif
+ /* Do we already hold the lock? */
+ if (cpu == global_irq_holder)
return;
- }
- /* Uhhuh.. Somebody else got it. Wait.. */
+ /* Uhhuh.. Somebody else got it. Wait. */
spin_lock(&global_irq_lock);
}
+
/*
* Ok, we got the lock bit.
* But that's actually just the easy part.. Now
unsigned long delay;
unsigned int i;
- for (i = ACTUAL_NR_IRQS - 1; i > 0; i--) {
+ /* Handle only the first 64 IRQs here. This is enough for
+ [E]ISA, which is the only thing that needs probing anyway. */
+ for (i = (ACTUAL_NR_IRQS - 1) & 63; i > 0; i--) {
if (!(PROBE_MASK & (1UL << i))) {
continue;
}
{
int i;
+ /* Handle only the first 64 IRQs here. This is enough for
+ [E]ISA, which is the only thing that needs probing anyway. */
irqs &= alpha_irq_mask;
if (!irqs)
return 0;
outb(0, DMA1_CLR_MASK_REG); \
outb(0, DMA2_CLR_MASK_REG)
-extern unsigned long alpha_irq_mask;
+extern unsigned long _alpha_irq_masks[2];
+#define alpha_irq_mask _alpha_irq_masks[0]
extern void common_ack_irq(unsigned long irq);
extern void isa_device_interrupt(unsigned long vector, struct pt_regs * regs);
#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
#endif
+/*
+ * PROBE_MASK is the bitset of irqs that we consider for autoprobing.
+ */
+
+/* NOTE: we only handle the first 64 IRQs in this code. */
+
+/* The normal mask includes all the IRQs except timer IRQ 0. */
+#define _PROBE_MASK(nr_irqs) \
+ (((nr_irqs > 63) ? ~0UL : ((1UL << (nr_irqs & 63)) - 1)) & ~1UL)
+
+/* Mask out unused timer irq 0 and RTC irq 8. */
+#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)
+
+/* Mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade. */
+#define ALCOR_PROBE_MASK (_PROBE_MASK(48) & ~0xfff000000001UL)
+
+/* Leave timer IRQ 0 in the mask. */
+#define RUFFIAN_PROBE_MASK (_PROBE_MASK(48) | 1UL)
+
+/* Do not probe/enable beyond the PCI devices. */
+#define TSUNAMI_PROBE_MASK _PROBE_MASK(48)
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define PROBE_MASK alpha_mv.irq_probe_mask
+#elif defined(CONFIG_ALPHA_P2K)
+# define PROBE_MASK P2K_PROBE_MASK
+#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT)
+# define PROBE_MASK ALCOR_PROBE_MASK
+#elif defined(CONFIG_ALPHA_RUFFIAN)
+# define PROBE_MASK RUFFIAN_PROBE_MASK
+#elif defined(CONFIG_ALPHA_DP264)
+# define PROBE_MASK TSUNAMI_PROBE_MASK
+#else
+# define PROBE_MASK _PROBE_MASK(NR_IRQS)
+#endif
+
+
extern char _stext;
static inline void alpha_do_profile (unsigned long pc)
{
#include <linux/config.h>
#include <asm/pgalloc.h>
-/* Whee. Both TSUNAMI and POLARIS don't have an HAE. Fix things up for
+/* Whee. IRONGATE, POLARIS and TSUNAMI don't have an HAE. Fix things up for
the GENERIC kernel by defining the HAE address to be that of the cache.
Now we can read and write it as we like. ;-) */
-#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define IRONGATE_HAE_ADDRESS (&alpha_mv.hae_cache)
#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
#if CIA_ONE_HAE_WINDOW
#define CIA_HAE_ADDRESS (&alpha_mv.hae_cache)
#define DO_APECS_IO IO(APECS,apecs)
#define DO_CIA_IO IO(CIA,cia)
+#define DO_IRONGATE_IO IO(IRONGATE,irongate)
#define DO_LCA_IO IO(LCA,lca)
#define DO_MCPCIA_IO IO(MCPCIA,mcpcia)
-#define DO_PYXIS_IO IO(PYXIS,pyxis)
#define DO_POLARIS_IO IO(POLARIS,polaris)
+#define DO_PYXIS_IO IO(PYXIS,pyxis)
#define DO_T2_IO IO(T2,t2)
#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami)
#define DO_APECS_BUS BUS(apecs)
#define DO_CIA_BUS BUS(cia)
+#define DO_IRONGATE_BUS BUS(irongate)
#define DO_LCA_BUS BUS(lca)
#define DO_MCPCIA_BUS BUS(mcpcia)
#define DO_PYXIS_BUS BUS(pyxis)
*/
struct pci_controler *hose_head, **hose_tail = &hose_head;
+struct pci_controler *probing_hose;
/*
* Quirks.
#define KB 1024
#define MB (1024*KB)
#define GB (1024*MB)
-unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
- unsigned long start, unsigned long size)
+
+void __init
+pcibios_align_resource(void *data, struct resource *res, unsigned long size)
{
+ struct pci_dev * dev = data;
unsigned long alignto;
+ unsigned long start = res->start;
- if (res->flags & IORESOURCE_IO)
- {
+ if (res->flags & IORESOURCE_IO) {
/*
* Aligning to 0x800 rather than the minimum base of
* 0x400 is an attempt to avoid having devices in
alignto = MAX(0x800, size);
start = ALIGN(start, alignto);
}
- else if (res->flags & IORESOURCE_MEM)
- {
+ else if (res->flags & IORESOURCE_MEM) {
/*
* The following holds at least for the Low Cost
* Alpha implementation of the PCI interface:
* address space must be accessed through
* dense memory space only!
*/
- /* align to multiple of size of minimum base */
+
+ /* Align to multiple of size of minimum base. */
alignto = MAX(0x1000, size);
start = ALIGN(start, alignto);
- if (size > 7 * 16*MB)
+ if (size > 7 * 16*MB) {
printk(KERN_WARNING "PCI: dev %s "
"requests %ld bytes of contiguous "
"address space---don't use sparse "
"memory accesses on this device!\n",
dev->name, size);
- else
- {
+ } else {
if (((start / (16*MB)) & 0x7) == 0) {
start &= ~(128*MB - 1);
start += 16*MB;
}
}
- return start;
+ res->start = start;
}
#undef MAX
#undef ALIGN
so leave them unchanged. This is true, for instance, of the
Contaq 82C693 as seen on SX164 and DP264. */
- if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE)
- {
+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
int i;
/* Resource 1 of IDE controller is the address of HD_CMD
{
/* Propogate hose info into the subordinate devices. */
- struct pci_controler *hose = (struct pci_controler *) bus->sysdata;
+ struct pci_controler *hose = probing_hose;
struct pci_dev *dev;
bus->resource[0] = hose->io_space;
bus->resource[1] = hose->mem_space;
- for (dev = bus->devices; dev; dev = dev->sibling)
+ for (dev = bus->devices; dev; dev = dev->sibling) {
+ dev->sysdata = hose;
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
pcibios_fixup_device_resources(dev, bus);
+ }
}
void __init
where = PCI_BASE_ADDRESS_0 + (resource * 4);
reg = (res->start - root->start) | (res->flags & 0xf);
pci_write_config_dword(dev, where, reg);
- if ((res->flags & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
- == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64))
- {
+ if ((res->flags & (PCI_BASE_ADDRESS_SPACE
+ | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
+ == (PCI_BASE_ADDRESS_SPACE_MEMORY
+ | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
pci_write_config_dword(dev, where+4, 0);
printk(KERN_WARNING "PCI: dev %s type 64-bit\n", dev->name);
}
}
void __init
-pcibios_fixup_pbus_ranges(struct pci_bus * bus, struct pbus_set_ranges_data * ranges)
+pcibios_fixup_pbus_ranges(struct pci_bus * bus,
+ struct pbus_set_ranges_data * ranges)
{
ranges->io_start -= bus->resource[0]->start;
ranges->io_end -= bus->resource[0]->start;
for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
hose->first_busno = next_busno;
hose->last_busno = 0xff;
+ probing_hose = hose;
bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose);
hose->bus = bus;
next_busno = hose->last_busno = bus->subordinate;
next_busno += 1;
}
+ probing_hose = NULL;
pci_assign_unassigned_resources(alpha_mv.min_io_address,
alpha_mv.min_mem_address);
/* The hose list. */
extern struct pci_controler *hose_head, **hose_tail;
+extern struct pci_controler *probing_hose;
extern void common_init_pci(void);
extern u8 common_swizzle(struct pci_dev *, u8 *);
#ifdef CONFIG_RTC
/* Reset rtc to defaults. */
- {
- unsigned char control;
-
- cli();
-
- /* Reset periodic interrupt frequency. */
- CMOS_WRITE(0x26, RTC_FREQ_SELECT);
-
- /* Turn on periodic interrupts. */
- control = CMOS_READ(RTC_CONTROL);
- control |= RTC_PIE;
- CMOS_WRITE(control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- sti();
- }
+ rtc_kill_pit();
#endif
if (!alpha_using_srm && mode != LINUX_REBOOT_CMD_RESTART) {
extern void cia_init_arch(void);
extern void cia_machine_check(u64, u64, struct pt_regs *);
+/* core_irongate.c */
+extern struct pci_ops irongate_pci_ops;
+extern int irongate_pci_clr_err(void);
+extern void irongate_init_arch(void);
+extern void irongate_machine_check(u64, u64, struct pt_regs *);
+
/* core_lca.c */
extern struct pci_ops lca_pci_ops;
extern void lca_init_arch(void);
/* core_mcpcia.c */
extern struct pci_ops mcpcia_pci_ops;
extern void mcpcia_init_arch(void);
+extern void mcpcia_init_hoses(void);
extern void mcpcia_machine_check(u64, u64, struct pt_regs *);
/* core_polaris.c */
/* time.c */
extern void timer_interrupt(int irq, void *dev, struct pt_regs * regs);
extern void rtc_init_pit(void);
+extern void rtc_kill_pit(void);
extern void common_init_pit(void);
extern unsigned long est_cycle_freq;
* | | v
* +================================+
*/
-#define PT_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
- + (long)&((struct pt_regs *)0)->reg)
-
-#define SW_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
- - sizeof(struct switch_stack) \
- + (long)&((struct switch_stack *)0)->reg)
/*
* The following table maps a register index into the stack offset at
WEAK(eb64p_mv);
WEAK(eb66_mv);
WEAK(eb66p_mv);
+WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
+WEAK(nautilus_mv);
WEAK(noname_mv);
WEAK(noritake_mv);
WEAK(noritake_primo_mv);
#define for_each_mem_cluster(memdesc, cluster, i) \
for ((cluster) = (memdesc)->cluster, (i) = 0; \
(i) < (memdesc)->numclusters; (i)++, (cluster)++)
-static void __init setup_memory(void)
+
+static void __init
+setup_memory(void)
{
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
extern char _end[];
int i;
- /* find free clusters, and init and free the bootmem accordingly */
- memdesc = (struct memdesc_struct *) (hwrpb->mddt_offset + (unsigned long) hwrpb);
+ /* Find free clusters, and init and free the bootmem accordingly. */
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
- for_each_mem_cluster(memdesc, cluster, i)
- {
+ for_each_mem_cluster(memdesc, cluster, i) {
printk("memcluster %d, usage %01lx, start %8lu, end %8lu\n",
i, cluster->usage, cluster->start_pfn,
cluster->start_pfn + cluster->numpages);
/* Bit 0 is console/PALcode reserved. Bit 1 is
non-volatile memory -- we might want to mark
- this for later */
+ this for later. */
if (cluster->usage & 3)
continue;
if (end > max_low_pfn)
max_low_pfn = end;
}
+
/* Enforce maximum of 2GB even if there is more. Blah. */
if (max_low_pfn > PFN_MAX)
max_low_pfn = PFN_MAX;
printk("max_low_pfn %ld\n", max_low_pfn);
- /* find the end of the kernel memory */
+ /* Find the end of the kernel memory. */
start_pfn = PFN_UP(virt_to_phys(_end));
printk("_end %p, start_pfn %ld\n", _end, start_pfn);
if (max_low_pfn <= start_pfn)
panic("not enough memory to boot");
- /* we need to know how many physically contigous pages
- we'll need for the bootmap */
+	/* We need to know how many physically contiguous pages
+	   we'll need for the bootmap. */
bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
printk("bootmap size: %ld pages\n", bootmap_pages);
- /* now find a good region where to allocate the bootmap */
- for_each_mem_cluster(memdesc, cluster, i)
- {
+ /* Now find a good region where to allocate the bootmap. */
+ for_each_mem_cluster(memdesc, cluster, i) {
if (cluster->usage & 3)
continue;
start = start_pfn;
if (end > max_low_pfn)
end = max_low_pfn;
- if (end - start >= bootmap_pages)
- {
+ if (end - start >= bootmap_pages) {
printk("allocating bootmap in area %ld:%ld\n",
start, start+bootmap_pages);
bootmap_start = start;
}
}
- if (bootmap_start == -1)
- {
+ if (bootmap_start == -1) {
max_low_pfn >>= 1;
printk("bootmap area not found now trying with %ld pages\n",
max_low_pfn);
goto try_again;
}
- /* allocate the bootmap and mark the whole MM as reserved */
+ /* Allocate the bootmap and mark the whole MM as reserved. */
bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
- /* mark the free regions */
- for_each_mem_cluster(memdesc, cluster, i)
- {
+ /* Mark the free regions. */
+ for_each_mem_cluster(memdesc, cluster, i) {
if (cluster->usage & 3)
continue;
PFN_UP(start), PFN_DOWN(end));
}
- /* reserve the bootmap memory */
+ /* Reserve the bootmap memory. */
reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
printk("reserving bootmap %ld:%ld\n", bootmap_start,
bootmap_start + PFN_UP(bootmap_size));
if (initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
printk("initrd extends beyond end of memory "
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- initrd_end, phys_to_virt(PFN_PHYS(max_low_pfn)));
+ initrd_end,
+ phys_to_virt(PFN_PHYS(max_low_pfn)));
initrd_start = initrd_end = 0;
+ } else {
+ reserve_bootmem(virt_to_phys(initrd_start),
+ INITRD_SIZE);
}
- else
- reserve_bootmem(virt_to_phys(initrd_start), INITRD_SIZE);
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
struct percpu_struct *cpu;
char *type_name, *var_name, *p;
- hwrpb = (struct hwrpb_struct*)(IDENT_ADDR + INIT_HWRPB->phys_addr);
+ hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
/*
* Locate the command line.
type_name, (*var_name ? " variation " : ""), var_name,
hwrpb->sys_type, hwrpb->sys_variation);
}
- if (vec != &alpha_mv)
+ if (vec != &alpha_mv) {
alpha_mv = *vec;
-
+ }
+
#ifdef CONFIG_ALPHA_GENERIC
/* Assume that we've booted from SRM if we havn't booted from MILO.
Detect the later by looking for "MILO" in the system serial nr. */
"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
- "Tsunami", "Wildfire", "CUSCO"
+ "Tsunami", "Wildfire", "CUSCO", "Eiger"
};
static char unofficial_names[][8] = {"100", "Ruffian"};
+static char api_names[][16] = {"200", "Nautilus"};
+
static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
};
static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8};
-
static struct alpha_machine_vector * __init
get_sysvec(long type, long variation, long cpu)
{
NULL, /* Tsunami -- see variation. */
NULL, /* Wildfire */
NULL, /* CUSCO */
+ &eiger_mv, /* Eiger */
};
static struct alpha_machine_vector *unofficial_vecs[] __initlocaldata =
&ruffian_mv,
};
+ static struct alpha_machine_vector *api_vecs[] __initlocaldata =
+ {
+ NULL, /* 200 */
+ &nautilus_mv,
+ };
+
static struct alpha_machine_vector *alcor_vecs[] __initlocaldata =
{
&alcor_mv, &xlt_mv, &xlt_mv
vec = NULL;
if (type < N(systype_vecs)) {
vec = systype_vecs[type];
+ } else if ((type > ST_API_BIAS) &&
+ (type - ST_API_BIAS) < N(api_vecs)) {
+ vec = api_vecs[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
(type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
&eb64p_mv,
&eb66_mv,
&eb66p_mv,
+ &eiger_mv,
&jensen_mv,
&lx164_mv,
&miata_mv,
&mikasa_mv,
&mikasa_primo_mv,
&monet_mv,
+ &nautilus_mv,
&noname_mv,
&noritake_mv,
&noritake_primo_mv,
else set type name to family */
if (type < N(systype_names)) {
*type_name = systype_names[type];
+ } else if ((type > ST_API_BIAS) &&
+ (type - ST_API_BIAS) < N(api_names)) {
+ *type_name = api_names[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
(type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: _PROBE_MASK(64),
+ irq_probe_mask: TSUNAMI_PROBE_MASK,
update_irq_hw: dp264_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: _PROBE_MASK(64),
+ irq_probe_mask: TSUNAMI_PROBE_MASK,
update_irq_hw: dp264_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: _PROBE_MASK(64),
+ irq_probe_mask: TSUNAMI_PROBE_MASK,
update_irq_hw: dp264_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
init_arch: tsunami_init_arch,
init_irq: dp264_init_irq,
init_pit: common_init_pit,
- init_pci: dp264_init_pci,
+ init_pci: common_init_pci,
kill_arch: common_kill_arch,
pci_map_irq: webbrick_map_irq,
pci_swizzle: common_swizzle,
min_mem_address: DEFAULT_MEM_BASE,
nr_irqs: 64,
- irq_probe_mask: _PROBE_MASK(64),
+ irq_probe_mask: TSUNAMI_PROBE_MASK,
update_irq_hw: clipper_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: dp264_device_interrupt,
--- /dev/null
+/*
+ * linux/arch/alpha/kernel/sys_eiger.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1996, 1999 Jay A Estabrook
+ * Copyright (C) 1998, 1999 Richard Henderson
+ * Copyright (C) 1999 Iain Grant
+ *
+ * Code supporting the EIGER (EV6+TSUNAMI).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/pgtable.h>
+#include <asm/core_tsunami.h>
+#include <asm/hwrpb.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+
+/*
+ * HACK ALERT! only the boot cpu is used for interrupts.
+ */
+
+static void
+eiger_update_irq_hw(unsigned long irq, unsigned long unused, int unmask_p)
+{
+ unsigned int regaddr;
+ unsigned long mask;
+
+ if (irq <= 15) {
+ if (irq <= 7)
+ outb(alpha_irq_mask, 0x21); /* ISA PIC1 */
+ else
+ outb(alpha_irq_mask >> 8, 0xA1); /* ISA PIC2 */
+ } else {
+ if (irq > 63)
+ mask = _alpha_irq_masks[1] << 16;
+ else
+ mask = _alpha_irq_masks[0] >> ((irq - 16) & 0x30);
+
+ regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
+
+ outl(mask & 0xffff0000UL, regaddr);
+ }
+}
+
+static void
+eiger_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ unsigned intstatus;
+
+ /*
+ * The PALcode will have passed us vectors 0x800 or 0x810,
+ * which are fairly arbitrary values and serve only to tell
+ * us whether an interrupt has come in on IRQ0 or IRQ1. If
+ * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
+ * probably ISA, but PCI interrupts can come through IRQ0
+ * as well if the interrupt controller isn't in accelerated
+ * mode.
+ *
+ * OTOH, the accelerator thing doesn't seem to be working
+ * overly well, so what we'll do instead is try directly
+ * examining the Master Interrupt Register to see if it's a
+ * PCI interrupt, and if _not_ then we'll pass it on to the
+ * ISA handler.
+ */
+
+ intstatus = inw(0x500) & 15;
+ if (intstatus) {
+ /*
+ * This is a PCI interrupt. Check each bit and
+ * despatch an interrupt if it's set.
+ */
+
+ if (intstatus & 8) handle_irq(16+3, 16+3, regs);
+ if (intstatus & 4) handle_irq(16+2, 16+2, regs);
+ if (intstatus & 2) handle_irq(16+1, 16+1, regs);
+ if (intstatus & 1) handle_irq(16+0, 16+0, regs);
+ } else {
+ isa_device_interrupt (vector, regs);
+ }
+}
+
+static void
+eiger_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, irq, regs);
+}
+
+static void __init
+eiger_init_irq(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
+ outb(0, DMA2_MASK_REG);
+
+ if (alpha_using_srm)
+ alpha_mv.device_interrupt = eiger_srm_device_interrupt;
+
+ eiger_update_irq_hw(16, alpha_irq_mask, 0);
+
+ enable_irq(2);
+}
+
+static int __init
+eiger_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ u8 irq_orig;
+
+	/* The SRM console has already calculated the IRQ values for
+	   option cards.  Since this works, let's just read the value
+	   already set and convert it to a value usable by Linux.
+
+ All the IRQ values generated by the console are greater than 90,
+ so we subtract 80 because it is (90 - allocated ISA IRQ's). */
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig);
+
+ return irq_orig - 0x80;
+}
+
+static u8 __init
+eiger_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ struct pci_controler *hose = dev->sysdata;
+ int slot, pin = *pinp;
+ int bridge_count = 0;
+
+ /* Find the number of backplane bridges. */
+ int backplane = inw(0x502) & 0x0f;
+
+ switch (backplane)
+ {
+ case 0x00: bridge_count = 0; break; /* No bridges */
+ case 0x01: bridge_count = 1; break; /* 1 */
+ case 0x03: bridge_count = 2; break; /* 2 */
+ case 0x07: bridge_count = 3; break; /* 3 */
+ case 0x0f: bridge_count = 4; break; /* 4 */
+ };
+
+ /* Check first for the built-in bridges on hose 0. */
+ if (hose->index == 0
+ && PCI_SLOT(dev->bus->self->devfn) > 20-bridge_count) {
+ slot = PCI_SLOT(dev->devfn);
+ } else {
+ /* Must be a card-based bridge. */
+ do {
+ /* Check for built-in bridges on hose 0. */
+ if (hose->index == 0
+ && (PCI_SLOT(dev->bus->self->devfn)
+ > 20 - bridge_count)) {
+ slot = PCI_SLOT(dev->devfn);
+ break;
+ }
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ /* Slot of the next bridge. */
+ slot = PCI_SLOT(dev->devfn);
+ } while (dev->bus->self);
+ }
+ *pinp = pin;
+ return slot;
+}
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector eiger_mv __initmv = {
+ vector_name: "Eiger",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_TSUNAMI_IO,
+ DO_TSUNAMI_BUS,
+ machine_check: tsunami_machine_check,
+ max_dma_address: ALPHA_MAX_DMA_ADDRESS,
+ min_io_address: DEFAULT_IO_BASE,
+ min_mem_address: DEFAULT_MEM_BASE,
+
+ nr_irqs: 128,
+ irq_probe_mask: TSUNAMI_PROBE_MASK,
+ update_irq_hw: eiger_update_irq_hw,
+ ack_irq: common_ack_irq,
+ device_interrupt: eiger_device_interrupt,
+
+ init_arch: tsunami_init_arch,
+ init_irq: eiger_init_irq,
+ init_pit: common_init_pit,
+ init_pci: common_init_pci,
+ kill_arch: common_kill_arch,
+ pci_map_irq: eiger_map_irq,
+ pci_swizzle: eiger_swizzle,
+};
+ALIAS_MV(eiger)
--- /dev/null
+/*
+ * linux/arch/alpha/kernel/sys_nautilus.c
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1998 Richard Henderson
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ * (David Daniel, Stig Telfer, Soohoon Lee)
+ *
+ * Code supporting NAUTILUS systems.
+ *
+ *
+ * NAUTILUS has the following I/O features:
+ *
+ * a) Driven by AMD 751 aka IRONGATE (northbridge):
+ * 4 PCI slots
+ * 1 AGP slot
+ *
+ * b) Driven by ALI M1543C (southbridge)
+ * 2 ISA slots
+ * 2 IDE connectors
+ * 1 dual drive capable FDD controller
+ * 2 serial ports
+ * 1 ECP/EPP/SP parallel port
+ * 2 USB ports
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/mmu_context.h>
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/pgtable.h>
+#include <asm/core_irongate.h>
+#include <asm/hwrpb.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+#include "pci_impl.h"
+#include "machvec_impl.h"
+
+#define dev2hose(d) (bus2hose[(d)->bus->number]->pci_hose_index)
+
+static void
+nautilus_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
+{
+ /* The timer is connected to PIC interrupt line also on Nautilus.
+ The timer interrupt handler enables the PIC line, so in order
+ not to get multiple timer interrupt sources, we mask it out
+ at all times. */
+
+ mask |= 0x100;
+ if (irq >= 8)
+ outb(mask >> 8, 0xA1);
+ else
+ outb(mask, 0x21);
+}
+
+static void __init
+nautilus_init_irq(void)
+{
+ STANDARD_INIT_IRQ_PROLOG;
+
+ enable_irq(2); /* enable cascade */
+ disable_irq(8);
+}
+
+static int __init
+nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /* Preserve the IRQ set up by the console. */
+
+ u8 irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+void
+nautilus_kill_arch (int mode, char *restart_cmd)
+{
+ u8 tmp;
+
+#ifdef CONFIG_RTC
+ rtc_kill_pit();
+#endif
+
+ switch(mode) {
+ case LINUX_REBOOT_CMD_HALT:
+		printk("Press Reset button");
+ break;
+ case LINUX_REBOOT_CMD_RESTART:
+ pcibios_read_config_byte(0, 0x38, 0x43, &tmp);
+ pcibios_write_config_byte(0, 0x38, 0x43, tmp | 0x80);
+ outb(1, 0x92);
+ outb(0, 0x92);
+ printk("Press Reset button");
+ break;
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ }
+
+ while (1);
+}
+
+/* Machine check handler code
+ *
+ * Perform analysis of a machine check that was triggered by the EV6
+ * CPU's fault-detection mechanism.
+ */
+
+/* IPR structures for EV6, containing the necessary data for the
+ * machine check handler to unpick the logout frame
+ */
+
+/* I_STAT */
+
+#define EV6__I_STAT__PAR ( 1 << 29 )
+
+/* MM_STAT */
+
+#define EV6__MM_STAT__DC_TAG_PERR ( 1 << 10 )
+
+/* DC_STAT */
+
+#define EV6__DC_STAT__SEO ( 1 << 4 )
+#define EV6__DC_STAT__ECC_ERR_LD ( 1 << 3 )
+#define EV6__DC_STAT__ECC_ERR_ST ( 1 << 2 )
+#define EV6__DC_STAT__TPERR_P1 ( 1 << 1 )
+#define EV6__DC_STAT__TPERR_P0 ( 1 )
+
+/* C_STAT */
+
+#define EV6__C_STAT__BC_PERR ( 0x01 )
+#define EV6__C_STAT__DC_PERR ( 0x02 )
+#define EV6__C_STAT__DSTREAM_MEM_ERR ( 0x03 )
+#define EV6__C_STAT__DSTREAM_BC_ERR ( 0x04 )
+#define EV6__C_STAT__DSTREAM_DC_ERR ( 0x05 )
+#define EV6__C_STAT__PROBE_BC_ERR0 ( 0x06 )
+#define EV6__C_STAT__PROBE_BC_ERR1 ( 0x07 )
+#define EV6__C_STAT__ISTREAM_MEM_ERR ( 0x0B )
+#define EV6__C_STAT__ISTREAM_BC_ERR ( 0x0C )
+#define EV6__C_STAT__DSTREAM_MEM_DBL ( 0x13 )
+#define EV6__C_STAT__DSTREAM_BC_DBL ( 0x14 )
+#define EV6__C_STAT__ISTREAM_MEM_DBL ( 0x1B )
+#define EV6__C_STAT__ISTREAM_BC_DBL ( 0x1C )
+
+
+/* Take the two syndromes from the CBOX error chain and convert them
+ * into a bit number. */
+
+/* NOTE - since I don't know of any difference between C0 and C1 I
+ just ignore C1, since in all cases I've seen so far they are
+ identical. */
+
+static const unsigned char ev6_bit_to_syndrome[72] =
+{
+ 0xce, 0xcb, 0xd3, 0xd5, 0xd6, 0xd9, 0xda, 0xdc, /* 0 */
+ 0x23, 0x25, 0x26, 0x29, 0x2a, 0x2c, 0x31, 0x34, /* 8 */
+ 0x0e, 0x0b, 0x13, 0x15, 0x16, 0x19, 0x1a, 0x1c, /* 16 */
+ 0xe3, 0xe5, 0xe6, 0xe9, 0xea, 0xec, 0xf1, 0xf4, /* 24 */
+ 0x4f, 0x4a, 0x52, 0x54, 0x57, 0x58, 0x5b, 0x5d, /* 32 */
+ 0xa2, 0xa4, 0xa7, 0xa8, 0xab, 0xad, 0xb0, 0xb5, /* 40 */
+ 0x8f, 0x8a, 0x92, 0x94, 0x97, 0x98, 0x9b, 0x9d, /* 48 */
+ 0x62, 0x64, 0x67, 0x68, 0x6b, 0x6d, 0x70, 0x75, /* 56 */
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 /* 64 */
+};
+
+
+static int ev6_syn2bit(unsigned long c0, unsigned long c1)
+{
+ int bit;
+
+ for (bit = 0; bit < 72; bit++)
+ if (ev6_bit_to_syndrome[bit] == c0) return bit;
+ for (bit = 0; bit < 72; bit++)
+ if (ev6_bit_to_syndrome[bit] == c1) return bit + 64;
+
+ return -1; /* not found */
+}
+
+
+/* Single bit ECC errors are categorized here. */
+
+#if 0
+static const char *interr = "CPU internal error";
+static const char *slotb= "Slot-B error";
+static const char *membus= "Memory/EV6-bus error";
+#else
+static const char *interr = "";
+static const char *slotb = "";
+static const char *membus = "";
+#endif
+
+static void
+ev6_crd_interp(char *interp, struct el_common_EV6_mcheck * L)
+{
+ /* Icache data or tag parity error. */
+ if (L->I_STAT & EV6__I_STAT__PAR) {
+ sprintf(interp, "%s: I_STAT[PAR]\n "
+ "Icache data or tag parity error", interr);
+ return;
+ }
+
+ /* Dcache tag parity error (on issue) (DFAULT). */
+ if (L->MM_STAT & EV6__MM_STAT__DC_TAG_PERR) {
+ sprintf(interp, "%s: MM_STAT[DC_TAG_PERR]\n "
+ "Dcache tag parity error(on issue)", interr);
+ return;
+ }
+
+ /* Errors relating to D-stream set non-zero DC_STAT.
+ Mask CRD bits. */
+ switch (L->DC_STAT & (EV6__DC_STAT__ECC_ERR_ST
+ | EV6__DC_STAT__ECC_ERR_LD)) {
+ case EV6__DC_STAT__ECC_ERR_ST:
+ /* Dcache single-bit ECC error on small store */
+ sprintf(interp, "%s: DC_STAT[ECC_ERR_ST]\n "
+ "Dcache single-bit ECC error on small store", interr);
+ return;
+
+ case EV6__DC_STAT__ECC_ERR_LD:
+ switch (L->C_STAT) {
+ case 0:
+ /* Dcache single-bit error on speculative load */
+ /* Bcache victim read on Dcache/Bcache miss */
+ sprintf(interp, "%s: DC_STAT[ECC_ERR_LD] C_STAT=0\n "
+ "Dcache single-bit ECC error on speculative load",
+ slotb);
+ return;
+
+ case EV6__C_STAT__DSTREAM_DC_ERR:
+ /* Dcache single bit error on load */
+ sprintf(interp, "%s: DC_STAT[ECC_ERR_LD] C_STAT[DSTREAM_DC_ERR]\n"
+			" Dcache single-bit ECC error on load, bit %d",
+ interr, ev6_syn2bit(L->DC0_SYNDROME, L->DC1_SYNDROME));
+ return;
+
+ case EV6__C_STAT__DSTREAM_BC_ERR:
+ /* Bcache single-bit error on Dcache fill */
+ sprintf(interp, "%s: DC_STAT[ECC_ERR_LD] C_STAT[DSTREAM_BC_ERR]\n"
+ " Bcache single-bit error on Dcache fill, bit %d",
+ slotb, ev6_syn2bit(L->DC0_SYNDROME, L->DC1_SYNDROME));
+ return;
+
+ case EV6__C_STAT__DSTREAM_MEM_ERR:
+ /* Memory single-bit error on Dcache fill */
+ sprintf(interp, "%s (to Dcache): DC_STAT[ECC_ERR_LD] "
+ "C_STAT[DSTREAM_MEM_ERR]\n "
+ "Memory single-bit error on Dcache fill, "
+ "Address 0x%lX, bit %d",
+ membus, L->C_ADDR, ev6_syn2bit(L->DC0_SYNDROME,
+ L->DC1_SYNDROME));
+ return;
+ }
+ }
+
+ /* I-stream, other misc errors go on C_STAT alone */
+ switch (L->C_STAT) {
+ case EV6__C_STAT__ISTREAM_BC_ERR:
+ /* Bcache single-bit error on Icache fill (also MCHK) */
+ sprintf(interp, "%s: C_STAT[ISTREAM_BC_ERR]\n "
+ "Bcache single-bit error on Icache fill, bit %d",
+ slotb, ev6_syn2bit(L->DC0_SYNDROME, L->DC1_SYNDROME));
+ return;
+
+ case EV6__C_STAT__ISTREAM_MEM_ERR:
+ /* Memory single-bit error on Icache fill (also MCHK) */
+		sprintf(interp, "%s: C_STAT[ISTREAM_MEM_ERR]\n "
+ "Memory single-bit error on Icache fill "
+ "addr 0x%lX, bit %d",
+ membus, L->C_ADDR, ev6_syn2bit(L->DC0_SYNDROME,
+ L->DC1_SYNDROME));
+ return;
+
+ case EV6__C_STAT__PROBE_BC_ERR0:
+ case EV6__C_STAT__PROBE_BC_ERR1:
+ /* Bcache single-bit error on a probe hit */
+ sprintf(interp, "%s: C_STAT[PROBE_BC_ERR]\n "
+ "Bcache single-bit error on a probe hit, "
+ "addr 0x%lx, bit %d",
+ slotb, L->C_ADDR, ev6_syn2bit(L->DC0_SYNDROME,
+ L->DC1_SYNDROME));
+ return;
+ }
+}
+
+static void
+ev6_mchk_interp(char *interp, struct el_common_EV6_mcheck * L)
+{
+ /* Machine check errors described by DC_STAT */
+ switch (L->DC_STAT) {
+ case EV6__DC_STAT__TPERR_P0:
+ case EV6__DC_STAT__TPERR_P1:
+ /* Dcache tag parity error (on retry) */
+ sprintf(interp, "%s: DC_STAT[TPERR_P0|TPERR_P1]\n "
+ "Dcache tag parity error(on retry)", interr);
+ return;
+
+ case EV6__DC_STAT__SEO:
+ /* Dcache second error on store */
+ sprintf(interp, "%s: DC_STAT[SEO]\n "
+ "Dcache second error during mcheck", interr);
+ return;
+ }
+
+ /* Machine check errors described by C_STAT */
+ switch (L->C_STAT) {
+ case EV6__C_STAT__DC_PERR:
+ /* Dcache duplicate tag parity error */
+ sprintf(interp, "%s: C_STAT[DC_PERR]\n "
+ "Dcache duplicate tag parity error at 0x%lX",
+ interr, L->C_ADDR);
+ return;
+
+ case EV6__C_STAT__BC_PERR:
+ /* Bcache tag parity error */
+ sprintf(interp, "%s: C_STAT[BC_PERR]\n "
+ "Bcache tag parity error at 0x%lX",
+ slotb, L->C_ADDR);
+ return;
+
+ case EV6__C_STAT__ISTREAM_BC_ERR:
+ /* Bcache single-bit error on Icache fill (also CRD) */
+ sprintf(interp, "%s: C_STAT[ISTREAM_BC_ERR]\n "
+ "Bcache single-bit error on Icache fill 0x%lX bit %d",
+ slotb, L->C_ADDR,
+ ev6_syn2bit(L->DC0_SYNDROME, L->DC1_SYNDROME));
+ return;
+
+
+ case EV6__C_STAT__ISTREAM_MEM_ERR:
+ /* Memory single-bit error on Icache fill (also CRD) */
+ sprintf(interp, "%s: C_STAT[ISTREAM_MEM_ERR]\n "
+ "Memory single-bit error on Icache fill 0x%lX, bit %d",
+ membus, L->C_ADDR,
+ ev6_syn2bit(L->DC0_SYNDROME, L->DC1_SYNDROME));
+ return;
+
+
+ case EV6__C_STAT__ISTREAM_BC_DBL:
+ /* Bcache double-bit error on Icache fill */
+ sprintf(interp, "%s: C_STAT[ISTREAM_BC_DBL]\n "
+ "Bcache double-bit error on Icache fill at 0x%lX",
+ slotb, L->C_ADDR);
+ return;
+ case EV6__C_STAT__DSTREAM_BC_DBL:
+ /* Bcache double-bit error on Dcache fill */
+ sprintf(interp, "%s: C_STAT[DSTREAM_BC_DBL]\n "
+ "Bcache double-bit error on Dcache fill at 0x%lX",
+ slotb, L->C_ADDR);
+ return;
+
+ case EV6__C_STAT__ISTREAM_MEM_DBL:
+ /* Memory double-bit error on Icache fill */
+ sprintf(interp, "%s: C_STAT[ISTREAM_MEM_DBL]\n "
+ "Memory double-bit error on Icache fill at 0x%lX",
+ membus, L->C_ADDR);
+ return;
+
+ case EV6__C_STAT__DSTREAM_MEM_DBL:
+ /* Memory double-bit error on Dcache fill */
+ sprintf(interp, "%s: C_STAT[DSTREAM_MEM_DBL]\n "
+ "Memory double-bit error on Dcache fill at 0x%lX",
+ membus, L->C_ADDR);
+ return;
+ }
+}
+
+static void
+ev6_cpu_machine_check(unsigned long vector, struct el_common_EV6_mcheck *L,
+ struct pt_regs *regs)
+{
+ char interp[80];
+
+ /* This is verbose and looks intimidating. Should it be printed for
+ corrected (CRD) machine checks? */
+
+ printk(KERN_CRIT "PALcode logout frame: "
+ "MCHK_Code %d "
+ "MCHK_Frame_Rev %d\n"
+ "I_STAT %016lx "
+ "DC_STAT %016lx "
+ "C_ADDR %016lx\n"
+ "SYND1 %016lx "
+ "SYND0 %016lx "
+ "C_STAT %016lx\n"
+ "C_STS %016lx "
+ "RES %016lx "
+ "EXC_ADDR%016lx\n"
+ "IER_CM %016lx "
+ "ISUM %016lx "
+ "MM_STAT %016lx\n"
+ "PALBASE %016lx "
+ "I_CTL %016lx "
+ "PCTX %016lx\n"
+ "CPU registers: "
+ "PC %016lx "
+ "Return %016lx\n",
+ L->MCHK_Code, L->MCHK_Frame_Rev, L->I_STAT, L->DC_STAT,
+ L->C_ADDR, L->DC1_SYNDROME, L->DC0_SYNDROME, L->C_STAT,
+ L->C_STS, L->RESERVED0, L->EXC_ADDR, L->IER_CM, L->ISUM,
+ L->MM_STAT, L->PAL_BASE, L->I_CTL, L->PCTX,
+ regs->pc, regs->r26);
+
+ /* Attempt an interpretation on the meanings of the fields above. */
+ sprintf(interp, "No interpretation available!" );
+ if (vector == SCB_Q_PROCERR)
+ ev6_crd_interp(interp, L);
+ else if (vector == SCB_Q_PROCMCHK)
+ ev6_mchk_interp(interp, L);
+
+ printk(KERN_CRIT "interpretation: %s\n\n", interp);
+}
+
+
+/* Perform analysis of a machine check that arrived from the system (NMI) */
+
+static void
+naut_sys_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ printk("xtime %lx\n", CURRENT_TIME);
+ printk("PC %lx RA %lx\n", regs->pc, regs->r26);
+ irongate_pci_clr_err();
+}
+
+/* Machine checks can come from two sources - those on the CPU and those
+ in the system. They are analysed separately but all starts here. */
+
+void
+nautilus_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ char *mchk_class;
+ unsigned cpu_analysis=0, sys_analysis=0;
+
+ /* Now for some analysis. Machine checks fall into two classes --
+ those picked up by the system, and those picked up by the CPU.
+ Add to that the two levels of severity - correctable or not. */
+
+ if (vector == SCB_Q_SYSMCHK
+ && ((IRONGATE0->dramms & 0x3FF) == 0x300)) {
+ unsigned long nmi_ctl, temp;
+
+ /* Clear ALI NMI */
+ nmi_ctl = inb(0x61);
+ nmi_ctl |= 0x0c;
+ outb(nmi_ctl, 0x61);
+ nmi_ctl &= ~0x0c;
+ outb(nmi_ctl, 0x61);
+
+ temp = IRONGATE0->stat_cmd;
+ IRONGATE0->stat_cmd = temp; /* write again clears error bits */
+ mb();
+ temp = IRONGATE0->stat_cmd; /* re-read to force write */
+
+ temp = IRONGATE0->dramms;
+ IRONGATE0->dramms = temp; /* write again clears error bits */
+ mb();
+ temp = IRONGATE0->dramms; /* re-read to force write */
+
+ draina();
+ wrmces(0x7);
+ mb();
+ return;
+ }
+
+ switch (vector) {
+ case SCB_Q_SYSERR:
+ mchk_class = "Correctable System Machine Check (NMI)";
+ sys_analysis = 1;
+ break;
+ case SCB_Q_SYSMCHK:
+ mchk_class = "Fatal System Machine Check (NMI)";
+ sys_analysis = 1;
+ break;
+
+ case SCB_Q_PROCERR:
+ mchk_class = "Correctable Processor Machine Check";
+ cpu_analysis = 1;
+ break;
+ case SCB_Q_PROCMCHK:
+ mchk_class = "Fatal Processor Machine Check";
+ cpu_analysis = 1;
+ break;
+
+ default:
+ mchk_class = "Unknown vector!";
+ break;
+ }
+
+ printk(KERN_CRIT "NAUTILUS Machine check 0x%lx [%s]\n",
+ vector, mchk_class);
+
+ if (cpu_analysis)
+ ev6_cpu_machine_check(vector,
+ (struct el_common_EV6_mcheck *)la_ptr,
+ regs);
+ if (sys_analysis)
+ naut_sys_machine_check(vector, la_ptr, regs);
+
+ /* Tell the PALcode to clear the machine check */
+ draina();
+ wrmces(0x7);
+ mb();
+}
+
+
+
+/*
+ * The System Vectors
+ */
+
+struct alpha_machine_vector nautilus_mv __initmv = {
+ vector_name: "Nautilus",
+ DO_EV6_MMU,
+ DO_DEFAULT_RTC,
+ DO_IRONGATE_IO,
+ DO_IRONGATE_BUS,
+ machine_check: nautilus_machine_check,
+ max_dma_address: ALPHA_NAUTILUS_MAX_DMA_ADDRESS,
+ min_io_address: DEFAULT_IO_BASE,
+ min_mem_address: DEFAULT_MEM_BASE,
+
+ nr_irqs: 16,
+ irq_probe_mask: (_PROBE_MASK(16) & ~0x101UL),
+ update_irq_hw: nautilus_update_irq_hw,
+ ack_irq: common_ack_irq,
+ device_interrupt: isa_device_interrupt,
+
+ init_arch: irongate_init_arch,
+ init_irq: nautilus_init_irq,
+ init_pit: common_init_pit,
+ init_pci: common_init_pci,
+ kill_arch: nautilus_kill_arch,
+ pci_map_irq: nautilus_map_irq,
+ pci_swizzle: common_swizzle,
+};
+ALIAS_MV(nautilus)
#include "pci_impl.h"
#include "machvec_impl.h"
+static unsigned int hose_irq_masks[4] = {
+ 0xff0000, 0xfe0000, 0xff0000, 0xff0000
+};
+
+
+/* Note that `mask' initially contains only the low 64 bits. */
static void
rawhide_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p)
{
- if (irq >= 40) {
- /* PCI bus 1 with builtin NCR810 SCSI */
- *(vuip)MCPCIA_INT_MASK0(5) =
- (~((mask) >> 40) & 0x00ffffffU) | 0x00fe0000U;
- mb();
- /* ... and read it back to make sure it got written. */
- *(vuip)MCPCIA_INT_MASK0(5);
+ unsigned int saddle, hose, new_irq;
+
+ if (irq < 16) {
+ if (irq < 8)
+ outb(mask, 0x21); /* ISA PIC1 */
+ else
+ outb(mask >> 8, 0xA1); /* ISA PIC2 */
+ return;
}
- else if (irq >= 16) {
- /* PCI bus 0 with EISA bridge */
- *(vuip)MCPCIA_INT_MASK0(4) =
- (~((mask) >> 16) & 0x00ffffffU) | 0x00ff0000U;
- mb();
- /* ... and read it back to make sure it got written. */
- *(vuip)MCPCIA_INT_MASK0(4);
+
+ saddle = (irq > 63);
+ mask = _alpha_irq_masks[saddle];
+
+ if (saddle == 0) {
+ /* Saddle 0 includes EISA interrupts. */
+ mask >>= 16;
+ new_irq = irq - 16;
+ } else {
+ new_irq = irq - 64;
+ }
+
+ hose = saddle << 1;
+ if (new_irq >= 24) {
+ mask >>= 24;
+ hose += 1;
}
- else if (irq >= 8)
- outb(mask >> 8, 0xA1); /* ISA PIC2 */
- else
- outb(mask, 0x21); /* ISA PIC1 */
+
+ *(vuip)MCPCIA_INT_MASK0(hose) =
+ (~mask & 0x00ffffff) | hose_irq_masks[hose];
+ mb();
+ *(vuip)MCPCIA_INT_MASK0(hose);
}
static void
rawhide_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
- int irq, ack;
-
- ack = irq = (vector - 0x800) >> 4;
+ int irq;
- /* ??? A 4 bus RAWHIDE has 67 interrupts. Oops. We need
- something wider than one word for our own internal
- manipulations. */
+ irq = (vector - 0x800) >> 4;
/*
* The RAWHIDE SRM console reports PCI interrupts with a vector
* it line up with the actual bit numbers from the REQ registers,
* which is how we manage the interrupts/mask. Sigh...
*
- * also, PCI #1 interrupts are offset some more... :-(
+ * Also, PCI #1 interrupts are offset some more... :-(
*/
- if (irq == 52)
- ack = irq = 56; /* SCSI on PCI 1 is special */
- else {
- if (irq >= 24) /* adjust all PCI interrupts down 8 */
- ack = irq = irq - 8;
- if (irq >= 48) /* adjust PCI bus 1 interrupts down another 8 */
- ack = irq = irq - 8;
+
+ if (irq == 52) {
+ /* SCSI on PCI1 is special. */
+ irq = 72;
}
- handle_irq(irq, ack, regs);
+ /* Adjust by which hose it is from. */
+ irq -= ((irq + 16) >> 2) & 0x38;
+
+ handle_irq(irq, irq, regs);
}
static void __init
rawhide_init_irq(void)
{
- STANDARD_INIT_IRQ_PROLOG;
+ struct pci_controler *hose;
- /* HACK ALERT! only PCI busses 0 and 1 are used currently,
- (MIDs 4 and 5 respectively) and routing is only to CPU #1*/
+ mcpcia_init_hoses();
- *(vuip)MCPCIA_INT_MASK0(4) =
- (~((alpha_irq_mask) >> 16) & 0x00ffffffU) | 0x00ff0000U; mb();
- /* ... and read it back to make sure it got written. */
- *(vuip)MCPCIA_INT_MASK0(4);
+ STANDARD_INIT_IRQ_PROLOG;
+
+ /* HACK ALERT! Routing is only to CPU #0. */
+ for (hose = hose_head; hose; hose = hose->next) {
+ int h = hose->index;
- *(vuip)MCPCIA_INT_MASK0(5) =
- (~((alpha_irq_mask) >> 40) & 0x00ffffffU) | 0x00fe0000U; mb();
- /* ... and read it back to make sure it got written. */
- *(vuip)MCPCIA_INT_MASK0(5);
+ *(vuip)MCPCIA_INT_MASK0(h) = hose_irq_masks[h];
+ mb();
+ *(vuip)MCPCIA_INT_MASK0(h);
+ }
enable_irq(2);
}
min_io_address: DEFAULT_IO_BASE,
min_mem_address: MCPCIA_DEFAULT_MEM_BASE,
- nr_irqs: 64,
- irq_probe_mask: _PROBE_MASK(64),
+ nr_irqs: 128,
+ irq_probe_mask: _PROBE_MASK(128),
update_irq_hw: rawhide_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: rawhide_srm_device_interrupt,
outb(mask, 0x21); /* ISA PIC1 */
else
outb(mask >> 8, 0xA1); /* ISA PIC2 */
- } else if (irq <= 31) {
- regaddr = 0x510 + ((irq - 16) & 0x0c);
- outl((mask >> ((irq - 16) & 0x0c)) & 0xf0000Ul, regaddr);
+ } else {
+ if (irq > 63)
+ mask = _alpha_irq_masks[1] << 16;
+ else
+ mask = mask >> ((irq - 16) & 0x30);
+ regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
+ outl(mask & 0xffff0000UL, regaddr);
}
}
takara_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
int irq = (vector - 0x800) >> 4;
-
- if (irq > 15)
- irq = ((vector - 0x800) >> 6) + 12;
-
handle_irq(irq, irq, regs);
}
{
STANDARD_INIT_IRQ_PROLOG;
- if (alpha_using_srm)
+ if (alpha_using_srm) {
alpha_mv.device_interrupt = takara_srm_device_interrupt;
-
- if (!alpha_using_srm) {
+ } else {
unsigned int ctlreg = inl(0x500);
/* Return to non-accelerated mode. */
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
+static int __init
+takara_map_irq_srm(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static char irq_tab[15][5] __initlocaldata = {
+ { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
+ { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
+ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
+ { -1, -1, -1, -1, -1}, /* slot 9 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 10 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 11 == nothing */
+ /* These are behind the bridges. */
+ { 12, 12, 13, 14, 15}, /* slot 12 == nothing */
+		{    8,    8,    9,   10,   11}, /* slot 13 == nothing */
+ { 4, 4, 5, 6, 7}, /* slot 14 == nothing */
+ { 0, 0, 1, 2, 3}, /* slot 15 == nothing */
+ { -1, -1, -1, -1, -1}, /* slot 16 == nothing */
+ {64+ 0, 64+0, 64+1, 64+2, 64+3}, /* slot 17= device 4 */
+ {48+ 0, 48+0, 48+1, 48+2, 48+3}, /* slot 18= device 3 */
+ {32+ 0, 32+0, 32+1, 32+2, 32+3}, /* slot 19= device 2 */
+ {16+ 0, 16+0, 16+1, 16+2, 16+3}, /* slot 20= device 1 */
+ };
+ const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
+ int irq = COMMON_TABLE_LOOKUP;
+ if (irq >= 0 && irq < 16) {
+ /* Guess that we are behind a bridge. */
+ unsigned int busslot = PCI_SLOT(dev->bus->self->devfn);
+ irq += irq_tab[busslot-min_idsel][0];
+ }
+ return irq;
+}
+
static int __init
takara_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
if (pin == 1)
pin += (20 - busslot);
else {
- /* Must be a card-based bridge. */
- printk(KERN_WARNING "takara_swizzle: cannot handle "
- "card-bridge behind builtin bridge yet.\n");
+ printk(KERN_WARNING "takara_swizzle: can only "
+ "handle cards with INTA IRQ pin.\n");
}
+ } else {
+ /* Must be a card-based bridge. */
+ printk(KERN_WARNING "takara_swizzle: cannot handle "
+ "card-bridge behind builtin bridge yet.\n");
}
*pinp = pin;
static void __init
takara_init_pci(void)
{
+ if (alpha_using_srm)
+ alpha_mv.pci_map_irq = takara_map_irq_srm;
+
common_init_pci();
- /* ns87312_enable_ide(0x26e); */
+ ns87312_enable_ide(0x26e);
}
min_io_address: DEFAULT_IO_BASE,
min_mem_address: CIA_DEFAULT_MEM_BASE,
- nr_irqs: 20,
- irq_probe_mask: _PROBE_MASK(20),
+ nr_irqs: 128,
+ irq_probe_mask: _PROBE_MASK(48),
update_irq_hw: takara_update_irq_hw,
ack_irq: common_ack_irq,
device_interrupt: takara_device_interrupt,
#ifdef CONFIG_RTC
void
-rtc_init_pit (void)
+rtc_init_pit(void)
{
unsigned char control;
outb(0x31, 0x42);
outb(0x13, 0x42);
}
+
+void
+rtc_kill_pit(void)
+{
+ unsigned char control;
+
+ cli();
+
+ /* Reset periodic interrupt frequency. */
+ CMOS_WRITE(0x26, RTC_FREQ_SELECT);
+
+ /* Turn on periodic interrupts. */
+ control = CMOS_READ(RTC_CONTROL);
+ control |= RTC_PIE;
+ CMOS_WRITE(control, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+
+ sti();
+}
#endif
void
* Alpha IO and memory functions.. Just expand the inlines in the header
* files..
*/
+
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/string.h>
+
#include <asm/io.h>
unsigned int _inb(unsigned long addr)
}
mb();
}
+
+void
+scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
+{
+ if (! __is_ioaddr((unsigned long) s)) {
+ /* Source is memory. */
+ if (! __is_ioaddr((unsigned long) d))
+ memcpy(d, s, count);
+ else
+ memcpy_toio(d, s, count);
+ } else {
+ /* Source is screen. */
+ if (! __is_ioaddr((unsigned long) d))
+ memcpy_fromio(d, s, count);
+ else {
+ /* FIXME: Should handle unaligned ops and
+ operation widening. */
+ count /= 2;
+ while (count--) {
+ u16 tmp = __raw_readw((unsigned long)(s++));
+ __raw_writew(tmp, (unsigned long)(d++));
+ }
+ }
+ }
+}
* paging_init() sets up the page tables: in the alpha version this actually
* unmaps the bootup page table (as we're now in KSEG, so we don't need it).
*/
-void paging_init(void)
+void
+paging_init(void)
{
unsigned long newptbr;
unsigned long original_pcb_ptr;
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
- else
- {
+ else {
zones_size[ZONE_DMA] = dma_pfn;
zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
}
- /* initialize mem_map[] */
+ /* Initialize mem_map[]. */
free_area_init(zones_size);
/* Initialize the kernel's page tables. Linux puts the vptb in
}
#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+void
+free_initrd_mem(unsigned long start, unsigned long end)
{
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(mem_map + MAP_NR(start));
}
return str;
}
+
+void __init
+pcibios_align_resource(void *data, struct resource *res, unsigned long size)
+{
+}
static void ecard_proc_init(void)
{
- proc_bus_ecard_dir = create_proc_entry("ecard", S_IFDIR, proc_bus);
+ proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
get_ecard_dev_info);
}
*/
static inline void check_smp_invalidate(int cpu)
{
- if (test_bit(cpu, &smp_invalidate_needed)) {
- struct mm_struct *mm = current->mm;
- clear_bit(cpu, &smp_invalidate_needed);
- if (mm)
- atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
- local_flush_tlb();
- }
+ if (test_bit(cpu, &smp_invalidate_needed))
+ do_flush_tlb_local();
}
static void show(char * str)
* Hannover, Germany
* hm@ix.de
*
- * Copyright 1997--1999 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright 1997--1999 Martin Mares <mj@suse.cz>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
printk(KERN_ERR "PCI: I/O Region %s/%d too large (%ld bytes)\n", dev->slot_name, i, size);
return -EFBIG;
}
- if (allocate_resource(pr, r, size, 0x1000, ~0, 1024, dev)) {
+ if (allocate_resource(pr, r, size, 0x1000, ~0, 1024, NULL, NULL)) {
printk(KERN_ERR "PCI: Allocation of I/O region %s/%d (%ld bytes) failed\n", dev->slot_name, i, size);
return -EBUSY;
}
} else {
- if (allocate_resource(pr, r, size, 0x10000000, ~0, size, dev)) {
+ if (allocate_resource(pr, r, size, 0x10000000, ~0, size, NULL, NULL)) {
printk(KERN_ERR "PCI: Allocation of memory region %s/%d (%ld bytes) failed\n", dev->slot_name, i, size);
return -EBUSY;
}
pcibios_assign_resources();
}
-unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
- unsigned long start, unsigned long size)
-{
- return start;
-}
-
int pcibios_enable_resources(struct pci_dev *dev)
{
u16 cmd, old_cmd;
/*
* Low-Level PCI Support for PC
*
- * (c) 1999 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Martin Mares <mj@suse.cz>
*/
#include <linux/config.h>
DBG("PCI: BIOS probe returned s=%02x hw=%02x ver=%02x.%02x l=%02x\n",
status, hw_mech, major_ver, minor_ver, last_bus);
if (status || signature != PCI_SIGNATURE) {
- printk (KERN_ERR "PCI: BIOS BUG #%x[%08x] found, report to <mj@ucw.cz>\n",
+ printk (KERN_ERR "PCI: BIOS BUG #%x[%08x] found, report to <mj@suse.cz>\n",
status, signature);
return 0;
}
if (sum != 0)
continue;
if (check->fields.revision != 0) {
- printk("PCI: unsupported BIOS32 revision %d at 0x%p, report to <mj@ucw.cz>\n",
+ printk("PCI: unsupported BIOS32 revision %d at 0x%p, report to <mj@suse.cz>\n",
check->fields.revision, check);
continue;
}
/*
* Low-Level PCI Support for SGI Visual Workstation
*
- * (c) 1999 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Martin Mares <mj@suse.cz>
*/
#include <linux/config.h>
#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
-/* System RAM - interrupted by the 640kB-1M hole */
-#define code_resource (ram_resources[3])
-#define data_resource (ram_resources[4])
-static struct resource ram_resources[] = {
- { "System RAM", 0x000000, 0x09ffff, IORESOURCE_BUSY },
- { "System RAM", 0x100000, 0x100000, IORESOURCE_BUSY },
- { "Video RAM area", 0x0a0000, 0x0bffff, IORESOURCE_BUSY },
- { "Kernel code", 0x100000, 0 },
- { "Kernel data", 0, 0 }
-};
+static struct resource code_resource = { "Kernel code", 0x100000, 0 };
+static struct resource data_resource = { "Kernel data", 0, 0 };
+static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
/* System ROM resources */
#define MAXROMS 6
*/
bootmap_size = init_bootmem(start_pfn, max_low_pfn);
- /*
- * FIXME: what about high memory?
- */
- ram_resources[1].end = PFN_PHYS(max_low_pfn);
-
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
#endif
/*
- * Request the standard RAM and ROM resources -
- * they eat up PCI memory space
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
*/
- request_resource(&iomem_resource, ram_resources+0);
- request_resource(&iomem_resource, ram_resources+1);
- request_resource(&iomem_resource, ram_resources+2);
- request_resource(ram_resources+1, &code_resource);
- request_resource(ram_resources+1, &data_resource);
probe_roms();
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+			 * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ }
+ }
+ request_resource(&iomem_resource, &vram_resource);
/* request I/O space for devices used on all i[345]86 PCs */
for (i = 0; i < STANDARD_IO_RESOURCES; i++)
/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
-volatile unsigned long smp_invalidate_needed;
+volatile unsigned long smp_invalidate_needed; /* immediate flush required */
+unsigned int cpu_tlbbad[NR_CPUS]; /* flush before returning to user space */
/*
* the following functions deal with sending IPIs between CPUs.
/*
* Take care of "crossing" invalidates
*/
- if (test_bit(cpu, &smp_invalidate_needed)) {
- struct mm_struct *mm = current->mm;
- clear_bit(cpu, &smp_invalidate_needed);
- if (mm)
- atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
- local_flush_tlb();
- }
+ if (test_bit(cpu, &smp_invalidate_needed))
+ do_flush_tlb_local();
+
--stuck;
if (!stuck) {
printk("stuck on TLB IPI wait (CPU#%d)\n",cpu);
*/
void flush_tlb_current_task(void)
{
- unsigned long vm_mask = 1 << current->processor;
+ unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = current->mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
void flush_tlb_mm(struct mm_struct * mm)
{
- unsigned long vm_mask = 1 << current->processor;
+ unsigned long vm_mask = 1 << smp_processor_id();
unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
mm->cpu_vm_mask = 0;
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
- unsigned long vm_mask = 1 << current->processor;
+ unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = vma->vm_mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
flush_tlb_others(cpu_mask);
}
-void flush_tlb_all(void)
+static inline void do_flush_tlb_all_local(void)
{
- flush_tlb_others(~(1 << current->processor));
local_flush_tlb();
+ if(current->mm==0) {
+ unsigned long cpu = smp_processor_id();
+		clear_bit(cpu, &current->active_mm->cpu_vm_mask);
+
+ cpu_tlbbad[cpu] = 1;
+ }
+}
+
+static void flush_tlb_all_ipi(void* info)
+{
+ do_flush_tlb_all_local();
}
+void flush_tlb_all(void)
+{
+ if(cpu_online_map ^ (1<<smp_processor_id()))
+ smp_call_function (flush_tlb_all_ipi,0,1,1);
+
+ do_flush_tlb_all_local();
+}
/*
* this function sends a 'reschedule' IPI to another CPU.
*/
asmlinkage void smp_invalidate_interrupt(void)
{
- struct task_struct *tsk = current;
- unsigned int cpu = tsk->processor;
+ if (test_bit(smp_processor_id(), &smp_invalidate_needed))
+ do_flush_tlb_local();
- if (test_and_clear_bit(cpu, &smp_invalidate_needed)) {
- struct mm_struct *mm = tsk->mm;
- if (mm)
- atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
- local_flush_tlb();
- }
ack_APIC_irq();
}
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
/* XXX FIXME - update OF device tree node interrupt property */
}
+
+void __init
+pcibios_align_resource(void *data, struct resource *res, unsigned long size)
+{
+}
{
static PROC_DirectoryEntry_T *StatusProcEntry;
int ControllerNumber;
- DAC960_ProcDirectoryEntry = create_proc_entry("driver/rd", S_IFDIR, NULL);
+ DAC960_ProcDirectoryEntry = proc_mkdir("driver/rd", NULL);
StatusProcEntry = create_proc_read_entry("status", 0,
DAC960_ProcDirectoryEntry,
DAC960_ProcReadStatus, NULL);
static void __init ida_procinit(int i)
{
if (proc_array == NULL) {
- proc_array = create_proc_entry("driver/array", S_IFDIR, NULL);
+ proc_array = proc_mkdir("driver/array", NULL);
if (!proc_array) return;
}
if (drive->proc)
continue;
- drive->proc = create_proc_entry(drive->name, S_IFDIR, parent);
+ drive->proc = proc_mkdir(drive->name, parent);
if (drive->proc) {
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
if (driver) {
for (h = 0; h < MAX_HWIFS; h++) {
ide_hwif_t *hwif = &ide_hwifs[h];
- int exist = (hwif->proc != NULL);
if (!hwif->present)
continue;
- if (!exist)
- hwif->proc = create_proc_entry(hwif->name, S_IFDIR, proc_ide_root);
- if (!hwif->proc)
- return;
- if (!exist)
+ if (!hwif->proc) {
+ hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
+ if (!hwif->proc)
+ return;
ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
+ }
create_proc_ide_drives(hwif);
}
}
void proc_ide_create(void)
{
- proc_ide_root = create_proc_entry("ide", S_IFDIR, 0);
+ proc_ide_root = proc_mkdir("ide", 0);
if (!proc_ide_root) return;
create_proc_ide_interfaces();
struct proc_dir_entry *ent;
int i, j;
- drm_root = create_proc_entry("video", S_IFDIR, NULL);
+ drm_root = proc_mkdir("video", NULL);
if (!drm_root) {
DRM_ERROR("Cannot create /proc/video\n");
return -1;
add some global support for /proc/video. */
for (i = 0; i < 8; i++) {
sprintf(drm_slot_name, "video/%d", i);
- drm_dev_root = create_proc_entry(drm_slot_name, S_IFDIR, NULL);
+ drm_dev_root = proc_mkdir(drm_slot_name, NULL);
if (!drm_dev_root) {
DRM_ERROR("Cannot create /proc/%s\n", drm_slot_name);
remove_proc_entry("video", NULL);
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
- ent = create_proc_entry(drm_proc_list[i].name,
- S_IFREG|S_IRUGO, drm_dev_root);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/%s/%s\n",
- drm_slot_name, drm_proc_list[i].name);
- for (j = 0; j < i; j++)
- remove_proc_entry(drm_proc_list[i].name,
- drm_dev_root);
- remove_proc_entry(drm_slot_name, NULL);
- remove_proc_entry("video", NULL);
- return -1;
- }
- ent->read_proc = drm_proc_list[i].f;
- ent->data = dev;
+ if (create_proc_read_entry(drm_proc_list[i].name,0,drm_dev_root,
+ drm_proc_list[i].f, dev))
+ continue;
+
+ DRM_ERROR("Cannot create /proc/%s/%s\n",
+ drm_slot_name, drm_proc_list[i].name);
+ for (j = 0; j < i; j++)
+ remove_proc_entry(drm_proc_list[i].name, drm_dev_root);
+ remove_proc_entry(drm_slot_name, NULL);
+ remove_proc_entry("video", NULL);
+ return -1;
}
return 0;
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/wrapper.h>
sprintf(buff, "iop%d", pctrl->unit);
- dir = create_proc_entry(buff, S_IFDIR, root);
+ dir = proc_mkdir(buff, root);
if(!dir)
return -1;
{
sprintf(buff, "%0#5x", dev->lct_data->tid);
- dir1 = create_proc_entry(buff, S_IFDIR, dir);
+ dir1 = proc_mkdir(buff, dir);
dev->proc_entry = dir1;
if(!dir1)
struct i2o_controller *pctrl = NULL;
int i;
- i2o_proc_dir_root = create_proc_entry("i2o", S_IFDIR, 0);
+ i2o_proc_dir_root = proc_mkdir("i2o", 0);
if(!i2o_proc_dir_root)
return -1;
*pp = card;
driver->ncontroller++;
sprintf(card->procfn, "capi/controllers/%d", card->cnr);
- card->procent = create_proc_entry(card->procfn, 0, 0);
- if (card->procent) {
- card->procent->read_proc =
- (int (*)(char *,char **,off_t,int,int *,void *))
- driver->ctr_read_proc;
- card->procent->data = card;
- }
-
+ card->procent = create_proc_read_entry(card->procfn, 0, 0,
+ driver->ctr_read_proc, card);
ncards++;
printk(KERN_NOTICE "kcapi: Controller %d: %s attached\n",
card->cnr, card->name);
t1isa_driver = driver;
#endif
sprintf(driver->procfn, "capi/drivers/%s", driver->name);
- driver->procent = create_proc_entry(driver->procfn, 0, 0);
- if (driver->procent) {
- if (driver->driver_read_proc) {
- driver->procent->read_proc =
- (int (*)(char *,char **,off_t,int,int *,void *))
- driver->driver_read_proc;
- } else {
- driver->procent->read_proc = driver_read_proc;
- }
- driver->procent->data = driver;
- }
+ driver->procent = create_proc_read_entry(driver->procfn, 0, 0,
+ driver->driver_read_proc
+ ? driver->driver_read_proc
+ : driver_read_proc,
+ driver);
return &di;
}
init_waitqueue_head(&rd_queue);
#ifdef CONFIG_PROC_FS
- isdn_proc_entry = create_proc_entry("isdn", S_IFDIR | S_IRUGO | S_IXUGO ,proc_net);
+ isdn_proc_entry = proc_mkdir("isdn", proc_net);
if (!isdn_proc_entry)
return(-1);
- isdn_divert_entry = create_proc_entry("divert",S_IFREG | S_IRUGO,isdn_proc_entry);
+ isdn_divert_entry = create_proc_entry("divert",0,isdn_proc_entry);
if (!isdn_divert_entry)
{
remove_proc_entry("isdn",proc_net);
#if OLYMPIC_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *ent ;
-
- ent = create_proc_entry("net/olympic_tr",0,0);
- ent->read_proc = &olympic_proc_info ;
+ create_proc_read_entry("net/olympic_tr",0,0,olympic_proc_info,NULL);
#endif
#endif
for (i = 0; (i<OLYMPIC_MAX_ADAPTERS); i++) {
struct nubus_dir dir;
sprintf(name, "%x", ent.type);
- e = create_proc_entry(name, S_IFDIR, parent);
+ e = proc_mkdir(name, parent);
if (!e) return;
/* And descend */
/* Create a directory */
sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = create_proc_entry(name, S_IFDIR,
- proc_bus_nubus_dir);
+ e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
if (!e)
return -ENOMEM;
{
if (!MACH_IS_MAC)
return;
- proc_bus_nubus_dir = create_proc_entry("nubus", S_IFDIR, proc_bus);
+ proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus);
create_proc_info_entry("devices", 0, proc_bus_nubus_dir,
get_nubus_dev_info);
proc_bus_nubus_add_devices();
*
* PCI Bus Services -- Function For Backward Compatibility
*
- * Copyright 1998, 1999 Martin Mares <mj@ucw.cz>
+ * Copyright 1998, 1999 Martin Mares <mj@suse.cz>
*/
#include <linux/types.h>
/*
* Generate devlist.h from the PCI ID file.
*
- * (c) 1999 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Martin Mares <mj@suse.cz>
*/
#include <stdio.h>
* Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang
*
- * Copyright 1997 -- 1999 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright 1997 -- 1999 Martin Mares <mj@suse.cz>
*/
#include <linux/types.h>
dev = dev_cache;
memset(dev, 0, sizeof(*dev));
dev->bus = bus;
- dev->sysdata = bus->sysdata;
dev->devfn = devfn;
if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
child->self = dev;
child->parent = bus;
child->ops = bus->ops;
- child->sysdata = bus->sysdata;
/*
* Set up the primary, secondary and subordinate
1056 ICL
# Motorola made a mistake and used this value, please duplicate Moto
# entries here -- Cort
-1507 Motorola Computer Group
+1507 Motorola
0001 MPC105 [Eagle]
0002 MPC106 [Grackle]
+ 0003 MPC8240 [Kahlua]
0100 MC145575 [HFC-PCI]
0431 KTI829c 100VG
4801 Raven
4802 Falcon
4803 Hawk
4806 CPX8216
-1057 Motorola Computer Group
+1057 Motorola
0001 MPC105 [Eagle]
0002 MPC106 [Grackle]
0100 MC145575 [HFC-PCI]
*
* Procfs interface for the PCI bus.
*
- * Copyright (c) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright (c) 1997--1999 Martin Mares <mj@suse.cz>
*/
#include <linux/types.h>
if (!(de = bus->procdir)) {
sprintf(name, "%02x", bus->number);
- de = bus->procdir = create_proc_entry(name, S_IFDIR, proc_bus_pci_dir);
+ de = bus->procdir = proc_mkdir(name, proc_bus_pci_dir);
if (!de)
return -ENOMEM;
}
static int __init pci_proc_init(void)
{
if (pci_present()) {
- proc_bus_pci_dir = create_proc_entry("pci", S_IFDIR, proc_bus);
+ proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
create_proc_info_entry("devices",0, proc_bus_pci_dir,
get_pci_dev_info);
proc_bus_pci_add(pci_root);
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
- * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ * Copyright (c) 1999 Martin Mares <mj@suse.cz>
*
* The bridge optimization stuff has been removed. If you really
* have a silly BIOS which is unable to set your host bridge right,
DBGC((" for root[%lx:%lx] min[%lx] size[%lx]\n",
root->start, root->end, min, size));
- if (allocate_resource(root, res, size, min, -1, size, dev) < 0) {
+ if (allocate_resource(root, res, size, min, -1, size, pcibios_align_resource, dev) < 0) {
printk(KERN_ERR
"PCI: Failed to allocate resource %d for %s\n",
i, dev->name);
#ifdef CONFIG_PROC_FS
if (proc_pccard) {
char name[3];
-#ifdef PCMCIA_DEBUG
- struct proc_dir_entry *ent;
-#endif
sprintf(name, "%02d", i);
- s->proc = create_proc_entry(name, S_IFDIR, proc_pccard);
+ s->proc = proc_mkdir(name, proc_pccard);
#ifdef PCMCIA_DEBUG
- ent = create_proc_entry("clients", 0, s->proc);
- ent->read_proc = proc_read_clients;
- ent->data = s;
+ create_proc_read_entry("clients",0,s->proc,proc_read_clients,s);
#endif
ss_entry(ns, SS_ProcSetup, s->proc);
}
apm_register_callback(&handle_apm_event);
#endif
#ifdef CONFIG_PROC_FS
- proc_pccard = create_proc_entry("pccard", S_IFDIR, proc_bus);
+ proc_pccard = proc_mkdir("pccard", proc_bus);
#endif
return 0;
}
major_dev = i;
#ifdef CONFIG_PROC_FS
- if (proc_pccard) {
- struct proc_dir_entry *ent;
- ent = create_proc_entry("drivers", 0, proc_pccard);
- ent->read_proc = proc_read_drivers;
- }
+ if (proc_pccard)
+ create_proc_read_entry("drivers",0,proc_pccard,proc_read_drivers,NULL);
init_status = 0;
#endif
return 0;
static void pcic_proc_setup(u_short sock, struct proc_dir_entry *base)
{
socket_info_t *s = &socket[sock];
- struct proc_dir_entry *ent;
- ent = create_proc_entry("info", 0, base);
- ent->read_proc = proc_read_info;
- ent->data = s;
- ent = create_proc_entry("exca", 0, base);
- ent->read_proc = proc_read_exca;
- ent->data = s;
+ create_proc_read_entry("info", 0, base, proc_read_info, s);
+ create_proc_read_entry("exca", 0, base, proc_read_exca, s);
#ifdef CONFIG_PCI
- if (s->flags & (IS_PCI|IS_CARDBUS)) {
- ent = create_proc_entry("pci", 0, base);
- ent->read_proc = proc_read_pci;
- ent->data = s;
- }
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ create_proc_read_entry("pci", 0, base, proc_read_pci, s);
#endif
#ifdef CONFIG_CARDBUS
- if (s->flags & IS_CARDBUS) {
- ent = create_proc_entry("cardbus", 0, base);
- ent->read_proc = proc_read_cardbus;
- ent->data = s;
- }
+ if (s->flags & IS_CARDBUS)
+ create_proc_read_entry("cardbus", 0, base, proc_read_cardbus, s);
#endif
s->proc = base;
}
* This makes /proc/scsi and /proc/scsi/scsi visible.
*/
#ifdef CONFIG_PROC_FS
- proc_scsi = create_proc_entry ("scsi", S_IFDIR, 0);
+ proc_scsi = proc_mkdir("scsi", 0);
if (!proc_scsi) {
printk (KERN_ERR "cannot init /proc/scsi\n");
return -ENOMEM;
* This makes /proc/scsi and /proc/scsi/scsi visible.
*/
#ifdef CONFIG_PROC_FS
- proc_scsi = create_proc_entry ("scsi", S_IFDIR, 0);
+ proc_scsi = proc_mkdir("scsi", 0);
if (!proc_scsi) {
printk (KERN_ERR "cannot init /proc/scsi\n");
return -ENOMEM;
struct Scsi_Host *hpnt;
char name[10]; /* see scsi_unregister_host() */
- tpnt->proc_dir = create_proc_entry(tpnt->proc_name, S_IFDIR, proc_scsi);
+ tpnt->proc_dir = proc_mkdir(tpnt->proc_name, proc_scsi);
tpnt->proc_dir->owner = tpnt->module;
hpnt = scsi_hostlist;
goto err_dev4;
#ifdef ES1371_DEBUG
/* intialize the debug proc device */
- s->ps = create_proc_entry("es1371", S_IFREG | S_IRUGO, NULL);
- if (s->ps)
- s->ps->read_proc = proc_es1371_dump;
+ s->ps = create_proc_read_entry("es1371",0,NULL,proc_es1371_dump,NULL);
#endif /* ES1371_DEBUG */
/* initialize codec registers */
if (!usbdir)
return;
sprintf(buf, "%03d", bus->busnum);
- if (!(bus->proc_entry = create_proc_entry(buf, S_IFDIR, usbdir)))
+ if (!(bus->proc_entry = proc_mkdir(buf, usbdir)))
return;
bus->proc_entry->data = bus;
}
int proc_usb_init (void)
{
- usbdir = create_proc_entry ("usb", S_IFDIR, proc_bus);
+ usbdir = proc_mkdir ("usb", proc_bus);
if (!usbdir) {
printk ("proc_usb: cannot create /proc/bus/usb entry\n");
return -1;
char name[4];
sprintf(name, "%02x", slot);
- entry = create_proc_entry(name, S_IFREG | S_IRUGO, proc_bus_zorro_dir);
+ entry = create_proc_entry(name, 0, proc_bus_zorro_dir);
if (!entry)
return -ENOMEM;
entry->ops = &proc_bus_zorro_inode_operations;
if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
return;
- proc_bus_zorro_dir = create_proc_entry("zorro", S_IFDIR, proc_bus);
+ proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus);
create_proc_info_entry("devices", 0, proc_bus_zorro_dir, get_zorro_dev_info);
for (slot = 0; slot < zorro_num_autocon; slot++)
zorro_proc_attach_device(slot);
int error = -ENOENT;
struct proc_dir_entry *status = NULL, *reg;
- bm_dir = create_proc_entry("sys/fs/binfmt_misc", S_IFDIR, NULL);
+ bm_dir = proc_mkdir("sys/fs/binfmt_misc", NULL); /* WTF??? */
if (!bm_dir)
goto out;
bm_dir->owner = THIS_MODULE;
NULL, /* truncate */
coda_permission, /* permission */
NULL, /* smap */
- NULL, /* update page */
coda_revalidate_inode /* revalidate */
};
reset_coda_cache_inv_stats();
#ifdef CONFIG_PROC_FS
- proc_fs_coda = create_proc_entry("coda", S_IFDIR, proc_root_fs);
+ proc_fs_coda = proc_mkdir("coda", proc_root_fs);
proc_fs_coda->owner = THIS_MODULE;
coda_proc_create("vfs_stats", coda_vfs_stats_get_info);
coda_proc_create("upcall_stats", coda_upcall_stats_get_info);
void proc_export_init(void)
{
- if (!create_proc_entry("fs/nfs", S_IFDIR, 0))
+ if (!proc_mkdir("fs/nfs", 0))
return;
create_proc_read_entry("fs/nfs/exports", 0, 0, exp_procfs_exports,NULL);
}
struct device_node *root;
if ( !have_of )
return;
- proc_device_tree = create_proc_entry("device-tree", S_IFDIR, 0);
+ proc_device_tree = proc_mkdir("device-tree", 0);
if (proc_device_tree == 0)
return;
root = find_path_device("/");
*/
void __init proc_tty_init(void)
{
- struct proc_dir_entry *ent;
-
- ent = create_proc_entry("tty", S_IFDIR, 0);
- if (!ent)
+ if (!proc_mkdir("tty", 0))
return;
- proc_tty_ldisc = create_proc_entry("tty/ldisc", S_IFDIR, 0);
- proc_tty_driver = create_proc_entry("tty/driver", S_IFDIR, 0);
+ proc_tty_ldisc = proc_mkdir("tty/ldisc", 0);
+ proc_tty_driver = proc_mkdir("tty/driver", 0);
- ent = create_proc_entry("tty/ldiscs", 0, 0);
- ent->read_proc = tty_ldiscs_read_proc;
-
- ent = create_proc_entry("tty/drivers", 0, 0);
- ent->read_proc = tty_drivers_read_proc;
+ create_proc_read_entry("tty/ldiscs", 0, 0, tty_ldiscs_read_proc,NULL);
+ create_proc_read_entry("tty/drivers", 0, 0, tty_drivers_read_proc,NULL);
}
-
struct buffer_head *bh;
int res = -EIO;
- QNX4DEBUG(("qnx4: readpage offset=[%ld]\n", (long) page->offset));
+ QNX4DEBUG(("qnx4: readpage index=[%ld]\n", (long) page->index));
if (qnx4_ino->i_xblk != 0) {
printk("qnx4: sorry, this file is extended, don't know how to handle it (yet) !\n");
buf = page_address(page);
clear_bit(PG_uptodate, &page->flags);
clear_bit(PG_error, &page->flags);
- offset = page->offset;
+ offset = page->index<<PAGE_SHIFT;
if (offset < inode->i_size) {
res = 0;
sb->s_dirt = 0;
}
-static void qnx4_put_inode(struct inode *inode)
-{
- if (inode->i_nlink != 0) {
- return;
- }
- inode->i_size = 0;
-}
-
static void qnx4_write_inode(struct inode *inode)
{
struct qnx4_inode_entry *raw_inode;
#else
NULL,
#endif
+ NULL, /* put_inode */
#ifdef CONFIG_QNX4FS_RW
- qnx4_put_inode,
qnx4_delete_inode,
- NULL, /* notify_change */
#else
- NULL, /* put_inode */
NULL, /* delete_inode */
- NULL, /* notify_change */
#endif
+ NULL, /* notify_change */
qnx4_put_super,
#ifdef CONFIG_QNX4FS_RW
qnx4_write_super,
*/
#define DATA_BUFFER_USED(bh) \
- (atomic_read(&bh->b_count) || buffer_locked(bh))
+ (atomic_read(&bh->b_count)>1 || buffer_locked(bh))
/* We throw away any data beyond inode->i_size. */
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
#define DATA_BUFFER_USED(bh) \
- (atomic_read(&bh->b_count) || buffer_locked(bh))
+ (atomic_read(&bh->b_count)>1 || buffer_locked(bh))
static int ufs_trunc_direct (struct inode * inode)
{
--- /dev/null
+#ifndef __ALPHA_IRONGATE__H__
+#define __ALPHA_IRONGATE__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * IRONGATE is the internal name for the AMD-751 K7 core logic chipset
+ * which provides memory controller and PCI access for NAUTILUS-based
+ * EV6 (21264) systems.
+ *
+ * This file is based on:
+ *
+ * IronGate management library, (c) 1999 Alpha Processor, Inc.
+ * Begun 19 January 1999 by Stig Telfer, Alpha Processor, Inc.
+ */
+
+/*
+ * The 21264 supports, and internally recognizes, a 44-bit physical
+ * address space that is divided equally between memory address space
+ * and I/O address space. Memory address space resides in the lower
+ * half of the physical address space (PA[43]=0) and I/O address space
+ * resides in the upper half of the physical address space (PA[43]=1).
+ *
+ */
+
+/* Eh? Not offset from memory? */
+#define IRONGATE_DMA_WIN_BASE (0U)
+#define IRONGATE_DMA_WIN_SIZE (0U)
+
+/*
+ * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
+ * through the routines given is 32-bit.
+ *
+ * The first 0x40 bytes are standard as per the PCI spec.
+ */
+
+typedef volatile __u32 igcsr32;
+
+typedef struct {
+ igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */
+ igcsr32 stat_cmd; /* 0x04 - status, command */
+ igcsr32 class; /* 0x08 - class code, rev ID */
+ igcsr32 latency; /* 0x0C - header type, PCI latency */
+ igcsr32 bar0; /* 0x10 - BAR0 - AGP */
+ igcsr32 bar1; /* 0x14 - BAR1 - GART */
+ igcsr32 bar2; /* 0x18 - Power Management reg block */
+
+ igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */
+
+ igcsr32 capptr; /* 0x34 - Capabilities pointer */
+
+ igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */
+
+ igcsr32 bacsr10; /* 0x40 - base address chip selects */
+ igcsr32 bacsr32; /* 0x44 - base address chip selects */
+ igcsr32 bacsr54; /* 0x48 - base address chip selects */
+
+ igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */
+
+ igcsr32 drammap; /* 0x50 - address mapping control */
+ igcsr32 dramtm; /* 0x54 - timing, driver strength */
+ igcsr32 dramms; /* 0x58 - ECC, mode/status */
+
+ igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */
+
+ igcsr32 biu0; /* 0x60 - bus interface unit */
+ igcsr32 biusip; /* 0x64 - Serial initialisation pkt */
+
+ igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */
+
+ igcsr32 mro; /* 0x70 - memory request optimiser */
+
+ igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */
+
+ igcsr32 whami; /* 0x80 - who am I */
+ igcsr32 pciarb; /* 0x84 - PCI arbitration control */
+ igcsr32 pcicfg; /* 0x88 - PCI config status */
+
+ igcsr32 rsrvd6[5]; /* 0x8C-0x9F reserved */
+
+ /* AGP (bus 1) control registers */
+ igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */
+ igcsr32 agpstat; /* 0xA4 - AGP status register */
+ igcsr32 agpcmd; /* 0xA8 - AGP control register */
+ igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */
+ igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */
+} Irongate0;
+
+/* Bitfield and mask register definitions */
+
+/* Device, vendor IDs - offset 0x00 */
+
+typedef union {
+ igcsr32 i; /* integer value of CSR */
+ struct {
+ unsigned v : 16;
+ unsigned d : 16;
+ } r; /* structured interpretation */
+} ig_dev_vendor_t;
+
+
+/* Status, command registers - offset 0x04 */
+
+/* Overlay for the PCI status/command dword.  View .i for the raw
+   32-bit value, .s for halves, .r for individual bits.
+   NOTE(review): the two full-width 'unsigned' members in .s make that
+   overlay 8 bytes, wider than igcsr32 -- they look like they were
+   meant to be 16-bit bitfields (command:16 / status:16); verify
+   against the AMD-751 datasheet before relying on .s. */
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned command;
+ unsigned status;
+ } s;
+ struct {
+ /* command register fields */
+ unsigned iospc : 1; /* always reads zero */
+ unsigned memspc : 1; /* PCI memory space accesses? */
+ unsigned iten : 1; /* always 1: can be bus initiator */
+ unsigned scmon : 1; /* always 0 special cycles not checked */
+ unsigned mwic : 1; /* always 0 - no mem write & invalid */
+ unsigned vgaps : 1; /* always 0 - palette rds not special */
+ unsigned per : 1; /* parity error resp: always 0 */
+ unsigned step : 1; /* address/data stepping : always 0 */
+ unsigned serre : 1; /* 1 = sys err output driver enable */
+ unsigned fbbce : 1; /* fast back-back cycle : always 0 */
+ unsigned zero1 : 6; /* must be zero */
+
+ /* status register fields */
+ unsigned zero2 : 4; /* must be zero */
+ unsigned cl : 1; /* config space capa list: always 1 */
+ unsigned pci66 : 1; /* 66 MHz PCI support - always 0 */
+ unsigned udf : 1; /* user defined features - always 0 */
+ unsigned fbbc : 1; /* back-back transactions - always 0 */
+ unsigned ppe : 1; /* PCI parity error detected (0) */
+ unsigned devsel : 2; /* DEVSEL timing (always 01) */
+ unsigned sta : 1; /* signalled target abort (0) */
+ unsigned rta : 1; /* recvd target abort */
+ unsigned ria : 1; /* recvd initiator abort */
+ unsigned serr : 1; /* SERR has been asserted */
+ unsigned dpe : 1; /* DRAM parity error (0) */
+ } r;
+} ig_stat_cmd_t;
+
+
+/* Revision ID, Programming interface, subclass, baseclass - offset 0x08 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ /* revision ID */
+ unsigned step : 4; /* stepping Revision ID */
+ unsigned die : 4; /* die Revision ID */
+ unsigned pif : 8; /* programming interface (0x00) */
+ unsigned sub : 8; /* subclass code (0x00) */
+ unsigned base: 8; /* baseclass code (0x06) */
+ } r;
+} ig_class_t;
+
+
+/* Latency Timer, PCI Header type - offset 0x0C */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned zero1:8; /* reserved */
+ unsigned lat : 8; /* latency in PCI bus clocks */
+ unsigned hdr : 8; /* PCI header type */
+ unsigned zero2:8; /* reserved */
+ } r;
+} ig_latency_t;
+
+
+/* Base Address Register 0 - offset 0x10 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned mem : 1; /* Reg pts to memory (always 0) */
+ unsigned type: 2; /* 32 bit register = 0b00 */
+ unsigned pref: 1; /* graphics mem prefetchable=1 */
+ unsigned baddrl : 21; /* 32M = minimum alloc -> all zero */
+ unsigned size : 6; /* size requirements for AGP */
+ unsigned zero : 1; /* reserved=0 */
+ } r;
+} ig_bar0_t;
+
+
+/* Base Address Register 1 - offset 0x14 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned mem : 1; /* BAR0 maps to memory -> 0 */
+ unsigned type : 2; /* BAR1 is 32-bit -> 0b00 */
+ unsigned pref : 1; /* graphics mem prefetchable=1 */
+ unsigned baddrl : 8; /* 4K alloc for AGP CSRs -> 0b00 */
+ unsigned baddrh : 20; /* base addr of AGP CSRs A[30:11] */
+ } r;
+} ig_bar1_t;
+
+
+/* Base Address Register 2 - offset 0x18 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned io : 1; /* BAR2 maps to I/O space -> 1 */
+ unsigned zero1: 1; /* reserved */
+ unsigned addr : 22; /* BAR2[31:10] - PM2_BLK base */
+ unsigned zero2: 8; /* reserved */
+ } r;
+} ig_bar2_t;
+
+
+/* Capabilities Pointer - offset 0x34 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned cap : 8; /* =0xA0, offset of AGP ctrl regs */
+ unsigned zero: 24; /* reserved */
+ } r;
+} ig_capptr_t;
+
+
+/* Base Address Chip Select Register 1,0 - offset 0x40 */
+/* Base Address Chip Select Register 3,2 - offset 0x44 */
+/* Base Address Chip Select Register 5,4 - offset 0x48 */
+
+typedef union {
+
+ igcsr32 i;
+ struct {
+ /* lower bank */
+ unsigned en0 : 1; /* memory bank enabled */
+ unsigned mask0 : 6; /* Address mask for A[28:23] */
+ unsigned base0 : 9; /* Bank Base Address A[31:23] */
+
+ /* upper bank */
+ unsigned en1 : 1; /* memory bank enabled */
+ unsigned mask1 : 6; /* Address mask for A[28:23] */
+ unsigned base1 : 9; /* Bank Base Address A[31:23] */
+ } r;
+} ig_bacsr_t, ig_bacsr10_t, ig_bacsr32_t, ig_bacsr54_t;
+
+
+/* SDRAM Address Mapping Control Register - offset 0x50 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned z1 : 1; /* reserved */
+ unsigned bnks0: 1; /* 0->2 banks in chip select 0 */
+ unsigned am0 : 1; /* row/column addressing */
+ unsigned z2 : 1; /* reserved */
+
+ unsigned z3 : 1; /* reserved */
+ unsigned bnks1: 1; /* 0->2 banks in chip select 1 */
+ unsigned am1 : 1; /* row/column addressing */
+ unsigned z4 : 1; /* reserved */
+
+ unsigned z5 : 1; /* reserved */
+ unsigned bnks2: 1; /* 0->2 banks in chip select 2 */
+ unsigned am2 : 1; /* row/column addressing */
+ unsigned z6 : 1; /* reserved */
+
+ unsigned z7 : 1; /* reserved */
+ unsigned bnks3: 1; /* 0->2 banks in chip select 3 */
+ unsigned am3 : 1; /* row/column addressing */
+ unsigned z8 : 1; /* reserved */
+
+ unsigned z9 : 1; /* reserved */
+ unsigned bnks4: 1; /* 0->2 banks in chip select 4 */
+ unsigned am4 : 1; /* row/column addressing */
+ unsigned z10 : 1; /* reserved */
+
+ unsigned z11 : 1; /* reserved */
+ unsigned bnks5: 1; /* 0->2 banks in chip select 5 */
+ unsigned am5 : 1; /* row/column addressing */
+ unsigned z12 : 1; /* reserved */
+
+ unsigned rsrvd: 8; /* reserved */
+ } r;
+} ig_drammap_t;
+
+
+/* DRAM timing and driver strength register - offset 0x54 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ /* DRAM timing parameters */
+ unsigned trcd : 2;
+ unsigned tcl : 2;
+ unsigned tras: 3;
+ unsigned trp : 2;
+ unsigned trc : 3;
+ unsigned icl: 2;
+ unsigned ph : 2;
+
+ /* Chipselect driver strength */
+ unsigned adra : 1;
+ unsigned adrb : 1;
+ unsigned ctrl : 3;
+ unsigned dqm : 1;
+ unsigned cs : 1;
+ unsigned clk: 1;
+ unsigned rsrvd:8;
+ } r;
+} ig_dramtm_t;
+
+
+/* DRAM Mode / Status and ECC Register - offset 0x58 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned chipsel : 6; /* failing ECC chip select */
+ unsigned zero1 : 2; /* always reads zero */
+ unsigned status : 2; /* ECC Detect logic status */
+ unsigned zero2 : 6; /* always reads zero */
+
+ unsigned cycles : 2; /* cycles per refresh, see table */
+ unsigned en : 1; /* ECC enable */
+ unsigned r : 1; /* Large burst enable (=0) */
+ unsigned bre : 1; /* Burst refresh enable */
+ unsigned zero3 : 2; /* reserved = 0 */
+ unsigned mwe : 1; /* Enable writes to DRAM mode reg */
+ unsigned type : 1; /* SDRAM = 0, default */
+ unsigned sdraminit : 1; /* SDRAM init - set params first! */
+ unsigned zero4 : 6; /* reserved = 0 */
+ } r;
+} ig_dramms_t;
+
+
+/*
+ * Memory spaces:
+ */
+
+/* ??? the following probably needs fixing */
+/* Irongate is consistent with a subset of the Tsunami memory map */
+/* XXX: Do we need to conditionalize on this? */
+#ifdef USE_48_BIT_KSEG
+#define IRONGATE_BIAS 0x80000000000UL
+#else
+#define IRONGATE_BIAS 0x10000000000UL
+#endif
+
+
+#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL)
+#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL)
+#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
+#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
+
+
+#define IRONGATE0 ((Irongate0 *) IRONGATE_CONF)
+
+/*
+ * Data structure for handling IRONGATE machine checks:
+ * This is the standard OSF logout frame
+ */
+
+#define SCB_Q_SYSERR 0x620 /* OSF definitions */
+#define SCB_Q_PROCERR 0x630
+#define SCB_Q_SYSMCHK 0x660
+#define SCB_Q_PROCMCHK 0x670
+
+/* Field-for-field this matches the generic EV6 logout frame
+   (el_common_EV6_mcheck added elsewhere in this patch); the __u64
+   members are raw 21264 internal processor registers captured by
+   PALcode at machine-check time. */
+struct el_IRONGATE_sysdata_mcheck {
+ __u32 FrameSize; /* Bytes, including this field */
+ __u32 FrameFlags; /* <31> = Retry, <30> = Second Error */
+ __u32 CpuOffset; /* Offset to CPU-specific info */
+ __u32 SystemOffset; /* Offset to system-specific info */
+ __u32 MCHK_Code;
+ __u32 MCHK_Frame_Rev;
+ __u64 I_STAT;
+ __u64 DC_STAT;
+ __u64 C_ADDR;
+ __u64 DC1_SYNDROME;
+ __u64 DC0_SYNDROME;
+ __u64 C_STAT;
+ __u64 C_STS;
+ __u64 RESERVED0;
+ __u64 EXC_ADDR;
+ __u64 IER_CM;
+ __u64 ISUM;
+ __u64 MM_STAT;
+ __u64 PAL_BASE;
+ __u64 I_CTL;
+ __u64 PCTX;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * Translate physical memory address as seen on (PCI) bus into
+ * a kernel virtual address and vv.
+ */
+
+__EXTERN_INLINE unsigned long irongate_virt_to_bus(void * address)
+{
+ /* IRONGATE_DMA_WIN_BASE is 0 above (flagged "Eh?"), so this is
+    currently just virt_to_phys(); the offset form is kept for
+    symmetry with the other Alpha core chipset headers. */
+ return virt_to_phys(address) + IRONGATE_DMA_WIN_BASE;
+}
+
+__EXTERN_INLINE void * irongate_bus_to_virt(unsigned long address)
+{
+ /* Exact inverse of irongate_virt_to_bus(). */
+ return phys_to_virt(address - IRONGATE_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
+ * K7 can only use linear accesses to get at PCI memory and I/O spaces.
+ */
+
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
+
+/* Port I/O: the port number is simply added to the linear I/O window
+   at IRONGATE_IO.  Every out* is followed by mb() so the write is
+   ordered before subsequent accesses. */
+__EXTERN_INLINE unsigned int irongate_inb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)(addr + IRONGATE_IO));
+}
+
+__EXTERN_INLINE void irongate_outb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)(addr + IRONGATE_IO));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int irongate_inw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)(addr + IRONGATE_IO));
+}
+
+__EXTERN_INLINE void irongate_outw(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)(addr + IRONGATE_IO));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int irongate_inl(unsigned long addr)
+{
+ return *(vuip)(addr + IRONGATE_IO);
+}
+
+__EXTERN_INLINE void irongate_outl(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr + IRONGATE_IO) = b;
+ mb();
+}
+
+/*
+ * Memory functions. All accesses are done through linear space.
+ */
+
+/* PCI memory space: addresses passed in here are already kernel
+   virtual addresses produced by irongate_ioremap() below, so the
+   accessors dereference them directly.  Byte/word loads and stores go
+   through the __kernel_* helpers (BWX instructions). */
+__EXTERN_INLINE unsigned long irongate_readb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)addr);
+}
+
+__EXTERN_INLINE unsigned long irongate_readw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)addr);
+}
+
+__EXTERN_INLINE unsigned long irongate_readl(unsigned long addr)
+{
+ return *(vuip)addr;
+}
+
+__EXTERN_INLINE unsigned long irongate_readq(unsigned long addr)
+{
+ return *(vulp)addr;
+}
+
+__EXTERN_INLINE void irongate_writeb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)addr);
+}
+
+__EXTERN_INLINE void irongate_writew(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)addr);
+}
+
+__EXTERN_INLINE void irongate_writel(unsigned int b, unsigned long addr)
+{
+ *(vuip)addr = b;
+}
+
+__EXTERN_INLINE void irongate_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp)addr = b;
+}
+
+/* "Remap" is a plain offset into the linear PCI memory window. */
+__EXTERN_INLINE unsigned long irongate_ioremap(unsigned long addr)
+{
+ return addr + IRONGATE_MEM;
+}
+
+/* Anything at or above the memory window base counts as I/O space. */
+__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
+{
+ return addr >= IRONGATE_MEM;
+}
+
+#undef vucp
+#undef vusp
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus irongate_virt_to_bus
+#define bus_to_virt irongate_bus_to_virt
+
+#define __inb irongate_inb
+#define __inw irongate_inw
+#define __inl irongate_inl
+#define __outb irongate_outb
+#define __outw irongate_outw
+#define __outl irongate_outl
+#define __readb irongate_readb
+#define __readw irongate_readw
+#define __writeb irongate_writeb
+#define __writew irongate_writew
+#define __readl irongate_readl
+#define __readq irongate_readq
+#define __writel irongate_writel
+#define __writeq irongate_writeq
+#define __ioremap irongate_ioremap
+#define __is_ioaddr irongate_is_ioaddr
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+#define outb(v, port) __outb((v),(port))
+#define outw(v, port) __outw((v),(port))
+#define outl(v, port) __outl((v),(port))
+
+#define __raw_readb(a) __readb((unsigned long)(a))
+#define __raw_readw(a) __readw((unsigned long)(a))
+#define __raw_readl(a) __readl((unsigned long)(a))
+#define __raw_readq(a) __readq((unsigned long)(a))
+#define __raw_writeb(v,a) __writeb((v),(unsigned long)(a))
+#define __raw_writew(v,a) __writew((v),(unsigned long)(a))
+#define __raw_writel(v,a) __writel((v),(unsigned long)(a))
+#define __raw_writeq(v,a) __writeq((v),(unsigned long)(a))
+
+#endif /* __WANT_IO_DEF */
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_IRONGATE__H__ */
#define TSUNAMI_IO_BIAS TSUNAMI_IO(0)
#define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0)
-/* The IO address space is larger than 0xffff */
-#define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0))
-#define TSUNAMI_MEM_SPACE (_TSUNAMI_IACK_SC(0) - TSUNAMI_MEM(0))
/*
* Data structure for handling TSUNAMI machine checks:
*/
#define ALPHA_XL_MAX_DMA_ADDRESS (IDENT_ADDR+0x3000000UL)
#define ALPHA_RUFFIAN_MAX_DMA_ADDRESS (IDENT_ADDR+0x1000000UL)
+#define ALPHA_NAUTILUS_MAX_DMA_ADDRESS (IDENT_ADDR+0x1000000UL)
#define ALPHA_MAX_DMA_ADDRESS (~0UL)
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_DMA_ADDRESS ALPHA_XL_MAX_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_RUFFIAN)
# define MAX_DMA_ADDRESS ALPHA_RUFFIAN_MAX_DMA_ADDRESS
+# elif defined(CONFIG_ALPHA_NAUTILUS)
+# define MAX_DMA_ADDRESS ALPHA_NAUTILUS_MAX_DMA_ADDRESS
# else
# define MAX_DMA_ADDRESS ALPHA_MAX_DMA_ADDRESS
# endif
#ifdef CONFIG_ALPHA_GENERIC
# define CROSS_64KB(a,s) (__CROSS_64KB(a,s) && ~alpha_mv.max_dma_address)
#else
-# if defined(CONFIG_ALPHA_XL) || defined(CONFIG_ALPHA_RUFFIAN)
+# if defined(CONFIG_ALPHA_XL) || defined(CONFIG_ALPHA_RUFFIAN) || defined(CONFIG_ALPHA_NAUTILUS)
# define CROSS_64KB(a,s) __CROSS_64KB(a,s)
# else
# define CROSS_64KB(a,s) (0)
#define ST_DEC_TSUNAMI 34 /* Tsunami systype */
#define ST_DEC_WILDFIRE 35 /* Wildfire systype */
#define ST_DEC_CUSCO 36 /* CUSCO systype */
+#define ST_DEC_EIGER 37 /* Eiger systype */
/* UNOFFICIAL!!! */
#define ST_UNOFFICIAL_BIAS 100
#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */
+/* Alpha Processor, Inc. systems */
+#define ST_API_BIAS 200 /* Offset for API systems */
+#define ST_API_NAUTILUS (ST_API_BIAS + 1) /* Nautilus systype */
+
struct pcb_struct {
unsigned long ksp;
unsigned long usp;
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# include <asm/core_irongate.h>
+#elif defined(CONFIG_ALPHA_JENSEN)
+# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
+#elif defined(CONFIG_ALPHA_POLARIS)
+# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_PYXIS)
# include <asm/core_pyxis.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
-#elif defined(CONFIG_ALPHA_JENSEN)
-# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_POLARIS)
-# include <asm/core_polaris.h>
#else
#error "What system is this?"
#endif
# define outl_p outl
#endif
+#define IO_SPACE_LIMIT 0xffff
+
#else
/* Userspace declarations. */
many places throughout the kernel to size static arrays. That's ok,
we'll use alpha_mv.nr_irqs when we want the real thing. */
-# define NR_IRQS 64
+# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_CABRIOLET) || \
defined(CONFIG_ALPHA_EB66P) || \
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
- defined(CONFIG_ALPHA_RAWHIDE)
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 64
-#elif defined(CONFIG_ALPHA_TAKARA)
-# define NR_IRQS 20
+#elif defined(CONFIG_ALPHA_RAWHIDE) || \
+ defined(CONFIG_ALPHA_TAKARA)
+# define NR_IRQS 128
#else /* everyone else */
# define NR_IRQS 16
#endif
-/*
- * PROBE_MASK is the bitset of irqs that we consider for autoprobing.
- */
-
-/* The normal mask includes all the IRQs except the timer. */
-#define _PROBE_MASK(nr_irqs) (((1UL << (nr_irqs & 63)) - 1) & ~1UL)
-
-/* Mask out unused timer irq 0 and RTC irq 8. */
-#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)
-
-/* Mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade. */
-#define ALCOR_PROBE_MASK (_PROBE_MASK(48) & ~0xfff000000001UL)
-
-/* Leave timer irq 0 in the mask. */
-#define RUFFIAN_PROBE_MASK (_PROBE_MASK(48) | 1UL)
-
-#if defined(CONFIG_ALPHA_GENERIC)
-# define PROBE_MASK alpha_mv.irq_probe_mask
-#elif defined(CONFIG_ALPHA_P2K)
-# define PROBE_MASK P2K_PROBE_MASK
-#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT)
-# define PROBE_MASK ALCOR_PROBE_MASK
-#elif defined(CONFIG_ALPHA_RUFFIAN)
-# define PROBE_MASK RUFFIAN_PROBE_MASK
-#else
-# define PROBE_MASK _PROBE_MASK(NR_IRQS)
-#endif
-
-
static __inline__ int irq_cannonicalize(int irq)
{
/*
#define forget_segments() do { } while (0)
unsigned long get_wchan(struct task_struct *p);
-/*
-* See arch/alpha/kernel/ptrace.c for details.
-*/
-#define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
- + (long)&((struct pt_regs *)0)->reg)
+
+/* See arch/alpha/kernel/ptrace.c for details. */
+#define PT_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
+ + (long)&((struct pt_regs *)0)->reg)
+
+#define SW_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
+ - sizeof(struct switch_stack) \
+ + (long)&((struct switch_stack *)0)->reg)
+
#define KSTK_EIP(tsk) \
- (*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
+ (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))
+
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
/* NOTE: The task struct and the stack go together! */
unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
};
+/* Generic EV6 (21264) machine-check logout frame: a fixed header
+   followed by the raw internal processor registers saved by PALcode.
+   The per-chipset frames (e.g. el_IRONGATE_sysdata_mcheck added in
+   this same patch) share this layout. */
+struct el_common_EV6_mcheck {
+ unsigned int FrameSize; /* Bytes, including this field */
+ unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
+ unsigned int CpuOffset; /* Offset to CPU-specific info */
+ unsigned int SystemOffset; /* Offset to system-specific info */
+ unsigned int MCHK_Code;
+ unsigned int MCHK_Frame_Rev;
+ unsigned long I_STAT; /* EV6 Internal Processor Registers */
+ unsigned long DC_STAT; /* (See the 21264 Spec) */
+ unsigned long C_ADDR;
+ unsigned long DC1_SYNDROME;
+ unsigned long DC0_SYNDROME;
+ unsigned long C_STAT;
+ unsigned long C_STS;
+ unsigned long RESERVED0;
+ unsigned long EXC_ADDR;
+ unsigned long IER_CM;
+ unsigned long ISUM;
+ unsigned long MM_STAT;
+ unsigned long PAL_BASE;
+ unsigned long I_CTL;
+ unsigned long PCTX;
+};
+
extern void halt(void) __attribute__((noreturn));
#define prepare_to_switch() do { } while(0)
#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
+#define VT_BUF_HAVE_MEMCPYW
#define VT_BUF_HAVE_MEMCPYF
extern inline void scr_writew(u16 val, u16 *addr)
memsetw(s, c, count);
}
-extern inline void scr_memcpyw_from(u16 *d, const u16 *s, unsigned int count)
-{
- memcpy_fromio(d, s, count);
-}
-
-extern inline void scr_memcpyw_to(u16 *d, const u16 *s, unsigned int count)
-{
- memcpy_toio(d, s, count);
-}
+/* Do not trust that the usage will be correct; analyze the arguments. */
+extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+#define scr_memcpyw_from scr_memcpyw
+#define scr_memcpyw_to scr_memcpyw
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended
#define memset_io(addr,c,len) _memset_io((unsigned long)(addr),(c),(len))
#endif
+#define IO_SPACE_LIMIT 0xffff
+
/*
* This isn't especially architecture dependent so it seems like it
* might as well go here as anywhere.
__OUTS(w)
__OUTS(l)
+#define IO_SPACE_LIMIT 0xffff
+
#ifdef __KERNEL__
#include <linux/vmalloc.h>
return retval;
}
-
-
-
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
#include <asm/desc.h>
#include <asm/atomic.h>
+#include <asm/pgalloc.h>
/*
* possibly do the LDT unload here?
#define destroy_context(mm) do { } while(0)
#define init_new_context(tsk,mm) do { } while (0)
+#ifdef __SMP__
+extern unsigned int cpu_tlbbad[NR_CPUS];
+#endif
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
-
if (prev != next) {
/*
* Re-load LDT if necessary
asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
clear_bit(cpu, &prev->cpu_vm_mask);
}
+#ifdef __SMP__
+ else {
+ if(cpu_tlbbad[cpu])
+ local_flush_tlb();
+ }
+ cpu_tlbbad[cpu] = 0;
+#endif
set_bit(cpu, &next->cpu_vm_mask);
}
flush_tlb_mm(mm);
}
+extern volatile unsigned long smp_invalidate_needed;
+extern unsigned int cpu_tlbbad[NR_CPUS];
+
+/* Service a cross-CPU TLB-flush request on this CPU: acknowledge by
+   clearing our bit in smp_invalidate_needed, then either flush now
+   (we have an mm, so stale entries matter immediately) or defer by
+   marking cpu_tlbbad[] -- switch_mm() will flush before this CPU next
+   runs user code (see the pgalloc.h hunk above). */
+static inline void do_flush_tlb_local(void)
+{
+ unsigned long cpu = smp_processor_id();
+ struct mm_struct *mm = current->mm;
+
+ clear_bit(cpu, &smp_invalidate_needed);
+ if (mm) {
+ /* Re-mark ourselves as using this mm before flushing. */
+ set_bit(cpu, &mm->cpu_vm_mask);
+ local_flush_tlb();
+ } else {
+ /* Lazy-TLB (kernel thread): just remember the TLB is stale. */
+ cpu_tlbbad[cpu] = 1;
+ }
+}
+
#endif
#endif /* _I386_PGALLOC_H */
#define outb(x,addr) ((void) writeb(x,addr))
#define outb_p(x,addr) outb(x,addr)
+#define IO_SPACE_LIMIT 0xffff
+
/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING 0
__inslc((port),(addr),(count)) : \
__insl((port),(addr),(count)))
+#define IO_SPACE_LIMIT 0xffff
+
/*
* The caches on some architectures aren't dma-coherent and have need to
* handle this in software. There are three types of operations that
extern void _insl_ns(volatile unsigned long *port, void *buf, int nl);
extern void _outsl_ns(volatile unsigned long *port, const void *buf, int nl);
+#define IO_SPACE_LIMIT 0xffff
+
#define memset_io(a,b,c) memset((a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(b),(c))
#define memcpy_toio(a,b,c) memcpy((a),(b),(c))
#ifdef __KERNEL__
+#define IO_SPACE_LIMIT 0xffff
+
#include <asm/addrspace.h>
/*
#define inb_p inb
#define outb_p outb
+#define IO_SPACE_LIMIT 0xffffffff
+
extern void sun4c_mapioaddr(unsigned long, unsigned long, int bus_type, int rdonly);
extern void srmmu_mapioaddr(unsigned long, unsigned long, int bus_type, int rdonly);
#define writew(__w, __addr) (_writew((__w), (unsigned long)(__addr)))
#define writel(__l, __addr) (_writel((__l), (unsigned long)(__addr)))
+#define IO_SPACE_LIMIT 0xffffffff
+
/*
* Memcpy to/from I/O space is just a regular memory operation on
* Ultra as well.
extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
#define alloc_bootmem(x) \
__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_low(x) \
+ __alloc_bootmem((x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
-struct pci_dev;
extern int allocate_resource(struct resource *root, struct resource *new,
unsigned long size,
unsigned long min, unsigned long max,
- unsigned long align, struct pci_dev *);
+ unsigned long align,
+ void (*alignf)(void *, struct resource *, unsigned long),
+ void *alignf_data);
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
extern void autoirq_setup(int waittime);
extern int autoirq_report(int waittime);
-extern unsigned long resource_fixup(struct pci_dev *, struct resource *,
- unsigned long, unsigned long);
-
#endif /* _LINUX_IOPORT_H */
#ifdef __KERNEL__
-/* special shmsegs[id] values */
-#define IPC_UNUSED ((void *) -1)
-#define IPC_NOID ((void *) -2) /* being allocated/destroyed */
-
#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
#endif /* __KERNEL__ */
unsigned short msgseg;
};
-#define MSGMNI 128 /* <= IPCMNI */ /* max # of msg queue identifiers */
+#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */
#define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */
#define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */
*
* PCI defines and function prototypes
* Copyright 1994, Drew Eckhardt
- * Copyright 1997--1999 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright 1997--1999 Martin Mares <mj@suse.cz>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
int pcibios_enable_device(struct pci_dev *);
char *pcibios_setup (char *str);
+void pcibios_align_resource(void *, struct resource *, unsigned long);
void pcibios_update_resource(struct pci_dev *, struct resource *,
struct resource *, int);
void pcibios_update_irq(struct pci_dev *, int irq);
KERN_RTSIGNR=32, /* Number of rt sigs queued */
KERN_RTSIGMAX=33, /* Max queuable */
- KERN_SHMMAX=34, /* int: Maximum shared memory segment */
+ KERN_SHMMAX=34, /* long: Maximum shared memory segment */
KERN_MSGMAX=35, /* int: Maximum size of a messege */
KERN_MSGMNB=36, /* int: Maximum message queue size */
KERN_MSGPOOL=37, /* int: Maximum system message pool size */
KERN_RANDOM=40, /* Random driver */
KERN_SHMALL=41, /* int: Maximum size of shared memory */
KERN_MSGMNI=42, /* int: msg queue identifiers */
- KERN_SEM=43, /* int: sysv semaphore limits */
- KERN_SPARC_STOP_A=44 /* int: Sparc Stop-A enable */
+ KERN_SEM=43, /* struct: sysv semaphore limits */
+ KERN_SPARC_STOP_A=44, /* int: Sparc Stop-A enable */
+ KERN_SHMMNI=45 /* int: shm array identifiers */
};
smanew = sem_lock(semid);
if(smanew==NULL)
return -EIDRM;
- if(smanew != sma)
- goto out_EIDRM;
- if(sem_checkid(sma,semid))
- goto out_EIDRM;
- if(sma->sem_nsems != nsems) {
-out_EIDRM:
+ if(smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
sem_unlock(semid);
return -EIDRM;
}
struct semaphore sem;
};
-static int findkey (key_t key);
+static struct ipc_ids shm_ids;
+
+#define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id))
+#define shm_unlock(id) ipc_unlock(&shm_ids,id)
+#define shm_lockall() ipc_lockall(&shm_ids)
+#define shm_unlockall() ipc_unlockall(&shm_ids)
+#define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id))
+#define shm_rmid(id) ((struct shmid_kernel*)ipc_rmid(&shm_ids,id))
+#define shm_checkid(s, id) \
+ ipc_checkid(&shm_ids,&s->u.shm_perm,id)
+#define shm_buildid(id, seq) \
+ ipc_buildid(&shm_ids, id, seq)
+
static int newseg (key_t key, int shmflg, size_t size);
static int shm_map (struct vm_area_struct *shmd);
-static void killseg (int id);
+static void killseg (int shmid);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static struct page * shm_nopage(struct vm_area_struct *, unsigned long, int);
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif
-unsigned int shm_prm[3] = {SHMMAX, SHMALL, SHMMNI};
+size_t shm_ctlmax = SHMMAX;
+int shm_ctlall = SHMALL;
+int shm_ctlmni = SHMMNI;
static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
-static int max_shmid = -1; /* every used id is <= max_shmid */
-static DECLARE_WAIT_QUEUE_HEAD(shm_wait); /* calling findkey() may need to wait */
-static struct shmid_kernel **shm_segs = NULL;
-static unsigned int num_segs = 0;
-static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */
/* locks order:
- shm_lock -> pagecache_lock (end of shm_swap)
- shp->sem -> other spinlocks (shm_nopage) */
-spinlock_t shm_lock = SPIN_LOCK_UNLOCKED;
+ pagecache_lock
+ shm_lock()/shm_lockall()
+ kernel lock
+ shp->sem
+ sem_ids.sem
+ mmap_sem
+
+ SMP assumptions:
+ - swap_free() never sleeps
+ - add_to_swap_cache() never sleeps
+ - add_to_swap_cache() doesn't acquire the big kernel lock.
+ - shm_unuse() is called with the kernel lock acquired.
+ */
/* some statistics */
static ulong swap_attempts = 0;
void __init shm_init (void)
{
+ ipc_init_ids(&shm_ids, shm_ctlmni);
#ifdef CONFIG_PROC_FS
create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
kfree (dir);
}
-static int shm_expand (unsigned int size)
+static int shm_revalidate(struct shmid_kernel* shp, int shmid, int pagecount, int flg)
{
- int id;
- struct shmid_kernel ** new_array;
-
- spin_unlock(&shm_lock);
- new_array = kmalloc (size * sizeof(struct shmid_kernel *), GFP_KERNEL);
- spin_lock(&shm_lock);
-
- if (!new_array)
- return -ENOMEM;
-
- if (size <= num_segs){ /* We check this after kmalloc so
- nobody changes num_segs afterwards */
- /*
- * We never shrink the segment. If we shrink we have to
- * check for stale handles in newseg
- */
- kfree (new_array);
- return 0;
+ struct shmid_kernel* new;
+ new = shm_lock(shmid);
+ if(new==NULL) {
+ return -EIDRM;
}
-
- if (num_segs) {
- memcpy (new_array, shm_segs,
- size*sizeof(struct shmid_kernel *));
- kfree (shm_segs);
+ if(new!=shp || shm_checkid(shp, shmid) || shp->shm_npages != pagecount) {
+ shm_unlock(shmid);
+ return -EIDRM;
}
- for (id = num_segs; id < size; id++)
- new_array[id] = (void *) IPC_UNUSED;
-
- shm_segs = new_array;
- num_segs = size;
- return 0;
-}
-
-static int findkey (key_t key)
-{
- int id;
- struct shmid_kernel *shp;
-
- if (!num_segs)
- return -1;
-
- for (id = 0; id <= max_shmid; id++) {
- if ((shp = shm_segs[id]) == IPC_NOID) {
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&shm_wait, &wait);
- for(;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if ((shp = shm_segs[id]) != IPC_NOID)
- break;
- spin_unlock(&shm_lock);
- schedule();
- spin_lock(&shm_lock);
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&shm_wait, &wait);
- }
- if (shp != IPC_UNUSED &&
- key == shp->u.shm_perm.key)
- return id;
+ if (ipcperms(&shp->u.shm_perm, flg)) {
+ shm_unlock(shmid);
+ return -EACCES;
}
- return -1;
+ return 0;
}
-/*
- * allocate new shmid_kernel and pgtable. protected by shm_segs[id] = NOID.
- * This has to be called with the shm_lock held
- */
static int newseg (key_t key, int shmflg, size_t size)
{
struct shmid_kernel *shp;
int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
- int id, err;
- unsigned int shmall, shmmni;
-
- shmall = shm_prm[1];
- shmmni = shm_prm[2];
- if (shmmni > IPCMNI) {
- printk ("shmmni reset to max of %u\n", IPCMNI);
- shmmni = shm_prm[2] = IPCMNI;
- }
+ int id;
- if (shmmni < used_segs)
- return -ENOSPC;
- if ((err = shm_expand (shmmni)))
- return err;
if (size < SHMMIN)
return -EINVAL;
- if (shm_tot + numpages >= shmall)
+
+ if (size > shm_ctlmax)
+ return -EINVAL;
+ if (shm_tot + numpages >= shm_ctlall)
return -ENOSPC;
- for (id = 0; id < num_segs; id++)
- if (shm_segs[id] == IPC_UNUSED) {
- shm_segs[id] = (struct shmid_kernel *) IPC_NOID;
- goto found;
- }
- return -ENOSPC;
-found:
- spin_unlock(&shm_lock);
shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_KERNEL);
- if (!shp) {
- spin_lock(&shm_lock);
- shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
- wake_up (&shm_wait);
+ if (!shp)
return -ENOMEM;
- }
+
shp->shm_dir = shm_alloc (numpages);
if (!shp->shm_dir) {
kfree(shp);
- spin_lock(&shm_lock);
- shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
- wake_up (&shm_wait);
return -ENOMEM;
}
-
+ id = ipc_addid(&shm_ids, &shp->u.shm_perm, shm_ctlmni);
+ if(id == -1) {
+ shm_free(shp->shm_dir,numpages);
+ kfree(shp);
+ return -ENOSPC;
+ }
shp->u.shm_perm.key = key;
shp->u.shm_perm.mode = (shmflg & S_IRWXUGO);
- shp->u.shm_perm.cuid = shp->u.shm_perm.uid = current->euid;
- shp->u.shm_perm.cgid = shp->u.shm_perm.gid = current->egid;
shp->u.shm_segsz = size;
shp->u.shm_cpid = current->pid;
shp->attaches = NULL;
shp->u.shm_atime = shp->u.shm_dtime = 0;
shp->u.shm_ctime = CURRENT_TIME;
shp->shm_npages = numpages;
- shp->id = id;
+ shp->id = shm_buildid(id,shp->u.shm_perm.seq);
init_MUTEX(&shp->sem);
- spin_lock(&shm_lock);
-
shm_tot += numpages;
- shp->u.shm_perm.seq = shm_seq;
-
- if (id > max_shmid)
- max_shmid = id;
- shm_segs[id] = shp;
- used_segs++;
- wake_up (&shm_wait);
- return (unsigned int) shp->u.shm_perm.seq * IPCMNI + id;
+ shm_unlock(id);
+
+ return shm_buildid(id,shp->u.shm_perm.seq);
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
struct shmid_kernel *shp;
int err, id = 0;
- size_t shmmax;
-
- shmmax = shm_prm[0];
- if (size > shmmax)
- return -EINVAL;
- down(¤t->mm->mmap_sem);
- spin_lock(&shm_lock);
+ down(&shm_ids.sem);
if (key == IPC_PRIVATE) {
err = newseg(key, shmflg, size);
- } else if ((id = findkey (key)) == -1) {
+ } else if ((id = ipc_findkey(&shm_ids,key)) == -1) {
if (!(shmflg & IPC_CREAT))
err = -ENOENT;
else
} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
err = -EEXIST;
} else {
- shp = shm_segs[id];
- if (shp->u.shm_perm.mode & SHM_DEST)
- err = -EIDRM;
- else if (size > shp->u.shm_segsz)
- err = -EINVAL;
- else if (ipcperms (&shp->u.shm_perm, shmflg))
+ shp = shm_lock(id);
+ if(shp==NULL)
+ BUG();
+ if (ipcperms(&shp->u.shm_perm, shmflg))
err = -EACCES;
else
- err = (int) shp->u.shm_perm.seq * IPCMNI + id;
+ err = shm_buildid(id, shp->u.shm_perm.seq);
+ shm_unlock(id);
}
- spin_unlock(&shm_lock);
- up(¤t->mm->mmap_sem);
+ up(&shm_ids.sem);
return err;
}
* Only called after testing nattch and SHM_DEST.
* Here pages, pgtable and shmid_kernel are freed.
*/
-static void killseg (int id)
+static void killseg (int shmid)
{
struct shmid_kernel *shp;
int i, numpages;
int rss, swp;
- shp = shm_segs[id];
- if (shp == IPC_NOID || shp == IPC_UNUSED)
+ down(&shm_ids.sem);
+ shp = shm_lock(shmid);
+ if(shp==NULL) {
+out_up:
+ up(&shm_ids.sem);
+ return;
+ }
+ if(shm_checkid(shp,shmid) || shp->u.shm_nattch > 0 ||
+ !(shp->u.shm_perm.mode & SHM_DEST)) {
+ shm_unlock(shmid);
+ goto out_up;
+ }
+ shp = shm_rmid(shmid);
+ if(shp==NULL)
BUG();
- shp->u.shm_perm.seq++; /* for shmat */
- shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/IPCMNI); /* increment, but avoid overflow */
- shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
- used_segs--;
- if (id == max_shmid)
- while (max_shmid-- > 0 && (shm_segs[max_shmid] == IPC_UNUSED));
if (!shp->shm_dir)
BUG();
- spin_unlock(&shm_lock);
+ shm_unlock(shmid);
+ up(&shm_ids.sem);
+
numpages = shp->shm_npages;
for (i = 0, rss = 0, swp = 0; i < numpages ; i++) {
pte_t pte;
}
shm_free (shp->shm_dir, numpages);
kfree(shp);
- spin_lock(&shm_lock);
+ shm_lockall();
shm_rss -= rss;
shm_swp -= swp;
shm_tot -= numpages;
+ shm_unlockall();
return;
}
{
struct shmid_ds tbuf;
struct shmid_kernel *shp;
- struct ipc_perm *ipcp;
- int id, err = -EINVAL;
+ int err;
if (cmd < 0 || shmid < 0)
- goto out_unlocked;
- if (cmd == IPC_SET) {
- err = -EFAULT;
- if(copy_from_user (&tbuf, buf, sizeof (*buf)))
- goto out_unlocked;
- }
- spin_lock(&shm_lock);
+ return -EINVAL;
switch (cmd) { /* replace with proc interface ? */
case IPC_INFO:
{
struct shminfo shminfo;
- spin_unlock(&shm_lock);
- err = -EFAULT;
- if (!buf)
- goto out;
- shminfo.shmmni = shminfo.shmseg = shm_prm[2];
- shminfo.shmmax = shm_prm[0];
- shminfo.shmall = shm_prm[1];
+ memset(&shminfo,0,sizeof(shminfo));
+ shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
+ shminfo.shmmax = shm_ctlmax;
+ shminfo.shmall = shm_ctlall;
shminfo.shmmin = SHMMIN;
if(copy_to_user (buf, &shminfo, sizeof(struct shminfo)))
- goto out_unlocked;
- spin_lock(&shm_lock);
- err = max_shmid < 0 ? 0 : max_shmid;
- goto out;
+ return -EFAULT;
+ /* reading an integer is always atomic */
+ err= shm_ids.max_id;
+ if(err<0)
+ err = 0;
+ return err;
}
case SHM_INFO:
{
struct shm_info shm_info;
- err = -EFAULT;
+
+ memset(&shm_info,0,sizeof(shm_info));
+ shm_lockall();
shm_info.used_ids = used_segs;
shm_info.shm_rss = shm_rss;
shm_info.shm_tot = shm_tot;
shm_info.shm_swp = shm_swp;
shm_info.swap_attempts = swap_attempts;
shm_info.swap_successes = swap_successes;
- spin_unlock(&shm_lock);
+ err = shm_ids.max_id;
+ shm_unlockall();
if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
- goto out_unlocked;
- spin_lock(&shm_lock);
- err = max_shmid < 0 ? 0 : max_shmid;
- goto out;
+ return -EFAULT;
+
+ return err < 0 ? 0 : err;
}
case SHM_STAT:
- err = -EINVAL;
- if (shmid > max_shmid)
- goto out;
- shp = shm_segs[shmid];
- if (shp == IPC_UNUSED || shp == IPC_NOID)
- goto out;
+ case IPC_STAT:
+ {
+ struct shmid_ds tmp;
+ int result;
+ shp = shm_lock(shmid);
+ if(shp==NULL)
+ return -EINVAL;
+ if(cmd==SHM_STAT) {
+ err = -EINVAL;
+ if (shmid > shm_ids.max_id)
+ goto out_unlock;
+ result = shm_buildid(shmid, shp->u.shm_perm.seq);
+ } else {
+ err = -EIDRM;
+ if(shm_checkid(shp,shmid))
+ goto out_unlock;
+ result = 0;
+ }
+ err=-EACCES;
if (ipcperms (&shp->u.shm_perm, S_IRUGO))
- goto out;
- id = (unsigned int) shp->u.shm_perm.seq * IPCMNI + shmid;
- err = -EFAULT;
- spin_unlock(&shm_lock);
- if(copy_to_user (buf, &shp->u, sizeof(*buf)))
- goto out_unlocked;
- spin_lock(&shm_lock);
- err = id;
- goto out;
+ goto out_unlock;
+ memcpy(&tmp,&shp->u,sizeof(tmp));
+ shm_unlock(shmid);
+ if(copy_to_user (buf, &tmp, sizeof(tmp)))
+ return -EFAULT;
+ return result;
}
-
- err = -EINVAL;
- if ((id = (unsigned int) shmid % IPCMNI) > max_shmid)
- goto out;
- if ((shp = shm_segs[id]) == IPC_UNUSED || shp == IPC_NOID)
- goto out;
- err = -EIDRM;
- if (shp->u.shm_perm.seq != (unsigned int) shmid / IPCMNI)
- goto out;
- ipcp = &shp->u.shm_perm;
-
- switch (cmd) {
- case SHM_UNLOCK:
- err = -EPERM;
- if (!capable(CAP_IPC_LOCK))
- goto out;
- err = -EINVAL;
- if (!(ipcp->mode & SHM_LOCKED))
- goto out;
- ipcp->mode &= ~SHM_LOCKED;
- break;
case SHM_LOCK:
+ case SHM_UNLOCK:
+ {
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
- err = -EPERM;
+ struct ipc_perm *ipcp;
if (!capable(CAP_IPC_LOCK))
- goto out;
- err = -EINVAL;
- if (ipcp->mode & SHM_LOCKED)
- goto out;
- ipcp->mode |= SHM_LOCKED;
- break;
- case IPC_STAT:
- err = -EACCES;
- if (ipcperms (ipcp, S_IRUGO))
- goto out;
- err = -EFAULT;
- spin_unlock(&shm_lock);
- if(copy_to_user (buf, &shp->u, sizeof(shp->u)))
- goto out_unlocked;
- spin_lock(&shm_lock);
+ return -EPERM;
+
+ shp = shm_lock(shmid);
+ if(shp==NULL)
+ return -EINVAL;
+ err=-EIDRM;
+ if(shm_checkid(shp,shmid))
+ goto out_unlock;
+ ipcp = &shp->u.shm_perm;
+ if(cmd==SHM_LOCK) {
+ if (!(ipcp->mode & SHM_LOCKED)) {
+ ipcp->mode |= SHM_LOCKED;
+ err = 0;
+ }
+ } else {
+ if (ipcp->mode & SHM_LOCKED) {
+ ipcp->mode &= ~SHM_LOCKED;
+ err = 0;
+ }
+ }
+ shm_unlock(shmid);
+ return err;
+ }
+ case IPC_RMID:
+ case IPC_SET:
break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cmd == IPC_SET) {
+ if(copy_from_user (&tbuf, buf, sizeof (*buf)))
+ return -EFAULT;
+ }
+ down(&shm_ids.sem);
+ shp = shm_lock(shmid);
+ err=-EINVAL;
+ if(shp==NULL)
+ goto out_up;
+ err=-EIDRM;
+ if(shm_checkid(shp,shmid))
+ goto out_unlock_up;
+ err=-EPERM;
+ if (current->euid != shp->u.shm_perm.uid &&
+ current->euid != shp->u.shm_perm.cuid &&
+ !capable(CAP_SYS_ADMIN)) {
+ goto out_unlock_up;
+ }
+
+ switch (cmd) {
case IPC_SET:
- if (current->euid == shp->u.shm_perm.uid ||
- current->euid == shp->u.shm_perm.cuid ||
- capable(CAP_SYS_ADMIN)) {
- ipcp->uid = tbuf.shm_perm.uid;
- ipcp->gid = tbuf.shm_perm.gid;
- ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
- | (tbuf.shm_perm.mode & S_IRWXUGO);
- shp->u.shm_ctime = CURRENT_TIME;
- break;
- }
- err = -EPERM;
- goto out;
+ shp->u.shm_perm.uid = tbuf.shm_perm.uid;
+ shp->u.shm_perm.gid = tbuf.shm_perm.gid;
+ shp->u.shm_perm.mode = (shp->u.shm_perm.mode & ~S_IRWXUGO)
+ | (tbuf.shm_perm.mode & S_IRWXUGO);
+ shp->u.shm_ctime = CURRENT_TIME;
+ break;
case IPC_RMID:
- if (current->euid == shp->u.shm_perm.uid ||
- current->euid == shp->u.shm_perm.cuid ||
- capable(CAP_SYS_ADMIN)) {
- shp->u.shm_perm.mode |= SHM_DEST;
- if (shp->u.shm_nattch <= 0)
- killseg (id);
- break;
+ shp->u.shm_perm.mode |= SHM_DEST;
+ if (shp->u.shm_nattch <= 0) {
+ shm_unlock(shmid);
+ up(&shm_ids.sem);
+ killseg (shmid);
+ return 0;
}
- err = -EPERM;
- goto out;
- default:
- err = -EINVAL;
- goto out;
}
err = 0;
-out:
- spin_unlock(&shm_lock);
-out_unlocked:
+out_unlock_up:
+ shm_unlock(shmid);
+out_up:
+ up(&shm_ids.sem);
+ return err;
+out_unlock:
+ shm_unlock(shmid);
return err;
}
{
struct shmid_kernel *shp;
struct vm_area_struct *shmd;
- int err = -EINVAL;
- unsigned int id;
+ int err;
unsigned long addr;
unsigned long len;
+ short flg = shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO;
+
- down(¤t->mm->mmap_sem);
- spin_lock(&shm_lock);
if (shmid < 0)
- goto out;
+ return -EINVAL;
- shp = shm_segs[id = (unsigned int) shmid % IPCMNI];
- if (shp == IPC_UNUSED || shp == IPC_NOID)
- goto out;
+ down(¤t->mm->mmap_sem);
+ err = -EINVAL;
+ shp = shm_lock(shmid);
+ if(shp == NULL)
+ goto out_unlock_up;
+
+ err = -EACCES;
+ if (ipcperms(&shp->u.shm_perm, flg))
+ goto out_unlock_up;
+
+ err = -EIDRM;
+ if (shm_checkid(shp,shmid))
+ goto out_unlock_up;
if (!(addr = (ulong) shmaddr)) {
if (shmflg & SHM_REMAP)
- goto out;
+ goto out_unlock_up;
err = -ENOMEM;
addr = 0;
again:
if (!(addr = get_unmapped_area(addr, (unsigned long)shp->u.shm_segsz)))
- goto out;
+ goto out_unlock_up;
if(addr & (SHMLBA - 1)) {
addr = (addr + (SHMLBA - 1)) & ~(SHMLBA - 1);
goto again;
}
} else if (addr & (SHMLBA-1)) {
+ err=-EINVAL;
if (shmflg & SHM_RND)
addr &= ~(SHMLBA-1); /* round down */
else
- goto out;
+ goto out_unlock_up;
}
/*
* Check if addr exceeds TASK_SIZE (from do_mmap)
len = PAGE_SIZE*shp->shm_npages;
err = -EINVAL;
if (addr >= TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE - len)
- goto out;
+ goto out_unlock_up;
/*
* If shm segment goes below stack, make sure there is some
* space left for the stack to grow (presently 4 pages).
*/
if (addr < current->mm->start_stack &&
addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
- goto out;
+ goto out_unlock_up;
if (!(shmflg & SHM_REMAP) && find_vma_intersection(current->mm, addr, addr + (unsigned long)shp->u.shm_segsz))
- goto out;
+ goto out_unlock_up;
- err = -EACCES;
- if (ipcperms(&shp->u.shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
- goto out;
- err = -EIDRM;
- if (shp->u.shm_perm.seq != (unsigned int) shmid / IPCMNI)
- goto out;
-
- spin_unlock(&shm_lock);
+ shm_unlock(shmid);
err = -ENOMEM;
shmd = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- spin_lock(&shm_lock);
- if (!shmd)
- goto out;
- if ((shp != shm_segs[id]) || (shp->u.shm_perm.seq != (unsigned int) shmid / IPCMNI)) {
+ err = shm_revalidate(shp, shmid, len/PAGE_SIZE,flg);
+ if(err) {
kmem_cache_free(vm_area_cachep, shmd);
- err = -EIDRM;
- goto out;
+ goto out_up;
}
- shmd->vm_private_data = shm_segs[id];
+ shmd->vm_private_data = shp;
shmd->vm_start = addr;
shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
shmd->vm_mm = current->mm;
shmd->vm_ops = &shm_vm_ops;
shp->u.shm_nattch++; /* prevent destruction */
- spin_unlock(&shm_lock);
+ shm_unlock(shp->id);
err = shm_map (shmd);
- spin_lock(&shm_lock);
+ shm_lock(shmid); /* cannot fail */
if (err)
goto failed_shm_map;
*raddr = addr;
err = 0;
-out:
- spin_unlock(&shm_lock);
+out_unlock_up:
+ shm_unlock(shmid);
+out_up:
up(¤t->mm->mmap_sem);
return err;
failed_shm_map:
- if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
- killseg(id);
- spin_unlock(&shm_lock);
- up(¤t->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, shmd);
- return err;
+ {
+ int delete = 0;
+ if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
+ delete = 1;
+ shm_unlock(shmid);
+ up(¤t->mm->mmap_sem);
+ kmem_cache_free(vm_area_cachep, shmd);
+ if(delete)
+ killseg(shmid);
+ return err;
+ }
}
/* This is called by fork, once for every shm attach. */
{
struct shmid_kernel *shp;
- spin_lock(&shm_lock);
shp = (struct shmid_kernel *) shmd->vm_private_data;
+ if(shp != shm_lock(shp->id))
+ BUG();
insert_attach(shp,shmd); /* insert shmd into shp->attaches */
shp->u.shm_nattch++;
shp->u.shm_atime = CURRENT_TIME;
shp->u.shm_lpid = current->pid;
- spin_unlock(&shm_lock);
+ shm_unlock(shp->id);
}
/*
static void shm_close (struct vm_area_struct *shmd)
{
struct shmid_kernel *shp;
+ int id;
- spin_lock(&shm_lock);
/* remove from the list of attaches of the shm segment */
shp = (struct shmid_kernel *) shmd->vm_private_data;
+ if(shp != shm_lock(shp->id))
+ BUG();
remove_attach(shp,shmd); /* remove from shp->attaches */
shp->u.shm_lpid = current->pid;
shp->u.shm_dtime = CURRENT_TIME;
+ id=-1;
if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
- killseg (shp->id);
- spin_unlock(&shm_lock);
+ id=shp->id;
+ shm_unlock(shp->id);
+ if(id!=-1)
+ killseg(id);
}
/*
idx += shmd->vm_pgoff;
down(&shp->sem);
- spin_lock(&shm_lock);
+ if(shp != shm_lock(shp->id))
+ BUG();
+
pte = SHM_ENTRY(shp,idx);
if (!pte_present(pte)) {
/* page not present so shm_swap can't race with us
and the semaphore protects us by other tasks that
could potentially fault on our pte under us */
if (pte_none(pte)) {
- spin_unlock(&shm_lock);
+ shm_unlock(shp->id);
page = alloc_page(GFP_HIGHUSER);
if (!page)
goto oom;
clear_highpage(page);
- spin_lock(&shm_lock);
+ if(shp != shm_lock(shp->id))
+ BUG();
} else {
swp_entry_t entry = pte_to_swp_entry(pte);
- spin_unlock(&shm_lock);
+ shm_unlock(shp->id);
page = lookup_swap_cache(entry);
if (!page) {
lock_kernel();
lock_kernel();
swap_free(entry);
unlock_kernel();
- spin_lock(&shm_lock);
+ if(shp != shm_lock(shp->id))
+ BUG();
shm_swp--;
}
shm_rss++;
/* pte_val(pte) == SHM_ENTRY (shp, idx) */
get_page(pte_page(pte));
- spin_unlock(&shm_lock);
+ shm_unlock(shp->id);
up(&shp->sem);
current->min_flt++;
return pte_page(pte);
}
unlock_kernel();
- spin_lock(&shm_lock);
- check_id:
- shp = shm_segs[swap_id];
- if (shp == IPC_UNUSED || shp == IPC_NOID || shp->u.shm_perm.mode & SHM_LOCKED ) {
- next_id:
+ shm_lockall();
+check_id:
+ shp = shm_get(swap_id);
+ if(shp==NULL || shp->u.shm_perm.mode & SHM_LOCKED) {
+next_id:
swap_idx = 0;
- if (++swap_id > max_shmid) {
+ if (++swap_id > shm_ids.max_id) {
swap_id = 0;
if (loop)
goto failed;
}
id = swap_id;
- check_table:
+check_table:
idx = swap_idx++;
if (idx >= shp->shm_npages)
goto next_id;
if (--counter < 0) { /* failed */
failed:
- spin_unlock(&shm_lock);
+ shm_unlockall();
lock_kernel();
__swap_free(swap_entry, 2);
unlock_kernel();
}
if (page_count(page_map) != 1)
goto check_table;
+
if (!(page_map = prepare_highmem_swapout(page_map)))
goto failed;
SHM_ENTRY (shp, idx) = swp_entry_to_pte(swap_entry);
NOTE: we just accounted the swap space reference for this
swap cache page at __get_swap_page() time. */
add_to_swap_cache(page_map, swap_entry);
- spin_unlock(&shm_lock);
+ shm_unlockall();
lock_kernel();
rw_swap_page(WRITE, page_map, 0);
shm_rss++;
shm_swp--;
- spin_unlock(&shm_lock);
- lock_kernel();
swap_free(entry);
- unlock_kernel();
}
/*
{
int i, n;
- spin_lock(&shm_lock);
- for (i = 0; i <= max_shmid; i++) {
- struct shmid_kernel *seg = shm_segs[i];
- if ((seg == IPC_UNUSED) || (seg == IPC_NOID))
+ shm_lockall();
+ for (i = 0; i <= shm_ids.max_id; i++) {
+ struct shmid_kernel *shp = shm_get(i);
+ if(shp==NULL)
continue;
- for (n = 0; n < seg->shm_npages; n++) {
- if (pte_none(SHM_ENTRY(seg,n)))
+ for (n = 0; n < shp->shm_npages; n++) {
+ if (pte_none(SHM_ENTRY(shp,n)))
continue;
- if (pte_present(SHM_ENTRY(seg,n)))
+ if (pte_present(SHM_ENTRY(shp,n)))
continue;
- if (pte_to_swp_entry(SHM_ENTRY(seg,n)).val == entry.val) {
- shm_unuse_page(seg, n, entry, page);
- return;
+ if (pte_to_swp_entry(SHM_ENTRY(shp,n)).val == entry.val) {
+ shm_unuse_page(shp, n, entry, page);
+ goto out;
}
}
}
- spin_unlock(&shm_lock);
+out:
+ shm_unlockall();
}
#ifdef CONFIG_PROC_FS
off_t begin = 0;
int i, len = 0;
- len += sprintf(buffer, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");
+ down(&shm_ids.sem);
+ len += sprintf(buffer, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");
- spin_lock(&shm_lock);
- for(i = 0; i <= max_shmid; i++)
- if(shm_segs[i] != IPC_UNUSED) {
+ for(i = 0; i <= shm_ids.max_id; i++) {
+ struct shmid_kernel* shp = shm_lock(i);
+ if(shp!=NULL) {
#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
char *format;
else
format = BIG_STRING;
len += sprintf(buffer + len, format,
- shm_segs[i]->u.shm_perm.key,
- shm_segs[i]->u.shm_perm.seq * IPCMNI + i,
- shm_segs[i]->u.shm_perm.mode,
- shm_segs[i]->u.shm_segsz,
- shm_segs[i]->u.shm_cpid,
- shm_segs[i]->u.shm_lpid,
- shm_segs[i]->u.shm_nattch,
- shm_segs[i]->u.shm_perm.uid,
- shm_segs[i]->u.shm_perm.gid,
- shm_segs[i]->u.shm_perm.cuid,
- shm_segs[i]->u.shm_perm.cgid,
- shm_segs[i]->u.shm_atime,
- shm_segs[i]->u.shm_dtime,
- shm_segs[i]->u.shm_ctime);
+ shp->u.shm_perm.key,
+ shm_buildid(i, shp->u.shm_perm.seq),
+ shp->u.shm_perm.mode,
+ shp->u.shm_segsz,
+ shp->u.shm_cpid,
+ shp->u.shm_lpid,
+ shp->u.shm_nattch,
+ shp->u.shm_perm.uid,
+ shp->u.shm_perm.gid,
+ shp->u.shm_perm.cuid,
+ shp->u.shm_perm.cgid,
+ shp->u.shm_atime,
+ shp->u.shm_dtime,
+ shp->u.shm_ctime);
+ shm_unlock(i);
pos += len;
if(pos < offset) {
if(pos > offset + length)
goto done;
}
+ }
*eof = 1;
done:
+ up(&shm_ids.sem);
*start = buffer + (offset - begin);
len -= (offset - begin);
if(len > length)
len = length;
if(len < 0)
len = 0;
- spin_unlock(&shm_lock);
return len;
}
#endif
{
int i;
sema_init(&ids->sem,1);
+
+ if(size > IPCMNI)
+ size = IPCMNI;
ids->size = size;
if(size == 0)
return;
- if(size > IPCMNI)
- size = IPCMNI;
ids->in_use = 0;
ids->max_id = -1;
ids->size = 0;
}
ids->ary = SPIN_LOCK_UNLOCKED;
- for(i=0;i<size;i++) {
+ for(i=0;i<size;i++)
ids->entries[i].p = NULL;
- }
}
int ipc_findkey(struct ipc_ids* ids, key_t key)
void* ipc_alloc(int size);
void ipc_free(void* ptr, int size);
+extern inline void ipc_lockall(struct ipc_ids* ids)
+{
+ spin_lock(&ids->ary);
+}
+
+extern inline struct ipc_perm* ipc_get(struct ipc_ids* ids, int id)
+{
+ struct ipc_perm* out;
+ int lid = id % SEQ_MULTIPLIER;
+ if(lid > ids->size)
+ return NULL;
+
+ out = ids->entries[lid].p;
+ return out;
+}
+
+extern inline void ipc_unlockall(struct ipc_ids* ids)
+{
+ spin_unlock(&ids->ary);
+}
extern inline struct ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
struct ipc_perm* out;
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/spinlock.h>
+#include <asm/io.h>
-struct resource ioport_resource = { "PCI IO", 0x0000, 0xFFFF, IORESOURCE_IO };
-struct resource iomem_resource = { "PCI mem", 0x00000000, 0xFFFFFFFF, IORESOURCE_MEM };
+struct resource ioport_resource = { "PCI IO", 0x0000, IO_SPACE_LIMIT, IORESOURCE_IO };
+struct resource iomem_resource = { "PCI mem", 0x00000000, 0xffffffff, IORESOURCE_MEM };
static rwlock_t resource_lock = RW_LOCK_UNLOCKED;
static int find_resource(struct resource *root, struct resource *new,
unsigned long size,
unsigned long min, unsigned long max,
- unsigned long align, struct pci_dev * dev)
+ unsigned long align,
+ void (*alignf)(void *, struct resource *, unsigned long),
+ void *alignf_data)
{
struct resource *this = root->child;
- unsigned long start, end;
- start = root->start;
+ new->start = root->start;
for(;;) {
if (this)
- end = this->start;
+ new->end = this->start;
else
- end = root->end;
- if (start < min)
- start = min;
- if (end > max)
- end = max;
- start = (start + align - 1) & ~(align - 1);
- start = resource_fixup (dev, new, start, size);
- if (start < end && end - start + 1 >= size) {
- new->start = start;
- new->end = start + size - 1;
+ new->end = root->end;
+ if (new->start < min)
+ new->start = min;
+ if (new->end > max)
+ new->end = max;
+ new->start = (new->start + align - 1) & ~(align - 1);
+ if (alignf)
+ alignf(alignf_data, new, size);
+ if (new->start < new->end && new->end - new->start + 1 >= size) {
+ new->end = new->start + size - 1;
return 0;
}
if (!this)
break;
- start = this->end + 1;
+ new->start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
int allocate_resource(struct resource *root, struct resource *new,
unsigned long size,
unsigned long min, unsigned long max,
- unsigned long align, struct pci_dev * dev)
+ unsigned long align,
+ void (*alignf)(void *, struct resource *, unsigned long),
+ void *alignf_data)
{
int err;
write_lock(&resource_lock);
- err = find_resource(root, new, size, min, max, align, dev);
+ err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
write_unlock(&resource_lock);
extern int sg_big_buff;
#endif
#ifdef CONFIG_SYSVIPC
-extern size_t shm_prm[];
+extern size_t shm_ctlmax;
+extern int shm_ctlall;
+extern int shm_ctlmni;
extern int msg_ctlmax;
extern int msg_ctlmnb;
extern int msg_ctlmni;
{KERN_RTSIGMAX, "rtsig-max", &max_queued_signals, sizeof(int),
0644, NULL, &proc_dointvec},
#ifdef CONFIG_SYSVIPC
- {KERN_SHMMAX, "shmmax", &shm_prm, 3*sizeof (size_t),
+ {KERN_SHMMAX, "shmmax", &shm_ctlmax, sizeof (size_t),
0644, NULL, &proc_doulongvec_minmax},
+ {KERN_SHMALL, "shmall", &shm_ctlall, sizeof (int),
+ 0644, NULL, &proc_dointvec},
+ {KERN_SHMMNI, "shmmni", &shm_ctlmni, sizeof (int),
+ 0644, NULL, &proc_dointvec},
{KERN_MSGMAX, "msgmax", &msg_ctlmax, sizeof (int),
0644, NULL, &proc_dointvec},
{KERN_MSGMNI, "msgmni", &msg_ctlmni, sizeof (int),
int __init atm_proc_init(void)
{
struct proc_dir_entry *dev=NULL,*pvc=NULL,*svc=NULL,*arp=NULL,*lec=NULL;
- atm_proc_root = create_proc_entry("atm", S_IFDIR, &proc_root);
+ atm_proc_root = proc_mkdir("atm", &proc_root);
if (!atm_proc_root)
return -ENOMEM;
dev = create_proc_entry("devices",0,atm_proc_root);
{
int i;
- proc_irda = create_proc_entry("net/irda", S_IFDIR, NULL);
+ proc_irda = proc_mkdir("net/irda", NULL);
proc_irda->owner = THIS_MODULE;
for (i=0;i<IRDA_ENTRIES_NUM;i++)
sock_register(&packet_family_ops);
register_netdevice_notifier(&packet_netdev_notifier);
#ifdef CONFIG_PROC_FS
- ent = create_proc_entry("net/packet", 0, 0);
- ent->read_proc = packet_read_proc;
+ create_proc_read_entry("net/packet", 0, 0, packet_read_proc, NULL);
#endif
#ifdef MODULE
return 0;
struct inode * inode;
struct socket * sock;
- lock_kernel();
- /* Damn! get_empty_inode is not SMP safe.
- I ask, why does it have decorative spinlock
- at the very beginning? Probably, dcache ops should
- be lock_kernel'ed inside inode.c
- */
inode = get_empty_inode();
- if (!inode) {
- unlock_kernel();
+ if (!inode)
return NULL;
- }
- unlock_kernel();
sock = socki_lookup(inode);
dprintk("RPC: registering /proc/net/rpc\n");
if (!proc_net_rpc) {
struct proc_dir_entry *ent;
- ent = create_proc_entry("net/rpc", S_IFDIR, 0);
+ ent = proc_mkdir("net/rpc", 0);
if (ent) {
ent->owner = THIS_MODULE;
proc_net_rpc = ent;