different D-channel protocol, or non-standard irq/port/shmem
settings.
+PCBIT-D support
+CONFIG_ISDN_DRV_PCBIT
+ This enables support for the PCBIT ISDN-cards. This card is
+ manufactured in Portugal by Octal. For running this card,
+ additional firmware is necessary, which has to be downloaded into
+ the card using a utility which is distributed separately.
+ See Documentation/isdn/README for more information.
+
+Support for AP1000 multicomputer
+CONFIG_AP1000
+ This enables support for a sparc based parallel multi-computer
+ called an AP1000+. For details on our efforts to port Linux to this
+ machine see http://cap.anu.edu.au/cap/projects/linux or mail to
+ hackers@cafe.anu.edu.au
+
# need an empty line after last entry, for sed script in Configure.
#
%
\title{{\bf Linux Allocated Devices}}
\author{Maintained by H. Peter Anvin $<$hpa@zytor.com$>$}
-\date{Last revised: April 10, 1996}
+\date{Last revised: April 20, 1996}
\maketitle
%
\noindent
permission of the author, assuming the author can be contacted without
an unreasonable effort.
+In particular, please don't send patches for this list to Linus, at
+least not without contacting me first.
+
\section{Major numbers}
\begin{devicelist}
\major{45}{}{char }{isdn4linux ISDN BRI driver}
\major{46}{}{char }{Comtrol Rocketport serial card}
\major{47}{}{char }{Comtrol Rocketport serial card -- alternate devices}
-\major{48}{--59}{}{Unallocated}
+\major{48}{}{char }{SDL RISCom serial card}
+\major{49}{}{char }{SDL RISCom serial card -- alternate devices}
+\major{50}{--59}{}{Unallocated}
\major{60}{--63}{}{Local/experimental use}
\major{64}{--119}{}{Unallocated}
\major{120}{--127}{}{Local/experimental use}
\minor{133}{/dev/exttrp}{External device trap}
\minor{134}{/dev/apm\_bios}{Advanced Power Management BIOS}
\minor{135}{/dev/rtc}{Real Time Clock}
+ \minor{136}{/dev/qcam0}{QuickCam on {\file lp0}}
+ \minor{137}{/dev/qcam1}{QuickCam on {\file lp1}}
+ \minor{138}{/dev/qcam2}{QuickCam on {\file lp2}}
\end{devicelist}
\noindent
\minor{0}{/dev/ttyC0}{First Cyclades port}
\minordots
\minor{31}{/dev/ttyC31}{32nd Cyclades port}
-\end{devicelist}
-\noindent
-It would make more sense for these to start at 0...
-\begin{devicelist}
+\\
\major{ }{}{block}{``Double'' compressed disk}
\minor{0}{/dev/double0}{First compressed disk}
\minordots
\major{43}{}{char }{isdn4linux virtual modem}
\minor{0}{/dev/ttyI0}{First virtual modem}
\minordots
- \minor{15}{/dev/ttyI15}{16th virtual modem}
+ \minor{63}{/dev/ttyI63}{64th virtual modem}
\end{devicelist}
\begin{devicelist}
\major{44}{}{char }{isdn4linux virtual modem -- alternate devices}
\minor{0}{/dev/cui0}{Callout device corresponding to {\file ttyI0}}
\minordots
- \minor{15}{/dev/cui15}{Callout device corresponding to {\file ttyI15}}
+ \minor{63}{/dev/cui63}{Callout device corresponding to {\file ttyI63}}
\end{devicelist}
\begin{devicelist}
\major{45}{}{char }{isdn4linux ISDN BRI driver}
\minor{0}{/dev/isdn0}{First virtual B channel raw data}
\minordots
- \minor{15}{/dev/isdn15}{16th virtual B channel raw data}
- \minor{16}{/dev/isdnctrl0}{First channel control/debug}
+ \minor{63}{/dev/isdn63}{64th virtual B channel raw data}
+ \minor{64}{/dev/isdnctrl0}{First channel control/debug}
+ \minordots
+ \minor{127}{/dev/isdnctrl63}{64th channel control/debug}
+ \minor{128}{/dev/ippp0}{First SyncPPP device}
\minordots
- \minor{31}{/dev/isdnctrl15}{16th channel control/debug}
- \minor{128}{/dev/isdninfo}{ISDN monitor interface}
+ \minor{191}{/dev/ippp63}{64th SyncPPP device}
+ \minor{255}{/dev/isdninfo}{ISDN monitor interface}
\end{devicelist}
\begin{devicelist}
\end{devicelist}
\begin{devicelist}
-\major{48}{--59}{}{Unallocated}
+\major{48}{}{char }{SDL RISCom serial card}
+ \minor{0}{/dev/ttyL0}{First RISCom port}
+ \minor{1}{/dev/ttyL1}{Second RISCom port}
+ \minordots
+\end{devicelist}
+
+\begin{devicelist}
+\major{49}{}{char }{SDL RISCom serial card -- alternate devices}
+ \minor{0}{/dev/cul0}{Callout device corresponding to {\file ttyL0}}
+ \minor{1}{/dev/cul1}{Callout device corresponding to {\file ttyL1}}
+ \minordots
+\end{devicelist}
+
+\begin{devicelist}
+\major{50}{--59}{}{Unallocated}
\end{devicelist}
\begin{devicelist}
Maintained by H. Peter Anvin <hpa@zytor.com>
- Last revised: April 10, 1996
+ Last revised: April 20, 1996
This list is the successor to Rick Miller's Linux Device List, which
he stopped maintaining when he got busy with other things in 1993. It
is a registry of allocated major device numbers, as well as the
recommended /dev directory nodes for these devices.
-The latest version of this list is included with the Linux kernel
+The latest version of this list is included with the Linux kernel
sources in LaTeX and ASCII form. In case of discrepancy, the LaTeX
version is authoritative.
permission of the author, assuming the author can be contacted without
an unreasonable effort.
+In particular, please don't send patches for this list to Linus, at
+least not without contacting me first.
+
0 Unnamed devices (e.g. non-device mounts)
0 = reserved as null device number
133 = /dev/exttrp External device trap
134 = /dev/apm_bios Advanced Power Management BIOS
135 = /dev/rtc Real Time Clock
+ 136 = /dev/qcam0 QuickCam on lp0
+ 137 = /dev/qcam1 QuickCam on lp1
+ 138 = /dev/qcam2 QuickCam on lp2
11 char Raw keyboard device
0 = /dev/kbd Raw keyboard device
0 = /dev/sjcd Sanyo CD-ROM
19 char Cyclades serial card
- 0 = /dev/ttyC0 First Cyclades port
+ 0 = /dev/ttyC0 First Cyclades port
...
31 = /dev/ttyC31 32nd Cyclades port
-
- It would make more sense for these to start at 0...
-
block "Double" compressed disk
0 = /dev/double0 First compressed disk
...
mirror devices.
20 char Cyclades serial card - alternate devices
- 0 = /dev/cub0 Callout device corresponding to ttyC0
+ 0 = /dev/cub0 Callout device corresponding to ttyC0
...
31 = /dev/cub31 Callout device corresponding to ttyC31
block Hitachi CD-ROM (under development)
43 char isdn4linux virtual modem
0 = /dev/ttyI0 First virtual modem
...
- 15 = /dev/ttyI15 16th virtual modem
+ 63 = /dev/ttyI63 64th virtual modem
44 char isdn4linux virtual modem - alternate devices
0 = /dev/cui0 Callout device corresponding to ttyI0
...
- 15 = /dev/cui15 Callout device corresponding to ttyI15
+ 63 = /dev/cui63 Callout device corresponding to ttyI63
45 char isdn4linux ISDN BRI driver
0 = /dev/isdn0 First virtual B channel raw data
...
- 15 = /dev/isdn15 16th virtual B channel raw data
- 16 = /dev/isdnctrl0 First channel control/debug
+ 63 = /dev/isdn63 64th virtual B channel raw data
+ 64 = /dev/isdnctrl0 First channel control/debug
+ ...
+ 127 = /dev/isdnctrl63 64th channel control/debug
+
+ 128 = /dev/ippp0 First SyncPPP device
...
- 31 = /dev/isdnctrl15 16th channel control/debug
- 128 = /dev/isdninfo ISDN monitor interface
+ 191 = /dev/ippp63 64th SyncPPP device
+
+ 255 = /dev/isdninfo ISDN monitor interface
46 char Comtrol Rocketport serial card
0 = /dev/ttyR0 First Rocketport port
1 = /dev/cur1 Callout device corresponding to ttyR1
...
- 48-59 UNALLOCATED
+ 48 char SDL RISCom serial card
+ 0 = /dev/ttyL0 First RISCom port
+ 1 = /dev/ttyL1 Second RISCom port
+ ...
+
+ 49 char SDL RISCom serial card - alternate devices
+ 0 = /dev/cul0 Callout device corresponding to ttyL0
+ 1 = /dev/cul1 Callout device corresponding to ttyL1
+ ...
+
+ 50-59 UNALLOCATED
60-63 LOCAL/EXPERIMENTAL USE
Allocated for local/experimental use. For devices not
Thomas Bogendörfer (tsbogend@bigbug.franken.de)
Tester, lots of bugfixes and hints.
-Alan Cox ()
+Alan Cox (alan@cymru.net)
For help getting into standard-kernel.
Volker Götz (volker@oops.franken.de)
Andreas Kool (akool@Kool.f.EUnet.de)
For contribution of the isdnlog/isdnrep-tool
-Pedro Roque Marques
- For lot of new ideas and writing a new driver coming soon.
+Pedro Roque Marques (roque@di.fc.ul.pt)
+ For lot of new ideas and the pcbit driver.
+
+Eberhard Moenkeberg (emoenke@gwdg.de)
+ For testing and help to get into kernel.
Jan den Ouden (denouden@groovin.xs4all.nl)
For contribution of the teles-driver
term to that one, which applies to your local ISDN-environment.
When the link-level-module isdn.o is loaded, it supports up to 16
- low-level-modules with up to 16 channels. (The number 16 is arbitrarily
+ low-level-modules with up to 64 channels. (The number 64 is arbitrarily
chosen and can be configured at compile-time --ISDN_MAX in isdn.h).
A low-level-driver can register itself through an interface (which is
defined in isdnif.h) and gets assigned a slot.
In addition the following devices are made available:
- 32 tty-devices (16 cuix and 16 ttyIx) with integrated modem-emulator:
+ 128 tty-devices (64 cuix and 64 ttyIx) with integrated modem-emulator:
The functionality is almost the same as that of a serial device
(the line-discs are handled by the kernel, which lets you run
SLIP, CSLIP and asynchronous PPP through the devices. We have tested
In this case use ping with the option -i <sec> to increase the interval
between echo-packets.
+ "isdnctrl cbdelay <InterfaceName> [seconds]"
+ Sets the delay (default 5 sec) between an incoming call and start of
+ dialing when callback is enabled.
+
+ "isdnctrl cbhup <InterfaceName> [on|off]"
+ This enables (default) or disables an active hangup (reject) when getting an
+ incoming call for an interface which is configured for callback.
+
"isdnctrl encap <InterfaceName> <EncapType>"
Selects the type of packet-encapsulation. The encapsulation can be changed
only while an interface is down.
IP-address: fc:fc:i1:i2:i3:i4, where i1-4 are the IP-addr.-values.
syncppp Synchronous PPP
+ uihdlc HDLC with UI-frame-header (for use with DOS ISPA, option -h1)
+
Watching packets, using standard-tcpdump will fail for all encapsulations
except ethernet because tcpdump does not know how to handle packets
without MAC-header. A patch for tcpdump is included in the utility-package
--- /dev/null
+------------------------------------------------------------------------------
+ README file for the PCBIT-D Device Driver.
+------------------------------------------------------------------------------
+
+The PCBIT is a Euro ISDN adapter manufactured in Portugal by Octal and
+developed in cooperation with Portugal Telecom and Inesc.
+The driver interfaces with the standard kernel isdn facilities
+originally developed by Fritz Elfert in the isdn4linux project.
+
+The common versions of the pcbit board require a firmware that is
+distributed (and copyrighted) by the manufacturer. To load this
+firmware you need "pcbitctl" available on the standard isdn4k-utils
+package or in the pcbit package available in:
+
+ftp://ftp.di.fc.ul.pt/pub/systems/Linux/isdn
+
+Known Limitations:
+
+- The board reset procedure is at the moment incorrect and will only
+allow you to load the firmware after a hard reset.
+
+- Only HDLC in B-channels is supported at the moment. There is no
+current support for X.25 in B or D channels nor LAPD in B
+channels. The main reason is that these two other protocol modes have,
+to my knowledge, very little use. If you want to see them implemented
+*do* send me a mail.
+
+- The driver often triggers errors in the board that I and the
+manufacturer believe to be caused by bugs in the firmware. The current
+version includes several procedures for error recovery that should
+allow normal operation. Plans for the future include cooperation with
+the manufacturer in order to solve these problems.
+
+Information/hints/help can be obtained in the linux isdn
+mailing list (isdn4linux@hub-wue.franken.de) or directly from me.
+
+regards,
+ Pedro.
+
+<roque@di.fc.ul.pt>
to answer the appropriate question when doing a "make config"
Don't forget to load the slhc.o
module before the isdn.o module, if VJ-compression support
-is not compiled into your kernel.
+is not compiled into your kernel. (e.g. if you have PPP or
+CSLIP in the kernel)
Using isdn4linux with sync PPP:
-------------------------------
I've implemented one additional option for the ipppd:
'useifip' will get (if set to not 0.0.0.0) the IP address
for the negotiation from the attached network-interface.
+(also: ipppd will try to negotiate pointopoint IP as remote IP)
You must disable BSD-compression, this implementation can't
handle compressed packets.
-Check the rc.isdn.syncppp file for an example setup script.
+Check the etc/rc.isdn.syncppp in the isdn4kernel-util package
+for an example setup script.
+
+To use the MPPP stuff, you must configure a slave device
+with isdn4linux. Now call the ipppd with the '+mp' option.
+To increase the number of links, you must use the
+'addlink' option of the isdnctrl tool.
enjoy it,
michael
-PS: I also implemented generic MP (RFC 1717). But in the
-current isdn4linux link-level driver there is no
-(or better say: another) way of doing channel bundling. So,
-don't call the ipppd with the `+mp` option to enable
-MP negotiation.
--- /dev/null
+simple isdn4linux PPP FAQ .. to be continued .. not 'debugged'
+
+Q: pppd,ipppd, syncPPP , asyncPPP .. what is that ?
+ what should I use?
+A: The pppd is for asynchronous PPP .. asynchronous means
+here, the framing is character based. (e.g. when
+using ttyI* or tty* devices)
+
+The ipppd handles PPP packets coming in HDLC
+frames (bit based protocol) ... The PPP driver
+in isdn4linux pushes all IP packets directly
+to the network layer and all PPP protocol
+frames to the /dev/ippp* device.
+So, the ipppd is a simple external network
+protocol handler.
+
+If you login into a remote machine using the
+/dev/ttyI* devices and then enable PPP on the
+remote terminal server -> use the 'old' pppd
+
+If your remote side immediately starts to send
+frames ... you probably connect to a
+syncPPP machine .. use the network device part
+of isdn4linux with the 'syncppp' encapsulation
+and make sure, that the ipppd is running and
+connected to at least one /dev/ippp*. Check the
+isdn4linux manual on how to configure a network device.
+
+Q: when I start the ipppd .. I only get the
+ error message "this systems lacks PPP support"
+A: check that at least the device 'ippp0' exists.
+ (you can check this e.g. with the program 'ifconfig')
+ The ipppd NEEDS this device under THIS name ..
+ If this device doesn't exist, use:
+ isdnctrl addif ippp0
+ isdnctrl encap ippp0 syncppp
+ ... (see isdn4linux doc for more) ...
+A: Maybe you have compiled the ipppd with another
+ kernel source tree than the kernel you currently
+ run ...
+
+Q: when I list the netdevices with ifconfig I see, that
+ my ISDN interface has a HWaddr and IRQ=0 and Base
+ address = 0
+A: The device is a fake ethernet device .. ignore IRQ and baseaddr
+ You need the HWaddr only for ethernet encapsulation.
+
+
+
+
L: linux-kernel@vger.rutgers.edu
S: Maintained
+VFAT FILESYSTEM:
+P: Gordon Chaffee
+M: chaffee@plateau.cs.berkeley.edu
+L: linux-kernel@vger.rutgers.edu
+W: http://www-plateau.cs.berkeley.edu/people/chaffee
+S: Maintained
+
DIGIBOARD DRIVER:
P: Christoph Lameter
M: clameter@fuller.edu
VERSION = 1
PATCHLEVEL = 3
-SUBLEVEL = 92
+SUBLEVEL = 93
ARCH = i386
ARCHIVES =kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o net/network.a
FILESYSTEMS =fs/filesystems.a
DRIVERS =drivers/block/block.a \
- drivers/char/char.a \
- drivers/net/net.a
+ drivers/char/char.a
LIBS =$(TOPDIR)/lib/lib.a
SUBDIRS =kernel drivers mm fs net ipc lib
ifeq ($(CONFIG_ISDN),y)
-DRIVERS := $(DRIVERS) drivers/isdn/isdn.a
+DRIVERS := $(DRIVERS) drivers/isdn/isdn.a
endif
+DRIVERS := $(DRIVERS) drivers/net/net.a
+
ifdef CONFIG_CD_NO_IDESCSI
DRIVERS := $(DRIVERS) drivers/cdrom/cdrom.a
endif
}
#endif
current->used_math = 1;
- current->flags &= PF_USEDFPU;
+ current->flags &= ~PF_USEDFPU;
memcpy_fromfs(¤t->tss.i387.hard, buf, sizeof(*buf));
}
if (current->flags & PF_USEDFPU) {
__asm__ __volatile__("fnsave %0":"=m" (current->tss.i387.hard));
stts();
- current->flags &= PF_USEDFPU;
+ current->flags &= ~PF_USEDFPU;
}
#else
if (current == last_task_used_math) {
-# $Id: Makefile,v 1.6 1995/11/25 00:57:30 davem Exp $
+# $Id: Makefile,v 1.20 1996/04/16 08:02:50 davem Exp $
# sparc/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/prom/promlib.a \
$(TOPDIR)/arch/sparc/lib/lib.a
-aoutimage: vmlinux
- elftoaout -o aoutimage vmlinux
+ifdef CONFIG_AP1000
+SUBDIRS := $(SUBDIRS) arch/sparc/ap1000
+ARCHIVES := $(TOPDIR)/arch/sparc/ap1000/ap1000lib.o $(ARCHIVES)
+DRIVERS := $(DRIVERS) drivers/ap1000/ap1000.a
+endif
archclean:
rm -f $(TOPDIR)/arch/sparc/boot/boot
More will come....
Hopefully I can write this such that it will work on almost all SUN
-machines in existence. We'll see ;(
+machines in existence. We'll see ;(
-/* $Id: empirical.h,v 1.2 1995/11/25 00:57:42 davem Exp $
+/* $Id: empirical.h,v 1.1 1996/04/21 10:17:46 davem Exp $
* empirical.h: Nasty hacks....
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: init_me.c,v 1.2 1995/11/25 00:57:44 davem Exp $
+/* $Id: init_me.c,v 1.3 1996/04/21 10:30:09 davem Exp $
* init_me.c: Initialize empirical constants and gather some info from
* the boot prom.
*
#include "empirical.h" /* Don't ask... */
-#define DEBUG_INIT_ME /* Tell me what's goin on */
+#define DEBUG_INIT_ME /* Tell me what's going on */
unsigned int nwindows; /* Set in bare.S */
unsigned int nwindowsm1;
-# $Id: config.in,v 1.8 1996/03/01 07:15:47 davem Exp $
+# $Id: config.in,v 1.9 1996/04/04 16:30:03 tridge Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
mainmenu_option next_comment
comment 'General setup'
-# Global things across all Sparc machines.
-define_bool CONFIG_SBUS y
-define_bool CONFIG_SUN_MOUSE y
-define_bool CONFIG_SERIAL y
-define_bool CONFIG_SUN_SERIAL y
-define_bool CONFIG_SUN_KEYBOARD y
-define_bool CONFIG_SUN_CONSOLE y
+bool 'Support for AP1000 multicomputer' CONFIG_AP1000
+
+if [ "$CONFIG_AP1000" = "n" ]; then
+ # Global things across all Sun machines.
+ define_bool CONFIG_SBUS y
+ define_bool CONFIG_SUN_MOUSE y
+ define_bool CONFIG_SERIAL y
+ define_bool CONFIG_SUN_SERIAL y
+ define_bool CONFIG_SUN_KEYBOARD y
+ define_bool CONFIG_SUN_CONSOLE y
+ define_bool CONFIG_SUN_AUXIO y
+ define_bool CONFIG_SUN_IO y
+fi
+
define_bool CONFIG_NET_ALIAS n
define_bool CONFIG_BINFMT_AOUT y
+
bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
#
# General setup
#
+# CONFIG_AP1000 is not set
CONFIG_SBUS=y
CONFIG_SUN_MOUSE=y
+CONFIG_SERIAL=y
CONFIG_SUN_SERIAL=y
CONFIG_SUN_KEYBOARD=y
CONFIG_SUN_CONSOLE=y
+CONFIG_SUN_AUXIO=y
+CONFIG_SUN_IO=y
# CONFIG_NET_ALIAS is not set
CONFIG_BINFMT_AOUT=y
CONFIG_NET=y
CONFIG_SYSVIPC=y
-# CONFIG_BINFMT_ELF is not set
+CONFIG_BINFMT_ELF=y
#
# Floppy, IDE, and other block devices
#
# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_BLK_DEV_LOOP is not set
#
# Networking options
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_AX25 is not set
+# CONFIG_BRIDGE is not set
# CONFIG_NETLINK is not set
#
#
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
-# CONFIG_BLK_DEV_SR is not set
+CONFIG_BLK_DEV_SR=y
# CONFIG_CHR_DEV_SG is not set
#
#
# CONFIG_SCSI_MULTI_LUN is not set
CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_AUTO_BIOSP is not set
#
# SCSI low-level drivers
CONFIG_PROC_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
+CONFIG_RNFS_BOOTP=y
+CONFIG_RNFS_RARP=y
# CONFIG_SMB_FS is not set
-# CONFIG_ISO9660_FS is not set
+CONFIG_ISO9660_FS=y
# CONFIG_HPFS_FS is not set
# CONFIG_SYSV_FS is not set
-# $Id: Makefile,v 1.22 1996/03/01 07:15:52 davem Exp $
+# $Id: Makefile,v 1.29 1996/04/04 16:30:17 tridge Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
#
# Note 2! The CFLAGS definitions are now in the main makefile...
+ifdef SMP
+
+.S.s:
+ $(CPP) -D__ASSEMBLY__ $(AFLAGS) -ansi $< -o $*.s
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o
+
+
+else
+
.S.s:
$(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
+
.S.o:
$(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
+
+endif
+
all: kernel.o head.o
O_TARGET := kernel.o
-O_OBJS := entry.o wof.o wuf.o etrap.o rtrap.o switch.o traps.o irq.o \
- process.o signal.o ioport.o setup.o idprom.o mp.o c_mp.o \
+IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o
+O_OBJS := entry.o wof.o wuf.o etrap.o rtrap.o switch.o traps.o ${IRQ_OBJS} \
+ process.o signal.o ioport.o setup.o idprom.o \
sys_sparc.o sunos_asm.o sparc-stub.o systbls.o sys_sunos.o \
- sunos_ioctl.o time.o windows.o cpu.o auxio.o devices.o ksyms.o \
- sclow.o
+ sunos_ioctl.o time.o windows.o cpu.o devices.o ksyms.o \
+ sclow.o solaris.o tadpole.o tick14.o
+
+ifdef SMP
+O_OBJS += trampoline.o smp.o rirq.o
+endif
+
+ifdef CONFIG_SUN_AUXIO
+O_OBJS += auxio.o
+endif
all: kernel.o head.o
+ifdef SMP
+
+head.o: head.S
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $*.S -o $*.o
+
+else
+
head.o: head.S
$(CC) -D__ASSEMBLY__ -ansi -c $*.S -o $*.o
+endif
+
include $(TOPDIR)/Rules.make
/* Map the register both read and write */
auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0,
auxregs[0].reg_size,
- "auxiliaryIO",
+ "auxilliaryIO",
auxregs[0].which_io, 0x0);
/* Fix the address on sun4m and sun4c. */
if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
+++ /dev/null
-/* $Id: c_mp.c,v 1.3 1995/11/25 00:57:50 davem Exp $
- * mp.c: SMP cpu idling and dispatch on the Sparc.
- *
- * Copyright (C) 1995 David S. Miller
- */
-
-#include <linux/kernel.h>
-
-#include <asm/mp.h>
-#include <asm/mbus.h>
-
-struct sparc_percpu *percpu_table;
-
-void
-sparc_cpu_init(void)
-{
- /* We now have our per-cpu mappings ok, and we should
- * be good to go.
- */
-
- /* Do cache crap here. */
-
- /* CPU initted, idle the puppy. */
-
- return;
-}
-
-extern thiscpus_mid;
-
-void
-sparc_cpu_idle(void)
-{
- int cpuid;
-
-/* cpuid = get_cpuid(); */
- cpuid = (thiscpus_mid&(~8));
-/* printk("SMP: cpu%d has entered idle loop", cpuid); */
-
- /* Say that we exist and set up. */
- percpu_table[cpuid].cpuid = cpuid;
- percpu_table[cpuid].cpu_is_alive = 0x1;
- percpu_table[cpuid].cpu_is_idling = 0x1;
-
- /* Let other cpus catch up. */
- while(linux_smp_still_initting) ;
- printk("cpu%d done spinning\n", get_cpuid());
- for(;;) ; /* Do something useful here... */
-
- return;
-}
#include <linux/kernel.h>
#include <asm/oplib.h>
+#include <asm/page.h>
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/mbus.h>
*/
#include <linux/kernel.h>
+#include <linux/config.h>
#include <asm/page.h>
#include <asm/oplib.h>
int linux_num_cpus;
extern void cpu_probe(void);
-extern void auxio_probe(void);
+extern void clock_stop_probe(void); /* tadpole.c */
unsigned long
device_scan(unsigned long mem_start)
int cpu_nds[NCPUS]; /* One node for each cpu */
int cpu_ctr = 0;
+#if CONFIG_AP1000
+ printk("Not scanning device list for CPUs\n");
+ linux_num_cpus = 1;
+ return;
+#endif
+
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
+
if(strcmp(node_str, "cpu") == 0) {
cpu_nds[0] = prom_root_node;
cpu_ctr++;
} else {
int scan;
scan = prom_getchild(prom_root_node);
+ prom_printf("root child is %08lx\n", (unsigned long) scan);
nd = 0;
while((scan = prom_getsibling(scan)) != 0) {
prom_getstring(scan, "device_type", node_str, sizeof(node_str));
linux_cpus[cpu_ctr].prom_node = scan;
prom_getproperty(scan, "mid", (char *) &thismid, sizeof(thismid));
linux_cpus[cpu_ctr].mid = thismid;
+ prom_printf("Found CPU %d <node=%08lx,mid=%d>\n",
+ cpu_ctr, (unsigned long) scan,
+ thismid);
cpu_ctr++;
}
};
linux_num_cpus = cpu_ctr;
cpu_probe();
- auxio_probe();
+#if CONFIG_SUN_AUXIO
+ {
+ extern void auxio_probe(void);
+ auxio_probe();
+ }
+#endif
+ clock_stop_probe();
return mem_start;
}
-/* $Id: entry.S,v 1.79 1996/03/01 07:15:54 davem Exp $
+/* $Id: entry.S,v 1.90 1996/04/18 01:00:37 davem Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/head.h>
#include <asm/asi.h>
+#include <asm/smp.h>
#include <asm/kgdb.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
-#define NR_SYSCALLS 255 /* Each OS is different... */
+#include <asm/asmmacro.h>
-/* All trap entry points _must_ begin with this macro or else you
- * lose. It makes sure the kernel has a proper window so that
- * c-code can be called. Some day for SMP we'll grab klock here.
- */
-#define SAVE_ALL \
- sethi %hi(trap_setup), %l4; \
- jmpl %l4 + %lo(trap_setup), %l6; \
- nop;
-
-/* All traps low-level code here must end with this macro.
- * For SMP configurations the ret_trap_entry routine will
- * have to appropriate code to actually release the kernel
- * entry lock.
- */
-#define RESTORE_ALL \
- b ret_trap_entry; \
- nop;
+#define NR_SYSCALLS 255 /* Each OS is different... */
/* First, KGDB low level things. This is a rewrite
* of the routines found in the sparc-stub.c asm() statement
C_LABEL(trap_low):
rd %wim, %l3
SAVE_ALL
+ ENTER_SYSCALL
sethi %hi(in_trap_handler), %l4
ld [%lo(in_trap_handler) + %l4], %l5
/* Flip terminal count pin */
set C_LABEL(auxio_register), %l4
ld [%l4], %l4
- ldub [%l4], %l5
- or %l5, 0xf4, %l5
+
+ set C_LABEL(sparc_cpu_model), %l5
+ ld [%l5], %l5
+ subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
+ be 1f
+ ldub [%l4], %l5
+
+ or %l5, 0xc2, %l5
stb %l5, [%l4]
+ andn %l5, 0x02, %l5
+ b 2f
+ nop
+
+1:
+ or %l5, 0xf4, %l5
+ stb %l5, [%l4]
+ andn %l5, 0x04, %l5
+2:
/* Kill some time so the bits set */
WRITE_PAUSE
WRITE_PAUSE
- ldub [%l4], %l5
- andn %l5, 0x04, %l5
- or %l5, 0xf0, %l5
- stb %l5, [%l4]
+ stb %l5, [%l4]
/* Prevent recursion */
sethi %hi(C_LABEL(doing_pdma)), %l4
floppy_dosoftint:
rd %wim, %l3
SAVE_ALL
+ ENTER_IRQ
/* Set all IRQs off. */
or %l0, PSR_PIL, %l4
call C_LABEL(floppy_interrupt)
add %sp, REGWIN_SZ, %o1 ! struct pt_regs *regs
+ LEAVE_IRQ
RESTORE_ALL
#endif /* (CONFIG_BLK_DEV_FD) */
.globl bad_trap_handler
bad_trap_handler:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
.globl real_irq_entry
real_irq_entry:
SAVE_ALL
+#ifdef __SMP__
+ cmp %l7, 9
+ bne 1f
+ nop
+
+ GET_PROCESSOR_MID(l4, l5)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %l5
+ sethi %hi(0x02000000), %l6
+ sll %l4, 12, %l4
+ add %l5, %l4, %l5
+ ld [%l5], %l4
+ andcc %l4, %l6, %g0
+ be 1f
+ nop
+
+ b linux_trap_ipi9_sun4m
+ nop
+
+1:
+#endif
+ ENTER_IRQ
+
+#ifdef __SMP__
+ cmp %l7, 13
+ bne 1f
+ nop
+
+ /* This is where we catch the level 13 reschedule soft-IRQ. */
+ GET_PROCESSOR_MID(o3, o2)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sethi %hi(0x20000000), %o4
+ sll %o3, 12, %o3
+ add %o5, %o3, %o5
+ ld [%o5], %o1 ! read processor irq pending reg
+ andcc %o1, %o4, %g0
+ be 1f
+ nop
+
+ b linux_trap_ipi13_sun4m
+ nop
+
+1:
+
+#endif
/* start atomic operation with respect to software interrupts */
sethi %hi(C_LABEL(intr_count)), %l4
* to work around a MicroSPARC bug of sorts.
*/
or %l0, PSR_PIL, %l4
+
wr %l4, 0x0, %psr
+ WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
be 2f
nop
- /* do_bottom_half must run at normal kernel priority, ie. all
- * IRQ's on.
- */
- rd %psr, %g4
- andn %g4, PSR_PIL, %g4
- wr %g4, 0x0, %psr
- WRITE_PAUSE
call C_LABEL(do_bottom_half)
nop
2:
st %l5, [%l4 + %lo(C_LABEL(intr_count))]
+ LEAVE_IRQ
RESTORE_ALL
- /* This routine handles illegal instructions and privileged
+ /* This routine handles illegal instructions and privileged
* instruction attempts from user code.
*/
.align 4
.globl bad_instruction
bad_instruction:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
mov %l2, %o2
call C_LABEL(do_illegal_instruction)
mov %l0, %o3
+
RESTORE_ALL
.align 4
.globl priv_instruction
priv_instruction:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
.globl mna_handler
mna_handler:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl fpd_trap_handler
fpd_trap_handler:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
fpe_trap_handler:
set fpsave_magic, %l5
cmp %l1, %l5
- bne 1f
- sethi %hi(fpsave_catch), %l5
+ be 1f
+ sethi %hi(C_LABEL(fpsave)), %l5
+ or %l5, %lo(C_LABEL(fpsave)), %l5
+ cmp %l1, %l5
+ bne 2f
+ sethi %hi(fpsave_catch2), %l5
+ or %l5, %lo(fpsave_catch2), %l5
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
+ jmp %l5
+ rett %l5 + 4
+1:
+ sethi %hi(fpsave_catch), %l5
or %l5, %lo(fpsave_catch), %l5
wr %l0, 0x0, %psr
WRITE_PAUSE
jmp %l5
rett %l5 + 4
-1:
+2:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_tag_overflow
do_tag_overflow:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_watchpoint
do_watchpoint:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_reg_access
do_reg_access:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_cp_disabled
do_cp_disabled:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_bad_flush
do_bad_flush:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_cp_exception
do_cp_exception:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_hw_divzero
do_hw_divzero:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
.globl do_flush_windows
do_flush_windows:
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
FLUSH_ALL_KERNEL_WINDOWS
+
+ /* Advance over the trap instruction. */
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1
+ add %l1, 0x4, %l2
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
RESTORE_ALL
/* The getcc software trap. The user wants the condition codes from
.globl linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
SAVE_ALL
+ ENTER_SYSCALL
/* Ugh, we need to clear the IRQ line. This is now
- * a very sun4c specific trap handler...
+ * a very sun4c specific trap handler...
*/
sethi %hi(C_LABEL(interrupt_enable)), %l5
ld [%l5 + %lo(C_LABEL(interrupt_enable))], %l5
RESTORE_ALL
-#if 0 /* WIP */
- /* Inter-Processor Interrupts on the Sun4m. */
+#ifdef __SMP__
+
.align 4
- .globl sun4m_ipi
-sun4m_ipi:
- SAVE_ALL_IPI4M
+ .globl linux_trap_ipi9_sun4m
+linux_trap_ipi9_sun4m:
+ sethi %hi(0x02000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
- set MAILBOX_ADDRESS, %l4
- ldub [%l4], %l5
- subcc %l5, MBOX_STOPCPU, %g0
- bne,a 1f
- subcc %l5, MBOX_STOPCPU2, %g0
+ ld [%o5], %g0
+ WRITE_PAUSE
- call C_LABEL(prom_stopcpu)
- mov 0, %o0
- ba,a 2f
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
-1:
- bne,a 1f
- subcc %l5, MBOX_IDLECPU, %g0
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
- call C_LABEL(prom_stopcpu)
- mov 0, %o0
- ba,a 2f
+ call C_LABEL(smp_message_irq)
+ nop
-1:
- bne,a 1f
- subcc %l5, MBOX_IDLECPU2, %g0
+ RESTORE_ALL_FASTIRQ
- call C_LABEL(prom_idlecpu)
- mov 0, %o0
- ba,a 2f
+ .align 4
+ .globl linux_trap_ipi13_sun4m
+linux_trap_ipi13_sun4m:
+ /* NOTE: real_irq_entry saved state and grabbed klock already. */
-1:
- bne,a 2f
+ /* start atomic operation with respect to software interrupts */
+ sethi %hi(C_LABEL(intr_count)), %l4
+ ld [%l4 + %lo(C_LABEL(intr_count))], %l5
+ add %l5, 0x1, %l5
+ st %l5, [%l4 + %lo(C_LABEL(intr_count))]
+
+ sethi %hi(0x20000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
+
+ ld [%o5], %g0
+ WRITE_PAUSE
+
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(smp_reschedule_irq)
nop
- call C_LABEL(prom_idlecpu)
- mov 0, %o0
- ba,a 2f
+ sethi %hi(C_LABEL(intr_count)), %l4
+ ld [%l4 + %lo(C_LABEL(intr_count))], %l5
+ sub %l5, 0x1, %l5
+ st %l5, [%l4 + %lo(C_LABEL(intr_count))]
-2:
- call C_LABEL(smp_callin)
+ LEAVE_IRQ
+ RESTORE_ALL
+
+ .align 4
+ .globl linux_trap_ipi15_sun4m
+linux_trap_ipi15_sun4m:
+ SAVE_ALL
+
+ /* First check for hard NMI memory error. */
+ sethi %hi(0xf0000000), %o2
+ set C_LABEL(sun4m_interrupts), %l5
+ set 0x4000, %o3
+ ld [%l5], %l5
+ add %l5, %o3, %l5
+ ld [%l5], %l6
+ andcc %o2, %l6, %o2
+ be 1f
+ nop
+
+ /* Asynchronous fault, why you little ?!#&%@... */
+ sethi %hi(0x80000000), %o2
+ st %o2, [%l5 + 0xc]
+ WRITE_PAUSE
+ ld [%l5], %g0
+ WRITE_PAUSE
+
+ /* All interrupts are off... now safe to enable traps
+ * and call C-code.
+ */
+ or %l0, PSR_PIL, %l4 ! I am very paranoid...
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+ call C_LABEL(sun4m_nmi)
+ nop
+
+ sethi %hi(0x80000000), %o2
+ st %o2, [%l5 + 0x8]
+ WRITE_PAUSE
+ ld [%l5], %g0
+ WRITE_PAUSE
+
+ RESTORE_ALL_FASTIRQ
+
+1:
+ sethi %hi(0x80000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
+
+ ld [%o5], %g0
+ WRITE_PAUSE
+
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(smp_message_irq)
+ nop
+
+ RESTORE_ALL_FASTIRQ
- RESTORE_ALL_IPI4M
#endif
.align 4
.globl sun4c_fault
sun4c_fault:
SAVE_ALL
+ ENTER_SYSCALL
/* XXX This needs to be scheduled better */
sethi %hi(AC_SYNC_ERR), %l4
.align 4
.globl C_LABEL(srmmu_fault)
C_LABEL(srmmu_fault):
- /* Slot 1 */
mov 0x400, %l5
mov 0x300, %l4
- /* Slot 2 */
lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
- /* Slot 3 */
andn %l6, 0xfff, %l6
srl %l5, 6, %l5 ! and encode all info into l7
- /* Slot 4 */
and %l5, 2, %l5
or %l5, %l6, %l6
- /* Slot 5 */
or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
SAVE_ALL
+ ENTER_SYSCALL
mov %l7, %o1
mov %l7, %o2
ld [%sp + REGWIN_SZ + PT_I1], %o0
ld [%sp + REGWIN_SZ + PT_I2], %o1
ld [%sp + REGWIN_SZ + PT_I3], %o2
+ mov %o7, %l5
ld [%sp + REGWIN_SZ + PT_I4], %o3
call %l6
ld [%sp + REGWIN_SZ + PT_I5], %o4
- b scall_store_args /* so stupid... */
+ jmp %l5 + 0x8 /* so stupid... */
nop
#if 0 /* work in progress */
.align 4
.globl C_LABEL(sys_execve)
C_LABEL(sys_execve):
+ mov %o7, %l5
call C_LABEL(sparc_execve)
add %sp, REGWIN_SZ, %o0 ! pt_regs *regs arg
- b scall_store_args
+ jmp %l5 + 0x8
nop
.align 4
.globl C_LABEL(sys_pipe)
C_LABEL(sys_pipe):
+ mov %o7, %l5
+
call C_LABEL(sparc_pipe)
add %sp, REGWIN_SZ, %o0 ! pt_regs *regs arg
- b C_LABEL(ret_sys_call)
+ jmp %l5 + 0x8
nop
.align 4
add %sp, REGWIN_SZ, %o1
/* We are returning to a signal handler. */
-
RESTORE_ALL
.align 4
.globl C_LABEL(sys_sigsuspend)
C_LABEL(sys_sigsuspend):
- ld [%sp + REGWIN_SZ + PT_I0], %o0
call C_LABEL(do_sigsuspend)
- add %sp, REGWIN_SZ, %o1
+ add %sp, REGWIN_SZ, %o0
/* We are returning to a signal handler. */
-
RESTORE_ALL
.align 4
.globl C_LABEL(sys_fork), C_LABEL(sys_vfork)
C_LABEL(sys_vfork):
C_LABEL(sys_fork):
+ mov %o7, %l5
+
/* Save the kernel state as of now. */
FLUSH_ALL_KERNEL_WINDOWS;
STORE_WINDOW(sp)
call C_LABEL(do_fork)
add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
- b scall_store_args
+ jmp %l5 + 0x8
nop
/* Whee, kernel threads! */
.globl C_LABEL(sys_clone)
C_LABEL(sys_clone):
+ mov %o7, %l5
+
/* Save the kernel state as of now. */
FLUSH_ALL_KERNEL_WINDOWS;
STORE_WINDOW(sp)
call C_LABEL(do_fork)
add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
- b scall_store_args
+ jmp %l5 + 0x8
nop
- /* All system calls enter here... */
+ /* Linux native and SunOS system calls enter here... */
.align 4
.globl linux_sparc_syscall
linux_sparc_syscall:
blu,a 1f
sll %g1, 2, %l4
+ set C_LABEL(sys_ni_syscall), %l7
b syscall_is_too_hard
- set C_LABEL(sys_ni_syscall), %l7
+ nop
1:
ld [%l7 + %l4], %l7
syscall_is_too_hard:
rd %wim, %l3
SAVE_ALL
+ ENTER_SYSCALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
call %l7
ldd [%sp + REGWIN_SZ + PT_I4], %o4
-scall_store_args:
st %o0, [%sp + REGWIN_SZ + PT_I0]
.globl C_LABEL(ret_sys_call)
RESTORE_ALL
+ /* Solaris system calls enter here... */
+ .align 4
+ .globl solaris_syscall
+solaris_syscall:
+ /* While we are here trying to optimize our lives
+ * away, handle the easy bogus cases like a
+ * ni_syscall or sysnum > NR_SYSCALLS etc.
+ * In the cases where we cannot optimize the
+ * call inline we don't really lose anything
+ * performance wise because we are doing here
+ * things which we did anyway in the original
+ * routine. The only added complexity is a
+ * bit test, compare, and branch to decide
+ * if we need to save process state or not.
+ */
+
+	/* XXX TODO: When we have ptrace working,
+	 * XXX test for PF_TRACESYS in task flags.
+ */
+
+	/* Direct access to user regs, much faster. */
+ cmp %g1, NR_SYSCALLS
+ blu,a 1f
+ sll %g1, 2, %l4
+
+ set C_LABEL(sys_ni_syscall), %l7
+ b solaris_is_too_hard
+ nop
+
+1:
+ ld [%l7 + %l4], %l7
+
+ /* If bit-1 is set, this is a "fast" syscall.
+ * This is the _complete_ overhead of this optimization,
+ * and we save ourselves a load, so it evens out to nothing.
+ */
+ andcc %l7, 0x1, %g0
+ be solaris_is_too_hard
+ andn %l7, 0x1, %l7
+
+ jmpl %l7, %g0
+ nop
+
+ .globl solaris_is_too_hard
+solaris_is_too_hard:
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+2:
+ ldd [%sp + REGWIN_SZ + PT_I0], %o0
+ st %o0, [%sp + REGWIN_SZ + PT_G0] ! for restarting syscalls
+ ldd [%sp + REGWIN_SZ + PT_I2], %o2
+ call %l7
+ ldd [%sp + REGWIN_SZ + PT_I4], %o4
+
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ set PSR_C, %l6
+ cmp %o0, -ENOIOCTLCMD
+ bgeu 1f
+ ld [%sp + REGWIN_SZ + PT_PSR], %l5
+
+ /* System call success, clear Carry condition code. */
+ andn %l5, %l6, %l5
+ b 2f
+ st %l5, [%sp + REGWIN_SZ + PT_PSR]
+
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+ sethi %hi(C_LABEL(solaris_xlatb_rorl)), %o3
+ or %o3, %lo(C_LABEL(solaris_xlatb_rorl)), %o3
+ sll %o0, 2, %o0
+ ld [%o3 + %o0], %o0
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ or %l5, %l6, %l5
+ st %l5, [%sp + REGWIN_SZ + PT_PSR]
+
+ /* Advance the pc and npc over the trap instruction. */
+2:
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+ RESTORE_ALL
+
+	/* {net, open}bsd system calls enter here... */
+	.align 4
+	.globl bsd_syscall
+bsd_syscall:
+	/* While we are here trying to optimize our lives
+	 * away, handle the easy bogus cases like a
+	 * ni_syscall or sysnum > NR_SYSCALLS etc.
+	 * In the cases where we cannot optimize the
+	 * call inline we don't really lose anything
+	 * performance wise because we are doing here
+	 * things which we did anyway in the original
+	 * routine. The only added complexity is a
+	 * bit test, compare, and branch to decide
+	 * if we need to save process state or not.
+	 */
+
+	/* XXX TODO: When we have ptrace working,
+	 * XXX test for PF_TRACESYS in task flags.
+	 */
+
+	/* Direct access to user regs, much faster. */
+	cmp	%g1, NR_SYSCALLS
+	blu,a	1f
+	 sll	%g1, 2, %l4
+
+	set	C_LABEL(sys_ni_syscall), %l7
+	b	bsd_is_too_hard
+	 nop
+
+1:
+	ld	[%l7 + %l4], %l7
+
+	/* If bit-1 is set, this is a "fast" syscall.
+	 * This is the _complete_ overhead of this optimization,
+	 * and we save ourselves a load, so it evens out to nothing.
+	 */
+	andcc	%l7, 0x1, %g0
+	be	bsd_is_too_hard
+	 andn	%l7, 0x1, %l7
+
+	jmpl	%l7, %g0
+	 nop
+
+	.globl	bsd_is_too_hard
+bsd_is_too_hard:
+	rd	%wim, %l3
+	SAVE_ALL
+	ENTER_SYSCALL
+
+	wr	%l0, PSR_ET, %psr
+	WRITE_PAUSE
+
+2:
+	ldd	[%sp + REGWIN_SZ + PT_I0], %o0
+	st	%o0, [%sp + REGWIN_SZ + PT_G0]	! for restarting syscalls
+	ldd	[%sp + REGWIN_SZ + PT_I2], %o2
+	call	%l7
+	 ldd	[%sp + REGWIN_SZ + PT_I4], %o4
+
+	st	%o0, [%sp + REGWIN_SZ + PT_I0]
+	set	PSR_C, %l6
+	cmp	%o0, -ENOIOCTLCMD
+	bgeu	1f
+	 ld	[%sp + REGWIN_SZ + PT_PSR], %l5
+
+	/* System call success, clear Carry condition code. */
+	andn	%l5, %l6, %l5
+	b	2f
+	 st	%l5, [%sp + REGWIN_SZ + PT_PSR]
+
+1:
+	/* System call failure, set Carry condition code.
+	 * Also, get abs(errno) to return to the process.
+	 */
+	sub	%g0, %o0, %o0
+#if 0 /* XXX todo XXX */
+	sethi	%hi(C_LABEL(bsd_xlatb_rorl)), %o3
+	or	%o3, %lo(C_LABEL(bsd_xlatb_rorl)), %o3
+	sll	%o0, 2, %o0
+	ld	[%o3 + %o0], %o0
+#endif
+	st	%o0, [%sp + REGWIN_SZ + PT_I0]
+	or	%l5, %l6, %l5
+	st	%l5, [%sp + REGWIN_SZ + PT_PSR]
+
+	/* Advance the pc and npc over the trap instruction. */
+2:
+	ld	[%sp + REGWIN_SZ + PT_NPC], %l1	/* pc  = npc   */
+	add	%l1, 0x4, %l2			/* npc = npc+4 */
+	st	%l1, [%sp + REGWIN_SZ + PT_PC]
+	st	%l2, [%sp + REGWIN_SZ + PT_NPC]
+
+	RESTORE_ALL
+
+
/* Saving and restoring the FPU state is best done from lowlevel code.
*
* void fpsave(unsigned long *fpregs, unsigned long *fsr,
.globl C_LABEL(fpsave)
C_LABEL(fpsave):
- st %fsr, [%o1]
+ st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
ld [%o1], %g1
set 0x2000, %g4
andcc %g1, %g4, %g0
b fpsave_magic + 4
st %fsr, [%o1]
+fpsave_catch2:
+ b C_LABEL(fpsave) + 4
+ st %fsr, [%o1]
+
/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
.globl C_LABEL(fpload)
sethi %hi(0x10c6), %o1
call .umul
or %o1, %lo(0x10c6), %o1
+#ifndef __SMP__
sethi %hi(C_LABEL(loops_per_sec)), %o3
call .umul
ld [%o3 + %lo(C_LABEL(loops_per_sec))], %o1
+#else
+ GET_PROCESSOR_OFFSET(o4)
+ set C_LABEL(cpu_data), %o3
+ call .umul
+ ld [%o3 + %o4], %o1
+#endif
cmp %o1, 0x0
1:
-/* $Id: etrap.S,v 1.16 1996/02/20 07:45:01 davem Exp $
+/* $Id: etrap.S,v 1.17 1996/03/07 06:26:43 davem Exp $
* etrap.S: Sparc trap window preparation for entry into the
* Linux kernel.
*
* T == Window entered when trap occurred
* S == Window we will need to save if (1<<T) == %wim
*
- * Before execution gets here, it must be guaranteed that
+ * Before execution gets here, it must be guarenteed that
* %l0 contains trap time %psr, %l1 and %l2 contain the
* trap pc and npc, and %l3 contains the trap time %wim.
*/
mov %t_kstack, %sp ! and onto kernel stack
trap_setup_user_spill:
- /* A spill occurred from either kernel or user mode
+ /* A spill occured from either kernel or user mode
* and there exist some user windows to deal with.
* A mask of the currently valid user windows
* is in %g1 upon entry to here.
wr %g2, 0x0, %wim
WRITE_PAUSE
- /* Call MMU-architecture dependent stack checking
+ /* Call MMU-architecture dependant stack checking
* routine.
*/
.globl C_LABEL(tsetup_mmu_patchme)
-/* $Id: head.S,v 1.47 1996/02/15 09:11:57 davem Exp $
+/* $Id: head.S,v 1.56 1996/04/04 16:30:22 tridge Exp $
* head.S: The initial boot code for the Sparc port of Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/version.h>
+#include <linux/config.h>
#include <asm/cprefix.h>
#include <asm/head.h>
_stext:
start:
C_LABEL(trapbase):
+#ifdef __SMP__
+C_LABEL(trapbase_cpu0):
+#endif
/* We get control passed to us here at t_zero. */
t_zero: b gokernel; nop; nop; nop;
t_tflt: SPARC_TFAULT /* Inst. Access Exception */
t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
+#ifndef __SMP__
t_nmi: NMI_TRAP /* Level 15 (NMI) */
+#else
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+#endif
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
t_uflsh:TRAP_ENTRY(0x25, do_bad_flush) /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
-t_dacce:BAD_TRAP(0x29) /* Data Access Error */
+t_dacce:SPARC_DFAULT /* Data Access Error */
t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
.globl C_LABEL(end_traptable)
C_LABEL(end_traptable):
+#ifdef __SMP__
+ /* Trap tables for the other cpus. */
+ .globl C_LABEL(trapbase_cpu1), C_LABEL(trapbase_cpu2), C_LABEL(trapbase_cpu3)
+C_LABEL(trapbase_cpu1):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP BAD_TRAP(0x81) BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+C_LABEL(trapbase_cpu2):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP BAD_TRAP(0x81) BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+C_LABEL(trapbase_cpu3):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP BAD_TRAP(0x81) BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+ .globl C_LABEL(cpu0_stack), C_LABEL(cpu1_stack), C_LABEL(cpu2_stack)
+ .globl C_LABEL(cpu3_stack)
+C_LABEL(cpu0_stack): .skip 0x2000
+C_LABEL(cpu1_stack): .skip 0x2000
+C_LABEL(cpu2_stack): .skip 0x2000
+C_LABEL(cpu3_stack): .skip 0x2000
+#endif
.skip 4096
/* This was the only reasonable way I could think of to properly align
.globl C_LABEL(empty_bad_page_table)
.globl C_LABEL(empty_zero_page)
.globl C_LABEL(swapper_pg_dir)
-C_LABEL(bootup_user_stack): .skip 0x1000
-C_LABEL(bootup_kernel_stack): .skip 0x1000
+C_LABEL(bootup_user_stack): .skip 0x2000
+C_LABEL(bootup_kernel_stack): .skip 0x2000
C_LABEL(swapper_pg_dir): .skip 0x1000
C_LABEL(pg0): .skip 0x1000
C_LABEL(empty_bad_page): .skip 0x1000
* label, then see what %o7 has.
*/
- /* XXX Sparc V9 detection goes here XXX */
-
mov %o7, %g4 ! Save %o7
/* Jump to it, and pray... */
/* Copy over the Prom's level 14 clock handler. */
copy_prom_lvl14:
+#if 1
+ /* DJHR
+ * preserve our linked/calculated instructions
+ */
+ set C_LABEL(lvl14_save), %g1
+ set t_irq14, %g3
+ sub %g1, %l6, %g1 ! translate to physical
+ sub %g3, %l6, %g3 ! translate to physical
+ ldd [%g3], %g4
+ std %g4, [%g1]
+ ldd [%g3+8], %g4
+ std %g4, [%g1+8]
+#endif
rd %tbr, %g1
andn %g1, 0xfff, %g1 ! proms trap table base
or %g0, (0x1e<<4), %g2 ! offset to lvl14 intr
/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
* MMU so we can remap ourselves properly. DONT TOUCH %l0 thru %l5 in these
* remapping routines, we need their values afterwards!
- *
- * XXX UGH, need to write some sun4u SpitFire remapping V9 code RSN... XXX
*/
/* Now check whether we are already mapped, if we
* are we can skip all this garbage coming up.
/* Calculate to KERNBASE entry.
*
- * XXX Should not use empirical constant, but Gas gets an XXX
+ * XXX Should not use imperical constant, but Gas gets an XXX
* XXX upset stomach with the bitshift I would have to use XXX
*/
add %o1, 0x3c0, %o3
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, 0x3c0, %g3 ! XXX AWAY WITH EMPIRICALS
+ add %g1, 0x3c0, %g3 ! XXX AWAY WITH IMPERICALS
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
* I figure out and store nwindows and nwindowsm1 later on.
*/
execute_in_high_mem:
+#if CONFIG_AP1000
+ /* we don't have a prom :-( */
+ b sun4m_init
+ nop
+#endif
mov %l0, %o0 ! put back romvec
mov %l1, %o1 ! and debug_vec
* be sitting in the fault status/address registers. Read them all to
* clear them so we don't get magic faults later on.
*/
-/* This sucks, apparently this makes Vikings call prom panic, will fix later */
+/* This sucks, aparently this makes Vikings call prom panic, will fix later */
rd %psr, %o1
srl %o1, 28, %o1 ! Get a type of the CPU
set C_LABEL(current_set), %g2
st %g4, [%g2]
- /* So now this should work. */
- LOAD_CURRENT(g2, g4)
- set C_LABEL(bootup_kernel_stack), %g4
- st %g4, [%g2 + TASK_KSTACK_PG]
- st %g0, [%g2 + THREAD_UMASK]
+ set C_LABEL(bootup_kernel_stack), %g3
+ st %g3, [%g4 + TASK_KSTACK_PG]
+ st %g0, [%g4 + THREAD_UMASK]
/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
* in the V8 manual. Ok, this method seems to work, Sparc is cool...
PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
+#ifdef __SMP__
+
+ /* Patch for returning from an ipi... */
+ PATCH_INSN(rirq_7win_patch1, rirq_patch1)
+ PATCH_INSN(rirq_7win_patch2, rirq_patch2)
+ PATCH_INSN(rirq_7win_patch3, rirq_patch3)
+ PATCH_INSN(rirq_7win_patch4, rirq_patch4)
+ PATCH_INSN(rirq_7win_patch5, rirq_patch5)
+
+#endif
+
2:
sethi %hi( C_LABEL(nwindows) ), %g4
st %g3, [%g4 + %lo( C_LABEL(nwindows) )] ! store final value
/* Finally, turn on traps so that we can call c-code. */
rd %psr, %g3
wr %g3, 0x0, %psr
+ WRITE_PAUSE
+
wr %g3, PSR_ET, %psr
WRITE_PAUSE
.word 0
.word 0
- .align 4
+ .align 8
+
+ .globl C_LABEL(lvl14_save)
+C_LABEL(lvl14_save):
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word t_irq14
+
/* Here is the master table of Sun machines which use some implementation
* of the Sparc CPU and have a meaningful IDPROM machtype value that we
- * know about. See asm-sparc/machines.h for empirical constants.
+ * know about. See asm-sparc/machines.h for imperical constants.
*/
struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun4's */
-/* $Id: ioport.c,v 1.14 1996/01/03 03:34:41 davem Exp $
+/* $Id: ioport.c,v 1.17 1996/03/23 02:39:13 davem Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
static long dvma_next_free = DVMA_VADDR;
/*
- * sparc_alloc_dev:
+ * sparc_alloc_io:
* Map and allocates an obio device.
* Implements a simple linear allocator, you can force the function
* to use your own mapping, but in practice this should not be used.
len += offset;
if(((unsigned long) virtual + len) > (IOBASE_VADDR + IOBASE_LEN)) {
- prom_printf("alloc_io: Mapping outside IOBASE area\n");
+ prom_printf("alloc_io: Mapping ouside IOBASE area\n");
prom_halt();
}
if(check_region ((vaddr | offset), len)) {
return (void *) (base_address | offset);
}
-/* Does DVMA allocations with PAGE_SIZE granularity. How this basically
+/* Does DVMA allocations with PAGE_SIZE granulatity. How this basically
* works is that the ESP chip can do DVMA transfers at ANY address with
- * certain size and boundary restrictions. But other devices that are
+ * certain size and boundry restrictions. But other devices that are
* attached to it and would like to do DVMA have to set things up in
- * a special way, if the DVMA sees a device attached to it transfer data
+ * a special way, if the DVMA see's a device attached to it transfer data
* at addresses above DVMA_VADDR it will grab them, this way it does not
* now have to know the peculiarities of where to read the Lance data
* from. (for example)
-/* $Id: irq.c,v 1.34 1996/02/20 07:45:04 davem Exp $
+/* $Id: irq.c,v 1.43 1996/04/17 12:37:45 zaitcev Exp $
* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Sparc the IRQ's are basically 'cast in stone'
* and you are supposed to probe the prom's device
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
- * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@jamica.lab.ipmce.su)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
+#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
+#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/irq.h>
#include <asm/io.h>
-/* Pointer to the interrupt enable byte */
-unsigned char *interrupt_enable = 0;
-struct sun4m_intregs *sun4m_interrupts;
+/*
+ * Dave Redman (djhr@tadpole.co.uk)
+ *
+ * IRQ numbers.. These are no longer restricted to 15..
+ *
+ * this is done to enable SBUS cards and onboard IO to be masked
+ * correctly. using the interrupt level isn't good enough.
+ *
+ * For example:
+ * A device interrupting at sbus level6 and the Floppy both come in
+ * at IRQ11, but enabling and disabling them requires writing to
+ * different bits in the SLAVIO/SEC.
+ *
+ * As a result of these changes sun4m machines could now support
+ * directed CPU interrupts using the existing enable/disable irq code
+ * with tweaks.
+ *
+ */
-void sun4c_disable_irq(unsigned int irq_nr)
+static void irq_panic(void)
{
- unsigned long flags;
- unsigned char current_mask, new_mask;
-
- if(sparc_cpu_model != sun4c)
- return;
- save_flags(flags); cli();
- current_mask = *interrupt_enable;
- switch(irq_nr) {
- case 1:
- new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
- break;
- case 8:
- new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
- break;
- case 10:
- new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
- break;
- case 14:
- new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
- break;
- default:
- restore_flags(flags);
- return;
- }
- *interrupt_enable = new_mask;
- restore_flags(flags);
+ extern char *cputypval;
+ prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
+ prom_halt();
}
-void sun4m_disable_irq(unsigned int irq_nr)
-{
-#if 0
- printk("IRQ routines not yet written for the sun4m\n");
- panic("disable_irq: Unsupported arch.");
+void (*enable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
+void (*disable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
+void (*clear_clock_irq)( void ) = irq_panic;
+void (*clear_profile_irq)( void ) = irq_panic;
+void (*load_profile_irq)( unsigned int ) = (void (*)(unsigned int)) irq_panic;
+void (*init_timers)( void (*)(int, void *,struct pt_regs *)) =
+ (void (*)( void (*)(int, void *,struct pt_regs *))) irq_panic;
+
+#ifdef __SMP__
+void (*set_cpu_int)(int, int);
+void (*clear_cpu_int)(int, int);
+void (*set_irq_udt)(int);
#endif
-}
-
-void disable_irq(unsigned int irq_nr)
-{
- switch(sparc_cpu_model) {
- case sun4c:
- sun4c_disable_irq(irq_nr);
- break;
- case sun4m:
- sun4m_disable_irq(irq_nr);
- break;
- default:
- panic("disable_irq: Unsupported arch.");
- }
-}
-
-void sun4c_enable_irq(unsigned int irq_nr)
-{
- unsigned long flags;
- unsigned char current_mask, new_mask;
-
- if(sparc_cpu_model != sun4c)
- return;
- save_flags(flags); cli();
- current_mask = *interrupt_enable;
- switch(irq_nr) {
- case 1:
- new_mask = ((current_mask) | SUN4C_INT_E1);
- break;
- case 8:
- new_mask = ((current_mask) | SUN4C_INT_E8);
- break;
- case 10:
- new_mask = ((current_mask) | SUN4C_INT_E10);
- break;
- case 14:
- new_mask = ((current_mask) | SUN4C_INT_E14);
- break;
- default:
- restore_flags(flags);
- return;
- }
- *interrupt_enable = new_mask;
- restore_flags(flags);
-}
-
-void sun4m_enable_irq(unsigned int irq_nr)
-{
-#if 0
- printk("IRQ routines not written for the sun4m yet.\n");
- panic("IRQ unsupported arch.");
-#endif
-}
-
-void enable_irq(unsigned int irq_nr)
-{
- switch(sparc_cpu_model) {
- case sun4c:
- sun4c_enable_irq(irq_nr);
- break;
- case sun4m:
- sun4m_enable_irq(irq_nr);
- break;
- default:
- panic("IRQ unsupported arch.");
- }
-}
/*
- * Initial irq handlers.
+ * Dave Redman (djhr@tadpole.co.uk)
+ *
+ * There used to be extern calls and hard coded values here.. very sucky!
+ * instead, because some of the devices attach very early, I do something
+ * equally sucky but at least we'll never try to free statically allocated
+ * space or call kmalloc before kmalloc_init :(.
+ *
+ * In fact it's the timer10 that attaches first.. then timer14
+ * then kmalloc_init is called.. then the tty interrupts attach.
+ * hmmm....
+ *
*/
-extern void timer_interrupt(int, void *, struct pt_regs *);
-extern void rs_interrupt(int, void *, struct pt_regs *);
-
-static struct irqaction timer_irq = {
- timer_interrupt,
- SA_INTERRUPT,
- 0, "timer",
- NULL, NULL
-};
-
-static struct irqaction serial_irq = {
- rs_interrupt,
- SA_INTERRUPT,
- 0, "zilog serial",
- NULL, NULL
-};
+#define MAX_STATIC_ALLOC 4
+static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
+static int static_irq_count = 0;
-static struct irqaction *irq_action[16] = {
+static struct irqaction *irq_action[NR_IRQS+1] = {
NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
- NULL, NULL, &timer_irq, NULL, &serial_irq, NULL , NULL, NULL
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};
-
int get_irq_list(char *buf)
{
int i, len = 0;
struct irqaction * action;
- for (i = 0 ; i < 16 ; i++) {
+ for (i = 0 ; i < (NR_IRQS+1) ; i++) {
action = *(i + irq_action);
if (!action)
continue;
void free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action = *(irq + irq_action);
+ struct irqaction * action;
struct irqaction * tmp = NULL;
unsigned long flags;
-
- if (irq > 14) { /* 14 irq levels on the sparc */
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+ if (cpu_irq > 14) { /* 14 irq levels on the sparc */
printk("Trying to free bogus IRQ %d\n", irq);
return;
}
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
return;
}
+ if (action->flags & SA_STATIC_ALLOC)
+ {
+ /* This interrupt is marked as specially allocated
+ * so it is a bad idea to free it.
+ */
+ printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ irq, action->name);
+ return;
+ }
+
save_flags(flags); cli();
if (action && tmp)
tmp->next = action->next;
else
- *(irq + irq_action) = action->next;
+ *(cpu_irq + irq_action) = action->next;
kfree_s(action, sizeof(struct irqaction));
- if (!(*(irq + irq_action)))
+ if (!(*(cpu_irq + irq_action)))
disable_irq(irq);
restore_flags(flags);
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
int i;
- struct irqaction * action = *(irq + irq_action);
+ struct irqaction * action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
printk("IO device interrupt, irq = %d\n", irq);
printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
void handler_irq(int irq, struct pt_regs * regs)
{
- struct irqaction * action = *(irq + irq_action);
-
- kstat.interrupts[irq]++;
+ struct irqaction * action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+ kstat.interrupts[cpu_irq]++;
+#if 0
+ printk("I<%d,%d,%d>", smp_processor_id(), irq, smp_proc_in_lock[smp_processor_id()]);
+#endif
while (action) {
if (!action->handler)
unexpected_irq(irq, action->dev_id, regs);
*/
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
- struct irqaction * action = *(irq + irq_action);
-
- kstat.interrupts[irq]++;
+ struct irqaction * action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+ kstat.interrupts[cpu_irq]++;
while (action) {
action->handler(irq, action->dev_id, regs);
action = action->next;
*/
asmlinkage void do_fast_IRQ(int irq)
{
- kstat.interrupts[irq]++;
+ kstat.interrupts[irq&NR_IRQS]++;
printk("Got FAST_IRQ number %04lx\n", (long unsigned int) irq);
return;
}
/* Fast IRQ's on the Sparc can only have one routine attached to them,
* thus no sharing possible.
*/
-int request_fast_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+int request_fast_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char *devname)
{
struct irqaction *action;
unsigned long flags;
-
- if(irq > 14)
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ if(cpu_irq > 14)
return -EINVAL;
if(!handler)
return -EINVAL;
- action = *(irq + irq_action);
+ action = *(cpu_irq + irq_action);
if(action) {
if(action->flags & SA_SHIRQ)
panic("Trying to register fast irq when already shared.\n");
save_flags(flags); cli();
- action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
-
+ /* If this is flagged as statically allocated then we use our
+ * private struct which is never freed.
+ */
+ if (irqflags & SA_STATIC_ALLOC)
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
+
+ if (action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+
if (!action) {
restore_flags(flags);
return -ENOMEM;
}
/* Dork with trap table if we get this far. */
- sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_one =
+ sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one =
SPARC_BRANCH((unsigned long) handler,
(unsigned long) &sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_one);
- sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_two = SPARC_RD_PSR_L0;
- sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_three = SPARC_NOP;
- sparc_ttable[SP_TRAP_IRQ1+(irq-1)].inst_four = SPARC_NOP;
+ sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = SPARC_RD_PSR_L0;
+ sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_NOP;
+ sparc_ttable[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
action->handler = handler;
action->flags = irqflags;
action->name = devname;
action->dev_id = NULL;
- *(irq + irq_action) = action;
+ *(cpu_irq + irq_action) = action;
+ enable_irq(irq);
restore_flags(flags);
return 0;
}
-int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
struct irqaction * action, *tmp = NULL;
unsigned long flags;
-
- if(irq > 14)
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ if(cpu_irq > 14)
return -EINVAL;
if (!handler)
return -EINVAL;
- action = *(irq + irq_action);
+ action = *(cpu_irq + irq_action);
if (action) {
if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
for (tmp = action; tmp->next; tmp = tmp->next);
}
save_flags(flags); cli();
- action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ /* If this is flagged as statically allocated then we use our
+ * private struct which is never freed.
+ */
+ if (irqflags & SA_STATIC_ALLOC)
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
+
+ if (action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+
if (!action) {
restore_flags(flags);
return -ENOMEM;
if (tmp)
tmp->next = action;
else
- *(irq + irq_action) = action;
+ *(cpu_irq + irq_action) = action;
enable_irq(irq);
restore_flags(flags);
return 0;
}
-void sun4c_init_IRQ(void)
-{
- struct linux_prom_registers int_regs[2];
- int ie_node;
-
- ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
- "interrupt-enable");
- if(ie_node == 0)
- panic("Cannot find /interrupt-enable node");
- /* Depending on the "address" property is bad news... */
- prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs));
- interrupt_enable = (char *) sparc_alloc_io(int_regs[0].phys_addr, 0,
- int_regs[0].reg_size,
- "sun4c_interrupts",
- int_regs[0].which_io, 0x0);
- *interrupt_enable = (SUN4C_INT_ENABLE);
- sti();
-}
-
-void sun4m_init_IRQ(void)
-{
- int ie_node;
-
- struct linux_prom_registers int_regs[PROMREG_MAX];
- int num_regs;
-
- cli();
- if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
- (ie_node = prom_getchild (ie_node)) == 0 ||
- (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0)
- panic("Cannot find /obio/interrupt node\n");
- num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
- sizeof(int_regs));
- num_regs = (num_regs/sizeof(struct linux_prom_registers));
-
- /* Apply the obio ranges to these registers. */
- prom_apply_obio_ranges(int_regs, num_regs);
-
- /* Map the interrupt registers for all possible cpus. */
- sun4m_interrupts = sparc_alloc_io(int_regs[0].phys_addr, 0,
- PAGE_SIZE*NCPUS, "interrupts_percpu",
- int_regs[0].which_io, 0x0);
-
- /* Map the system interrupt control registers. */
- sparc_alloc_io(int_regs[num_regs-1].phys_addr, 0,
- int_regs[num_regs-1].reg_size, "interrupts_system",
- int_regs[num_regs-1].which_io, 0x0);
- sti();
-}
+/* djhr
+ * This could probably be made indirect too and assigned in the CPU
+ * bits of the code. That would be much nicer I think and would also
+ * fit in with the idea of being able to tune your kernel for your machine
+ * by removing unrequired machine and device support.
+ *
+ */
void init_IRQ(void)
{
+ extern void sun4c_init_IRQ( void );
+ extern void sun4m_init_IRQ( void );
+#if CONFIG_AP1000
+ extern void ap_init_IRQ(void);
+ ap_init_IRQ();
+ return;
+#endif
+
switch(sparc_cpu_model) {
case sun4c:
sun4c_init_IRQ();
break;
+
case sun4m:
sun4m_init_IRQ();
break;
+
default:
prom_printf("Cannot initialize IRQ's on this Sun machine...");
break;
+++ /dev/null
-/* $Id: mp.S,v 1.2 1995/11/25 00:58:11 davem Exp $
- * mp.S: Multiprocessor low-level routines on the Sparc.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/cprefix.h>
-#include <asm/head.h>
-#include <asm/psr.h>
-#include <asm/asi.h>
-#include <asm/vaddrs.h>
-#include <asm/contregs.h>
-
-
- .text
- .align 4
-
-/* When we start up a cpu for the first time it enters this routine.
- * This initializes the chip from whatever state the prom left it
- * in and sets PIL in %psr to 15, no irqs.
- */
-
- .globl C_LABEL(sparc_cpu_startup)
-C_LABEL(sparc_cpu_startup):
- /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
- set (PSR_PIL | PSR_S | PSR_PS), %g1
- wr %g1, 0x0, %psr ! traps off though
- WRITE_PAUSE
-
- /* Our %wim is one behind CWP */
- wr %g0, 0x2, %wim
-
- rd %tbr, %g4
- or %g0, 0x3, %g5
- sll %g5, 20, %g5
- and %g4, %g5, %g4 ! Mask cpu-id bits
-
- /* Give ourselves a stack. */
- set PERCPU_VADDR, %g1
- add %g1, %g4, %g1
- set PERCPU_KSTACK_OFFSET, %g5
- add %g1, %g5, %g1
- set 0x1000, %g5
- add %g1, %g5, %g1 ! end of stack
- sub %g1, (96+96+80), %g1 ! set up a frame
- andn %g1, 0x7, %g1
- or %g1, 0x0, %fp ! bottom of frame
- add %fp, (96+80), %sp ! top of frame
-
- /* Set up per-cpu trap table pointer. In actuality, the virtual
- * address for the trap table on every cpu points to the same
- * physical address, this virtual address is only used for cpu
- * identification purposes.
- */
-#if 0
-/* set PERCPU_VADDR, %g1 */
-/* add %g1, %g4, %g1 */
-/* add %g1, PERCPU_TBR_OFFSET, %g1 */
- set C_LABEL(thiscpus_tbr), %g1
- ld [%g1], %g1
- wr %g1, 0x0, %tbr
- WRITE_PAUSE
-#else
- set C_LABEL(trapbase), %g3
- wr %g3, 0x0, %tbr
- WRITE_PAUSE
-#endif
-
- /* Turn on traps (PSR_ET). */
- rd %psr, %g1
- wr %g1, PSR_ET, %psr ! traps on
-
-#if 0
-1: nop
- b 1b
- nop
-#endif
-
- /* Call C-code to do the rest of the real work. */
- call C_LABEL(sparc_cpu_init)
- nop
-
- /* Call cpu-idle routine so we can start it up later on. */
- call C_LABEL(sparc_cpu_idle)
- nop
-
- /* Done... This cpu should me spinning in a test loop.
- * If execution gets here, something really bad happened.
- */
- call C_LABEL(prom_halt) ! Seems reasonable...
- nop
-
}
}
-/* Probe and map in the Auxiliary I/O register */
+/* Probe and map in the Auxiliary I/O register */
void
probe_auxio(void)
{
prom_apply_obio_ranges(auxregs, 0x1);
/* Map the register both read and write */
sparc_alloc_io(auxregs[0].phys_addr, (void *) AUXIO_VADDR,
- auxregs[0].reg_size, "auxiliaryIO", auxregs[0].which_io, 0x0);
+ auxregs[0].reg_size, "auxilliaryIO", auxregs[0].which_io, 0x0);
}
extern unsigned long probe_memory(void);
-/* $Id: process.c,v 1.42 1996/02/20 07:45:08 davem Exp $
+/* $Id: process.c,v 1.49 1996/04/20 07:37:20 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* This file handles the architecture-dependent parts of process handling..
*/
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
+#include <asm/system.h>
extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
int active_ds = USER_DS;
+#ifndef __SMP__
+
/*
* the idle loop on a Sparc... ;)
*/
for (;;) {
schedule();
}
+ return 0;
}
+#else
+
+/*
+ * the idle loop on a SparcMultiPenguin...
+ */
+asmlinkage int sys_idle(void)
+{
+ if (current->pid != 0)
+ return -EPERM;
+
+ /* endless idle loop with no priority at all */
+ current->counter = -100;
+ schedule();
+ return 0;
+}
+
+/* This is being executed in task 0 'user space'. */
+int cpu_idle(void *unused)
+{
+ volatile int *spap = &smp_process_available;
+ volatile int cval;
+
+ current->priority = -50;
+ while(1) {
+ if(0==read_smp_counter(spap))
+ continue;
+ while(*spap == -1)
+ ;
+ cli();
+ /* Acquire exclusive access. */
+ while((cval = smp_swap(spap, -1)) == -1)
+ ;
+ if (0==cval) {
+ /* ho hum, release it. */
+ smp_process_available = 0;
+ sti();
+ continue;
+ }
+ /* Something interesting happened, whee... */
+ smp_swap(spap, (cval - 1));
+ sti();
+ idle();
+ }
+}
+
+#endif
+
extern char saved_command_line[];
void hard_reset_now(void)
void exit_thread(void)
{
flush_user_windows();
+#ifndef __SMP__
if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
/* Keep process from leaving FPU in a bogon state. */
put_psr(get_psr() | PSR_EF);
fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr,
¤t->tss.fpqueue[0], ¤t->tss.fpqdepth);
+#ifndef __SMP__
last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
}
mmu_exit_hook();
}
current->tss.sstk_info.cur_status = 0;
current->tss.sstk_info.the_stack = 0;
+#ifndef __SMP__
if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
/* Clean the fpu. */
put_psr(get_psr() | PSR_EF);
fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr,
¤t->tss.fpqueue[0], ¤t->tss.fpqdepth);
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
}
memset(¤t->tss.reg_window[0], 0,
* Parent --> %o0 == childs pid, %o1 == 0
* Child --> %o0 == parents pid, %o1 == 1
*
- * NOTE: We have a separate fork kpsr/kwim because
+ * NOTE: We have a separate fork kpsr/kwim because
* the parent could change these values between
* sys_fork invocation and when we reach here
* if the parent should sleep while trying to
struct reg_window *old_stack, *new_stack;
unsigned long stack_offset;
+#ifndef __SMP__
if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
put_psr(get_psr() | PSR_EF);
fpsave(&p->tss.float_regs[0], &p->tss.fsr,
&p->tss.fpqueue[0], &p->tss.fpqdepth);
+#ifdef __SMP__
+ current->flags &= ~PF_USEDFPU;
+#endif
}
/* Calculate offset to stack_frame & pt_regs */
- stack_offset = ((PAGE_SIZE*2) - TRACEREG_SZ);
+ if(sparc_cpu_model == sun4c)
+ stack_offset = ((PAGE_SIZE*3) - TRACEREG_SZ);
+ else
+ stack_offset = ((PAGE_SIZE<<2) - TRACEREG_SZ);
+
if(regs->psr & PSR_PS)
stack_offset -= REGWIN_SZ;
childregs = ((struct pt_regs *) (p->kernel_stack_page + stack_offset));
--- /dev/null
+/* rirq.S: Needed to return from an interrupt on SMP with no
+ * locks held or released.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/contregs.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+
+#define t_psr l0
+#define t_pc l1
+#define t_npc l2
+#define t_wim l3
+#define twin_tmp1 l4
+#define twin_tmp2 l5
+#define twin_tmp3 l6
+
+ /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
+ .globl rirq_7win_patch1, rirq_7win_patch2, rirq_7win_patch3
+ .globl rirq_7win_patch4, rirq_7win_patch5
+rirq_7win_patch1: srl %t_wim, 0x6, %twin_tmp2
+rirq_7win_patch2: and %twin_tmp2, 0x7f, %twin_tmp2
+rirq_7win_patch3: srl %g1, 7, %g2
+rirq_7win_patch4: srl %g2, 6, %g2
+rirq_7win_patch5: and %g1, 0x7f, %g1
+ /* END OF PATCH INSTRUCTIONS */
+
+ .globl ret_irq_entry, rirq_patch1, rirq_patch2
+ .globl rirq_patch3, rirq_patch4, rirq_patch5
+ret_irq_entry:
+ ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
+ andcc %t_psr, PSR_PS, %g0
+ bne ret_irq_kernel
+ nop
+
+ret_irq_user:
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ LOAD_CURRENT(twin_tmp2, twin_tmp1)
+ ld [%twin_tmp2 + THREAD_W_SAVED], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ be ret_irq_nobufwins
+ nop
+
+ /* User has toasty windows, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov 1, %o1
+ call C_LABEL(try_to_clear_window_buffer)
+ add %sp, REGWIN_SZ, %o0
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ nop
+
+ret_irq_nobufwins:
+ /* Load up the user's out registers so we can pull
+ * a window from the stack, if necessary.
+ */
+ LOAD_PT_INS(sp)
+
+ /* If there are already live user windows in the
+ * set we can return from trap safely.
+ */
+ ld [%twin_tmp2 + THREAD_UMASK], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ bne ret_irq_userwins_ok
+ nop
+
+ /* Calculate new %wim, we have to pull a register
+ * window from the users stack.
+ */
+ret_irq_pull_one_window:
+ rd %wim, %t_wim
+ sll %t_wim, 0x1, %twin_tmp1
+rirq_patch1: srl %t_wim, 0x7, %twin_tmp2
+ or %twin_tmp2, %twin_tmp1, %twin_tmp2
+rirq_patch2: and %twin_tmp2, 0xff, %twin_tmp2
+
+ wr %twin_tmp2, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Here comes the architecture specific
+ * branch to the user stack checking routine
+ * for return from traps.
+ */
+ .globl C_LABEL(rirq_mmu_patchme)
+C_LABEL(rirq_mmu_patchme): b C_LABEL(sun4c_reti_stackchk)
+ andcc %fp, 0x7, %g0
+
+ret_irq_userwins_ok:
+ LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
+ or %t_pc, %t_npc, %g2
+ andcc %g2, 0x3, %g0
+ bne ret_irq_unaligned_pc
+ nop
+
+ LOAD_PT_YREG(sp, g1)
+ LOAD_PT_GLOBALS(sp)
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_irq_unaligned_pc:
+ add %sp, REGWIN_SZ, %o0
+ ld [%sp + REGWIN_SZ + PT_PC], %o1
+ ld [%sp + REGWIN_SZ + PT_NPC], %o2
+ ld [%sp + REGWIN_SZ + PT_PSR], %o3
+
+ wr %t_wim, 0x0, %wim ! or else...
+ WRITE_PAUSE
+
+ /* User has unaligned crap, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(do_memaccess_unaligned)
+ nop
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ nop
+
+ret_irq_kernel:
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ /* Will the rett land us in the invalid window? */
+ mov 2, %g1
+ sll %g1, %t_psr, %g1
+rirq_patch3: srl %g1, 8, %g2
+ or %g1, %g2, %g1
+ rd %wim, %g2
+ andcc %g2, %g1, %g0
+ be 1f ! Nope, just return from the trap
+ nop
+
+ /* We have to grab a window before returning. */
+ sll %g2, 0x1, %g1
+rirq_patch4: srl %g2, 7, %g2
+ or %g1, %g2, %g1
+rirq_patch5: and %g1, 0xff, %g1
+
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ restore %g0, %g0, %g0
+ LOAD_WINDOW(sp)
+ save %g0, %g0, %g0
+
+ /* Reload the entire frame in case this is from a
+ * kernel system call or whatever...
+ */
+1:
+ LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_irq_user_stack_is_bolixed:
+ wr %t_wim, 0x0, %wim
+ WRITE_PAUSE
+
+ /* User has a toasty window, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(window_ret_fault)
+ add %sp, REGWIN_SZ, %o0
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ nop
+
+ .globl C_LABEL(sun4c_reti_stackchk)
+C_LABEL(sun4c_reti_stackchk):
+ be 1f
+ and %fp, 0xfff, %g1 ! delay slot
+
+ b ret_irq_user_stack_is_bolixed
+ nop
+
+ /* See if we have to check the sanity of one page or two */
+1:
+ add %g1, 0x38, %g1
+ sra %fp, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ andncc %g1, 0xff8, %g0
+
+ /* %sp is in vma hole, yuck */
+ b ret_irq_user_stack_is_bolixed
+ nop
+
+1:
+ be sun4c_reti_onepage /* Only one page to check */
+ lda [%fp] ASI_PTE, %g2
+
+sun4c_reti_twopages:
+ add %fp, 0x38, %g1
+ sra %g1, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ lda [%g1] ASI_PTE, %g2
+
+ /* Second page is in vma hole */
+ b ret_irq_user_stack_is_bolixed
+ nop
+
+1:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne sun4c_reti_onepage
+ lda [%fp] ASI_PTE, %g2
+
+ /* Second page has bad perms */
+ b ret_irq_user_stack_is_bolixed
+ nop
+
+sun4c_reti_onepage:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne 1f
+ nop
+
+ /* A page had bad page permissions, losing... */
+ b ret_irq_user_stack_is_bolixed
+ nop
+
+ /* Whee, things are ok, load the window and continue. */
+1:
+ restore %g0, %g0, %g0
+
+ LOAD_WINDOW(sp)
+
+ save %g0, %g0, %g0
+ b ret_irq_userwins_ok
+ nop
+
+ .globl C_LABEL(srmmu_reti_stackchk)
+C_LABEL(srmmu_reti_stackchk):
+ bne ret_irq_user_stack_is_bolixed
+ sethi %hi(KERNBASE), %g1
+ cmp %g1, %fp
+ bleu ret_irq_user_stack_is_bolixed
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g0
+
+ lda [%g0] ASI_M_MMUREGS, %g1
+ or %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ restore %g0, %g0, %g0
+
+ LOAD_WINDOW(sp)
+
+ save %g0, %g0, %g0
+
+ andn %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ mov AC_M_SFAR, %g2
+ lda [%g2] ASI_M_MMUREGS, %g2
+
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g1
+ andcc %g1, 0x2, %g0
+ bne ret_irq_user_stack_is_bolixed
+ nop
+
+ b ret_irq_userwins_ok
+ nop
-/* $Id: rtrap.S,v 1.21 1996/02/20 07:45:11 davem Exp $
+/* $Id: rtrap.S,v 1.27 1996/04/03 02:14:41 davem Exp $
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
+#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
#define t_psr l0
#define t_pc l1
wr %t_psr, 0x0, %psr
WRITE_PAUSE
- /* If not current fpu proc, disable fp-ops */
LOAD_CURRENT(twin_tmp2, twin_tmp1)
- set C_LABEL(last_task_used_math), %twin_tmp1
- ld [%twin_tmp1], %twin_tmp1
- cmp %twin_tmp2, %twin_tmp1
- be 1f
- nop
-
- set PSR_EF, %twin_tmp1
- andn %t_psr, %twin_tmp1, %t_psr
- st %t_psr, [%sp + REGWIN_SZ + PT_PSR]
-
-1:
ld [%twin_tmp2 + THREAD_W_SAVED], %twin_tmp1
orcc %g0, %twin_tmp1, %g0
be ret_trap_nobufwins
LOAD_PT_YREG(sp, g1)
LOAD_PT_GLOBALS(sp)
+ LEAVE_SYSCALL
+
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
- /* HyperSparc special nop patching, if we are on a hypersparc
- * we nop the top two instructions and the first nop coming
- * up to be:
- * rd %iccr, %g0 <-- flush on-chip instruction cache
- * jmp %t_pc
- * rett %t_npc
- */
- nop
- nop
-
ret_trap_unaligned_pc:
add %sp, REGWIN_SZ, %o0
ld [%sp + REGWIN_SZ + PT_PC], %o1
1:
LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+ LEAVE_SYSCALL
+
wr %t_psr, 0x0, %psr
WRITE_PAUSE
/* sclow.S: Low level special syscall handling.
- * Basically these are cases where we can completely
+ * Basically these are cases where we can completely
* handle the system call without saving any state
* because we know that the process will not sleep.
*
-/* $Id: setup.c,v 1.54 1996/02/25 06:49:18 davem Exp $
+/* $Id: setup.c,v 1.60 1996/04/04 16:30:28 tridge Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
+#include <linux/smp.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
16 /* orig-video-points */
};
-char wp_works_ok = 0;
unsigned int phys_bytes_of_ram, end_of_phys_memory;
unsigned long bios32_init(unsigned long memory_start, unsigned long memory_end)
return memory_start;
}
-/* Typing sync at the prom prompt calls the function pointed to by
+/* Typing sync at the prom prompt calls the function pointed to by
* romvec->pv_synchook which I set to the following function.
* This should sync all filesystems and return, for now it just
* prints out pretty messages and returns.
extern unsigned long trapbase;
extern void breakpoint(void);
+#if CONFIG_SUN_CONSOLE
extern void console_restore_palette(void);
+#endif
asmlinkage void sys_sync(void); /* it's really int */
/* Pretty sick eh? */
"nop\n\t"
"nop\n\t" : : "r" (&trapbase));
+#if CONFIG_SUN_CONSOLE
console_restore_palette ();
+#endif
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas();
- if(current != task[0]) {
+ if(current->pid != 0) {
sti();
sys_sync();
cli();
#define BOOTME_SINGLE 0x2
#define BOOTME_KGDB 0x4
+void kernel_enter_debugger(void)
+{
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: Entered\n");
+ breakpoint();
+ }
+}
+
+int obp_system_intr(void)
+{
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: system interrupted\n");
+ breakpoint();
+ return 1;
+ }
+ if (boot_flags & BOOTME_DEBUG) {
+ printk("OBP: system interrupted\n");
+ prom_halt();
+ return 1;
+ }
+ return 0;
+}
+
/* This routine does no error checking, make sure your string is sane
* before calling this!
* XXX This is cheese, make generic and better.
printk("KGDB: Using serial line /dev/tty%c for "
"session\n", commands[i+8]);
boot_flags |= BOOTME_KGDB;
+#if CONFIG_SUN_SERIAL
if(commands[i+8]=='a')
rs_kgdb_hook(0);
else if(commands[i+8]=='b')
rs_kgdb_hook(1);
- else {
+ else
+#endif
+#if CONFIG_AP1000
+ if(commands[i+8]=='c')
+ printk("KGDB: ap1000+ debugging\n");
+ else
+#endif
+ {
printk("KGDB: whoops bogon tty line "
"requested, disabling session\n");
boot_flags &= (~BOOTME_KGDB);
void setup_arch(char **cmdline_p,
unsigned long * memory_start_p, unsigned long * memory_end_p)
{
- int total, i, panic_stuff[2];
+ int total, i, panic_stuff[2], packed;
+
+#if CONFIG_AP1000
+ register_console(prom_printf);
+ ((char *)(&cputypval))[4] = 'm'; /* ugly :-( */
+#endif
+#if 0
/* Always reboot on panic, but give 5 seconds to hit L1-A
* and look at debugging info if desired.
*/
panic_stuff[0] = 1;
panic_stuff[1] = 5;
panic_setup(0, panic_stuff);
+#endif
sparc_ttable = (struct tt_entry *) &start;
if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }
printk("ARCH: ");
+ packed = 0;
switch(sparc_cpu_model)
{
case sun4c:
printk("SUN4C\n");
sun4c_probe_vac();
+ packed = 0;
break;
case sun4m:
printk("SUN4M\n");
+ packed = 1;
break;
case sun4d:
printk("SUN4D\n");
+ packed = 1;
break;
case sun4e:
printk("SUN4E\n");
+ packed = 0;
break;
case sun4u:
printk("SUN4U\n");
load_mmu();
total = prom_probe_memory();
*memory_start_p = (((unsigned long) &end));
-#if 0
- prom_printf("Physical Memory: %d bytes (in hex %08lx)\n", (int) total,
- (unsigned long) total);
-#endif
- for(i=0; sp_banks[i].num_bytes != 0; i++) {
-#if 0
- printk("Bank %d: base 0x%x bytes %d\n", i,
- (unsigned int) sp_banks[i].base_addr,
- (int) sp_banks[i].num_bytes);
-#endif
- end_of_phys_memory = sp_banks[i].base_addr + sp_banks[i].num_bytes;
+ if(!packed) {
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ } else {
+ unsigned int sum = 0;
+
+ for(i = 0; sp_banks[i].num_bytes != 0; i++)
+ sum += sp_banks[i].num_bytes;
+
+ end_of_phys_memory = sum;
}
prom_setsync(prom_sync_me);
{
extern int serial_console; /* in console.c, of course */
+#if !CONFIG_SUN_SERIAL
+ serial_console = 0;
+#else
int idev = prom_query_input_device();
int odev = prom_query_output_device();
if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
prom_printf("Inconsistent console\n");
prom_halt();
}
+#endif
}
#if 1
/* XXX ROOT_DEV hack for kgdb - davem XXX */
extern char *sparc_cpu_type[];
extern char *sparc_fpu_type[];
+extern char *smp_info(void);
+
int get_cpuinfo(char *buffer)
{
int cpuid=get_cpuid();
return sprintf(buffer, "cpu\t\t: %s\n"
"fpu\t\t: %s\n"
"promlib\t\t: Version %d Revision %d\n"
- "wp\t\t: %s\n"
"type\t\t: %s\n"
"Elf Support\t: %s\n" /* I can't remember when I do --ralp */
+#ifndef __SMP__
"BogoMips\t: %lu.%02lu\n"
- "%s",
+#else
+ "Cpu0Bogo\t: %lu.%02lu\n"
+ "Cpu1Bogo\t: %lu.%02lu\n"
+ "Cpu2Bogo\t: %lu.%02lu\n"
+ "Cpu3Bogo\t: %lu.%02lu\n"
+#endif
+ "%s"
+#ifdef __SMP__
+ "%s"
+#endif
+ ,
sparc_cpu_type[cpuid],
sparc_fpu_type[cpuid],
+#if CONFIG_AP1000
+ 0, 0,
+#else
romvec->pv_romvers, prom_rev,
- wp_works_ok ? "yes" : "no",
+#endif
&cputypval,
#if CONFIG_BINFMT_ELF
"yes",
#else
"no",
#endif
+#ifndef __SMP__
loops_per_sec/500000, (loops_per_sec/5000) % 100,
+#else
+ cpu_data[0].udelay_val/500000, (cpu_data[0].udelay_val/5000)%100,
+ cpu_data[1].udelay_val/500000, (cpu_data[1].udelay_val/5000)%100,
+ cpu_data[2].udelay_val/500000, (cpu_data[2].udelay_val/5000)%100,
+ cpu_data[3].udelay_val/500000, (cpu_data[3].udelay_val/5000)%100,
+#endif
mmu_info()
+#ifdef __SMP__
+ , smp_info()
+#endif
);
}
-/* $Id: signal.c,v 1.28 1995/12/29 21:47:18 davem Exp $
+/* $Id: signal.c,v 1.31 1996/04/18 01:00:41 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
_sigpause_common(set, regs);
}
-asmlinkage void do_sigsuspend(unsigned int *sigmaskp, struct pt_regs *regs)
+asmlinkage void do_sigsuspend (struct pt_regs *regs)
{
- unsigned int set;
+ unsigned long mask;
+ unsigned long set;
- /* Manual does not state what is supposed to happen if
- * the sigmask ptr is bogus. It does state that EINTR
- * is the only valid return value and it indicates
- * successful signal delivery. Must investigate.
- */
- if(verify_area(VERIFY_READ, sigmaskp, sizeof(unsigned int))) {
- regs->pc = regs->npc;
- regs->npc += 4;
- regs->u_regs[UREG_I0] = EFAULT;
- regs->psr |= PSR_C;
- return;
+ set = regs->u_regs [UREG_I0];
+ mask = current->blocked;
+ current->blocked = set & _BLOCKABLE;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(mask,regs)){
+ regs->psr |= PSR_C;
+ regs->u_regs [UREG_I0] = EINTR;
+ return;
+ }
}
- set = *sigmaskp;
- _sigpause_common(set, regs);
}
asmlinkage void do_sigreturn(struct pt_regs *regs)
(((unsigned long) sframep) >= KERNBASE) ||
((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
((unsigned long) sframep < 0xe0000000 && (unsigned long) sframep >= 0x20000000))) {
+#if 0 /* fills up the console logs... */
printk("%s [%d]: User has trashed signal stack\n",
current->comm, current->pid);
printk("Sigstack ptr %p handler at pc<%08lx> for sig<%d>\n",
sframep, pc, signr);
+#endif
/* Don't change signal code and address, so that
* post mortem debuggers can have a look.
*/
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <asm/head.h>
+#include <asm/ptrace.h>
+
#include <linux/kernel.h>
+#include <linux/tasks.h>
#include <linux/smp.h>
-int smp_num_cpus;
-int smp_threads_ready;
-volatile unsigned long smp_msg_data;
-volatile int smp_src_cpu;
-volatile int smp_msg_id;
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
-static volatile int smp_commenced = 0;
+extern ctxd_t *srmmu_ctx_table_phys;
+extern int linux_num_cpus;
+
+struct tlog {
+ unsigned long pc;
+ unsigned long psr;
+};
+
+struct tlog trap_log[4][256];
+unsigned long trap_log_ent[4] = { 0, 0, 0, 0, };
+
+extern void calibrate_delay(void);
-/* The only guaranteed locking primitive available on all Sparc
- * processors is 'ldstub [%addr_reg + imm], %dest_reg' which atomically
+volatile unsigned long stuck_pc = 0;
+volatile int smp_processors_ready = 0;
+
+int smp_found_config = 0;
+unsigned long cpu_present_map = 0;
+int smp_num_cpus = 1;
+int smp_threads_ready=0;
+unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
+volatile unsigned long smp_invalidate_needed[NR_CPUS] = { 0, };
+volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
+struct cpuinfo_sparc cpu_data[NR_CPUS];
+unsigned char boot_cpu_id = 0;
+static int smp_activated = 0;
+static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
+static volatile unsigned long smp_msg_data;
+static volatile int smp_src_cpu;
+static volatile int smp_msg_id;
+volatile int cpu_number_map[NR_CPUS];
+volatile int cpu_logical_map[NR_CPUS];
+
+/* The only guaranteed locking primitive available on all Sparc
+ * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
* places the current byte at the effective address into dest_reg and
* places 0xff there afterwards. Pretty lame locking primitive
* compared to the Alpha and the intel no? Most Sparcs have 'swap'
* instruction which is much better...
*/
-klock_t kernel_lock;
+klock_t kernel_flag = KLOCK_CLEAR;
+volatile unsigned char active_kernel_processor = NO_PROC_ID;
+volatile unsigned long kernel_counter = 0;
+volatile unsigned long syscall_count = 0;
+volatile unsigned long ipi_count;
+#ifdef __SMP_PROF__
+volatile unsigned long smp_spins[NR_CPUS]={0};
+volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
+volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
+volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
+volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
+#endif
+#if defined (__SMP_PROF__)
+volatile unsigned long smp_idle_map=0;
+#endif
+
+volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};
+volatile int smp_process_available=0;
+
+/*#define SMP_DEBUG*/
+
+#ifdef SMP_DEBUG
+#define SMP_PRINTK(x) printk x
+#else
+#define SMP_PRINTK(x)
+#endif
+
+static volatile int smp_commenced = 0;
+
+static char smp_buf[512];
+
+char *smp_info(void)
+{
+ sprintf(smp_buf,
+"\n CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
+"State: %s\t\t%s\t\t%s\t\t%s\n"
+"Lock: %08lx\t\t%08lx\t%08lx\t\t%08lx\n"
+"\n"
+"klock: %x\n",
+ (cpu_present_map & 1) ? ((active_kernel_processor == 0) ? "akp" : "online") : "offline",
+ (cpu_present_map & 2) ? ((active_kernel_processor == 1) ? "akp" : "online") : "offline",
+ (cpu_present_map & 4) ? ((active_kernel_processor == 2) ? "akp" : "online") : "offline",
+ (cpu_present_map & 8) ? ((active_kernel_processor == 3) ? "akp" : "online") : "offline",
+ smp_proc_in_lock[0], smp_proc_in_lock[1], smp_proc_in_lock[2],
+ smp_proc_in_lock[3],
+ kernel_flag);
+ return smp_buf;
+}
+
+static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+{
+ __asm__ __volatile__("swap [%1], %0\n\t" :
+ "=&r" (val), "=&r" (ptr) :
+ "0" (val), "1" (ptr));
+ return val;
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+void smp_store_cpu_info(int id)
+{
+ cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
+}
+
+/*
+ * Architecture specific routine called by the kernel just before init is
+ * fired off. This allows the BP to have everything in order [we hope].
+ * At the end of this all the AP's will hit the system scheduling and off
+ * we go. Each AP will load the system gdt's and jump through the kernel
+ * init into idle(). At this point the scheduler will one day take over
+ * and give them jobs to do. smp_callin is a standard routine
+ * we use to track CPUs as they power up.
+ */
void smp_commence(void)
{
/*
* Lets the callin's below out of their loop.
*/
+ local_flush_cache_all();
+ local_flush_tlb_all();
smp_commenced = 1;
+ local_flush_cache_all();
+ local_flush_tlb_all();
}
void smp_callin(void)
{
- int cpuid = smp_get_processor_id();
-
- /* XXX Clear the software interrupts _HERE_. */
+ int cpuid = smp_processor_id();
sti();
+ local_flush_cache_all();
+ local_flush_tlb_all();
calibrate_delay();
smp_store_cpu_info(cpuid);
- set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
- local_invalidate_all();
- while(!smp_commenced);
- if(cpu_number_map[cpuid] == -1)
- while(1);
- local_invalidate_all();
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ cli();
+
+ /* Allow master to continue. */
+ swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ while(!smp_commenced)
+ barrier();
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ /* Fix idle thread fields. */
+ current->mm->mmap->vm_page_prot = PAGE_SHARED;
+ current->mm->mmap->vm_start = KERNBASE;
+ current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ sti();
+}
+
+void cpu_panic(void)
+{
+ printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
+ panic("SMP bolixed\n");
}
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct prom_cpuinfo linux_cpus[NCPUS];
+static struct linux_prom_registers penguin_ctable;
+
void smp_boot_cpus(void)
{
+ int cpucount = 0;
+ int i = 0;
+
+ printk("Entering SparclinuxMultiPenguin(SMP) Mode...\n");
+
+ penguin_ctable.which_io = 0;
+ penguin_ctable.phys_addr = (char *) srmmu_ctx_table_phys;
+ penguin_ctable.reg_size = 0;
+
+ sti();
+ cpu_present_map |= (1 << smp_processor_id());
+ cpu_present_map = 0;
+ for(i=0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1<<i);
+ for(i=0; i < NR_CPUS; i++)
+ cpu_number_map[i] = -1;
+ for(i=0; i < NR_CPUS; i++)
+ cpu_logical_map[i] = -1;
+ mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
+ cpu_number_map[boot_cpu_id] = 0;
+ cpu_logical_map[0] = boot_cpu_id;
+ active_kernel_processor = boot_cpu_id;
+ smp_store_cpu_info(boot_cpu_id);
+ set_irq_udt(0);
+ local_flush_cache_all();
+ if(linux_num_cpus == 1)
+ return; /* Not an MP box. */
+ for(i = 0; i < NR_CPUS; i++) {
+ if(i == boot_cpu_id)
+ continue;
+
+ if(cpu_present_map & (1 << i)) {
+ extern unsigned long sparc_cpu_startup;
+ unsigned long *entry = &sparc_cpu_startup;
+ int timeout;
+
+ /* See trampoline.S for details... */
+ entry += ((i-1) * 6);
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk("Starting CPU %d at %p\n", i, entry);
+ mid_xlate[i] = (linux_cpus[i].mid & ~8);
+ current_set[i] = &init_task;
+ local_flush_cache_all();
+ prom_startcpu(linux_cpus[i].prom_node,
+ &penguin_ctable, 0, (char *)entry);
+
+ /* wheee... it's going... */
+ for(timeout = 0; timeout < 5000000; timeout++) {
+ if(cpu_callin_map[i])
+ break;
+ udelay(100);
+ }
+ if(cpu_callin_map[i]) {
+ /* Another "Red Snapper". */
+ cpucount++;
+ cpu_number_map[i] = i;
+ cpu_logical_map[i] = i;
+ } else {
+ printk("Penguin %d is stuck in the bottle.\n", i);
+ }
+ current_set[i] = 0;
+ }
+ if(!(cpu_callin_map[i])) {
+ cpu_present_map &= ~(1 << i);
+ cpu_number_map[i] = -1;
+ }
+ }
+ local_flush_cache_all();
+ if(cpucount == 0) {
+ printk("Error: only one Penguin found.\n");
+ cpu_present_map = (1 << smp_processor_id());
+ } else {
+ unsigned long bogosum = 0;
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i))
+ bogosum += cpu_data[i].udelay_val;
+ }
+ printk("Total of %d Penguins activated (%lu.%02lu PenguinMIPS).\n",
+ cpucount + 1,
+ (bogosum + 2500)/500000,
+ ((bogosum + 2500)/5000)%100);
+ smp_activated = 1;
+ smp_num_cpus = cpucount + 1;
+ }
+ smp_processors_ready = 1;
+}
+
+static inline void send_ipi(unsigned long target_map, int irq)
+{
+ int i;
+
+ for(i = 0; i < 4; i++) {
+ if((1<<i) & target_map)
+ set_cpu_int(mid_xlate[i], irq);
+ }
}
+/*
+ * A non wait message cannot pass data or cpu source info. This current
+ * setup is only safe because the kernel lock owner is the only person
+ * who can send a message.
+ *
+ * Wrapping this whole block in a spinlock is not the safe answer either.
+ * A processor may get stuck with irq's off waiting to send a message and
+ * thus not replying to the person spinning for a reply....
+ *
+ * In the end invalidate ought to be the NMI and a very very short
+ * function (to avoid the old IDE disk problems), and other messages sent
+ * with IRQ's enabled in a civilised fashion. That will also boost
+ * performance.
+ */
+
+static volatile int message_cpu = NO_PROC_ID;
+
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
- struct sparc_ipimsg *msg = (struct sparc_ipimsg *) data;
unsigned long target_map;
int p = smp_processor_id();
- static volatile int message_cpu = NO_PROC_ID;
+ int irq = 15;
+ int i;
- if(!smp_activated || !smp_commenced)
+ /* Before processors have been placed into their initial
+ * patterns do not send messages.
+ */
+ if(!smp_processors_ready)
return;
+ /* Skip the reschedule if we are waiting to clear a
+ * message at this time. The reschedule cannot wait
+ * but is not critical.
+ */
if(msg == MSG_RESCHEDULE) {
+ irq = 13;
if(smp_cpu_in_msg[p])
return;
}
- if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU) {
- panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
+ /* Sanity check we don't re-enter this across CPU's. Only the kernel
+ * lock holder may send messages. For a STOP_CPU we are bringing the
+ * entire box to the fastest halt we can.. A reschedule carries
+ * no data and can occur during a flush.. guess what panic
+ * I got to notice this bug...
+ */
+ if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU && msg != MSG_RESCHEDULE) {
+ printk("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
smp_processor_id(),msg,message_cpu, smp_msg_id);
+
+ /* I don't know how to gracefully die so that debugging
+ * this doesn't completely eat up my filesystems...
+ * let's try this...
+ */
+ smp_cpu_in_msg[p] = 0; /* In case we come back here... */
+ intr_count = 0; /* and so panic don't barf... */
+ smp_swap(&message_cpu, NO_PROC_ID); /* push the store buffer */
+ sti();
+ printk("spinning, please L1-A, type ctrace and send output to davem\n");
+ while(1)
+ barrier();
}
- message_cpu = smp_processor_id();
+ smp_swap(&message_cpu, smp_processor_id()); /* store buffers... */
+
+ /* We are busy. */
smp_cpu_in_msg[p]++;
+
+ /* Reschedule is currently special. */
if(msg != MSG_RESCHEDULE) {
smp_src_cpu = p;
smp_msg_id = msg;
smp_msg_data = data;
}
+#if 0
+ printk("SMP message pass from cpu %d to cpu %d msg %d\n", p, target, msg);
+#endif
+
+ /* Set the target requirement. */
+ for(i = 0; i < smp_num_cpus; i++)
+ swap((unsigned long *) &cpu_callin_map[i], 0);
if(target == MSG_ALL_BUT_SELF) {
- target_map = cpu_present_map;
- cpu_callin_map[0] = (1<<smp_src_cpu);
+ target_map = (cpu_present_map & ~(1<<p));
+ swap((unsigned long *) &cpu_callin_map[p], 1);
} else if(target == MSG_ALL) {
target_map = cpu_present_map;
- cpu_callin_map[0] = 0;
} else {
+ for(i = 0; i < smp_num_cpus; i++)
+ if(i != target)
+ swap((unsigned long *) &cpu_callin_map[i], 1);
target_map = (1<<target);
- cpu_callin_map[0] = 0;
}
- /* XXX Send lvl15 soft interrupt to cpus here XXX */
+ /* Fire it off. */
+ send_ipi(target_map, irq);
switch(wait) {
case 1:
- while(cpu_callin_map[0] != target_map);
+ for(i = 0; i < smp_num_cpus; i++)
+ while(!cpu_callin_map[i])
+ barrier();
break;
case 2:
- while(smp_invalidate_needed);
+ for(i = 0; i < smp_num_cpus; i++)
+ while(smp_invalidate_needed[i])
+ barrier();
break;
+ case 3:
+ /* For cross calls we hold message_cpu and smp_cpu_in_msg[]
+ * until all processors disperse. Else we have _big_ problems.
+ */
+ return;
}
smp_cpu_in_msg[p]--;
- message_cpu = NO_PROC_ID;
+ smp_swap(&message_cpu, NO_PROC_ID);
}
-inline void smp_invalidate(int type, unsigned long a, unsigned long b, unsigned long c)
+struct smp_funcall {
+ smpfunc_t func;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
+ unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+} ccall_info;
+
+/* Returns failure code if for example any of the cpu's failed to respond
+ * within a certain timeout period.
+ */
+
+#define CCALL_TIMEOUT 5000000 /* enough for initial testing */
+
+/* #define DEBUG_CCALL */
+
+/* Some nice day when we really thread the kernel I'd like to synchronize
+ * this with either a broadcast conditional variable, a resource adaptive
+ * generic mutex, or a convoy semaphore scheme of some sort. No reason
+ * we can't let multiple processors in here if the appropriate locking
+ * is done. Note that such a scheme assumes we will have a
+ * prioritized ipi scheme using different software level irq's.
+ */
+void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
+ unsigned long me = smp_processor_id();
unsigned long flags;
+ int i, timeout;
- smp_invalidate_needed = cpu_present_map & ~(1<<smp_processor_id());
- save_flags(flags); cli();
- smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);
- local_invalidate();
+#ifdef DEBUG_CCALL
+ printk("xc%d<", me);
+#endif
+ if(smp_processors_ready) {
+ save_flags(flags); cli();
+ if(me != active_kernel_processor)
+ goto cross_call_not_master;
+
+ /* Init function glue. */
+ ccall_info.func = func;
+ ccall_info.arg1 = arg1;
+ ccall_info.arg2 = arg2;
+ ccall_info.arg3 = arg3;
+ ccall_info.arg4 = arg4;
+ ccall_info.arg5 = arg5;
+
+ /* Init receive/complete mapping. */
+ for(i = 0; i < smp_num_cpus; i++) {
+ ccall_info.processors_in[i] = 0;
+ ccall_info.processors_out[i] = 0;
+ }
+ ccall_info.processors_in[me] = 1;
+ ccall_info.processors_out[me] = 1;
+
+ /* Fire it off. */
+ smp_message_pass(MSG_ALL_BUT_SELF, MSG_CROSS_CALL, 0, 3);
+
+ /* For debugging purposes right now we can timeout
+ * on both callin and callexit.
+ */
+ timeout = CCALL_TIMEOUT;
+ for(i = 0; i < smp_num_cpus; i++) {
+ while(!ccall_info.processors_in[i] && timeout-- > 0)
+ barrier();
+ if(!ccall_info.processors_in[i])
+ goto procs_time_out;
+ }
+#ifdef DEBUG_CCALL
+ printk("I");
+#endif
+
+ /* Run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+
+ /* Spin on proc dispersion. */
+ timeout = CCALL_TIMEOUT;
+ for(i = 0; i < smp_num_cpus; i++) {
+ while(!ccall_info.processors_out[i] && timeout-- > 0)
+ barrier();
+ if(!ccall_info.processors_out[i])
+ goto procs_time_out;
+ }
+#ifdef DEBUG_CCALL
+ printk("O>");
+#endif
+ /* See wait case 3 in smp_message_pass()... */
+ smp_cpu_in_msg[me]--;
+ smp_swap(&message_cpu, NO_PROC_ID); /* store buffers... */
+ restore_flags(flags);
+ return; /* made it... */
+
+procs_time_out:
+ printk("smp: Wheee, penguin drops off the bus\n");
+ smp_cpu_in_msg[me]--;
+ message_cpu = NO_PROC_ID;
+ restore_flags(flags);
+ return; /* why me... why me... */
+ }
+
+ /* Just need to run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+ return;
+
+cross_call_not_master:
+ printk("Cross call initiated by non master cpu\n");
+ printk("akp=%x me=%08lx\n", active_kernel_processor, me);
restore_flags(flags);
+ panic("penguin cross call");
}
-void smp_invalidate_all(void)
-{
- smp_invalidate(0, 0, 0, 0);
+void smp_flush_cache_all(void)
+{ xc0((smpfunc_t) local_flush_cache_all); }
+
+void smp_flush_tlb_all(void)
+{ xc0((smpfunc_t) local_flush_tlb_all); }
+
+void smp_flush_cache_mm(struct mm_struct *mm)
+{
+ if(mm->context != NO_CONTEXT)
+ xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
}
-void smp_invalidate_mm(struct mm_struct *mm)
+void smp_flush_tlb_mm(struct mm_struct *mm)
{
- smp_invalidate(1, (unsigned long) mm, 0, 0);
+ if(mm->context != NO_CONTEXT)
+ xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
}
-void smp_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- smp_invalidate(2, (unsigned long) mm, start, end);
+ if(mm->context != NO_CONTEXT)
+ xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
+ start, end);
}
-void smp_invalidate_page(struct vm_area_struct *vmap, unsigned long page)
+void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- smp_invalidate(3, (unsigned long)vmap->vm_mm, page, 0);
+ if(mm->context != NO_CONTEXT)
+ xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
+ start, end);
}
-void smp_reschedule_irq(int cpl, struct pt_regs *regs)
+void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{ xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }
+
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{ xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }
+
+void smp_flush_page_to_ram(unsigned long page)
+{ xc1((smpfunc_t) local_flush_page_to_ram, page); }
+
+/* Reschedule call back. */
+void smp_reschedule_irq(void)
{
if(smp_processor_id() != active_kernel_processor)
panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
smp_processor_id(), active_kernel_processor);
- if(user_mode(regs)) {
- current->utime++;
- if (current->pid) {
- if (current->priority < 15)
- kstat.cpu_nice++;
- else
- kstat.cpu_user++;
- }
- /* Update ITIMER_VIRT for current task if not in a system call */
- if (current->it_virt_value && !(--current->it_virt_value)) {
- current->it_virt_value = current->it_virt_incr;
- send_sig(SIGVTALRM,current,1);
- }
- } else {
- current->stime++;
- if(current->pid)
- kstat.cpu_system++;
-#ifdef CONFIG_PROFILE
- if (prof_buffer && current->pid) {
- extern int _stext;
- unsigned long eip = regs->eip - (unsigned long) &_stext;
- eip >>= CONFIG_PROFILE_SHIFT;
- if (eip < prof_len)
- prof_buffer[eip]++;
- }
-#endif
- }
- /*
- * check the cpu time limit on the process.
- */
- if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
- (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
- send_sig(SIGKILL, current, 1);
- if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
- (((current->stime + current->utime) % HZ) == 0)) {
- unsigned long psecs = (current->stime + current->utime) / HZ;
- /* send when equal */
- if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
- send_sig(SIGXCPU, current, 1);
- /* and every five seconds thereafter. */
- else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
- ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
- send_sig(SIGXCPU, current, 1);
- }
+ need_resched=1;
+}
- /* Update ITIMER_PROF for the current task */
- if (current->it_prof_value && !(--current->it_prof_value)) {
- current->it_prof_value = current->it_prof_incr;
- send_sig(SIGPROF,current,1);
+/* XXX FIXME: this still doesn't work right... XXX */
+
+/* #define DEBUG_CAPTURE */
+
+static volatile unsigned long release = 1;
+static volatile int capture_level = 0;
+
+void smp_capture(void)
+{
+ unsigned long flags;
+
+ if(!smp_activated || !smp_commenced)
+ return;
+#ifdef DEBUG_CAPTURE
+ printk("C<%d>", smp_processor_id());
+#endif
+ save_flags(flags); cli();
+ if(!capture_level) {
+ release = 0;
+ smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
}
+ capture_level++;
+ restore_flags(flags);
+}
+
+void smp_release(void)
+{
+ unsigned long flags;
+ int i;
- if(0 > --current->counter || current->pid == 0) {
- current->counter = 0;
- need_resched = 1;
+ if(!smp_activated || !smp_commenced)
+ return;
+#ifdef DEBUG_CAPTURE
+ printk("R<%d>", smp_processor_id());
+#endif
+ save_flags(flags); cli();
+ if(!(capture_level - 1)) {
+ release = 1;
+ for(i = 0; i < smp_num_cpus; i++)
+ while(cpu_callin_map[i])
+ barrier();
}
+ capture_level -= 1;
+ restore_flags(flags);
}
-void smp_message_irq(int cpl, struct pt_regs *regs)
+/* Park a processor, we must watch for more IPIs to invalidate
+ * our caches and TLBs. And also note we can only wait for
+ * "lock-less" IPI's and process those, as a result of such IPI's
+ * being non-maskable traps being on is enough to receive them.
+ */
+
+/* Message call back. */
+void smp_message_irq(void)
{
int i=smp_processor_id();
-/* static int n=0;
- if(n++<NR_CPUS)
- printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
- switch(smp_msg_id)
- {
- case 0: /* IRQ 13 testing - boring */
- return;
-
- /*
- * A TLB flush is needed.
- */
-
- case MSG_INVALIDATE_TLB:
- if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
- local_invalidate();
- set_bit(i, (unsigned long *)&cpu_callin_map[0]);
- cpu_callin_map[0]|=1<<smp_processor_id();
- break;
-
+
+ switch(smp_msg_id) {
+ case MSG_CROSS_CALL:
+ /* Do it to it. */
+ ccall_info.processors_in[i] = 1;
+ ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
+ ccall_info.arg4, ccall_info.arg5);
+ ccall_info.processors_out[i] = 1;
+ break;
+
/*
* Halt other CPU's for a panic or reboot
*/
- case MSG_STOP_CPU:
- while(1)
- {
- if(cpu_data[smp_processor_id()].hlt_works_ok)
- __asm__("hlt");
- }
- default:
- printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
- smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
- break;
+ case MSG_STOP_CPU:
+ sti();
+ while(1)
+ barrier();
+
+ default:
+ printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
+ smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
+ break;
}
- /*
- * Clear the IPI, so we can receive future IPI's
- */
-
- apic_read(APIC_SPIV); /* Dummy read */
- apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
}
--- /dev/null
+/* solaris.c: Solaris binary emulation, whee...
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+
+#include <asm/errno.h>
+#include <asm/solerrno.h>
+
+unsigned long solaris_xlatb_rorl[] = {
+ 0, SOL_EPERM, SOL_ENOENT, SOL_ESRCH, SOL_EINTR, SOL_EIO,
+ SOL_ENXIO, SOL_E2BIG, SOL_ENOEXEC, SOL_EBADF, SOL_ECHILD,
+ SOL_EAGAIN, SOL_ENOMEM, SOL_EACCES, SOL_EFAULT,
+ SOL_ENOTBLK, SOL_EBUSY, SOL_EEXIST, SOL_EXDEV, SOL_ENODEV,
+ SOL_ENOTDIR, SOL_EISDIR, SOL_EINVAL, SOL_ENFILE, SOL_EMFILE,
+ SOL_ENOTTY, SOL_ETXTBSY, SOL_EFBIG, SOL_ENOSPC, SOL_ESPIPE,
+ SOL_EROFS, SOL_EMLINK, SOL_EPIPE, SOL_EDOM, SOL_ERANGE,
+ SOL_EWOULDBLOCK, SOL_EINPROGRESS, SOL_EALREADY, SOL_ENOTSOCK,
+ SOL_EDESTADDRREQ, SOL_EMSGSIZE, SOL_EPROTOTYPE, SOL_ENOPROTOOPT,
+ SOL_EPROTONOSUPPORT, SOL_ESOCKTNOSUPPORT, SOL_EOPNOTSUPP,
+ SOL_EPFNOSUPPORT, SOL_EAFNOSUPPORT, SOL_EADDRINUSE,
+ SOL_EADDRNOTAVAIL, SOL_ENETDOWN, SOL_ENETUNREACH, SOL_ENETRESET,
+ SOL_ECONNABORTED, SOL_ECONNRESET, SOL_ENOBUFS, SOL_EISCONN,
+ SOL_ENOTCONN, SOL_ESHUTDOWN, SOL_ETOOMANYREFS, SOL_ETIMEDOUT,
+ SOL_ECONNREFUSED, SOL_ELOOP, SOL_ENAMETOOLONG, SOL_EHOSTDOWN,
+ SOL_EHOSTUNREACH, SOL_ENOTEMPTY, SOL_EUSERS, SOL_EUSERS,
+ SOL_EDQUOT, SOL_ESTALE, SOL_EREMOTE, SOL_ENOSTR, SOL_ETIME,
+ SOL_ENOSR, SOL_ENOMSG, SOL_EBADMSG, SOL_EIDRM, SOL_EDEADLK,
+ SOL_ENOLCK, SOL_ENONET, SOL_EINVAL, SOL_ENOLINK, SOL_EADV,
+ SOL_ESRMNT, SOL_ECOMM, SOL_EPROTO, SOL_EMULTIHOP, SOL_EINVAL,
+ SOL_EREMCHG, SOL_ENOSYS
+};
+
+extern asmlinkage int sys_open(const char *,int,int);
+
+asmlinkage int solaris_open(const char *filename, int flags, int mode)
+{
+ int newflags = flags & 0xf;
+
+ flags &= ~0xf;
+ if(flags & 0x8050)
+ newflags |= FASYNC;
+ if(flags & 0x80)
+ newflags |= O_NONBLOCK;
+ if(flags & 0x100)
+ newflags |= O_CREAT;
+ if(flags & 0x200)
+ newflags |= O_TRUNC;
+ if(flags & 0x400)
+ newflags |= O_EXCL;
+ if(flags & 0x800)
+ newflags |= O_NOCTTY;
+ return sys_open(filename, newflags, mode);
+}
+
+
-/* $Id: sparc-stub.c,v 1.10 1996/02/15 09:12:09 davem Exp $
+/* $Id: sparc-stub.c,v 1.15 1996/04/04 12:41:35 davem Exp $
* sparc-stub.c: KGDB support for the Linux kernel.
*
* Modifications to run under Linux
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
- * This file originally came from the gdb sources, and the
+ * This file originally came from the gdb sources, and the
* copyright notices have been retained below.
*/
#include <asm/system.h>
#include <asm/vac-ops.h>
#include <asm/kgdb.h>
-
+#include <asm/pgtable.h>
/*
*
* external low-level support routines
return entry;
}
+static void flush_cache_all_nop(void)
+{
+}
+
/* Place where we save old trap entries for restoration */
struct tt_entry kgdb_savettable[256];
typedef void (*trapfunc_t)(void);
set_debug_traps(void)
{
struct hard_trap_info *ht;
+ unsigned long flags;
unsigned char c;
+ save_flags(flags); cli();
+ flush_cache_all = flush_cache_all_nop;
+
/* Initialize our copy of the Linux Sparc trap table */
eh_init();
putDebugChar('+'); /* ack it */
initialized = 1; /* connect! */
+ restore_flags(flags);
}
/* Convert the SPARC hardware trap type code to a unix signal number. */
}
/*
- * This function does all command processing for interfacing to gdb. It
+ * This function does all command processing for interfacing to gdb. It
* returns 1 if you should skip the instruction at the trap address, 0
* otherwise.
*/
* breakpoint, and the icache probably has no way of knowing that a data ref to
* some location may have changed something that is in the instruction cache.
*/
- /* Only instruction cache flushing on the sun4c/sun4
- * for now. We assume control flow during the kgdb
- * transaction has not left the context in which it
- * was entered.
- */
- if((sparc_cpu_model==sun4 || sparc_cpu_model==sun4c) &&
- (sun4c_vacinfo.num_bytes && sun4c_vacinfo.on))
- sun4c_flush_context();
- /* XXX SRMMU and on-chip v8 instruction cache
- * XXX flushing goes here!
- */
-
+ flush_cache_all();
return;
/* kill the program */
--- /dev/null
+/* sun4c_irq.c
+ * arch/sparc/kernel/sun4c_irq.c:
+ *
+ * djhr: Hacked out of irq.c into a CPU dependent version.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/psr.h>
+#include <asm/vaddrs.h>
+#include <asm/timer.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/traps.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+/* Pointer to the interrupt enable byte
+ *
+ * Dave Redman (djhr@tadpole.co.uk)
+ * What you may not be aware of is that entry.S requires this variable.
+ *
+ * --- linux_trap_nmi_sun4c --
+ *
+ * so don't go making it static, like I tried. sigh.
+ */
+unsigned char *interrupt_enable = 0;
+
+static void sun4c_disable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned char current_mask, new_mask;
+
+ save_flags(flags); cli();
+ irq_nr &= NR_IRQS;
+ current_mask = *interrupt_enable;
+ switch(irq_nr) {
+ case 1:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
+ break;
+ case 8:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
+ break;
+ case 10:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
+ break;
+ case 14:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
+ break;
+ default:
+ restore_flags(flags);
+ return;
+ }
+ *interrupt_enable = new_mask;
+ restore_flags(flags);
+}
+
+static void sun4c_enable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned char current_mask, new_mask;
+
+ save_flags(flags); cli();
+ irq_nr &= NR_IRQS;
+ current_mask = *interrupt_enable;
+ switch(irq_nr) {
+ case 1:
+ new_mask = ((current_mask) | SUN4C_INT_E1);
+ break;
+ case 8:
+ new_mask = ((current_mask) | SUN4C_INT_E8);
+ break;
+ case 10:
+ new_mask = ((current_mask) | SUN4C_INT_E10);
+ break;
+ case 14:
+ new_mask = ((current_mask) | SUN4C_INT_E14);
+ break;
+ default:
+ restore_flags(flags);
+ return;
+ }
+ *interrupt_enable = new_mask;
+ restore_flags(flags);
+}
+
+#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
+#define PROFILE_IRQ 14 /* Level14 ticker.. used by OBP for polling */
+
+volatile struct sun4c_timer_info *sun4c_timers;
+
+static void sun4c_clear_clock_irq(void)
+{
+ volatile unsigned int clear_intr;
+ clear_intr = sun4c_timers->timer_limit10;
+}
+
+static void sun4c_clear_profile_irq(void )
+{
+ /* Errm.. not sure how to do this.. */
+}
+
+static void sun4c_load_profile_irq(unsigned int limit)
+{
+ /* Errm.. not sure how to do this.. */
+}
+
+static void sun4c_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
+{
+ int irq;
+
+ /* Map the Timer chip, this is implemented in hardware inside
+ * the cache chip on the sun4c.
+ */
+ sun4c_timers = sparc_alloc_io ((void *) SUN4C_TIMER_PHYSADDR, 0,
+ sizeof(struct sun4c_timer_info),
+ "timer", 0x0, 0x0);
+
+ /* Have the level 10 timer tick at 100HZ. We don't touch the
+ * level 14 timer limit since we are letting the prom handle
+ * them until we have a real console driver so L1-A works.
+ */
+ sun4c_timers->timer_limit10 = (((1000000/HZ) + 1) << 10);
+
+ irq = request_irq(TIMER_IRQ,
+ counter_fn,
+ (SA_INTERRUPT | SA_STATIC_ALLOC),
+ "timer", NULL);
+ if (irq) {
+ prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
+ prom_halt();
+ }
+
+ claim_ticker14(NULL, PROFILE_IRQ, 0);
+}
+
+static void sun4c_nop(void)
+{
+}
+
+void sun4c_init_IRQ(void)
+{
+ struct linux_prom_registers int_regs[2];
+ int ie_node;
+
+ ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
+ "interrupt-enable");
+ if(ie_node == 0)
+ panic("Cannot find /interrupt-enable node");
+
+ /* Depending on the "address" property is bad news... */
+ prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs));
+ interrupt_enable = (char *) sparc_alloc_io(int_regs[0].phys_addr, 0,
+ int_regs[0].reg_size,
+ "sun4c_interrupts",
+ int_regs[0].which_io, 0x0);
+ enable_irq = sun4c_enable_irq;
+ disable_irq = sun4c_disable_irq;
+ clear_clock_irq = sun4c_clear_clock_irq;
+ clear_profile_irq = sun4c_clear_profile_irq;
+ load_profile_irq = sun4c_load_profile_irq;
+ init_timers = sun4c_init_timers;
+#ifdef __SMP__
+ set_cpu_int = sun4c_nop;
+ clear_cpu_int = sun4c_nop;
+ set_irq_udt = sun4c_nop;
+#endif
+ *interrupt_enable = (SUN4C_INT_ENABLE);
+ sti();
+}
--- /dev/null
+/* sun4m_irq.c
+ * arch/sparc/kernel/sun4m_irq.c:
+ *
+ * djhr: Hacked out of irq.c into a CPU dependent version.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/psr.h>
+#include <asm/vaddrs.h>
+#include <asm/timer.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+static unsigned long dummy;
+
+extern int linux_num_cpus;
+struct sun4m_intregs *sun4m_interrupts;
+unsigned long *irq_rcvreg = &dummy;
+
+/* These tables only apply for interrupts greater than 15..
+ *
+ * any intr value below 0x10 is considered to be a soft-int
+ * this may be useful or it may not.. but that's how I've done it.
+ * and it won't clash with what OBP is telling us about devices.
+ *
+ * take an encoded intr value and look up whether it's valid
+ * then get the mask bits that match from irq_mask
+ */
+static unsigned char irq_xlate[32] = {
+ /* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f */
+ 0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 5, 6, 0, 0, 7,
+ 0, 0, 8, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 0
+};
+
+static unsigned long irq_mask[] = {
+ 0, /* illegal index */
+ SUN4M_INT_SCSI, /* 1 irq 4 */
+ SUN4M_INT_ETHERNET, /* 2 irq 6 */
+ SUN4M_INT_VIDEO, /* 3 irq 8 */
+ SUN4M_INT_REALTIME, /* 4 irq 10 */
+ SUN4M_INT_FLOPPY, /* 5 irq 11 */
+ (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), /* 6 irq 12 */
+ SUN4M_INT_MODULE_ERR, /* 7 irq 15 */
+ SUN4M_INT_SBUS(1), /* 8 irq 2 */
+ SUN4M_INT_SBUS(2), /* 9 irq 3 */
+ SUN4M_INT_SBUS(3), /* 10 irq 5 */
+ SUN4M_INT_SBUS(4), /* 11 irq 7 */
+ SUN4M_INT_SBUS(5), /* 12 irq 9 */
+ SUN4M_INT_SBUS(6), /* 13 irq 11 */
+ SUN4M_INT_SBUS(7) /* 14 irq 13 */
+};
+
+inline unsigned long sun4m_get_irqmask(unsigned int irq)
+{
+ unsigned long mask;
+
+ if (irq > 0x20) {
+ /* OBIO/SBUS interrupts */
+ irq &= 0x1f;
+ mask = irq_mask[irq_xlate[irq]];
+ if (!mask)
+ printk("sun4m_get_irqmask: IRQ%d has no valid mask!\n",irq);
+ } else {
+ /* Soft Interrupts will come here
+ * Currently there is no way to trigger them but I'm sure something
+ * could be cooked up.
+ */
+ irq &= 0xf;
+ mask = SUN4M_SOFT_INT(irq);
+ }
+ return mask;
+}
+
+static void sun4m_disable_irq(unsigned int irq_nr)
+{
+ unsigned long mask, flags;
+ int cpu = smp_processor_id();
+
+ mask = sun4m_get_irqmask(irq_nr);
+ save_flags(flags); cli();
+ if (irq_nr > 15)
+ sun4m_interrupts->set = mask;
+ else
+ sun4m_interrupts->cpu_intregs[cpu].set = mask;
+ restore_flags(flags);
+}
+
+static void sun4m_enable_irq(unsigned int irq_nr)
+{
+ unsigned long mask, flags;
+ int cpu = smp_processor_id();
+
+ /* Dreadful floppy hack. When we use 0x2b instead of
+ * 0x0b the system blows (it starts to whistle!).
+ * So we continue to use 0x0b. Fixme ASAP. --P3
+ */
+ if (irq_nr != 0x0b) {
+ mask = sun4m_get_irqmask(irq_nr);
+ save_flags(flags); cli();
+ if (irq_nr > 15)
+ sun4m_interrupts->clear = mask;
+ else
+ sun4m_interrupts->cpu_intregs[cpu].clear = mask;
+ restore_flags(flags);
+ } else {
+ save_flags(flags); cli();
+ sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
+ restore_flags(flags);
+ }
+}
+
+void sun4m_send_ipi(int cpu, int level)
+{
+ unsigned long mask;
+
+ mask = sun4m_get_irqmask(level);
+ sun4m_interrupts->cpu_intregs[cpu].set = mask;
+}
+
+void sun4m_clear_ipi(int cpu, int level)
+{
+ unsigned long mask;
+
+ mask = sun4m_get_irqmask(level);
+ sun4m_interrupts->cpu_intregs[cpu].clear = mask;
+}
+
+void sun4m_set_udt(int cpu)
+{
+ sun4m_interrupts->undirected_target = cpu;
+}
+
+#define OBIO_INTR 0x20
+#define TIMER_IRQ (OBIO_INTR | 10)
+#define PROFILE_IRQ (OBIO_INTR | 14)
+
+struct sun4m_timer_regs *sun4m_timers;
+unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
+
+static void sun4m_clear_clock_irq(void)
+{
+ volatile unsigned int clear_intr;
+ clear_intr = sun4m_timers->l10_timer_limit;
+}
+
+static void sun4m_clear_profile_irq(void)
+{
+ volatile unsigned int clear;
+
+ clear = sun4m_timers->cpu_timers[0].l14_timer_limit;
+}
+
+static void sun4m_load_profile_irq(unsigned int limit)
+{
+ sun4m_timers->cpu_timers[0].l14_timer_limit = limit;
+}
+
+static void sun4m_lvl14_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ volatile unsigned int clear;
+
+ printk("CPU[%d]: TOOK A LEVEL14!\n", smp_processor_id());
+ /* we do nothing with this at present
+ * this is purely to prevent OBP getting its mucky paws
+ * in linux.
+ */
+ clear = sun4m_timers->cpu_timers[0].l14_timer_limit; /* clear interrupt */
+
+ /* reload with value, this allows on the fly retuning of the level14
+ * timer
+ */
+ sun4m_timers->cpu_timers[0].l14_timer_limit = lvl14_resolution;
+}
+
+static void sun4m_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
+{
+ int reg_count, irq, cpu;
+ struct linux_prom_registers cnt_regs[PROMREG_MAX];
+ int obio_node, cnt_node;
+
+ cnt_node = 0;
+ if((obio_node =
+ prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
+ (obio_node = prom_getchild (obio_node)) == 0 ||
+ (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
+ prom_printf("Cannot find /obio/counter node\n");
+ prom_halt();
+ }
+ reg_count = prom_getproperty(cnt_node, "reg",
+ (void *) cnt_regs, sizeof(cnt_regs));
+ reg_count = (reg_count/sizeof(struct linux_prom_registers));
+
+ /* Apply the obio ranges to the timer registers. */
+ prom_apply_obio_ranges(cnt_regs, reg_count);
+
+ cnt_regs[4].phys_addr = cnt_regs[reg_count-1].phys_addr;
+ cnt_regs[4].reg_size = cnt_regs[reg_count-1].reg_size;
+ cnt_regs[4].which_io = cnt_regs[reg_count-1].which_io;
+ for(obio_node = 1; obio_node < 4; obio_node++) {
+ cnt_regs[obio_node].phys_addr =
+ cnt_regs[obio_node-1].phys_addr + PAGE_SIZE;
+ cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size;
+ cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io;
+ }
+
+ /* Map the per-cpu Counter registers. */
+ sun4m_timers = sparc_alloc_io(cnt_regs[0].phys_addr, 0,
+ PAGE_SIZE*NCPUS, "counters_percpu",
+ cnt_regs[0].which_io, 0x0);
+
+ /* Map the system Counter register. */
+ sparc_alloc_io(cnt_regs[4].phys_addr, 0,
+ cnt_regs[4].reg_size,
+ "counters_system",
+ cnt_regs[4].which_io, 0x0);
+
+ sun4m_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
+
+ irq = request_irq(TIMER_IRQ,
+ counter_fn,
+ (SA_INTERRUPT | SA_STATIC_ALLOC),
+ "timer", NULL);
+ if (irq) {
+ prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
+ prom_halt();
+ }
+
+ /* Can't cope with multiple CPUS yet so no level14 tick events */
+#if 0
+ if (linux_num_cpus > 1)
+ claim_ticker14(NULL, PROFILE_IRQ, 0);
+ else
+ claim_ticker14(sun4m_lvl14_handler, PROFILE_IRQ, lvl14_resolution);
+#endif
+ if(linux_num_cpus > 1) {
+ for(cpu = 0; cpu < 4; cpu++)
+ sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
+ sun4m_interrupts->set = SUN4M_INT_E14;
+ } else {
+ sun4m_timers->cpu_timers[0].l14_timer_limit = 0;
+ }
+}
+
+void sun4m_init_IRQ(void)
+{
+ int ie_node,i;
+ struct linux_prom_registers int_regs[PROMREG_MAX];
+ int num_regs;
+
+ cli();
+ if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
+ (ie_node = prom_getchild (ie_node)) == 0 ||
+ (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
+ prom_printf("Cannot find /obio/interrupt node\n");
+ prom_halt();
+ }
+ num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
+ sizeof(int_regs));
+ num_regs = (num_regs/sizeof(struct linux_prom_registers));
+
+ /* Apply the obio ranges to these registers. */
+ prom_apply_obio_ranges(int_regs, num_regs);
+
+ int_regs[4].phys_addr = int_regs[num_regs-1].phys_addr;
+ int_regs[4].reg_size = int_regs[num_regs-1].reg_size;
+ int_regs[4].which_io = int_regs[num_regs-1].which_io;
+ for(ie_node = 1; ie_node < 4; ie_node++) {
+ int_regs[ie_node].phys_addr = int_regs[ie_node-1].phys_addr + PAGE_SIZE;
+ int_regs[ie_node].reg_size = int_regs[ie_node-1].reg_size;
+ int_regs[ie_node].which_io = int_regs[ie_node-1].which_io;
+ }
+
+ /* Map the interrupt registers for all possible cpus. */
+ sun4m_interrupts = sparc_alloc_io(int_regs[0].phys_addr, 0,
+ PAGE_SIZE*NCPUS, "interrupts_percpu",
+ int_regs[0].which_io, 0x0);
+
+ /* Map the system interrupt control registers. */
+ sparc_alloc_io(int_regs[4].phys_addr, 0,
+ int_regs[4].reg_size, "interrupts_system",
+ int_regs[4].which_io, 0x0);
+
+ sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
+ for (i=0; i<linux_num_cpus; i++)
+ sun4m_interrupts->cpu_intregs[i].clear = ~0x17fff;
+
+ if (linux_num_cpus > 1) {
+ /* system wide interrupts go to cpu 0, this should always
+ * be safe because it is guaranteed to be fitted or OBP doesn't
+ * come up
+ *
+ * Not sure, but writing here on SLAVIO systems may puke
+ * so I don't do it unless there is more than 1 cpu.
+ */
+#if 0
+ printk("Warning:"
+ "sun4m multiple CPU interrupt code requires work\n");
+#endif
+ irq_rcvreg = &sun4m_interrupts->undirected_target;
+ sun4m_interrupts->undirected_target = 0;
+ }
+ enable_irq = sun4m_enable_irq;
+ disable_irq = sun4m_disable_irq;
+ clear_clock_irq = sun4m_clear_clock_irq;
+ clear_profile_irq = sun4m_clear_profile_irq;
+ load_profile_irq = sun4m_load_profile_irq;
+ init_timers = sun4m_init_timers;
+#ifdef __SMP__
+ set_cpu_int = sun4m_send_ipi;
+ clear_cpu_int = sun4m_clear_ipi;
+ set_irq_udt = sun4m_set_udt;
+#endif
+ sti();
+}
-/* $Id: sunos_asm.S,v 1.10 1995/12/26 04:09:33 davem Exp $
+/* $Id: sunos_asm.S,v 1.12 1996/04/03 02:14:57 davem Exp $
* sunos_asm.S: SunOS system calls which must have a low-level
* entry point to operate correctly.
*
call C_LABEL(sparc_execve)
add %sp, REGWIN_SZ, %o0
- st %o0, [%sp + REGWIN_SZ + PT_I0]
-
b C_LABEL(ret_sys_call)
nop
+
+
-/* $Id: sunos_ioctl.c,v 1.17 1996/02/10 04:29:20 davem Exp $
+/* $Id: sunos_ioctl.c,v 1.18 1996/04/04 12:41:38 davem Exp $
* sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
*
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/if.h>
+#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/fs.h>
#include <linux/mm.h>
}
}
- /* Binary compatibility is good American knowhow fuckin' up. */
+ /* Binary compatibility is good American knowhow fuckin' up. */
if(cmd == TIOCNOTTY)
return sys_setsid();
-/* $Id: switch.S,v 1.14 1995/12/29 21:47:22 davem Exp $
+/* $Id: switch.S,v 1.18 1996/04/03 02:15:00 davem Exp $
* switch.S: Sparc task switch code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/contregs.h>
#include <asm/cprefix.h>
#include <asm/psr.h>
+#include <asm/asmmacro.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
/* Load new kernel state. */
wr %sw_psr, PSR_ET, %psr
WRITE_PAUSE
+#ifdef __SMP__
+ GET_PROCESSOR_OFFSET(sw_psr)
+ set C_LABEL(current_set), %sw_tmp
+ st %sw_ntask, [%sw_tmp + %sw_psr]
+#else
sethi %hi(C_LABEL(current_set)), %sw_tmp
st %sw_ntask, [%sw_tmp + %lo(C_LABEL(current_set))]
+#endif
ldd [%sw_ntask + THREAD_KPSR], %sw_psr
wr %sw_psr, PSR_ET, %psr
WRITE_PAUSE
retl
nop
+
+
+#ifdef __SMP__
+ /* Because of nasty register windows this is the only way
+ * to start a processor into it's cpu_idle() thread.
+ */
+
+ .globl C_LABEL(sparc_cpusched)
+C_LABEL(sparc_cpusched):
+ LOAD_CURRENT(g1, g2)
+ rd %psr, %g7
+
+ wr %g7, PSR_ET, %psr
+ WRITE_PAUSE
+
+ ldd [%g1 + THREAD_KPSR], %g2
+
+ wr %g2, PSR_ET, %psr
+ WRITE_PAUSE
+
+ wr %g3, 0x0, %wim
+ WRITE_PAUSE
+
+ ldd [%g1 + THREAD_KSP], %sp
+ LOAD_WINDOW(sp)
+
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+
+ retl
+ nop
+#endif
+++ /dev/null
-/* sys_bsd.c: {net, open}bsd specific system call handling.
- *
- * Copyright (C) 1995 David S. Miller (davem@caipfs.rutgers.edu)
- */
-#include <linux/kernel.h>
-
-#include <asm/ptrace.h>
-#include <asm/bsderrno.h>
-
-static int errno_trans[] = {
- 0,
- BSD_EPERM, BSD_ENOENT, BSD_ESRCH, BSD_EINTR, BSD_EIO,
- BSD_ENXIO, BSD_E2BIG, BSD_ENOEXEC, BSD_EBADF, BSD_ECHILD,
- BSD_EAGAIN, BSD_ENOMEM, BSD_EACCES, BSD_EFAULT, BSD_ENOTBLK,
- BSD_EBUSY, BSD_EEXIST, BSD_EXDEV, BSD_ENODEV, BSD_ENOTDIR,
- BSD_EISDIR, BSD_EINVAL, BSD_ENFILE, BSD_EMFILE, BSD_ENOTTY,
- BSD_ETXTBSY, BSD_EFBIG, BSD_ENOSPC, BSD_ESPIPE, BSD_EROFS,
- BSD_EMLINK, BSD_EPIPE, BSD_EDOM, BSD_ERANGE, BSD_EWOULDBLOCK,
- BSD_EINPROGRESS, BSD_EALREADY, BSD_ENOTSOCK, BSD_EDESTADDRREQ,
- BSD_EMSGSIZE, BSD_EPROTOTYPE, BSD_ENOPROTOOPT, BSD_EPROTONOSUPPORT,
- BSD_ESOCKTNOSUPPORT, BSD_EOPNOTSUPP, BSD_EPFNOSUPPORT, BSD_EAFNOSUPPORT,
- BSD_EADDRINUSE,
- BSD_EADDRNOTAVAIL, BSD_ENETDOWN, BSD_ENETUNREACH, BSD_ENETRESET,
- BSD_ECONNABORTED, BSD_ECONNRESET, BSD_ENOBUFS, BSD_EISCONN,
- BSD_ENOTCONN, BSD_ESHUTDOWN, BSD_ETOOMANYREFS, BSD_ETIMEDOUT,
- BSD_ECONNREFUSED, BSD_ELOOP, BSD_ENAMETOOLONG, BSD_EHOSTDOWN,
- BSD_EHOSTUNREACH, BSD_ENOTEMPTY, BSD_EPROCLIM, BSD_EUSERS,
- BSD_EDQUOT, BSD_ESTALE, BSD_EREMOTE, BSD_ENOSTR, BSD_ETIME,
- BSD_ENOSR, BSD_ENOMSG, BSD_EBADMSG, BSD_EIDRM, BSD_EDEADLK,
- BSD_ENOLCK, BSD_ENONET, BSD_ERREMOTE, BSD_ENOLINK, BSD_EADV,
- BSD_ESRMNT, BSD_ECOMM, BSD_EPROTO, BSD_EMULTIHOP, BSD_EDOTDOT,
- BSD_EREMCHG, BSD_ENOSYS
-};
-
-asmlinkage int netbsd_nosys(void)
-{
- struct pt_regs *regs;
-
- regs = (struct pt_regs *) (current->saved_kernel_stack +
- sizeof(struct reg_window));
- current->tss.sig_address = regs->pc;
- current->tss.sig_desc = regs->u_regs[UREG_G1];
- send_sig(SIGSYS, current, 1);
- printk("Process makes ni_syscall number %d, register dump:\n",
- (int) regs->u_regs[UREG_G1]);
- show_regs(regs);
- return -BSD_ENOSYS;
-}
-/* $Id: sys_sparc.c,v 1.7 1996/03/01 07:15:58 davem Exp $
+/* $Id: sys_sparc.c,v 1.10 1996/04/20 08:33:55 davem Exp $
* linux/arch/sparc/kernel/sys_sparc.c
*
* This file contains various random system calls that
/*
* sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way unix traditionally does this, though.
+ * a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage void sparc_pipe(struct pt_regs *regs)
+asmlinkage int sparc_pipe(struct pt_regs *regs)
{
int fd[2];
int error;
error = do_pipe(fd);
if (error) {
- regs->u_regs[UREG_I0] = error;
+ return error;
} else {
- regs->u_regs[UREG_I0] = fd[0];
regs->u_regs[UREG_I1] = fd[1];
+ return fd[0];
}
}
}
return -EINVAL;
}
+
+unsigned long get_sparc_unmapped_area(unsigned long len)
+{
+ unsigned long addr = 0xE8000000UL;
+ struct vm_area_struct * vmm;
+
+ if (len > TASK_SIZE)
+ return 0;
+ for (vmm = find_vma(current, addr); ; vmm = vmm->vm_next) {
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return 0;
+ if (!vmm || addr + len <= vmm->vm_start)
+ return addr;
+ addr = vmm->vm_end;
+ }
+}
+
+/* Linux version of mmap */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long off)
+{
+ struct file * file = NULL;
+ long retval;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd])){
+ return -EBADF;
+ }
+ }
+ if(!(flags & MAP_FIXED) && !addr) {
+ addr = get_sparc_unmapped_area(len);
+ if(!addr){
+ return -ENOMEM;
+ }
+ }
+ retval = do_mmap(file, addr, len, prot, flags, off);
+ return retval;
+}
-/* $Id: sys_sunos.c,v 1.33 1996/03/01 07:16:00 davem Exp $
- * sys_sunos.c: SunOS specific syscall compatibility support.
+/* $Id: sys_sunos.c,v 1.37 1996/04/19 16:52:38 miguel Exp $
+ * sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Based upon preliminary work which is:
*
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ *
+ * The sunos_poll routine is based on iBCS2's poll routine, this
+ * is the copyright message for that file:
+ *
+ * This file contains the procedures for the handling of poll.
+ *
+ * Copyright (C) 1994 Eric Youngdale
+ *
+ * Created for Linux based loosely upon linux select code, which
+ * in turn is loosely based upon Mathius Lattner's minix
+ * patches by Peter MacDonald. Heavily edited by Linus.
+ *
+ * Poll is used by SVr4 instead of select, and it has considerably
+ * more functionality. Parts of it are related to STREAMS, and since
+ * we do not have streams, we fake it. In fact, select() still exists
+ * under SVr4, but libc turns it into a poll() call instead. We attempt
+ * to do the inverse mapping.
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mman.h>
-#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/utsname.h>
#include <linux/fs.h>
#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+#include <linux/pagemap.h>
+
+#include <asm/segment.h>
+#ifndef KERNEL_DS
+#include <linux/segment.h>
+#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pconf.h>
#include <asm/idprom.h> /* for gethostid() */
#include <asm/unistd.h>
+#include <asm/system.h>
/* For the nfs mount emulation */
#include <linux/socket.h>
#include <linux/time.h>
#include <linux/personality.h>
-static unsigned long get_sparc_unmapped_area(unsigned long len)
-{
- unsigned long addr = 0xE8000000UL;
- struct vm_area_struct * vmm;
-
- if (len > TASK_SIZE)
- return 0;
- for (vmm = find_vma(current, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (TASK_SIZE - len < addr)
- return 0;
- if (!vmm || addr + len <= vmm->vm_start)
- return addr;
- addr = vmm->vm_end;
- }
-}
+extern unsigned long get_sparc_unmapped_area(unsigned long len);
/* We use the SunOS mmap() semantics. */
asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
* simple, it hopefully works in most obvious cases.. Easy to
* fool it, but this should catch most mistakes.
*/
- freepages = buffermem >> 12;
+ freepages = buffermem >> PAGE_SHIFT;
+ freepages += page_cache_size;
+ freepages >>= 1;
freepages += nr_free_pages;
freepages += nr_swap_pages;
freepages -= MAP_NR(high_memory) >> 4;
- freepages -= (newbrk-oldbrk) >> 12;
+ freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
if (freepages < 0)
return -ENOMEM;
/*
}
/* XXX Completely undocumented, and completely magic...
- * XXX I believe it is to increase the size of the stack by
+ * XXX I believe it is to increase the size of the stack by
* XXX argument 'increment' and return the new end of stack
* XXX area. Wheee...
*/
* low-bit is one == Page is currently residing in core
* All other bits are undefined within the character so there...
* Also, if you try to get stats on an area outside of the user vm area
- * *or* the passed base address is not aligned on a page boundary you
+ * *or* the passed base address is not aligned on a page boundary you
* get an error.
*/
asmlinkage int sunos_mincore(unsigned long addr, unsigned long len, char *array)
}
/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
- * resource limit and is for backwards compatibility with older sunos
+ * resource limit and is for backwards compatibility with older sunos
* revs.
*/
asmlinkage long sunos_getdtablesize(void)
return cnt - buf.count;
}
-/* Old sunos getdirentries, severely broken compatibility stuff here. */
+/* Old sunos getdirentries, severely broken compatibility stuff here. */
struct sunos_direntry {
unsigned long d_ino;
unsigned short d_reclen;
return error;
/* Ok, here comes the fun part: Linux's nfs mount needs a
* socket connection to the server, but SunOS mount does not
- * require this, so we use the information on the destination
+ * require this, so we use the information on the destination
* address to create a socket and bind it to a reserved
* port on this system
*/
}
return -1;
}
+
+#define POLL_ROUND_UP(x,y) (((x)+(y)-1)/(y))
+
+#define POLLIN 1
+#define POLLPRI 2
+#define POLLOUT 4
+#define POLLERR 8
+#define POLLHUP 16
+#define POLLNVAL 32
+#define POLLRDNORM 64
+#define POLLWRNORM POLLOUT
+#define POLLRDBAND 128
+#define POLLWRBAND 256
+
+#define LINUX_POLLIN (POLLRDNORM | POLLRDBAND | POLLIN)
+#define LINUX_POLLOUT (POLLWRBAND | POLLWRNORM | POLLOUT)
+#define LINUX_POLLERR (POLLERR)
+
+static inline void free_wait(select_table * p)
+{
+ struct select_table_entry * entry = p->entry + p->nr;
+
+ while (p->nr > 0) {
+ p->nr--;
+ entry--;
+ remove_wait_queue(entry->wait_address,&entry->wait);
+ }
+}
+
+
+/* Copied directly from fs/select.c */
+static int check(int flag, select_table * wait, struct file * file)
+{
+ struct inode * inode;
+ struct file_operations *fops;
+ int (*select) (struct inode *, struct file *, int, select_table *);
+
+ inode = file->f_inode;
+ if ((fops = file->f_op) && (select = fops->select))
+ return select(inode, file, flag, wait)
+ || (wait && select(inode, file, flag, NULL));
+ if (S_ISREG(inode->i_mode))
+ return 1;
+ return 0;
+}
+
+struct poll {
+ int fd;
+ short events;
+ short revents;
+};
+
+int sunos_poll(struct poll * ufds, size_t nfds, int timeout)
+{
+ int i,j, count, fdcount, error, retflag;
+ struct poll * fdpnt;
+ struct poll * fds, *fds1;
+ select_table wait_table, *wait;
+ struct select_table_entry *entry;
+
+ if ((error = verify_area(VERIFY_READ, ufds, nfds*sizeof(struct poll))))
+ return error;
+
+ if (nfds > NR_OPEN)
+ return -EINVAL;
+
+ if (!(entry = (struct select_table_entry*)__get_free_page(GFP_KERNEL))
+ || !(fds = (struct poll *)kmalloc(nfds*sizeof(struct poll), GFP_KERNEL)))
+ return -ENOMEM;
+
+ memcpy_fromfs(fds, ufds, nfds*sizeof(struct poll));
+
+ if (timeout < 0)
+ current->timeout = 0x7fffffff;
+ else {
+ current->timeout = jiffies + POLL_ROUND_UP(timeout, (1000/HZ));
+ if (current->timeout <= jiffies)
+ current->timeout = 0;
+ }
+
+ count = 0;
+ wait_table.nr = 0;
+ wait_table.entry = entry;
+ wait = &wait_table;
+
+ for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
+ i = fdpnt->fd;
+ fdpnt->revents = 0;
+ if (!current->files->fd[i] || !current->files->fd[i]->f_inode)
+ fdpnt->revents = POLLNVAL;
+ }
+repeat:
+ current->state = TASK_INTERRUPTIBLE;
+ for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
+ i = fdpnt->fd;
+
+ if(i < 0) continue;
+ if (!current->files->fd[i] || !current->files->fd[i]->f_inode) continue;
+
+ if ((fdpnt->events & LINUX_POLLIN)
+ && check(SEL_IN, wait, current->files->fd[i])) {
+ if (fdpnt->events & POLLIN)
+ retflag = POLLIN;
+ if (fdpnt->events & POLLRDNORM)
+ retflag = POLLRDNORM;
+ fdpnt->revents |= retflag;
+ count++;
+ wait = NULL;
+ }
+
+ if ((fdpnt->events & LINUX_POLLOUT) &&
+ check(SEL_OUT, wait, current->files->fd[i])) {
+ fdpnt->revents |= (LINUX_POLLOUT & fdpnt->events);
+ count++;
+ wait = NULL;
+ }
+
+ if (check(SEL_EX, wait, current->files->fd[i])) {
+ fdpnt->revents |= POLLHUP;
+ count++;
+ wait = NULL;
+ }
+ }
+
+ if ((current->signal & (~current->blocked)))
+ return -EINTR;
+
+ wait = NULL;
+ if (!count && current->timeout > jiffies) {
+ schedule();
+ goto repeat;
+ }
+
+ free_wait(&wait_table);
+ free_page((unsigned long) entry);
+
+ /* OK, now copy the revents fields back to user space. */
+ fds1 = fds;
+ fdcount = 0;
+ for(i=0; i < (int)nfds; i++, ufds++, fds++) {
+ if (fds->revents) {
+ fdcount++;
+ }
+ put_fs_word(fds->revents, &ufds->revents);
+ }
+ kfree(fds1);
+ current->timeout = 0;
+ current->state = TASK_RUNNING;
+ return fdcount;
+}
+
-/* $Id: systbls.S,v 1.29 1996/03/01 07:16:02 davem Exp $
- * systbls.S: System call entry point tables for OS compatibility.
+/* $Id: systbls.S,v 1.38 1996/04/20 08:43:26 davem Exp $
+ * systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
.globl C_LABEL(sys_call_table)
C_LABEL(sys_call_table):
/*0*/ .long C_LABEL(sys_setup), C_LABEL(sys_exit), C_LABEL(sys_fork)
- .long C_LABEL(sys_read), C_LABEL(sys_write), C_LABEL(sys_open)
- .long C_LABEL(sys_close), C_LABEL(sys_wait4), C_LABEL(sys_creat)
- .long C_LABEL(sys_link), C_LABEL(sys_unlink), C_LABEL(sunos_execv)
- .long C_LABEL(sys_chdir), C_LABEL(sys_ni_syscall), C_LABEL(sys_mknod)
- .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sys_brk)
- .long C_LABEL(sys_ni_syscall), C_LABEL(sys_lseek), C_LABEL(sys_getpid)
- .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_setuid)
- .long C_LABEL(sys_getuid), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
- .long C_LABEL(sys_alarm), C_LABEL(sys_ni_syscall), C_LABEL(sys_pause)
- .long C_LABEL(sys_utime), C_LABEL(sys_stty), C_LABEL(sys_gtty)
+ .long C_LABEL(sys_read), C_LABEL(sys_write)
+/*5*/ .long C_LABEL(sys_open), C_LABEL(sys_close), C_LABEL(sys_wait4)
+ .long C_LABEL(sys_creat), C_LABEL(sys_link)
+/*10*/ .long C_LABEL(sys_unlink), C_LABEL(sunos_execv), C_LABEL(sys_chdir)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_mknod)
+/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sys_brk)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_lseek)
+/*20*/ .long C_LABEL(sys_getpid), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_setuid), C_LABEL(sys_getuid)
+/*25*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_alarm)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_pause)
+/*30*/ .long C_LABEL(sys_utime), C_LABEL(sys_stty), C_LABEL(sys_gtty)
.long C_LABEL(sys_access), C_LABEL(sys_nice), C_LABEL(sys_ftime)
.long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_newstat)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_newlstat), C_LABEL(sys_dup)
.long C_LABEL(sys_newfstat), C_LABEL(sys_ni_syscall), C_LABEL(sys_getpagesize)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_vfork), C_LABEL(sys_ni_syscall)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
- .long C_LABEL(sunos_mmap), C_LABEL(sys_ni_syscall), C_LABEL(sys_munmap)
+ .long C_LABEL(sys_mmap), C_LABEL(sys_ni_syscall), C_LABEL(sys_munmap)
.long C_LABEL(sys_mprotect), C_LABEL(sys_ni_syscall), C_LABEL(sys_vhangup)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_getgroups)
.long C_LABEL(sys_setgroups), C_LABEL(sys_getpgrp), C_LABEL(sys_ni_syscall)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
.long C_LABEL(sys_ni_syscall), C_LABEL(sys_sigpending), C_LABEL(sys_ni_syscall)
.long C_LABEL(sys_setpgid), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
- .long C_LABEL(sys_ni_syscall), C_LABEL(sys_uname), C_LABEL(sys_init_module)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_newuname), C_LABEL(sys_init_module)
.long C_LABEL(sys_personality), C_LABEL(sys_prof), C_LABEL(sys_break)
.long C_LABEL(sys_lock), C_LABEL(sys_mpx), C_LABEL(sys_ulimit)
.long C_LABEL(sys_getppid), C_LABEL(sys_sigaction), C_LABEL(sys_sgetmask)
.long C_LABEL(sys_link), C_LABEL(sys_unlink), C_LABEL(sunos_execv)
.long C_LABEL(sys_chdir), C_LABEL(sunos_nosys), C_LABEL(sys_mknod)
.long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sunos_brk)
- .long C_LABEL(sunos_nosys), C_LABEL(sys_lseek), LOWSYS(sunosgetpid)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_lseek), C_LABEL(sunos_getpid)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
- .long LOWSYS(sunosgetuid), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_getuid), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sys_access), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_newstat)
.long C_LABEL(sunos_nosys), C_LABEL(sys_newlstat), C_LABEL(sys_dup)
.long C_LABEL(sys_pipe), C_LABEL(sunos_nosys), C_LABEL(sys_profil)
- .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), LOWSYS(sunosgetgid)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_getgid)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
/*50*/ .long C_LABEL(sunos_nosys), C_LABEL(sys_acct), C_LABEL(sunos_nosys)
- .long LOWSYS(sunosmctl), C_LABEL(sunos_ioctl), C_LABEL(sys_reboot)
+ .long C_LABEL(sunos_mctl), C_LABEL(sunos_ioctl), C_LABEL(sys_reboot)
.long C_LABEL(sunos_nosys), C_LABEL(sys_symlink), C_LABEL(sys_readlink)
- .long C_LABEL(sys_execve), LOWSYS(umask), C_LABEL(sys_chroot)
- .long C_LABEL(sys_newfstat), C_LABEL(sunos_nosys), LOWSYS(getpagesize)
+ .long C_LABEL(sys_execve), C_LABEL(sys_umask), C_LABEL(sys_chroot)
+ .long C_LABEL(sys_newfstat), C_LABEL(sunos_nosys), C_LABEL(sys_getpagesize)
.long C_LABEL(sys_msync), C_LABEL(sys_vfork), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_sbrk), C_LABEL(sunos_sstk)
.long C_LABEL(sunos_mmap), C_LABEL(sunos_vadvise), C_LABEL(sys_munmap)
.long C_LABEL(sys_setgroups), C_LABEL(sys_getpgrp), C_LABEL(sunos_setpgrp)
.long C_LABEL(sys_setitimer), C_LABEL(sunos_nosys), C_LABEL(sys_swapon)
.long C_LABEL(sys_getitimer), C_LABEL(sys_gethostname), C_LABEL(sys_sethostname)
- .long LOWSYS(sunosgdtsize), C_LABEL(sys_dup2), LOWSYS(sunosnop)
- .long C_LABEL(sys_fcntl), C_LABEL(sunos_select), LOWSYS(sunosnop)
+ .long C_LABEL(sunos_getdtablesize), C_LABEL(sys_dup2), C_LABEL(sunos_nop)
+ .long C_LABEL(sys_fcntl), C_LABEL(sunos_select), C_LABEL(sunos_nop)
.long C_LABEL(sys_fsync), C_LABEL(sys_setpriority), C_LABEL(sys_socket)
.long C_LABEL(sys_connect), C_LABEL(sys_accept)
/*100*/ .long C_LABEL(sys_getpriority), C_LABEL(sys_send), C_LABEL(sys_recv)
.long C_LABEL(sunos_nosys), C_LABEL(sys_bind), C_LABEL(sys_setsockopt)
.long C_LABEL(sys_listen), C_LABEL(sunos_nosys), C_LABEL(sys_sigaction)
- .long LOWSYS(sunossblock), LOWSYS(sunossmask), C_LABEL(sys_sigpause)
+ .long C_LABEL(sunos_sigblock), C_LABEL(sunos_sigsetmask), C_LABEL(sys_sigpause)
.long C_LABEL(sys_sigstack), C_LABEL(sys_recvmsg), C_LABEL(sys_sendmsg)
.long C_LABEL(sunos_nosys), C_LABEL(sys_gettimeofday), C_LABEL(sys_getrusage)
.long C_LABEL(sys_getsockopt), C_LABEL(sunos_nosys), C_LABEL(sys_readv)
.long C_LABEL(sys_setrlimit), C_LABEL(sunos_killpg), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
/*150*/ .long C_LABEL(sys_getsockname), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
- .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_poll), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_getdirentries), C_LABEL(sys_statfs), C_LABEL(sys_fstatfs)
.long C_LABEL(sys_umount), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
.long C_LABEL(sunos_getdomainname), C_LABEL(sys_setdomainname)
.long C_LABEL(sunos_nosys)/*MINHERIT*/, C_LABEL(sunos_nosys)/*RFORK*/
/* One thing left, Solaris syscall table, TODO */
+ .globl C_LABEL(solaris_sys_table)
+C_LABEL(solaris_sys_table):
+/*0*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_exit), C_LABEL(sys_fork)
+ .long C_LABEL(sys_read), C_LABEL(sys_write)
+/*5*/ .long C_LABEL(solaris_open), C_LABEL(sys_close), C_LABEL(sys_wait4)
+ .long C_LABEL(sys_creat), C_LABEL(sys_link)
+/*10*/ .long C_LABEL(sys_unlink), C_LABEL(sys_ni_syscall), C_LABEL(sys_chdir)
+ .long C_LABEL(sys_time), C_LABEL(sys_mknod)
+/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sys_brk)
+ .long C_LABEL(sys_stat), C_LABEL(sys_lseek)
+/*20*/ .long C_LABEL(sunos_getpid), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_setuid), C_LABEL(sunos_getuid)
+/*25*/ .long C_LABEL(sys_stime), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_alarm), C_LABEL(sys_ni_syscall), C_LABEL(sys_pause)
+/*30*/ .long C_LABEL(sys_utime), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_access), C_LABEL(sys_nice)
+/*35*/ .long C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall)
+/*40*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*45*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*50*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*55*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*60*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*65*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*70*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*75*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*80*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*85*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*90*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*95*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*100*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*105*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*110*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*115*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*120*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*125*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*130*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*135*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*140*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*145*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*150*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*155*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*160*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*165*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*170*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*175*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*180*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*185*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*190*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*195*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*200*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*205*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*210*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*215*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*220*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*225*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*230*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*235*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*240*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*245*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*250*/ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+ .long C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall), C_LABEL(sys_ni_syscall)
+/*255*/ .long C_LABEL(sys_ni_syscall)
--- /dev/null
+/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
+ *
+ * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/string.h>
+
+#include <asm/asi.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+
+#define MACIO_SCSI_CSR_ADDR 0x78400000
+#define MACIO_EN_DMA 0x00000200
+#define CLOCK_INIT_DONE 1
+
+static int clk_state;
+static volatile unsigned char *clk_ctrl;
+void (*cpu_pwr_save)(void);
+
+static inline unsigned int ldphys(unsigned int addr)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
+ "=r" (data) :
+ "r" (addr), "i" (ASI_M_BYPASS));
+ return data;
+}
+
+static void clk_init(void)
+{
+ __asm__ __volatile__("mov 0x6c, %%g1\n\t"
+ "mov 0x4c, %%g2\n\t"
+ "mov 0xdf, %%g3\n\t"
+ "stb %%g1, [%0+3]\n\t"
+ "stb %%g2, [%0+3]\n\t"
+ "stb %%g3, [%0+3]\n\t" : :
+ "r" (clk_ctrl) :
+ "g1", "g2", "g3");
+}
+
+static void clk_slow(void)
+{
+ __asm__ __volatile__("save %sp, -0x68, %sp\n\t"
+ "set _clk_ctrl, %l0\n\t"
+ "ld [%l0], %l0\n\t"
+ "mov 0xcc, %l1\n\t"
+ "mov 0x4c, %l2\n\t"
+ "mov 0xcf, %l3\n\t"
+ "mov 0xdf, %l4\n\t"
+ "stb %l1, [%l0+3]\n\t"
+ "stb %l2, [%l0+3]\n\t"
+ "stb %l3, [%l0+3]\n\t"
+ "stb %l4, [%l0+3]\n\t"
+ "restore\n\t");
+}
+
+static void tsu_clockstop(void)
+{
+ unsigned int mcsr;
+ unsigned long flags;
+
+ if (!clk_ctrl)
+ return;
+ if (!(clk_state & CLOCK_INIT_DONE)) {
+ save_flags(flags); cli();
+ clk_init();
+ clk_state |= CLOCK_INIT_DONE; /* all done */
+ restore_flags(flags);
+ return;
+ }
+ if (!(clk_ctrl[2] & 1))
+ return; /* no speed up yet */
+
+ save_flags(flags); cli();
+
+ /* if SCSI DMA in progress, don't slow clock */
+ mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
+ if ((mcsr&MACIO_EN_DMA) != 0) {
+ restore_flags(flags);
+ return;
+ }
+ /* TODO... the minimum clock setting ought to increase the
+ * memory refresh interval..
+ */
+ clk_slow();
+ restore_flags(flags);
+}
+
+static void swift_clockstop(void)
+{
+ if (!clk_ctrl)
+ return;
+ clk_ctrl[0] = 0;
+}
+
+void clock_stop_probe(void)
+{
+ unsigned int node, clk_nd;
+ char name[20];
+
+ prom_getstring(prom_root_node, "name", name, sizeof(name));
+ if (strncmp(name, "Tadpole", 7))
+ return;
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(node, "obio");
+ node = prom_getchild(node);
+ clk_nd = prom_searchsiblings(node, "clk-ctrl");
+ if (!clk_nd)
+ return;
+ printk("Clock Stopping h/w detected... ");
+ clk_ctrl = (char *) prom_getint(clk_nd, "address");
+ clk_state = 0;
+ if (name[10] == '\0') {
+ cpu_pwr_save = tsu_clockstop;
+ printk("enabled (S3)\n");
+ } else if ((name[10] == 'X') || (name[10] == 'G')) {
+ cpu_pwr_save = swift_clockstop;
+ printk("enabled (%s)\n",name+7);
+ } else
+ printk("disabled %s\n",name+7);
+}
--- /dev/null
+/* tick14.c
+ * linux/arch/sparc/kernel/tick14.c
+ *
+ * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
+ *
+ * This file handles the Sparc specific level14 ticker
+ * This is really useful for profiling; OBP uses it for keyboard
+ * aborts and other stuff.
+ *
+ *
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/timex.h>
+
+#include <asm/oplib.h>
+#include <asm/segment.h>
+#include <asm/timer.h>
+#include <asm/mostek.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+extern unsigned long lvl14_save[5];
+static unsigned long *linux_lvl14 = NULL;
+static unsigned long obp_lvl14[4];
+
+void install_linux_ticker(void)
+{
+ unsigned long flags;
+
+ if (!linux_lvl14)
+ return;
+ save_flags(flags); cli();
+ linux_lvl14[0] = lvl14_save[0];
+ linux_lvl14[1] = lvl14_save[1];
+ linux_lvl14[2] = lvl14_save[2];
+ linux_lvl14[3] = lvl14_save[3];
+ restore_flags(flags);
+}
+
+void install_obp_ticker(void)
+{
+ unsigned long flags;
+
+ if (!linux_lvl14)
+ return;
+ save_flags(flags); cli();
+ linux_lvl14[0] = obp_lvl14[0];
+ linux_lvl14[1] = obp_lvl14[1];
+ linux_lvl14[2] = obp_lvl14[2];
+ linux_lvl14[3] = obp_lvl14[3];
+ restore_flags(flags);
+}
+
+void claim_ticker14(void (*handler)(int, void *, struct pt_regs *),
+ int irq_nr, unsigned int timeout )
+{
+ /* first we copy the obp handler instructions
+ */
+ disable_irq(irq_nr);
+ if (!handler)
+ return;
+
+ linux_lvl14 = (unsigned long *)lvl14_save[4];
+ obp_lvl14[0] = linux_lvl14[0];
+ obp_lvl14[1] = linux_lvl14[1];
+ obp_lvl14[2] = linux_lvl14[2];
+ obp_lvl14[3] = linux_lvl14[3];
+
+ if (!request_irq(irq_nr,
+ handler,
+ (SA_INTERRUPT | SA_STATIC_ALLOC),
+ "counter14",
+ NULL)) {
+ install_linux_ticker();
+ load_profile_irq(timeout);
+ enable_irq(irq_nr);
+ }
+}
-/* $Id: time.c,v 1.7 1996/03/01 07:16:05 davem Exp $
+/* $Id: time.c,v 1.12 1996/04/04 16:30:30 tridge Exp $
* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* This file handles the Sparc specific time handling details.
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/irq.h>
#include <asm/io.h>
-#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
-
enum sparc_clock_type sp_clock_typ;
struct mostek48t02 *mstk48t02_regs = 0;
struct mostek48t08 *mstk48t08_regs = 0;
-volatile unsigned int *master_l10_limit = 0;
-volatile unsigned int *master_l10_counter = 0;
-struct sun4m_timer_regs *sun4m_timers;
-
static int set_rtc_mmss(unsigned long);
/*
{
/* last time the cmos clock got updated */
static long last_rtc_update=0;
- volatile unsigned int clear_intr;
- /* First, clear the interrupt. */
- clear_intr = *master_l10_limit;
+ clear_clock_irq();
do_timer(regs);
/* This will basically traverse the node-tree of the prom to see
* which timer chip is on this machine.
*/
-
node = 0;
if(sparc_cpu_model == sun4) {
printk("clock_probe: No SUN4 Clock/Timer support yet...\n");
return;
}
- if(sparc_cpu_model == sun4c) node=prom_getchild(prom_root_node);
+ if(sparc_cpu_model == sun4c)
+ node = prom_getchild(prom_root_node);
else
if(sparc_cpu_model == sun4m)
node=prom_getchild(prom_searchsiblings(prom_getchild(prom_root_node), "obio"));
return;
}
}
-
- if(sparc_cpu_model == sun4c) {
- /* Map the Timer chip, this is implemented in hardware inside
- * the cache chip on the sun4c.
- */
- sun4c_timers = sparc_alloc_io ((void *) SUN4C_TIMER_PHYSADDR, 0,
- sizeof(struct sun4c_timer_info),
- "timer", 0x0, 0x0);
-
- /* Have the level 10 timer tick at 100HZ. We don't touch the
- * level 14 timer limit since we are letting the prom handle
- * them until we have a real console driver so L1-A works.
- */
- sun4c_timers->timer_limit10 = (((1000000/HZ) + 1) << 10);
- master_l10_limit = &(sun4c_timers->timer_limit10);
- master_l10_counter = &(sun4c_timers->cur_count10);
- } else {
- /* XXX Fix this SHIT... UP and MP sun4m configurations
- * XXX have completely different layouts for the counter
- * XXX registers. AIEEE!!!
- */
-
- int reg_count;
- struct linux_prom_registers cnt_regs[PROMREG_MAX];
- volatile unsigned long *real_limit;
- int obio_node, cnt_node;
-
- cnt_node = 0;
- if((obio_node =
- prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
- (obio_node = prom_getchild (obio_node)) == 0 ||
- (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
- prom_printf("Cannot find /obio/counter node\n");
- prom_halt();
- }
- reg_count = prom_getproperty(cnt_node, "reg",
- (void *) cnt_regs, sizeof(cnt_regs));
- reg_count = (reg_count/sizeof(struct linux_prom_registers));
-
- /* Apply the obio ranges to the timer registers. */
- prom_apply_obio_ranges(cnt_regs, reg_count);
-
- /* Map the per-cpu Counter registers. */
- sparc_alloc_io(cnt_regs[0].phys_addr, 0,
- PAGE_SIZE*NCPUS, "counters_percpu",
- cnt_regs[0].which_io, 0x0);
-
- /* Map the system Counter register. */
- sun4m_timers = sparc_alloc_io(cnt_regs[reg_count-1].phys_addr, 0,
- cnt_regs[reg_count-1].reg_size,
- "counters_system",
- cnt_regs[reg_count-1].which_io, 0x0);
-
- real_limit = &sun4m_timers->l10_timer_limit;
- if(reg_count < 4) {
- /* Uniprocessor timers, ugh. */
- real_limit = (volatile unsigned long *) sun4m_timers;
- }
-
- /* Avoid interrupt bombs... */
- foo_limit = (volatile) *real_limit;
-
- /* Must set the master pointer first or we will lose badly. */
- master_l10_limit = real_limit;
- master_l10_counter = real_limit + 1;
- *master_l10_limit = (((1000000/HZ) + 1) << 10);
- }
}
#ifndef BCD_TO_BIN
unsigned int year, mon, day, hour, min, sec;
struct mostek48t02 *mregs;
+#if CONFIG_AP1000
+ init_timers(timer_interrupt);
+ return;
+#endif
+
clock_probe();
- /* request_irq(TIMER_IRQ, timer_interrupt, SA_INTERRUPT, "timer", NULL); */
- enable_irq(TIMER_IRQ);
+ init_timers(timer_interrupt);
+
mregs = mstk48t02_regs;
if(!mregs) {
prom_printf("Something wrong, clock regs not mapped yet.\n");
mregs->creg &= ~MSTK_CREG_READ;
return;
}
+
/* Nothing fancy on the Sparc yet. */
void do_gettimeofday(struct timeval *tv)
{
save_flags(flags);
cli();
+#if CONFIG_AP1000
+ ap_gettimeofday(&xtime);
+#endif
*tv = xtime;
restore_flags(flags);
}
--- /dev/null
+/* $Id: trampoline.S,v 1.3 1996/04/03 02:15:05 davem Exp $
+ * trampoline.S: Multiprocessor low-level routines on the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/head.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+#include <asm/vaddrs.h>
+#include <asm/contregs.h>
+
+
+ .text
+ .align 4
+
+/* When we start up a cpu for the first time it enters this routine.
+ * This initializes the chip from whatever state the prom left it
+ * in and sets PIL in %psr to 15, no irqs.
+ */
+
+ .globl C_LABEL(sparc_cpu_startup)
+C_LABEL(sparc_cpu_startup):
+cpu1_startup:
+ sethi %hi(C_LABEL(trapbase_cpu1)), %g7
+ or %g7, %lo(C_LABEL(trapbase_cpu1)), %g7
+ sethi %hi(C_LABEL(cpu1_stack)), %g6
+ or %g6, %lo(C_LABEL(cpu1_stack)), %g6
+ b 1f
+ nop
+
+cpu2_startup:
+ sethi %hi(C_LABEL(trapbase_cpu2)), %g7
+ or %g7, %lo(C_LABEL(trapbase_cpu2)), %g7
+ sethi %hi(C_LABEL(cpu2_stack)), %g6
+ or %g6, %lo(C_LABEL(cpu2_stack)), %g6
+ b 1f
+ nop
+
+cpu3_startup:
+ sethi %hi(C_LABEL(trapbase_cpu3)), %g7
+ or %g7, %lo(C_LABEL(trapbase_cpu3)), %g7
+ sethi %hi(C_LABEL(cpu3_stack)), %g6
+ or %g6, %lo(C_LABEL(cpu3_stack)), %g6
+ b 1f
+ nop
+
+1:
+ /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
+ set (PSR_PIL | PSR_S | PSR_PS), %g1
+ wr %g1, 0x0, %psr ! traps off though
+ WRITE_PAUSE
+
+ /* Our %wim is one behind CWP */
+ mov 2, %g1
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ /* This identifies "this cpu". */
+ wr %g7, 0x0, %tbr
+ WRITE_PAUSE
+
+ /* Give ourselves a stack. */
+ set 0x2000, %g5
+ add %g6, %g5, %g6 ! end of stack
+ sub %g6, REGWIN_SZ, %sp
+ mov 0, %fp
+
+ /* Turn on traps (PSR_ET). */
+ rd %psr, %g1
+ wr %g1, PSR_ET, %psr ! traps on
+ WRITE_PAUSE
+
+ /* Init our caches, etc. */
+ set C_LABEL(poke_srmmu), %g5
+ ld [%g5], %g5
+ call %g5
+ nop
+
+ /* Start this processor. */
+ call C_LABEL(smp_callin)
+ nop
+
+ call C_LABEL(cpu_idle)
+ mov 0, %o0
+
+ call C_LABEL(cpu_panic)
+ nop
-/* $Id: traps.c,v 1.32 1996/03/01 07:16:08 davem Exp $
+/* $Id: traps.c,v 1.42 1996/04/16 08:24:44 davem Exp $
* arch/sparc/kernel/traps.c
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
+#include <linux/config.h>
#include <linux/signal.h>
#include <asm/delay.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/mp.h>
#include <asm/kdebug.h>
#include <asm/unistd.h>
+#include <asm/smp.h>
+
+/* #define TRAP_DEBUG */
struct trap_trace_entry {
unsigned long pc;
{
}
+void sun4m_nmi(struct pt_regs *regs)
+{
+ unsigned long afsr, afar;
+
+ printk("Aieee: sun4m NMI received!\n");
+ /* XXX HyperSparc hack XXX */
+ __asm__ __volatile__("mov 0x500, %%g1\n\t"
+ "lda [%%g1] 0x4, %0\n\t"
+ "mov 0x600, %%g1\n\t"
+ "lda [%%g1] 0x4, %1\n\t" :
+ "=r" (afsr), "=r" (afar));
+ printk("afsr=%08lx afar=%08lx\n", afsr, afar);
+ printk("you lose buddy boy...\n");
+ show_regs(regs);
+ prom_halt();
+}
+
void die_if_kernel(char *str, struct pt_regs *regs)
{
unsigned long i;
unsigned long *pc;
- if(regs->psr & PSR_PS)
- do_exit(SIGKILL);
+ /* Amuse the user. */
+ printk(
+" \\|/ ____ \\|/\n"
+" \"@'/ ,. \\`@\"\n"
+" /_| \\__/ |_\\\n"
+" \\__U_/\n");
+
printk("%s(%d): %s\n", current->comm, current->pid, str);
show_regs(regs);
+#if CONFIG_AP1000
+ ap_panic();
+#endif
printk("Instruction DUMP:");
pc = (unsigned long *) regs->pc;
for(i = -3; i < 6; i++)
printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>');
printk("\n");
+ if(regs->psr & PSR_PS)
+ do_exit(SIGKILL);
do_exit(SIGSEGV);
}
if(type < 0x80) {
/* Sun OS's puke from bad traps, Linux survives! */
printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
- panic("Whee... Hello Mr. Penguin");
+ die_if_kernel("Whee... Hello Mr. Penguin", current->tss.kregs);
}
current->tss.sig_desc = SUBSIG_BADTRAP(type - 0x80);
current->tss.sig_address = pc;
{
if(psr & PSR_PS)
die_if_kernel("Kernel illegal instruction", regs);
+#ifdef TRAP_DEBUG
+ printk("Ill instr. at pc=%08lx instruction is %08lx\n",
+ regs->pc, *(unsigned long *)regs->pc);
+#endif
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_ILLINST;
send_sig(SIGILL, current, 1);
void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- if(regs->psr & PSR_PS)
- die_if_kernel("Kernel MNA access", regs);
+ if(regs->psr & PSR_PS) {
+ printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
+ regs->u_regs[UREG_RETPC]);
+ die_if_kernel("BOGUS", regs);
+ /* die_if_kernel("Kernel MNA access", regs); */
+ }
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGBUS, current, 1);
{
/* Sanity check... */
if(psr & PSR_PS)
- die_if_kernel("Kernel gets Penguin-FPU disabled trap", regs);
+ die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);
put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */
regs->psr |= PSR_EF;
+#ifndef __SMP__
if(last_task_used_math == current)
return;
if(last_task_used_math) {
fpload(&init_fregs[0], &init_fsr);
current->used_math = 1;
}
+#else
+ if(!current->used_math) {
+ fpload(&init_fregs[0], &init_fsr);
+ current->used_math = 1;
+ } else {
+ fpload(&current->tss.float_regs[0], &current->tss.fsr);
+ }
+ current->flags |= PF_USEDFPU;
+#endif
}
static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
unsigned long psr)
{
static calls = 0;
+#ifndef __SMP__
struct task_struct *fpt = last_task_used_math;
+#else
+ struct task_struct *fpt = current;
+#endif
put_psr(get_psr() | PSR_EF);
/* If nobody owns the fpu right now, just clear the
* error into our fake static buffer and hope it don't
* happen again. Thank you crashme...
*/
+#ifndef __SMP__
if(!fpt) {
+#else
+ if(!(fpt->flags & PF_USEDFPU)) {
+#endif
fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
regs->psr &= ~PSR_EF;
return;
}
fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr,
&fpt->tss.fpqueue[0], &fpt->tss.fpqdepth);
- last_task_used_math->tss.sig_address = pc;
- last_task_used_math->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */
+ fpt->tss.sig_address = pc;
+ fpt->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */
+#ifdef __SMP__
+ fpt->flags &= ~PF_USEDFPU;
+#endif
if(psr & PSR_PS) {
/* The first fsr store/load we tried trapped,
* the second one will not (we hope).
regs);
return;
}
- send_sig(SIGFPE, last_task_used_math, 1);
+ send_sig(SIGFPE, fpt, 1);
+#ifndef __SMP__
last_task_used_math = NULL;
+#endif
regs->psr &= ~PSR_EF;
if(calls > 0)
calls=0;
void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
+#ifdef TRAP_DEBUG
printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
+#endif
if(psr & PSR_PS)
panic("Tell me what a watchpoint trap is, and I'll then deal "
"with such a beast...");
void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
+#ifdef TRAP_DEBUG
printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
+#endif
send_sig(SIGILL, current, 1);
}
void handle_bad_flush(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
+#ifdef TRAP_DEBUG
printk("Unimplemented FLUSH Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
+#endif
+ printk("INSTRUCTION=%08lx\n", *((unsigned long *) regs->pc));
send_sig(SIGILL, current, 1);
}
void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
+#ifdef TRAP_DEBUG
printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
pc, npc, psr);
+#endif
send_sig(SIGILL, current, 1);
}
void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
unsigned long psr)
{
- printk("Divide By Zero Exception at PC %08lx NPC %08lx PSR %08lx\n",
- pc, npc, psr);
send_sig(SIGILL, current, 1);
}
extern void sparc_cpu_startup(void);
extern int linux_num_cpus;
-extern pgd_t **srmmu_context_table;
+extern ctxd_t *srmmu_ctx_table_phys;
int linux_smp_still_initting;
unsigned int thiscpus_tbr;
void trap_init(void)
{
- struct linux_prom_registers ctx_reg;
- int i;
-
- if(linux_num_cpus == 1) {
- printk("trap_init: Uniprocessor detected.\n");
- return;
- }
- if(sparc_cpu_model != sun4m) {
- prom_printf("trap_init: Multiprocessor on a non-sun4m! Aieee...\n");
- prom_printf("trap_init: Cannot continue, bailing out.\n");
- prom_halt();
- }
- /* Ok, we are on a sun4m with multiple cpu's */
- prom_printf("trap_init: Multiprocessor detected, initiating CPU-startup. cpus=%d\n",
- linux_num_cpus);
- linux_smp_still_initting = 1;
- ctx_reg.which_io = 0x0; /* real ram */
- ctx_reg.phys_addr = (char *) (((unsigned long) srmmu_context_table) - PAGE_OFFSET);
- ctx_reg.reg_size = 0x0;
- /* This basically takes every cpu, loads up our Linux context table
- * into it's context table pointer register, inits it at the low level
- * and then makes it spin in an endless loop...
- */
- for(i=0; i<linux_num_cpus; i++) {
- if((linux_cpus[i].mid & (~8)) != 0x0) {
- static int cpuid = 0;
- cpuid = (linux_cpus[i].mid & (~8));
- percpu_table[cpuid].cpu_is_alive = 0;
- thiscpus_mid = linux_cpus[i].mid;
- thiscpus_tbr = (unsigned int)
- percpu_table[cpuid].trap_table;
- prom_startcpu(linux_cpus[i].prom_node, &ctx_reg, 0x0,
- (char *) sparc_cpu_startup);
- prom_printf("Waiting for cpu %d to start up...\n", i);
- while(percpu_table[cpuid].cpu_is_alive == 0) {
- static int counter = 0;
- counter++;
- if(counter>200)
- break;
- __delay(200000);
- }
- }
- }
-
- linux_smp_still_initting = 1;
}
-/* $Id: wof.S,v 1.20 1996/02/20 07:45:18 davem Exp $
+/* $Id: wof.S,v 1.22 1996/04/03 02:15:10 davem Exp $
* wof.S: Sparc window overflow handler.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
+#include <asm/smp.h>
#include <asm/asi.h>
#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
/* WARNING: This routine is hairy and _very_ complicated, but it
* must be as fast as possible as it handles the allocation
mov %saved_g6, %g6
STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+ ENTER_SYSCALL
+
/* Turn on traps and call c-code to deal with it. */
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
-#if 0
- mov 0, %o1
- call C_LABEL(try_to_clear_window_buffer)
- add %sp, REGWIN_SZ, %o0
-#else
call C_LABEL(window_overflow_fault)
nop
-#endif
/* Return from trap if C-code actually fixes things, if it
* doesn't then we never get this far as the process will
-/* $Id: wuf.S,v 1.20 1996/02/20 07:45:22 davem Exp $
+/* $Id: wuf.S,v 1.22 1996/04/03 02:15:13 davem Exp $
* wuf.S: Window underflow trap handler for the Sparc.
*
* Copyright (C) 1995 David S. Miller
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
+#include <asm/smp.h>
#include <asm/asi.h>
#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
/* Just like the overflow handler we define macros for registers
* with fixed meanings in this routine.
st %g5, [%twin_tmp1 + THREAD_UMASK] ! one live user window still
st %g0, [%twin_tmp1 + THREAD_W_SAVED] ! no windows in the buffer
+ ENTER_SYSCALL
+
wr %t_psr, PSR_ET, %psr ! enable traps
WRITE_PAUSE
* check routines in wof.S, these routines are free to use
* any of the local registers they want to as this window
* does not belong to anyone at this point, however the
- * outs and ins are still verboten as they are part of
+ * outs and ins are still verboten as they are part of
* 'someone elses' window possibly.
*/
be fwin_user_finish_up
nop
- /* Did I ever tell you about my window lobotomy?
+ /* Did I ever tell you about my window lobotomy?
* anyways... fwin_user_stack_is_bolixed expects
* to be in window 'W' so make it happy or else
* we watchdog badly.
-# $Id: Makefile,v 1.7 1995/12/10 06:25:02 davem Exp $
+# $Id: Makefile,v 1.9 1996/03/23 01:37:15 davem Exp $
# Makefile for Sparc library files..
#
CFLAGS := $(CFLAGS) -ansi
-OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o
+OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
+ strlen.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
memcpy.o: memcpy.S
$(CC) -ansi -c -o memcpy.o memcpy.S
+strlen.o: strlen.S
+ $(CC) -ansi -c -o strlen.o strlen.S
+
mul.o: mul.S
$(CC) -c -o mul.o mul.S
--- /dev/null
+/* linux/arch/sparc/lib/memset.c
+ *
+ * This is from GNU libc.
+ */
+
+#include <linux/types.h>
+
+#define op_t unsigned long int
+#define OPSIZ (sizeof(op_t))
+
+typedef unsigned char byte;
+
+void *memset(void *dstpp, char c, size_t len)
+{
+ long int dstp = (long int) dstpp;
+
+ if (len >= 8) {
+ size_t xlen;
+ op_t cccc;
+
+ cccc = (unsigned char) c;
+ cccc |= cccc << 8;
+ cccc |= cccc << 16;
+
+ /* There are at least some bytes to set.
+ No need to test for LEN == 0 in this alignment loop. */
+ while (dstp % OPSIZ != 0) {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ /* Write 8 `op_t' per iteration until less
+ * than 8 `op_t' remain.
+ */
+ xlen = len / (OPSIZ * 8);
+ while (xlen > 0) {
+ ((op_t *) dstp)[0] = cccc;
+ ((op_t *) dstp)[1] = cccc;
+ ((op_t *) dstp)[2] = cccc;
+ ((op_t *) dstp)[3] = cccc;
+ ((op_t *) dstp)[4] = cccc;
+ ((op_t *) dstp)[5] = cccc;
+ ((op_t *) dstp)[6] = cccc;
+ ((op_t *) dstp)[7] = cccc;
+ dstp += 8 * OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ * 8;
+
+ /* Write 1 `op_t' per iteration until less than
+ * OPSIZ bytes remain.
+ */
+ xlen = len / OPSIZ;
+ while (xlen > 0) {
+ ((op_t *) dstp)[0] = cccc;
+ dstp += OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ;
+ }
+
+ /* Write the last few bytes. */
+ while (len > 0) {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ return dstpp;
+}
--- /dev/null
+/* strlen.S: Sparc optimized strlen().
+ *
+ * This was hand optimized by davem@caip.rutgers.edu from
+ * the C-code in GNU-libc.
+ */
+
+#include <asm/cprefix.h>
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .align 4
+ .global C_LABEL(strlen)
+C_LABEL(strlen):
+ mov %o0,%o1
+ andcc %o0,3,%g0 ! and with %o0 so no dependency problems
+ be scan_words
+ sethi %hi(HI_MAGIC),%g2 ! common case and most Sparcs predict taken
+
+ ldsb [%o0],%g2
+still_not_word_aligned:
+ cmp %g2,0
+ bne,a 1f
+ add %o0,1,%o0
+
+ /* Ok, so there are tons of quick interlocks above for the
+ * < 4 length string unaligned... not too common so I'm not
+ * very concerned.
+ */
+ retl
+ sub %o0,%o1,%o0
+
+1:
+ andcc %o0,3,%g0
+ bne,a still_not_word_aligned
+ ldsb [%o0],%g2
+
+ /* HyperSparc executes each sethi/or pair in 1 cycle. */
+ sethi %hi(HI_MAGIC),%g2
+scan_words:
+ or %g2,%lo(HI_MAGIC),%o3
+ sethi %hi(LO_MAGIC),%g3
+ or %g3,%lo(LO_MAGIC),%o2
+next_word:
+ ld [%o0],%g2 ! no dependencies
+next_word_preloaded:
+ sub %g2,%o2,%g2 ! lots of locks here
+ andcc %g2,%o3,%g0 ! and I don't like it...
+ be next_word
+ add %o0,4,%o0
+
+ /* Check every byte. */
+byte_zero:
+ ldsb [%o0-4],%g2
+ cmp %g2,0
+ bne byte_one
+ add %o0,-4,%g3
+
+ retl
+ sub %g3,%o1,%o0
+
+byte_one:
+ ldsb [%o0-3],%g2
+ cmp %g2,0
+ bne,a byte_two_and_three
+ ldsb [%o0-2],%g2
+
+ sub %g3,%o1,%o0
+ retl
+ add %o0,1,%o0
+
+byte_two_and_three:
+ cmp %g2,0
+ be,a found_it
+ sub %g3,%o1,%o0
+
+ ldsb [%o0-1],%g2
+ cmp %g2,0
+ bne,a next_word_preloaded
+ ld [%o0],%g2
+
+ sub %g3,%o1,%o0
+ retl
+ add %o0,3,%o0
+
+found_it:
+ retl
+ add %o0,2,%o0
-# $Id: Makefile,v 1.17 1996/01/03 03:35:15 davem Exp $
+# $Id: Makefile,v 1.20 1996/04/21 10:30:43 davem Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := mm.o
-O_OBJS := fault.o init.o sun4c.o srmmu.o loadmmu.o
+O_OBJS := fault.o init.o sun4c.o srmmu.o loadmmu.o generic.o
include $(TOPDIR)/Rules.make
-/* $Id: fault.c,v 1.53 1996/03/01 07:16:17 davem Exp $
+/* $Id: fault.c,v 1.61 1996/04/12 06:52:35 davem Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <asm/head.h>
+
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
+#include <linux/tasks.h>
+#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
+#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
sp_banks[i].base_addr = 0xdeadbeef;
sp_banks[i].num_bytes = 0;
+ /* Now mask all bank sizes on a page boundary, it is all we can
+ * use anyway.
+ */
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ sp_banks[i].num_bytes &= PAGE_MASK;
+
return tally;
}
struct vm_area_struct *vma;
int from_user = !(regs->psr & PSR_PS);
+#if 0
+ printk("CPU[%d]: f<pid=%d,tf=%d,wr=%d,addr=%08lx",
+ smp_processor_id(), current->pid, text_fault,
+ write, address);
+ printk(",pc=%08lx> ", regs->pc);
+#endif
+
if(text_fault)
address = regs->pc;
* we'd fault recursively until all our stack is gone. ;-(
*/
if(!from_user && address >= KERNBASE) {
+#ifdef __SMP__
+ printk("CPU[%d]: Kernel faults at addr=%08lx\n",
+ smp_processor_id(), address);
+ while(1)
+ ;
+#else
quick_kernel_fault(address);
return;
+#endif
}
vma = find_vma(current, address);
*/
bad_area:
if(from_user) {
+#if 0
+ printk("%s [%d]: segfaults at %08lx pc=%08lx\n",
+ current->comm, current->pid, address, regs->pc);
+#endif
current->tss.sig_address = address;
current->tss.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, current, 1);
return;
}
- /* Uh oh, a kernel fault. Check for bootup wp_test... */
- if (wp_works_ok < 0 && address == 0x0) {
- wp_works_ok = 1;
- printk("This Sparc honours the WP bit even when in supervisor mode. "
- "Good.\n");
- /* Advance program counter over the store. */
- regs->pc = regs->npc;
- regs->npc += 4;
- return;
- }
if((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
} else
--- /dev/null
+/* generic.c: Generic Sparc mm routines that are not dependent upon
+ * MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+static inline void forget_pte(pte_t page)
+{
+ if (pte_none(page))
+ return;
+ if (pte_present(page)) {
+ unsigned long addr = pte_page(page);
+ if (addr >= high_memory || PageReserved(mem_map+MAP_NR(addr)))
+ return;
+ free_page(addr);
+ if (current->mm->rss <= 0)
+ return;
+ current->mm->rss--;
+ return;
+ }
+ swap_free(pte_val(page));
+}
+
+/* Remap IO memory, the same way as remap_page_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
+static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t oldpage = *pte;
+ pte_clear(pte);
+ set_pte(pte, mk_pte_io(offset, prot, space));
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ offset -= address;
+ do {
+ pte_t * pte = pte_alloc(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+int io_remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+
+ pgprot_val(prot) = pg_iobits;
+ offset -= from;
+ dir = pgd_offset(current->mm, from);
+ flush_cache_range(current->mm, beg, end);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(current->mm, beg, end);
+ return error;
+}
-/* $Id: init.c,v 1.33 1996/03/01 07:16:20 davem Exp $
+/* $Id: init.c,v 1.36 1996/04/16 08:02:54 davem Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
i = MAP_NR(high_memory);
while (i-- > 0) {
total++;
- if (mem_map[i].reserved)
+ if (PageReserved(mem_map + i))
reserved++;
else if (!mem_map[i].count)
free++;
};
/* Initialize the protection map with non-constant values
- * MMU dependent values.
+ * MMU dependent values.
*/
protection_map[0] = PAGE_NONE;
protection_map[1] = PAGE_READONLY;
return device_scan(start_mem);
}
-extern void sun4c_test_wp(void);
-extern void srmmu_test_wp(void);
-
struct cache_palias *sparc_aliases;
extern int min_free_pages;
extern int free_pages_low;
extern int free_pages_high;
+int physmem_mapped_contig = 1;
+
+static void taint_real_pages(unsigned long start_mem, unsigned long end_mem)
+{
+ unsigned long addr, tmp2 = 0;
+
+ if(physmem_mapped_contig) {
+ for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
+ for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
+ unsigned long phys_addr = (addr - PAGE_OFFSET);
+ unsigned long base = sp_banks[tmp2].base_addr;
+ unsigned long limit = base + sp_banks[tmp2].num_bytes;
+
+ if((phys_addr >= base) && (phys_addr < limit) &&
+ ((phys_addr + PAGE_SIZE) < limit))
+ mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+ }
+ }
+ } else {
+ for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE)
+ mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+ }
+}
+
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
addr = PAGE_OFFSET;
while(addr < start_mem) {
- mem_map[MAP_NR(addr)].reserved = 1;
+ mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
addr += PAGE_SIZE;
}
- for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
- for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
- unsigned long phys_addr = (addr - PAGE_OFFSET);
- unsigned long base = sp_banks[tmp2].base_addr;
- unsigned long limit = base + sp_banks[tmp2].num_bytes;
-
- if((phys_addr >= base) && (phys_addr < limit) &&
- ((phys_addr + PAGE_SIZE) < limit))
- mem_map[MAP_NR(addr)].reserved = 0;
- }
- }
+ taint_real_pages(start_mem, end_mem);
for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
- if(mem_map[MAP_NR(addr)].reserved) {
+ if(PageReserved(mem_map + MAP_NR(addr))) {
if (addr < (unsigned long) &etext)
codepages++;
else if(addr < start_mem)
free_pages_low = min_free_pages + (min_free_pages >> 1);
free_pages_high = min_free_pages + min_free_pages;
- switch(sparc_cpu_model) {
- case sun4c:
- case sun4e:
- sun4c_test_wp();
- break;
- case sun4m:
- case sun4d:
- srmmu_test_wp();
- break;
- default:
- printk("mem_init: Could not test WP bit on this machine.\n");
- printk("mem_init: sparc_cpu_model = %d\n", sparc_cpu_model);
- printk("mem_init: Halting...\n");
- panic("mem_init()");
- };
}
void si_meminfo(struct sysinfo *val)
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
- if (mem_map[i].reserved)
+ if (PageReserved(mem_map + i))
continue;
val->totalram++;
if (!mem_map[i].count)
-/* $Id: loadmmu.c,v 1.23 1996/02/21 17:56:35 miguel Exp $
+/* $Id: loadmmu.c,v 1.33 1996/04/21 10:32:26 davem Exp $
* loadmmu.c: This code loads up all the mm function pointers once the
* machine type has been determined. It also sets the static
* mmu values such as PAGE_NONE, etc.
*/
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <asm/system.h>
#include <asm/page.h>
void (*mmu_exit_hook)(void);
void (*mmu_flush_hook)(void);
+/* translate between physical and virtual addresses */
+unsigned long (*mmu_v2p)(unsigned long);
+unsigned long (*mmu_p2v)(unsigned long);
+
char *(*mmu_lockarea)(char *, unsigned long);
void (*mmu_unlockarea)(char *, unsigned long);
-char *(*mmu_get_scsi_buffer)(char *, unsigned long, struct linux_sbus *sbus);
-void (*mmu_release_scsi_buffer)(char *, unsigned long, struct linux_sbus *sbus);
+char *(*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
+void (*mmu_release_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
-void (*invalidate_all)(void);
-void (*invalidate_mm)(struct mm_struct *);
-void (*invalidate_range)(struct mm_struct *, unsigned long start, unsigned long end);
-void (*invalidate_page)(struct vm_area_struct *, unsigned long address);
+#ifdef __SMP__
+void (*local_flush_cache_all)(void);
+void (*local_flush_cache_mm)(struct mm_struct *);
+void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+void (*local_flush_tlb_all)(void);
+void (*local_flush_tlb_mm)(struct mm_struct *);
+void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+void (*local_flush_page_to_ram)(unsigned long address);
+#endif
+
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *);
+void (*flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+void (*flush_tlb_all)(void);
+void (*flush_tlb_mm)(struct mm_struct *);
+void (*flush_tlb_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+
+void (*flush_page_to_ram)(unsigned long page);
void (*set_pte)(pte_t *pteptr, pte_t pteval);
int (*pte_none)(pte_t);
int (*pte_present)(pte_t);
-int (*pte_inuse)(pte_t *);
void (*pte_clear)(pte_t *);
-void (*pte_reuse)(pte_t *);
int (*pmd_none)(pmd_t);
int (*pmd_bad)(pmd_t);
int (*pmd_present)(pmd_t);
-int (*pmd_inuse)(pmd_t *);
void (*pmd_clear)(pmd_t *);
-void (*pmd_reuse)(pmd_t *);
int (*pgd_none)(pgd_t);
int (*pgd_bad)(pgd_t);
int (*pgd_present)(pgd_t);
-int (*pgd_inuse)(pgd_t *);
void (*pgd_clear)(pgd_t *);
-void (*pgd_reuse)(pgd_t *);
pte_t (*mk_pte)(unsigned long, pgprot_t);
-pte_t (*mk_pte_io)(unsigned long, pgprot_t);
+pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
void (*pgd_set)(pgd_t *, pmd_t *);
pte_t (*pte_modify)(pte_t, pgprot_t);
pgd_t * (*pgd_offset)(struct mm_struct *, unsigned long);
printk("load_mmu:sparc_cpu_model = %d\n", (int) sparc_cpu_model);
printk("load_mmu:Halting...\n");
panic("load_mmu()");
- };
+ }
}
+++ /dev/null
-/* s4cflsh.S: Low-level segment cache flush routines, shared by
- * the kernel and user in-window tlb refill routines.
- *
- * Copyright (C) 1995 David S. Miller (davem@caipfs.rutgers.edu)
- */
-
-#include "s4clow.h"
-
-/* %g1 -- base address of segment to flush
- * %g4 -- pc of caller
- */
- .text
- .align 4
- .globl sun4c_segment_cache_flush
-sun4c_segment_cache_flush:
- std %g0, [REGSAVE_BASE + FLUSHREGS + 0x0]
- std %g2, [REGSAVE_BASE + FLUSHREGS + 0x8]
- set C_LABEL(sun4c_vacinfo), %g2
- ld [%g2 + VACINFO_HWFLSH], %g3
- cmp %g3, 0x0
- ld [%g2 + VACINFO_SIZE], %g3
- add %g1, %g3, %g3
- be,a sw_flush
- ld [%g2 + VACINFO_LSIZE], %g2
-
- /* Hardware flush */
- sethi %hi(PAGE_SIZE), %g2
- sta %g0, [%g1] 0x05
-1:
- add %g1, %g2, %g1
- cmp %g1, %g3
- blu,a 1b
- sta %g0, [%g1] 0x05
-
- ldd [REGSAVE_BASE + FLUSHREGS + 0x0], %g0
- jmpl %g4, %g0
- ldd [REGSAVE_BASE + FLUSHREGS + 0x8], %g2
-
-sw_flush:
- sta %g0, [%g1] 0x0c
-2:
- add %g1, %g2, %g1
- cmp %g1, %g3
- blu,a 2b
- sta %g0, [%g1] 0x0c
-
- ldd [REGSAVE_BASE + FLUSHREGS + 0x0], %g0
- jmpl %g4, %g0
- ldd [REGSAVE_BASE + FLUSHREGS + 0x8], %g2
+++ /dev/null
-/* s4ckflt.S: Quick in window kernel faults on the sun4c.
- *
- * Copyright (C) 1995 David S. Miller (davem@caipfs.rutgers.edu)
- */
-
-#include "s4clow.h"
-
- .text
- .align 8
- .globl sun4c_quick_kernel_fault
-sun4c_quick_kernel_fault:
- sethi %hi(REAL_PGDIR_MASK), %l5
- and %l7, %l5, %l7
- sethi %hi(C_LABEL(invalid_segment)), %l5
- lduba [%l7] 0x3, %l4
- ld [%l5 + %lo(C_LABEL(invalid_segment))], %l5
- cmp %l4, %l5
- bne segment_loaded
- nop
-
- /* We need some breathing room to pull this off, save
- * away some globals.
- */
- std %g0, [REGSAVE_BASE + KFLTREGS + 0x00]
- std %g2, [REGSAVE_BASE + KFLTREGS + 0x08]
- std %g4, [REGSAVE_BASE + KFLTREGS + 0x10]
- std %g6, [REGSAVE_BASE + KFLTREGS + 0x18]
- std %l0, [REGSAVE_BASE + KFLTREGS + 0x20]
- std %l2, [REGSAVE_BASE + KFLTREGS + 0x28]
-
- set C_LABEL(sun4c_kfree_ring), %g1
- ld [%g1 + RING_NENTRIES], %g2
- cmp %g2, 0x0
- be,a pseg_steal
- nop
-
- b distribute_segmap
- ld [%g1 + RING_RINGHD + MMU_ENTRY_NEXT], %g2
-
-pseg_steal:
- /* This is the hard case. */
- set C_LABEL(sun4c_kernel_ring), %g1
- ld [%g1 + RING_RINGHD + MMU_ENTRY_PREV], %g2
- b kernel_segment_cache_flush
- ld [%g2 + MMU_ENTRY_VADDR], %l0
-
-pseg_steal_after_flush:
- ld [%g2 + MMU_ENTRY_VADDR], %l0
- sethi %hi(0x30000000), %l1
- lduba [%l1] 0x02, %g7
- sethi %hi(C_LABEL(num_contexts)), %g6
- mov 0, %g5
- ld [%g6 + %lo(C_LABEL(num_contexts))], %g6
-1:
- stba %g5, [%l1] 0x02
- add %g5, 1, %g5
- cmp %g5, %g6
- bl 1b
- stba %l5, [%l0] 0x03
-
- stba %g7, [%l1] 0x02
-
-
-
-distribute_segmap:
- st %l7, [%g2 + MMU_ENTRY_VADDR]
- ldub [%g2 + MMU_ENTRY_PSEG], %g3
- sethi %hi(0x30000000), %l0
- lduba [%l0] 0x02, %g7
- sethi %hi(C_LABEL(num_contexts)), %g6
- mov 0, %g5
- ld [%g6 + %lo(C_LABEL(num_contexts))], %g6
-1:
- stba %g5, [%l0] 0x02
- add %g5, 1, %g5
- cmp %g5, %g6
- bl 1b
- stba %g3, [%l7] 0x03
-
- stba %g7, [%l0] 0x02
-
-segment_loaded:
- sethi %hi(VMALLOC_START), %l4
- cmp %l7, %l4
- bge vmalloc_kernel_fault
- nop
-
-
-
-
-
-vmalloc_kernel_fault:
-
-
-qkf_exit:
- /* Fault serviced, return from trap, but reload
- * registers first.
- */
- ldd [REGSAVE_BASE + KFLTREGS + 0x00], %g0
- ldd [REGSAVE_BASE + KFLTREGS + 0x08], %g2
- ldd [REGSAVE_BASE + KFLTREGS + 0x10], %g4
- ldd [REGSAVE_BASE + KFLTREGS + 0x18], %g6
- ldd [REGSAVE_BASE + KFLTREGS + 0x20], %l0
- ldd [REGSAVE_BASE + KFLTREGS + 0x28], %l2
-
+++ /dev/null
-/* s4clow.h: Defines for in-window low level tlb refill code.
- *
- * Copyright (C) 1995 David S. Miller (davem@caipfs.rutgers.edu)
- */
-#ifndef _SPARC_S4CLOW_H
-#define _SPARC_S4CLOW_H
-
-#define PAGE_SIZE 0x00001000
-#define REAL_PGDIR_MASK 0xfffc0000
-#define VMALLOC_START 0xfe100000
-
-#define RING_RINGHD 0x00
-#define RING_NENTRIES 0x10
-
-#define MMU_ENTRY_NEXT 0x00
-#define MMU_ENTRY_PREV 0x04
-#define MMU_ENTRY_VADDR 0x08
-#define MMU_ENTRY_PSEG 0x0c
-#define MMU_ENTRY_LCK 0x0d
-
-#define VACINFO_SIZE 0x00
-#define VACINFO_HWFLSH 0x08
-#define VACINFO_LSIZE 0x0c
-
-/* Each of the routines could get called by any of the
- * other low level sun4c tlb routines. Well... at least
- * we code it that way. Because we are in window we need
- * a way to make a routine completely self contained and
- * only need to worry about saving it's own set of registers
- * which it in fact uses. With traps off this is difficult
- * ... however...
- *
- * The Sparc can address anywhere in the two ranges
- * 0 --> PAGE_SIZE and -PAGE_SIZE --> -1 without any
- * address calculation registers. So we pull a trick,
- * we map a special page for these low level tlb routines
- * since they must be as quick as possible. Since the low
- * page is the NULL unmapped page and in user space we use
- * the high one for simplicity. Kids, do not try this at
- * home.
- */
-#define REGSAVE_BASE (-PAGE_SIZE)
-
-#define FLUSHREGS 0
-#define KFLTREGS 256
-#define UFLTREGS 512
-
-#endif /* !(_SPARC_S4CLOW_H) */
+++ /dev/null
-/* s4cuflt.S: Quick in window user tlb faults on the sun4c.
- *
- * Copyright (C) 1995 David S. Miller (davem@caipfs.rutgers.edu)
- */
-
- .data
-
- .align 8
-.g0g1: .quad 0
-.g2g3: .quad 0
-.g4g5: .quad 0
-.g6g7: .quad 0
-
-#include "srclow.h"
-
- .text
-
- .align 8
- .globl sun4c_quick_user_fault
-sun4c_quick_user_fault:
-/* $Id: srmmu.c,v 1.34 1996/03/01 07:16:23 davem Exp $
+/* $Id: srmmu.c,v 1.59 1996/04/21 10:32:21 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@pool.informatik.rwth-aachen.de)
*/
+#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
-#include <asm/mp.h>
+#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
+#include <asm/asi.h>
+#include <asm/msi.h>
/* Now the cpu specific definitions. */
#include <asm/viking.h>
+#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
-
int hyper_cache_size;
-
+int hyper_line_size;
+
+#ifdef __SMP__
+extern void smp_capture(void);
+extern void smp_release(void);
+#else
+#define smp_capture()
+#define smp_release()
+#endif /* !(__SMP__) */
+
+static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
+static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
+
+static void (*flush_page_for_dma)(unsigned long page);
+static void (*flush_cache_page_to_uncache)(unsigned long page);
+static void (*flush_tlb_page_for_cbit)(unsigned long page);
+#ifdef __SMP__
+static void (*local_flush_page_for_dma)(unsigned long page);
+static void (*local_flush_cache_page_to_uncache)(unsigned long page);
+static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
+#endif
+
+static struct srmmu_stats {
+ int invall;
+ int invpg;
+ int invrnge;
+ int invmm;
+} module_stats;
+
+static char *srmmu_name;
+
+ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;
-/* In general all page table modifications should use the V8 atomic
- * swap instruction. This insures the mmu and the cpu are in sync
- * with respect to ref/mod bits in the page tables.
- */
-static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
-{
- __asm__ __volatile__("swap [%1], %0\n\t" :
- "=&r" (value), "=&r" (addr) :
- "0" (value), "1" (addr));
- return value;
-}
+static struct srmmu_trans {
+ unsigned long vbase;
+ unsigned long pbase;
+ int size;
+} srmmu_map[SPARC_PHYS_BANKS];
-/* Functions really use this, not srmmu_swap directly. */
-#define srmmu_set_entry(ptr, newentry) \
- srmmu_swap((unsigned long *) (ptr), (newentry))
+static int can_cache_ptables = 0;
+static int viking_mxcc_present = 0;
-/* We still don't use these at all, perhaps we don't need them
- * at all.
+/* Physical memory can be _very_ non-contiguous on the sun4m, especially
+ * the SS10/20 class machines and with the latest openprom revisions.
+ * So we have to crunch the free page pool.
*/
-unsigned long (*srmmu_read_physical)(unsigned long paddr);
-void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
-
-static unsigned long gensrmmu_read_physical(unsigned long paddr)
+static inline unsigned long srmmu_v2p(unsigned long vaddr)
{
- unsigned long word;
+ int i;
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (word) :
- "r" (paddr), "i" (ASI_M_BYPASS) :
- "memory");
- return word;
+ for(i=0; srmmu_map[i].size != 0; i++) {
+ if(srmmu_map[i].vbase <= vaddr &&
+ (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
+ return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
+ }
+ return 0xffffffffUL;
}
-static unsigned long msparc_read_physical(unsigned long paddr)
+static inline unsigned long srmmu_p2v(unsigned long paddr)
{
- unsigned long word, flags;
+ int i;
- save_flags(flags); cli();
- __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
- "or %%g1, %4, %%g2\n\t"
- "sta %%g2, [%%g0] %3\n\t"
- "lda [%1] %2, %0\n\t"
- "sta %%g1, [%%g0] %3\n\t" :
- "=r" (word) :
- "r" (paddr), "i" (ASI_M_BYPASS),
- "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
- "g1", "g2", "memory");
- restore_flags(flags);
- return word;
+ for(i=0; srmmu_map[i].size != 0; i++) {
+ if(srmmu_map[i].pbase <= paddr &&
+ (srmmu_map[i].pbase + srmmu_map[i].size > paddr))
+ return (paddr - srmmu_map[i].pbase) + srmmu_map[i].vbase;
+ }
+ return 0xffffffffUL;
}
-static void gensrmmu_write_physical(unsigned long paddr, unsigned long word)
+/* In general all page table modifications should use the V8 atomic
+ * swap instruction. This ensures the mmu and the cpu are in sync
+ * with respect to ref/mod bits in the page tables.
+ */
+static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (word), "r" (paddr), "i" (ASI_M_BYPASS) :
- "memory");
+#if CONFIG_AP1000
+ /* the AP1000 has its memory on bus 8, not 0 like suns do */
+ if (!(value&0xf0000000))
+ value |= 0x80000000;
+ if (value == 0x80000000) value = 0;
+#endif
+ __asm__ __volatile__("swap [%2], %0\n\t" :
+ "=&r" (value) :
+ "0" (value), "r" (addr));
+ return value;
}
-static void msparc_write_physical(unsigned long paddr, unsigned long word)
-{
- unsigned long flags;
+/* Functions really use this, not srmmu_swap directly. */
+#define srmmu_set_entry(ptr, newentry) \
+ srmmu_swap((unsigned long *) (ptr), (newentry))
- save_flags(flags); cli();
- __asm__ __volatile__("lda [%%g0] %3, %%g1\n\t"
- "or %%g1, %4, %%g2\n\t"
- "sta %%g2, [%%g0] %3\n\t"
- "sta %0, [%1] %2\n\t"
- "sta %%g1, [%%g0] %3\n\t" : :
- "r" (word), "r" (paddr), "i" (ASI_M_BYPASS),
- "i" (ASI_M_MMUREGS), "r" (VIKING_ACENABLE) :
- "g1", "g2", "memory");
- restore_flags(flags);
-}
+/* The very generic SRMMU page table operations. */
static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
}
static unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return PAGE_OFFSET + ((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+{ return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pmd_page(pmd_t pmd)
-{ return PAGE_OFFSET + ((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+{ return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pte_page(pte_t pte)
-{ return PAGE_OFFSET + ((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+{ return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
static int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-static int srmmu_pte_inuse(pte_t *ptep)
-{ return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
-
-static void srmmu_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
-static void srmmu_pte_reuse(pte_t *ptep)
-{
- if(!mem_map[MAP_NR(ptep)].reserved)
- mem_map[MAP_NR(ptep)].count++;
-}
+static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
static int srmmu_pmd_bad(pmd_t pmd)
static int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static int srmmu_pmd_inuse(pmd_t *pmdp)
-{ return mem_map[MAP_NR(pmdp)].reserved || mem_map[MAP_NR(pmdp)].count != 1; }
-
-static void srmmu_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
-static void srmmu_pmd_reuse(pmd_t * pmdp)
-{
- if (!mem_map[MAP_NR(pmdp)].reserved)
- mem_map[MAP_NR(pmdp)].count++;
-}
+static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
static int srmmu_pgd_bad(pgd_t pgd)
static int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static int srmmu_pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
-static void srmmu_pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
-static void srmmu_pgd_reuse(pgd_t *pgdp)
-{
- if (!mem_map[MAP_NR(pgdp)].reserved)
- mem_map[MAP_NR(pgdp)].count++;
-}
+static void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
* and a page entry and page directory to the page they refer to.
*/
static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((page - PAGE_OFFSET) >> 4) | pgprot_val(pgprot); return pte; }
+{ pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
-static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
+static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+ pte_t pte;
+ pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
+ return pte;
+}
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ srmmu_set_entry(ctxp, (SRMMU_ET_PTD | ((((unsigned long) pgdp) - PAGE_OFFSET) >> 4))); }
+{
+ srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
+}
static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ srmmu_set_entry(pgdp, (SRMMU_ET_PTD | ((((unsigned long) pmdp) - PAGE_OFFSET) >> 4))); }
+{
+ srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
+}
static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
-{ srmmu_set_entry(pmdp, (SRMMU_ET_PTD | ((((unsigned long) ptep) - PAGE_OFFSET) >> 4))); }
+{
+ srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
+}
static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
/* Find an entry in the second-level page table.. */
static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
- return (pmd_t *) pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
+ return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}
/* Find an entry in the third-level page table.. */
static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
- return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+ return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
/* This must update the context table entry for this process. */
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
if(tsk->mm->context != NO_CONTEXT)
- srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+ ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+}
+
+static inline void srmmu_uncache_page(unsigned long addr)
+{
+ pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
+ pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
+ pte_t *ptep = srmmu_pte_offset(pmdp, addr);
+
+ flush_cache_page_to_uncache(addr);
+ set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
+ flush_tlb_page_for_cbit(addr);
+}
+
+static inline void srmmu_recache_page(unsigned long addr)
+{
+ pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
+ pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
+ pte_t *ptep = srmmu_pte_offset(pmdp, addr);
+
+ set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
+ flush_tlb_page_for_cbit(addr);
+}
+
+static inline unsigned long srmmu_getpage(void)
+{
+ unsigned long page = get_free_page(GFP_KERNEL);
+
+ if (can_cache_ptables)
+ return page;
+
+ if(page)
+ srmmu_uncache_page(page);
+ return page;
}
+static inline void srmmu_putpage(unsigned long page)
+{
+ if (!can_cache_ptables)
+ srmmu_recache_page(page);
+ free_page(page);
+}
+
+/* The easy versions. */
+#define NEW_PGD() (pgd_t *) srmmu_getpage()
+#define NEW_PMD() (pmd_t *) srmmu_getpage()
+#define NEW_PTE() (pte_t *) srmmu_getpage()
+#define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
+#define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
+#define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
*/
static void srmmu_pte_free_kernel(pte_t *pte)
{
- mem_map[MAP_NR(pte)].reserved = 0;
- free_page((unsigned long) pte);
+ FREE_PTE(pte);
}
static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
if(srmmu_pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
+ pte_t *page = NEW_PTE();
if(srmmu_pmd_none(*pmd)) {
if(page) {
- srmmu_pmd_set(pmd, page);
- mem_map[MAP_NR(page)].reserved = 1;
+ pmd_set(pmd, page);
return page + address;
}
- srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
- free_page((unsigned long) page);
+ FREE_PTE(page);
}
if(srmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
return (pte_t *) srmmu_pmd_page(*pmd) + address;
static void srmmu_pmd_free_kernel(pmd_t *pmd)
{
- mem_map[MAP_NR(pmd)].reserved = 0;
- free_page((unsigned long) pmd);
+ FREE_PMD(pmd);
}
static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
+ pmd_t *page = NEW_PMD();
if(srmmu_pgd_none(*pgd)) {
if(page) {
- srmmu_pgd_set(pgd, page);
- mem_map[MAP_NR(page)].reserved = 1;
+ pgd_set(pgd, page);
return page + address;
}
- srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+ pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
- free_page((unsigned long) page);
+ FREE_PMD(page);
}
if(srmmu_pgd_bad(*pgd)) {
printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+ pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
return (pmd_t *) pgd_page(*pgd) + address;
static void srmmu_pte_free(pte_t *pte)
{
- free_page((unsigned long) pte);
+ FREE_PTE(pte);
}
static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
if(srmmu_pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
+ pte_t *page = NEW_PTE();
if(srmmu_pmd_none(*pmd)) {
if(page) {
- srmmu_pmd_set(pmd, page);
+ pmd_set(pmd, page);
return page + address;
}
- srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
- free_page((unsigned long) page);
+ FREE_PTE(page);
}
if(srmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- srmmu_pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
- return (pte_t *) pmd_page(*pmd) + address;
+ return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
}
/* Real three-level page tables on SRMMU. */
static void srmmu_pmd_free(pmd_t * pmd)
{
- free_page((unsigned long) pmd);
+ FREE_PMD(pmd);
}
static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
+ pmd_t *page = NEW_PMD();
if(srmmu_pgd_none(*pgd)) {
if(page) {
- srmmu_pgd_set(pgd, page);
+ pgd_set(pgd, page);
return page + address;
}
- srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+ pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
- free_page((unsigned long) page);
+ FREE_PMD(page);
}
if(srmmu_pgd_bad(*pgd)) {
printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- srmmu_pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+ pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
return (pmd_t *) srmmu_pgd_page(*pgd) + address;
static void srmmu_pgd_free(pgd_t *pgd)
{
- free_page((unsigned long) pgd);
+ FREE_PGD(pgd);
}
static pgd_t *srmmu_pgd_alloc(void)
{
- return (pgd_t *) get_free_page(GFP_KERNEL);
+ return NEW_PGD();
}
-/* Tsunami invalidates. It's page level tlb invalidation is not very
- * useful at all, you must be in the context that page exists in to
- * get a match. It might be worthwhile to try someday though...
+static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+{
+ srmmu_set_entry(ptep, pte_val(pteval));
+}
+
+static void srmmu_quick_kernel_fault(unsigned long address)
+{
+ printk("Penguin faults at address %08lx\n", address);
+ panic("Srmmu bolixed...");
+}
+
+static inline void alloc_context(struct mm_struct *mm)
+{
+ struct ctx_list *ctxp;
+
+ ctxp = ctx_free.next;
+ if(ctxp != &ctx_free) {
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ mm->context = ctxp->ctx_number;
+ ctxp->ctx_mm = mm;
+ return;
+ }
+ ctxp = ctx_used.next;
+ if(ctxp->ctx_mm == current->mm)
+ ctxp = ctxp->next;
+ if(ctxp == &ctx_used)
+ panic("out of mmu contexts");
+ flush_cache_mm(ctxp->ctx_mm);
+ flush_tlb_mm(ctxp->ctx_mm);
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ ctxp->ctx_mm->context = NO_CONTEXT;
+ ctxp->ctx_mm = mm;
+ mm->context = ctxp->ctx_number;
+}
+
+static void srmmu_switch_to_context(struct task_struct *tsk)
+{
+ /* Kernel threads can execute in any context and so can tasks
+ * sleeping in the middle of exiting. If this task has already
+ * been allocated a piece of the mmu real estate, just jump to
+ * it.
+ */
+ if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
+ (tsk->flags & PF_EXITING))
+ return;
+ if(tsk->mm->context == NO_CONTEXT) {
+ alloc_context(tsk->mm);
+ ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ }
+ srmmu_set_context(tsk->mm->context);
+}
+
+/* Low level IO area allocation on the SRMMU. */
+void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long tmp;
+
+ physaddr &= PAGE_MASK;
+ pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
+ pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+ ptep = srmmu_pte_offset(pmdp, virt_addr);
+ tmp = (physaddr >> 4) | SRMMU_ET_PTE;
+
+ /* I need to test whether this is consistent over all
+ * sun4m's. The bus_type represents the upper 4 bits of
+ * 36-bit physical address on the I/O space lines...
+ */
+ tmp |= (bus_type << 28);
+ if(rdonly)
+ tmp |= SRMMU_PRIV_RDONLY;
+ else
+ tmp |= SRMMU_PRIV;
+ flush_page_to_ram(virt_addr);
+ srmmu_set_entry(ptep, tmp);
+ flush_tlb_all();
+}
+
+static char *srmmu_lockarea(char *vaddr, unsigned long len)
+{
+ return vaddr;
+}
+
+static void srmmu_unlockarea(char *vaddr, unsigned long len)
+{
+}
+
+/* On the SRMMU we do not have the problems with limited tlb entries
+ * for mapping kernel pages, so we just take things from the free page
+ * pool. As a side effect we are putting a little too much pressure
+ * on the gfp() subsystem. This setup also makes the logic of the
+ * iommu mapping code a lot easier as we can transparently handle
+ * mappings on the kernel stack without any special code as we did
+ * need on the sun4c.
*/
-/* static */ inline void tsunami_invalidate_all(void)
+struct task_struct *srmmu_alloc_task_struct(void)
{
- tsunami_invalidate_icache();
- tsunami_invalidate_dcache();
- srmmu_flush_whole_tlb();
+ unsigned long page;
+
+ page = get_free_page(GFP_KERNEL);
+ if(!page)
+ return (struct task_struct *) 0;
+ return (struct task_struct *) page;
}
-static void tsunami_invalidate_mm(struct mm_struct *mm)
+
+unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
{
- tsunami_invalidate_all();
+ unsigned long pages;
+
+ pages = __get_free_pages(GFP_KERNEL, 2, 0);
+ if(!pages)
+ return 0;
+ memset((void *) pages, 0, (PAGE_SIZE << 2));
+ return pages;
}
-static void tsunami_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void srmmu_free_task_struct(struct task_struct *tsk)
{
- tsunami_invalidate_all();
+ free_page((unsigned long) tsk);
}
-/* XXX do page level tlb flushes at some point XXX */
-static void tsunami_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
+static void srmmu_free_kernel_stack(unsigned long stack)
{
- tsunami_invalidate_all();
+ free_pages(stack, 2);
}
-/* Swift invalidates. It has the recommended SRMMU specification flushing
- * facilities, so we can do things in a more fine grained fashion than we
- * could on the tsunami. Let's watch out for HARDWARE BUGS...
+/* Tsunami flushes. Its page level tlb invalidation is not very
+ * useful at all, you must be in the context that page exists in to
+ * get a match.
*/
-static inline void swift_invalidate_all(void)
+static void tsunami_flush_cache_all(void)
+{
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+}
+
+static void tsunami_flush_cache_mm(struct mm_struct *mm)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
- unsigned long addr = 0;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+#ifndef __SMP__
+ }
+#endif
+}
- /* Invalidate all cache tags */
- for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16) {
- swift_inv_insn_tag(addr); /* whiz- */
- swift_inv_data_tag(addr); /* bang */
+static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+ struct mm_struct *mm = vma->vm_mm;
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+#ifndef __SMP__
}
+#endif
+}
+
+static void tsunami_flush_cache_page_to_uncache(unsigned long page)
+{
+ tsunami_flush_dcache();
+}
+
+/* Tsunami does not have a Copy-back style virtual cache. */
+static void tsunami_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* However, Tsunami is not IO coherent. */
+static void tsunami_flush_page_for_dma(unsigned long page)
+{
+ tsunami_flush_dcache();
+}
+
+/* TLB flushes seem to upset the tsunami sometimes, I can't figure out
+ * what the hell is going on. All I see is a tlb flush (page or whole,
+ * there is no consistant pattern) and then total local variable corruption
+ * in the procedure who called us after return. Usually triggerable
+ * by "cool" programs like crashme and bonnie. I played around a bit
+ * and adding a bunch of forced nops seems to make the problems all
+ * go away. (missed instruction fetches possibly? ugh...)
+ */
+#define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
+ nop(); nop(); nop(); nop(); nop(); } while(0)
+
+static void tsunami_flush_tlb_all(void)
+{
+ module_stats.invall++;
srmmu_flush_whole_tlb();
+ TSUNAMI_SUCKS;
}
-static void swift_invalidate_mm(struct mm_struct *mm)
+static void tsunami_flush_tlb_mm(struct mm_struct *mm)
{
- unsigned long flags;
- int cc, ncc = mm->context;
+ module_stats.invmm++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ srmmu_flush_whole_tlb();
+ TSUNAMI_SUCKS;
+#ifndef __SMP__
+ }
+#endif
+}
- if(ncc == NO_CONTEXT)
- return;
+static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ module_stats.invrnge++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ srmmu_flush_whole_tlb();
+ TSUNAMI_SUCKS;
+#ifndef __SMP__
+ }
+#endif
+}
- /* have context will travel... */
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
- swift_flush_context(); /* POOF! */
- srmmu_flush_tlb_ctx(); /* POW! */
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_page(page);
+ TSUNAMI_SUCKS;
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+ module_stats.invpg++;
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
+{
+ srmmu_flush_tlb_page(page);
}
-static void swift_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+/* Swift flushes. It has the recommended SRMMU specification flushing
+ * facilities, so we can do things in a more fine grained fashion than we
+ * could on the tsunami. Let's watch out for HARDWARE BUGS...
+ */
+
+static void swift_flush_cache_all(void)
{
- unsigned long flags, addr;
- int cc, ncc = mm->context;
+ flush_user_windows();
+ swift_idflash_clear();
+}
- if(ncc == NO_CONTEXT)
- return;
+static void swift_flush_cache_mm(struct mm_struct *mm)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ swift_idflash_clear();
+#ifndef __SMP__
+ }
+#endif
+}
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ swift_idflash_clear();
+#ifndef __SMP__
+ }
+#endif
+}
- /* XXX Inefficient, we don't do the best we can... XXX */
- addr = start & SRMMU_PGDIR_MASK;
- while(addr < end) {
- swift_flush_region(addr);
- srmmu_flush_tlb_region(addr);
- addr += SRMMU_PGDIR_SIZE;
+static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+ struct mm_struct *mm = vma->vm_mm;
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ if(vma->vm_flags & VM_EXEC)
+ swift_flush_icache();
+ swift_flush_dcache();
+#ifndef __SMP__
}
+#endif
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+/* Not copy-back on swift. */
+static void swift_flush_page_to_ram(unsigned long page)
+{
}
-static void swift_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
+/* But not IO coherent either. */
+static void swift_flush_page_for_dma(unsigned long page)
{
- unsigned long flags;
- int cc, ncc = vmp->vm_mm->context;
+ swift_flush_dcache();
+}
- if(ncc == NO_CONTEXT)
- return;
+static void swift_flush_cache_page_to_uncache(unsigned long page)
+{
+ swift_flush_dcache();
+}
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+static void swift_flush_tlb_all(void)
+{
+ module_stats.invall++;
+ srmmu_flush_whole_tlb();
+}
- swift_flush_page(page);
- srmmu_flush_tlb_page(page);
+static void swift_flush_tlb_mm(struct mm_struct *mm)
+{
+ module_stats.invmm++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT)
+#endif
+ srmmu_flush_whole_tlb();
+}
+
+static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ module_stats.invrnge++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT)
+#endif
+ srmmu_flush_whole_tlb();
+}
+
+static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+ struct mm_struct *mm = vma->vm_mm;
+ if(mm->context != NO_CONTEXT)
+#endif
+ srmmu_flush_whole_tlb();
+ module_stats.invpg++;
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+static void swift_flush_tlb_page_for_cbit(unsigned long page)
+{
+ srmmu_flush_whole_tlb();
}
/* The following are all MBUS based SRMMU modules, and therefore could
- * be found in a multiprocessor configuration.
+ * be found in a multiprocessor configuration. On the whole, these
+ * chips seem to be much more touchy about DVMA and page tables
+ * with respect to cache coherency.
*/
-/* Viking invalidates. For Sun's mainline MBUS processor it is pretty much
+/* Viking flushes. For Sun's mainline MBUS processor it is pretty much
* a crappy mmu. The on-chip I&D caches only have full flushes, no fine
* grained cache invalidations. It only has these "flash clear" things
* just like the MicroSparcI. Added to this many revs of the chip are
- * teaming with hardware buggery.
- *
- * XXX need to handle SMP broadcast invalidations! XXX
+ * teeming with hardware buggery. Someday maybe we'll do direct
+ * diagnostic tag accesses for page level flushes as those should
+ * be painless and will increase performance due to the frequency of
+ * page level flushes. This is a must to _really_ flush the caches,
+ * crazy hardware ;-)
*/
-static inline void viking_invalidate_all(void)
+
+static void viking_flush_cache_all(void)
{
viking_flush_icache();
- viking_flush_dcache();
- srmmu_flush_whole_tlb();
}
-static void viking_invalidate_mm(struct mm_struct *mm)
+
+static void viking_flush_cache_mm(struct mm_struct *mm)
{
- unsigned long flags;
- int cc, ncc = mm->context;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ viking_flush_icache();
+#ifndef __SMP__
+ }
+#endif
+}
- if(ncc == NO_CONTEXT)
- return;
+static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ viking_flush_icache();
+#ifndef __SMP__
+ }
+#endif
+}
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+ struct mm_struct *mm = vma->vm_mm;
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ if(vma->vm_flags & VM_EXEC)
+ viking_flush_icache();
+#ifndef __SMP__
+ }
+#endif
+}
- viking_flush_icache();
- viking_flush_dcache();
- srmmu_flush_tlb_ctx();
+/* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
+static void viking_flush_page_to_ram(unsigned long page)
+{
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+/* Viking is IO cache coherent. */
+static void viking_flush_page_for_dma(unsigned long page)
+{
}
-static void viking_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void viking_mxcc_flush_page(unsigned long page)
{
- unsigned long flags, addr;
- int cc, ncc = mm->context;
+ unsigned long ppage = srmmu_hwprobe(page);
+ unsigned long paddr0, paddr1;
- if(ncc == NO_CONTEXT)
+ if (!ppage)
return;
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+ paddr0 = (ppage >> 28) | 0x10; /* Set cacheable bit. */
+ paddr1 = (ppage << 4) & PAGE_MASK;
- /* XXX Inefficient, we don't do the best we can... XXX */
- viking_flush_icache();
- viking_flush_dcache();
- addr = start & SRMMU_PGDIR_MASK;
- while(addr < end) {
- srmmu_flush_tlb_region(addr);
- addr += SRMMU_PGDIR_SIZE;
+ /* Read the page's data through the stream registers,
+ * and write it back to memory. This will issue
+ * coherent write invalidates to all other caches, thus
+ * should also be sufficient in an MP system.
+ */
+ __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
+ "or %%g0, %1, %%g3\n"
+ "1:\n\t"
+ "stda %%g2, [%2] %5\n\t"
+ "stda %%g2, [%3] %5\n\t"
+ "add %%g3, %4, %%g3\n\t"
+ "btst 0xfff, %%g3\n\t"
+ "bne 1b\n\t"
+ "nop\n\t" : :
+ "r" (paddr0), "r" (paddr1),
+ "r" (MXCC_SRCSTREAM),
+ "r" (MXCC_DESSTREAM),
+ "r" (MXCC_STREAM_SIZE),
+ "i" (ASI_M_MXCC) : "g2", "g3");
+
+ /* This was handcoded after a look at the gcc output from
+ *
+ * do {
+ * mxcc_set_stream_src(paddr);
+ * mxcc_set_stream_dst(paddr);
+ * paddr[1] += MXCC_STREAM_SIZE;
+ * } while (paddr[1] & ~PAGE_MASK);
+ */
+}
+
+static void viking_no_mxcc_flush_page(unsigned long page)
+{
+ unsigned long ppage = srmmu_hwprobe(page) >> 8;
+ int set, block;
+ unsigned long ptag[2];
+ unsigned long vaddr;
+ int i;
+
+ if (!ppage)
+ return;
+
+ for (set = 0; set < 128; set++) {
+ for (block = 0; block < 4; block++) {
+
+ viking_get_dcache_ptag(set, block, ptag);
+
+ if (ptag[1] != ppage)
+ continue;
+ if (!(ptag[0] & VIKING_PTAG_VALID))
+ continue;
+ if (!(ptag[0] & VIKING_PTAG_DIRTY))
+ continue;
+
+ /* There was a great cache from TI
+ * with comfort as much as vi,
+ * 4 pages to flush,
+ * 4 pages, no rush,
+ * since anything else makes him die.
+ */
+ vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
+ for (i = 0; i < 8; i++) {
+ __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
+ "r" (vaddr) : "g2");
+ vaddr += PAGE_SIZE;
+ }
+
+ /* Continue with next set. */
+ break;
+ }
}
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+static void viking_flush_tlb_all(void)
+{
+ module_stats.invall++;
+ srmmu_flush_whole_tlb();
}
-static void viking_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
+
+static void viking_flush_tlb_mm(struct mm_struct *mm)
{
- unsigned long flags;
- int cc, ncc = vmp->vm_mm->context;
+ int octx;
+ module_stats.invmm++;
- if(ncc == NO_CONTEXT)
- return;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_ctx();
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
+ module_stats.invrnge++;
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
- viking_flush_icache();
- viking_flush_dcache();
- srmmu_flush_tlb_page(page);
+static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+ module_stats.invpg++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_page(page);
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
}
-/* Cypress invalidates. */
-static inline void cypress_invalidate_all(void)
+static void viking_flush_tlb_page_for_cbit(unsigned long page)
{
- srmmu_flush_whole_tlb();
+ srmmu_flush_tlb_page(page);
}
-static void cypress_invalidate_mm(struct mm_struct *mm)
+
+/* Cypress flushes. */
+
+static void cypress_flush_tlb_all(void)
{
- unsigned long flags;
- int cc, ncc = mm->context;
+ module_stats.invall++;
+ srmmu_flush_whole_tlb();
+}
- if(ncc == NO_CONTEXT)
- return;
+static void cypress_flush_tlb_mm(struct mm_struct *mm)
+{
+ int octx;
- /* have context will travel... */
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+ module_stats.invmm++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_ctx();
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
- cypress_flush_context(); /* POOF! */
- srmmu_flush_whole_tlb(); /* POW! */
+static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+ module_stats.invrnge++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
}
-static void cypress_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+
+static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- unsigned long flags, addr;
- int cc, ncc = mm->context;
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
- if(ncc == NO_CONTEXT)
- return;
+ module_stats.invpg++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_page(page);
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+/* Hypersparc flushes. Very nice chip... */
+static void hypersparc_flush_cache_all(void)
+{
+ flush_user_windows();
+ hyper_flush_unconditional_combined();
+ hyper_flush_whole_icache();
+}
- /* XXX Inefficient, we don't do the best we can... XXX */
- addr = start & SRMMU_PGDIR_MASK;
- while(addr < end) {
- cypress_flush_region(addr);
- addr += SRMMU_PGDIR_SIZE;
+static void hypersparc_flush_cache_mm(struct mm_struct *mm)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ hyper_flush_unconditional_combined();
+ hyper_flush_whole_icache();
+#ifndef __SMP__
}
- srmmu_flush_whole_tlb();
+#endif
+}
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ hyper_flush_unconditional_combined();
+ hyper_flush_whole_icache();
+#ifndef __SMP__
+ }
+#endif
}
-static void cypress_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
+/* HyperSparc requires a valid mapping where we are about to flush
+ * in order to check for a physical tag match during the flush.
+ */
+static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
- unsigned long flags;
- int cc, ncc = vmp->vm_mm->context;
+ struct mm_struct *mm = vma->vm_mm;
+ volatile unsigned long clear;
+ int octx;
- if(ncc == NO_CONTEXT)
- return;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = srmmu_get_context();
+ flush_user_windows();
+ srmmu_set_context(mm->context);
+ hyper_flush_whole_icache();
+ if(!srmmu_hwprobe(page))
+ goto no_mapping;
+ hyper_flush_cache_page(page);
+ no_mapping:
+ clear = srmmu_get_fstatus();
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
- save_flags(flags); cli();
- cc = srmmu_get_context();
- if(cc != ncc)
- srmmu_set_context(ncc);
+/* HyperSparc is copy-back. */
+static void hypersparc_flush_page_to_ram(unsigned long page)
+{
+ volatile unsigned long clear;
- swift_flush_page(page);
- srmmu_flush_whole_tlb();
+ if(srmmu_hwprobe(page))
+ hyper_flush_cache_page(page);
+ clear = srmmu_get_fstatus();
+}
+
+/* HyperSparc is IO cache coherent. */
+static void hypersparc_flush_page_for_dma(unsigned long page)
+{
+ volatile unsigned long clear;
- if(cc != ncc)
- srmmu_set_context(cc);
- restore_flags(flags);
+ if(srmmu_hwprobe(page))
+ hyper_flush_cache_page(page);
+ clear = srmmu_get_fstatus();
}
-/* Hypersparc invalidates. */
-static inline void hypersparc_invalidate_all(void)
+static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
{
+ volatile unsigned long clear;
- hyper_flush_whole_icache();
+ if(srmmu_hwprobe(page))
+ hyper_flush_cache_page(page);
+ clear = srmmu_get_fstatus();
+}
+
+static void hypersparc_flush_tlb_all(void)
+{
+ module_stats.invall++;
srmmu_flush_whole_tlb();
}
-static void hypersparc_invalidate_mm(struct mm_struct *mm)
+static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
{
+ int octx;
+ module_stats.invmm++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_ctx();
+ srmmu_set_context(octx);
+
+#ifndef __SMP__
+ }
+#endif
}
-static void hypersparc_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
+ int octx;
+
+ module_stats.invrnge++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
}
-static void hypersparc_invalidate_page(struct vm_area_struct *vmp, unsigned long page)
+static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ struct mm_struct *mm = vma->vm_mm;
+ int octx;
+
+ module_stats.invpg++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_page(page);
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
}
-static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
{
- srmmu_set_entry(ptep, pte_val(pteval));
+ srmmu_flush_tlb_page(page);
}
-static void srmmu_quick_kernel_fault(unsigned long address)
+static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
- printk("SRMMU: quick_kernel_fault called for %08lx\n", address);
- panic("Srmmu bolixed...");
+ hyper_flush_whole_icache();
+ srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}
-static inline void alloc_context(struct mm_struct *mm)
+static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
- struct ctx_list *ctxp;
-
- ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
- remove_from_ctx_list(ctxp);
- add_to_used_ctxlist(ctxp);
- mm->context = ctxp->ctx_number;
- ctxp->ctx_mm = mm;
- return;
+ if(tsk->mm->context != NO_CONTEXT) {
+ hyper_flush_whole_icache();
+ ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
}
- ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
- ctxp = ctxp->next;
- if(ctxp == &ctx_used)
- panic("out of mmu contexts");
- remove_from_ctx_list(ctxp);
- add_to_used_ctxlist(ctxp);
- ctxp->ctx_mm->context = NO_CONTEXT;
- ctxp->ctx_mm = mm;
- mm->context = ctxp->ctx_number;
}
-static void srmmu_switch_to_context(struct task_struct *tsk)
+static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
+{
+ /* xor is your friend */
+ __asm__ __volatile__("rd %%psr, %%g1\n\t"
+ "wr %%g1, %4, %%psr\n\t"
+ "nop; nop; nop;\n\t"
+ "swap [%0], %1\n\t"
+ "wr %%g1, 0x0, %%psr\n\t"
+ "nop; nop; nop;\n\t" :
+ "=r" (ptep), "=r" (pteval) :
+ "0" (ptep), "1" (pteval), "i" (PSR_ET) :
+ "g1");
+}
+
+static void hypersparc_switch_to_context(struct task_struct *tsk)
{
/* Kernel threads can execute in any context and so can tasks
* sleeping in the middle of exiting. If this task has already
* been allocated a piece of the mmu realestate, just jump to
* it.
*/
+ hyper_flush_whole_icache();
if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
(tsk->flags & PF_EXITING))
return;
if(tsk->mm->context == NO_CONTEXT) {
alloc_context(tsk->mm);
- srmmu_ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
}
srmmu_set_context(tsk->mm->context);
}
-/* Low level IO area allocation on the SRMMU. */
-void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
-{
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long tmp;
-
- physaddr &= PAGE_MASK;
- pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
- pmdp = srmmu_pmd_offset(pgdp, virt_addr);
- ptep = srmmu_pte_offset(pmdp, virt_addr);
- tmp = (physaddr >> 4) | SRMMU_ET_PTE;
-
- /* I need to test whether this is consistent over all
- * sun4m's. The bus_type represents the upper 4 bits of
- * 36-bit physical address on the I/O space lines...
- */
- tmp |= (bus_type << 28);
- if(rdonly)
- tmp |= SRMMU_PRIV_RDONLY;
- else
- tmp |= SRMMU_PRIV;
- srmmu_set_entry(ptep, tmp);
- invalidate_all();
-}
-
-static char *srmmu_lockarea(char *vaddr, unsigned long len)
-{
- return vaddr;
-}
-
-static void srmmu_unlockarea(char *vaddr, unsigned long len)
-{
-}
-
/* IOMMU things go here. */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
static unsigned long first_dvma_page, last_dvma_page;
+#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
+#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
+
static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
{
unsigned long first = first_dvma_page;
iopte = iommu->page_table;
iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
while(first <= last) {
- iopte_val(*iopte++) = ((((first - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
- (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
+ iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
first += PAGE_SIZE;
}
}
tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
iommu->regs->control = tmp;
iommu_invalidate(iommu->regs);
- iommu->start = 0xfc000000;
+ iommu->plow = iommu->start = 0xfc000000;
iommu->end = 0xffffffff;
/* Allocate IOMMU page table */
/* Stupid alignment constraints give me a headache. */
memory_start = PAGE_ALIGN(memory_start);
memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
- iommu->page_table = (iopte_t *) memory_start;
+ iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
memory_start += ptsize;
/* Initialize new table. */
+ flush_cache_all();
+ srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
+ flush_tlb_all();
memset(iommu->page_table, 0, ptsize);
srmmu_map_dvma_pages_for_iommu(iommu);
- iommu->regs->base = (((unsigned long) iommu->page_table) - PAGE_OFFSET) >> 4;
- srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
+ iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
iommu_invalidate(iommu->regs);
- invalidate_all();
sbus->iommu = iommu;
printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
return memory_start;
}
-
-static char *srmmu_get_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
struct iommu_struct *iommu = sbus->iommu;
unsigned long page = (unsigned long) vaddr;
unsigned long start, end, offset;
iopte_t *iopte;
- if(len > PAGE_SIZE)
- panic("Can only handle page sized iommu mappings.");
offset = page & ~PAGE_MASK;
page &= PAGE_MASK;
- start = iommu->start;
+ start = iommu->plow;
end = KADB_DEBUGGER_BEGVM; /* Don't step on kadb/prom. */
- iopte = iommu->page_table;
+ iopte = iommu->lowest;
while(start < end) {
if(!(iopte_val(*iopte) & IOPTE_VALID))
break;
iopte++;
start += PAGE_SIZE;
}
- if(start == KADB_DEBUGGER_BEGVM)
- panic("Could not find free iommu entry in get_scsi_buffer.");
-
- vaddr = (char *) (start | offset);
- iopte_val(*iopte) = ((((page - PAGE_OFFSET) >> 4) & IOPTE_PAGE) |
- (IOPTE_WRITE | IOPTE_VALID)) & ~(IOPTE_WAZ);
- iommu_invalidate(iommu->regs);
- invalidate_all();
-
- return vaddr;
+
+ flush_page_for_dma(page);
+ vaddr = (char *) (start | offset);
+ iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
+ iommu_invalidate_page(iommu->regs, start);
+ iommu->lowest = iopte + 1;
+ iommu->plow = start + PAGE_SIZE;
+
+ return vaddr;
+}
+
+static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ struct iommu_struct *iommu = sbus->iommu;
+ unsigned long page, start, end, offset;
+ iopte_t *iopte = iommu->lowest;
+
+ start = iommu->plow;
+ end = KADB_DEBUGGER_BEGVM;
+ while(sz >= 0) {
+ page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
+ offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
+ while(start < end) {
+ if(!(iopte_val(*iopte) & IOPTE_VALID))
+ break;
+ iopte++;
+ start += PAGE_SIZE;
+ }
+ if(start == KADB_DEBUGGER_BEGVM)
+ panic("Wheee, iomapping overflow.");
+ flush_page_for_dma(page);
+ sg[sz].alt_addr = (char *) (start | offset);
+ iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
+ iommu_invalidate_page(iommu->regs, start);
+ iopte++;
+ start += PAGE_SIZE;
+ sz--;
+ }
+ iommu->lowest = iopte;
+ iommu->plow = start;
}
-static void srmmu_release_scsi_buffer(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
struct iommu_struct *iommu = sbus->iommu;
unsigned long page = (unsigned long) vaddr;
page &= PAGE_MASK;
iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
iopte_val(*iopte) = 0;
- iommu_invalidate(iommu->regs);
- invalidate_all();
+ iommu_invalidate_page(iommu->regs, page);
+ if(iopte < iommu->lowest) {
+ iommu->lowest = iopte;
+ iommu->plow = page;
+ }
}
-/* On the SRMMU we do not have the problems with limited tlb entries
- * for mapping kernel pages, so we just take things from the free page
- * pool. As a side effect we are putting a little too much pressure
- * on the gfp() subsystem and we don't catch stack overflow like we
- * did on the sun4c with virtual kstack mappings. This setup also
- * makes the logic of the iommu mapping code a lot easier as we can
- * transparently handle mappings on the kernel stack without any
- * special code as we did need on the sun4c.
- */
-struct task_struct *srmmu_alloc_task_struct(void)
+static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
+ struct iommu_struct *iommu = sbus->iommu;
unsigned long page;
+ iopte_t *iopte;
- page = get_free_page(GFP_KERNEL);
- if(!page)
- return (struct task_struct *) 0;
- return (struct task_struct *) page;
+ while(sz >= 0) {
+ page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
+ iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
+ iopte_val(*iopte) = 0;
+ iommu_invalidate_page(iommu->regs, page);
+ if(iopte < iommu->lowest) {
+ iommu->lowest = iopte;
+ iommu->plow = page;
+ }
+ sg[sz].alt_addr = 0;
+ sz--;
+ }
}
-unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
+static unsigned long mempool;
+
+/* NOTE: All of this startup code assumes the low 16mb (approx.) of
+ * kernel mappings are done with one single contiguous chunk of
+ * ram. On small ram machines (classics mainly) we only get
+ * around 8mb mapped for us.
+ */
+
+static unsigned long kbpage;
+
+/* Some dirty hacks to abstract away the painful boot up init. */
+static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
{
- unsigned long pages;
+ return ((vaddr - PAGE_OFFSET) + kbpage);
+}
- pages = __get_free_pages(GFP_KERNEL, 1, 0);
- if(!pages)
- return 0;
- memset((void *) pages, 0, (PAGE_SIZE << 1));
- return pages;
+static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+ srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
}
-static void srmmu_free_task_struct(struct task_struct *tsk)
+static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- free_page((unsigned long) tsk);
+ srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
}
-static void srmmu_free_kernel_stack(unsigned long stack)
+static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
{
- free_pages(stack, 1);
+ return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
}
-static unsigned long mempool;
+static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
+{
+ return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
+}
+
+static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
+{
+ return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
+}
+
+static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
+{
+ return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+}
/* Allocate a block of RAM which is aligned to its size.
* This procedure can be used until the call to mem_init().
*/
-static void *srmmu_init_alloc(unsigned long *kbrk, unsigned size)
+static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
{
- register unsigned mask = size - 1;
- register unsigned long ret;
+ unsigned long mask = size - 1;
+ unsigned long ret;
- if(size==0) return 0x0;
+ if(!size)
+ return 0x0;
if(size & mask) {
prom_printf("panic: srmmu_init_alloc botch\n");
prom_halt();
pgdp = srmmu_pgd_offset(init_task.mm, start);
if(srmmu_pgd_none(*pgdp)) {
pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
- srmmu_pgd_set(pgdp, pmdp);
+ srmmu_early_pgd_set(pgdp, pmdp);
}
- pmdp = srmmu_pmd_offset(pgdp, start);
+ pmdp = srmmu_early_pmd_offset(pgdp, start);
if(srmmu_pmd_none(*pmdp)) {
ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
- srmmu_pmd_set(pmdp, ptep);
+ srmmu_early_pmd_set(pmdp, ptep);
}
start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
}
* looking at the prom's page table directly which is what most
* other OS's do. Yuck... this is much better.
*/
-static inline void srmmu_inherit_prom_mappings(void)
+void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- unsigned long start, end;
+ int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
unsigned long prompte;
- start = KADB_DEBUGGER_BEGVM;
- end = LINUX_OPPROM_ENDVM;
- while(start < end) {
- /* Something going wrong here on some ss5's... */
- prompte = srmmu_hwprobe(start);
-
- if((prompte & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- if(srmmu_pgd_none(*pgdp)) {
- pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
- srmmu_pgd_set(pgdp, pmdp);
- }
- pmdp = srmmu_pmd_offset(pgdp, start);
- if(srmmu_pmd_none(*pmdp)) {
- ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
- srmmu_pmd_set(pmdp, ptep);
- }
- ptep = srmmu_pte_offset(pmdp, start);
- pte_val(*ptep) = prompte;
+ while(start <= end) {
+ if (start == 0)
+ break; /* probably wrap around */
+ if(start == 0xfef00000)
+ start = KADB_DEBUGGER_BEGVM;
+ if(!(prompte = srmmu_hwprobe(start))) {
+ start += PAGE_SIZE;
+ continue;
+ }
+
+ /* A red snapper, see what it really is. */
+ what = 0;
+
+ if(!(start & ~(SRMMU_PMD_MASK))) {
+ if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
+ what = 1;
+ }
+
+ if(!(start & ~(SRMMU_PGDIR_MASK))) {
+ if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+ prompte)
+ what = 2;
+ }
+
+ pgdp = srmmu_pgd_offset(init_task.mm, start);
+ if(what == 2) {
+ pgd_val(*pgdp) = prompte;
+ start += SRMMU_PGDIR_SIZE;
+ continue;
+ }
+ if(srmmu_pgd_none(*pgdp)) {
+ pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
+ srmmu_early_pgd_set(pgdp, pmdp);
+ }
+ pmdp = srmmu_early_pmd_offset(pgdp, start);
+ if(what == 1) {
+ pmd_val(*pmdp) = prompte;
+ start += SRMMU_PMD_SIZE;
+ continue;
+ }
+ if(srmmu_pmd_none(*pmdp)) {
+ ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
+ srmmu_early_pmd_set(pmdp, ptep);
}
+ ptep = srmmu_early_pte_offset(pmdp, start);
+ pte_val(*ptep) = prompte;
start += PAGE_SIZE;
}
}
pte_t *ptep;
start = DVMA_VADDR;
- dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
+ if (viking_mxcc_present)
+ dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
+ else
+ dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
while(first <= last) {
pgdp = srmmu_pgd_offset(init_task.mm, start);
pmdp = srmmu_pmd_offset(pgdp, start);
ptep = srmmu_pte_offset(pmdp, start);
- /* Map with cacheable bit clear. */
srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
first += PAGE_SIZE;
start += PAGE_SIZE;
}
+
+ /* Uncache DVMA pages. */
+ if (!viking_mxcc_present) {
+ first = first_dvma_page;
+ last = last_dvma_page;
+ while(first <= last) {
+ pgdp = srmmu_pgd_offset(init_task.mm, first);
+ pmdp = srmmu_pmd_offset(pgdp, first);
+ ptep = srmmu_pte_offset(pmdp, first);
+ pte_val(*ptep) &= ~SRMMU_CACHE;
+ first += PAGE_SIZE;
+ }
+ }
}
static void srmmu_map_kernel(unsigned long start, unsigned long end)
{
+ unsigned long last_page;
+ int srmmu_bank, phys_bank, i;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- end = (PAGE_ALIGN(end) + PAGE_SIZE);
- while(start < end) {
+ end = PAGE_ALIGN(end);
+
+ if(start == (KERNBASE + PAGE_SIZE)) {
+ unsigned long pte;
+ unsigned long tmp;
+
+ pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
+ pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
+ ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
+
+ /* Put a real mapping in for the KERNBASE page. */
+ tmp = kbpage;
+ pte = (tmp) >> 4;
+ pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
+ pte_val(*ptep) = pte;
+ }
+
+ /* Copy over mappings prom already gave us. */
+ last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
+ while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+ unsigned long tmp;
+
pgdp = srmmu_pgd_offset(init_task.mm, start);
- if(srmmu_pgd_none(*pgdp)) {
- pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
- srmmu_pgd_set(pgdp, pmdp);
- }
- pmdp = srmmu_pmd_offset(pgdp, start);
- if(srmmu_pmd_none(*pmdp)) {
- ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
- srmmu_pmd_set(pmdp, ptep);
- }
- ptep = srmmu_pte_offset(pmdp, start);
- *ptep = srmmu_mk_pte(start, SRMMU_PAGE_KERNEL);
+ pmdp = srmmu_early_pmd_offset(pgdp, start);
+ ptep = srmmu_early_pte_offset(pmdp, start);
+ tmp = srmmu_hwprobe(start);
+ tmp &= ~(0xff);
+ tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
+ pte_val(*ptep) = tmp;
start += PAGE_SIZE;
+ tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
+
+ /* Never a cross bank boundary, thank you. */
+ if(tmp != last_page + PAGE_SIZE)
+ break;
+ last_page = tmp;
+ }
+
+ /* Ok, that was assumed to be one full bank, begin
+ * construction of srmmu_map[].
+ */
+ for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
+ if(kbpage >= sp_banks[phys_bank].base_addr &&
+ (kbpage <
+ (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
+ break; /* found it */
+ }
+ srmmu_bank = 0;
+ srmmu_map[srmmu_bank].vbase = KERNBASE;
+ srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
+ srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
+ if(kbpage != sp_banks[phys_bank].base_addr) {
+ prom_printf("Detected PenguinPages, getting out of here.\n");
+ prom_halt();
+#if 0
+ srmmu_map[srmmu_bank].pbase = kbpage;
+ srmmu_map[srmmu_bank].size -=
+ (kbpage - sp_banks[phys_bank].base_addr);
+#endif
+ }
+ /* Prom didn't map all of this first bank, fill
+ * in the rest by hand.
+ */
+ while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
+ unsigned long pteval;
+
+ pgdp = srmmu_pgd_offset(init_task.mm, start);
+ pmdp = srmmu_early_pmd_offset(pgdp, start);
+ ptep = srmmu_early_pte_offset(pmdp, start);
+
+ pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
+ pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
+ pte_val(*ptep) = pteval;
+ start += PAGE_SIZE;
+ }
+
+ /* Mark this sp_bank invalid... */
+ sp_banks[phys_bank].base_addr |= 1;
+ srmmu_bank++;
+
+ /* Now, deal with what is left. */
+ while(start < end) {
+ unsigned long baddr;
+ int btg;
+
+ /* Find a usable cluster of physical ram. */
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ if(!(sp_banks[i].base_addr & 1))
+ break;
+ if(sp_banks[i].num_bytes == 0)
+ break;
+
+ /* Add it to srmmu_map */
+ srmmu_map[srmmu_bank].vbase = start;
+ srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
+ srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
+ srmmu_bank++;
+
+ btg = sp_banks[i].num_bytes;
+ baddr = sp_banks[i].base_addr;
+ while(btg) {
+ pgdp = srmmu_pgd_offset(init_task.mm, start);
+ pmdp = srmmu_early_pmd_offset(pgdp, start);
+ ptep = srmmu_early_pte_offset(pmdp, start);
+ pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
+ pte_val(*ptep) |= (baddr >> 4);
+
+ baddr += PAGE_SIZE;
+ start += PAGE_SIZE;
+ btg -= PAGE_SIZE;
+ }
+ sp_banks[i].base_addr |= 1;
+ }
+ if(start < end) {
+ prom_printf("weird, didn't use all of physical memory... ");
+ prom_halt();
}
+ for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
+ sp_banks[phys_bank].base_addr &= ~1;
+#if 0
+ for(i = 0; srmmu_map[i].size != 0; i++) {
+ prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
+ i, srmmu_map[i].vbase,
+ srmmu_map[i].pbase, srmmu_map[i].size);
+ }
+ prom_getchar();
+ for(i = 0; sp_banks[i].num_bytes != 0; i++) {
+ prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
+ i,
+ sp_banks[i].base_addr,
+ sp_banks[i].num_bytes);
+ }
+ prom_getchar();
+ prom_halt();
+#endif
}
/* Paging initialization on the Sparc Reference MMU. */
extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sparc_context_init(unsigned long, int);
+extern int physmem_mapped_contig;
+extern int linux_num_cpus;
+
+void (*poke_srmmu)(void);
+
unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
+ unsigned long ptables_start, first_mapped_page;
int i, cpunode;
char node_str[128];
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
+
+#if CONFIG_AP1000
+ printk("Forcing num_contexts to 1024\n");
+ num_contexts = 1024;
+#else
/* Find the number of contexts on the srmmu. */
cpunode = prom_getchild(prom_root_node);
num_contexts = 0;
break;
}
}
+#endif
if(!num_contexts) {
prom_printf("Something wrong, cant find cpu node in paging_init.\n");
prom_halt();
}
- prom_printf("Number of MMU contexts %d\n", num_contexts);
- mempool = start_mem;
+ ptables_start = mempool = PAGE_ALIGN(start_mem);
memset(swapper_pg_dir, 0, PAGE_SIZE);
- srmmu_map_kernel(KERNBASE, end_mem);
+ first_mapped_page = KERNBASE;
+ kbpage = srmmu_hwprobe(KERNBASE);
+ if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
+ kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+ kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
+ kbpage -= PAGE_SIZE;
+ first_mapped_page += PAGE_SIZE;
+ } else
+ kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
+
+ srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
+#if CONFIG_SUN_IO
srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
+#endif
+
+ /* Steal DVMA pages now, I still don't like how we waste all this. */
mempool = PAGE_ALIGN(mempool);
first_dvma_page = mempool;
last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
mempool = last_dvma_page + PAGE_SIZE;
- srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
- srmmu_inherit_prom_mappings();
+#if CONFIG_AP1000
+ ap_inherit_mappings();
+#else
+ srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
+#endif
+ srmmu_map_kernel(first_mapped_page, end_mem);
+#if CONFIG_SUN_IO
+ srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
+#endif
srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
+ srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
for(i = 0; i < num_contexts; i++)
- srmmu_ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
-
- prom_printf("Taking over MMU from PROM.\n");
- srmmu_flush_whole_tlb();
- srmmu_set_ctable_ptr(((unsigned)srmmu_context_table) - PAGE_OFFSET);
- srmmu_flush_whole_tlb();
+ ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
start_mem = PAGE_ALIGN(mempool);
+
+ /* Some SRMMU's are _very_ stupid indeed. */
+ if(!can_cache_ptables) {
+ for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
+ pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
+ pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
+ ptep = srmmu_early_pte_offset(pmdp, ptables_start);
+ pte_val(*ptep) &= ~SRMMU_CACHE;
+ }
+
+ pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
+ pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
+ ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
+ pte_val(*ptep) &= ~SRMMU_CACHE;
+ }
+
+ flush_cache_all();
+ srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
+ flush_tlb_all();
+ poke_srmmu();
+
start_mem = sparc_context_init(start_mem, num_contexts);
start_mem = free_area_init(start_mem, end_mem);
- prom_printf("survived...\n");
return PAGE_ALIGN(start_mem);
}
-/* Test the WP bit on the Sparc Reference MMU. */
-void srmmu_test_wp(void)
-{
- pgd_t *pgdp;
-
- wp_works_ok = -1;
- /* We mapped page zero as a read-only page in paging_init()
- * So fire up the test, then invalidate the pgd for page zero.
- * It is no longer needed.
- */
-
- /* Let it rip... */
- __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
- if (wp_works_ok < 0)
- wp_works_ok = 0;
-
- pgdp = srmmu_pgd_offset(init_task.mm, 0x0);
- pgd_val(*pgdp) = 0x0;
-}
+static char srmmuinfo[512];
static char *srmmu_mmu_info(void)
{
- return "";
+ sprintf(srmmuinfo, "MMU type\t: %s\n"
+ "invall\t\t: %d\n"
+ "invmm\t\t: %d\n"
+ "invrnge\t\t: %d\n"
+ "invpg\t\t: %d\n"
+ "contexts\t: %d\n"
+ "big_chunks\t: %d\n"
+ "little_chunks\t: %d\n",
+ srmmu_name,
+ module_stats.invall,
+ module_stats.invmm,
+ module_stats.invrnge,
+ module_stats.invpg,
+ num_contexts,
+#if 0
+ num_big_chunks,
+ num_little_chunks
+#else
+ 0, 0
+#endif
+ );
+ return srmmuinfo;
}
static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
struct mm_struct *mm = current->mm;
if(mm->context != NO_CONTEXT) {
- srmmu_ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+ flush_cache_mm(mm);
+ ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+ flush_tlb_mm(mm);
+ ctx_old = ctx_list_pool + mm->context;
+ remove_from_ctx_list(ctx_old);
+ add_to_free_ctxlist(ctx_old);
+ mm->context = NO_CONTEXT;
+ }
+}
+
+static void srmmu_flush_hook(void)
+{
+ if(current->tss.flags & SPARC_FLAG_KTHREAD) {
+ alloc_context(current->mm);
+ ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+ srmmu_set_context(current->mm->context);
+ }
+}
+
+static void hypersparc_exit_hook(void)
+{
+ struct ctx_list *ctx_old;
+ struct mm_struct *mm = current->mm;
+
+ if(mm->context != NO_CONTEXT) {
+ /* HyperSparc is copy-back, any data for this
+ * process in a modified cache line is stale
+ * and must be written back to main memory now
+ * else we eat shit later big time.
+ */
+ flush_cache_mm(mm);
+ ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+ flush_tlb_mm(mm);
ctx_old = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx_old);
add_to_free_ctxlist(ctx_old);
}
}
-static void
-srmmu_flush_hook(void)
+static void hypersparc_flush_hook(void)
{
if(current->tss.flags & SPARC_FLAG_KTHREAD) {
alloc_context(current->mm);
- srmmu_ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+ flush_cache_mm(current->mm);
+ ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
srmmu_set_context(current->mm->context);
}
}
prom_halt();
}
+void poke_hypersparc(void)
+{
+ volatile unsigned long clear;
+ unsigned long mreg = srmmu_get_mmureg();
+
+ hyper_flush_unconditional_combined();
+
+ mreg &= ~(HYPERSPARC_CWENABLE);
+ mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
+ mreg |= (HYPERSPARC_CMODE);
+
+ srmmu_set_mmureg(mreg);
+ hyper_clear_all_tags();
+
+ put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
+ hyper_flush_whole_icache();
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
+}
+
void init_hypersparc(void)
{
unsigned long mreg = srmmu_get_mmureg();
- prom_printf("HyperSparc MMU detected.\n");
- if(mreg & HYPERSPARC_CSIZE)
+ srmmu_name = "ROSS HyperSparc";
+ can_cache_ptables = 0;
+ if(mreg & HYPERSPARC_CSIZE) {
hyper_cache_size = (256 * 1024);
- else
+ hyper_line_size = 64;
+ } else {
hyper_cache_size = (128 * 1024);
+ hyper_line_size = 32;
+ }
- srmmu_modtype = HyperSparc;
- hwbug_bitmask |= HWBUG_VACFLUSH_BITROT;
+ flush_cache_all = hypersparc_flush_cache_all;
+ flush_cache_mm = hypersparc_flush_cache_mm;
+ flush_cache_range = hypersparc_flush_cache_range;
+ flush_cache_page = hypersparc_flush_cache_page;
- hyper_flush_whole_icache();
- hyper_flush_all_combined();
+ flush_tlb_all = hypersparc_flush_tlb_all;
+ flush_tlb_mm = hypersparc_flush_tlb_mm;
+ flush_tlb_range = hypersparc_flush_tlb_range;
+ flush_tlb_page = hypersparc_flush_tlb_page;
- /* Keep things sane for now, cache in write-through mode. */
- mreg &= ~(HYPERSPARC_CWENABLE | HYPERSPARC_CMODE | HYPERSPARC_WBENABLE);
- mreg |= HYPERSPARC_CENABLE;
- srmmu_set_mmureg(mreg);
- put_ross_icr(get_ross_icr() | 0x3);
- invalidate_all = hypersparc_invalidate_all;
- invalidate_mm = hypersparc_invalidate_mm;
- invalidate_page = hypersparc_invalidate_page;
- invalidate_range = hypersparc_invalidate_range;
+ flush_page_to_ram = hypersparc_flush_page_to_ram;
+ flush_page_for_dma = hypersparc_flush_page_for_dma;
+ flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
+ flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
+
+ ctxd_set = hypersparc_ctxd_set;
+ switch_to_context = hypersparc_switch_to_context;
+ mmu_exit_hook = hypersparc_exit_hook;
+ mmu_flush_hook = hypersparc_flush_hook;
+ sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
+ set_pte = hypersparc_set_pte;
+ poke_srmmu = poke_hypersparc;
}
-void init_cypress_common(void)
+void poke_cypress(void)
{
unsigned long mreg = srmmu_get_mmureg();
mreg &= ~CYPRESS_CMODE;
mreg |= CYPRESS_CENABLE;
srmmu_set_mmureg(mreg);
- invalidate_all = cypress_invalidate_all;
- invalidate_mm = cypress_invalidate_mm;
- invalidate_page = cypress_invalidate_page;
- invalidate_range = cypress_invalidate_range;
+}
+
+void init_cypress_common(void)
+{
+ can_cache_ptables = 0;
+ flush_tlb_all = cypress_flush_tlb_all;
+ flush_tlb_mm = cypress_flush_tlb_mm;
+ flush_tlb_page = cypress_flush_tlb_page;
+ flush_tlb_range = cypress_flush_tlb_range;
+ poke_srmmu = poke_cypress;
+
+ /* XXX Need to write cache flushes for this one... XXX */
+
}
void init_cypress_604(void)
{
- prom_printf("Cypress 604(UP) MMU detected.\n");
+ srmmu_name = "ROSS Cypress-604(UP)";
srmmu_modtype = Cypress;
init_cypress_common();
}
void init_cypress_605(unsigned long mrev)
{
- prom_printf("Cypress 605(MP) MMU detected.\n");
+ srmmu_name = "ROSS Cypress-605(MP)";
if(mrev == 0xe) {
srmmu_modtype = Cypress_vE;
hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
init_cypress_common();
}
-#define SWIFT_REVISION_ADDR 0x10003000
-void init_swift(void)
+void poke_swift(void)
{
- unsigned long swift_rev, addr;
unsigned long mreg = srmmu_get_mmureg();
- prom_printf("Swift MMU detected.\n");
+ /* Clear any crap from the cache or else... */
+ swift_idflash_clear();
+ mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
+
+ /* The Swift branch folding logic is completely broken. At
+ * trap time, if things are just right, it can mistakenly
+ * think that a trap is coming from kernel mode when in fact
+ * it is coming from user mode (it mis-executes the branch in
+ * the trap code). So you see things like crashme completely
+ * hosing your machine which is completely unacceptable. Turn
+ * this shit off... nice job Fujitsu.
+ */
+ mreg &= ~(SWIFT_BF);
+ srmmu_set_mmureg(mreg);
+}
+
+#define SWIFT_MASKID_ADDR 0x10003018
+void init_swift(void)
+{
+ unsigned long swift_rev;
+
__asm__ __volatile__("lda [%1] %2, %0\n\t"
"srl %0, 0x18, %0\n\t" :
"=r" (swift_rev) :
- "r" (SWIFT_REVISION_ADDR), "i" (0x20));
+ "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
+ srmmu_name = "Fujitsu Swift";
switch(swift_rev) {
case 0x11:
case 0x20:
srmmu_modtype = Swift_ok;
break;
};
- /* Clear any crap from the cache or else... */
- for(addr = 0; addr < (PAGE_SIZE * 4); addr += 16) {
- swift_inv_insn_tag(addr); /* whiz- */
- swift_inv_data_tag(addr); /* bang */
- }
- mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
- /* The Swift branch folding logic is completely broken. At
- * trap time, if things are just right, if can mistakenly
- * thing that a trap is coming from kernel mode when in fact
- * it is coming from user mode (it misexecutes the branch in
- * the trap code). So you see things like crashme completely
- * hosing your machine which is completely unacceptable. Turn
- * this crap off... nice job Fujitsu.
- */
- mreg &= ~(SWIFT_BF);
- srmmu_set_mmureg(mreg);
+ flush_cache_all = swift_flush_cache_all;
+ flush_cache_mm = swift_flush_cache_mm;
+ flush_cache_page = swift_flush_cache_page;
+ flush_cache_range = swift_flush_cache_range;
- invalidate_all = swift_invalidate_all;
- invalidate_mm = swift_invalidate_mm;
- invalidate_page = swift_invalidate_page;
- invalidate_range = swift_invalidate_range;
+ flush_tlb_all = swift_flush_tlb_all;
+ flush_tlb_mm = swift_flush_tlb_mm;
+ flush_tlb_page = swift_flush_tlb_page;
+ flush_tlb_range = swift_flush_tlb_range;
+
+ flush_page_to_ram = swift_flush_page_to_ram;
+ flush_page_for_dma = swift_flush_page_for_dma;
+ flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
+ flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
/* Are you now convinced that the Swift is one of the
* biggest VLSI abortions of all time? Bravo Fujitsu!
+ * Fujitsu, the !#?!%$'d up processor people. I bet if
+ * you examined the microcode of the Swift you'd find
+ * XXX's all over the place.
*/
+ poke_srmmu = poke_swift;
+}
+
+void poke_tsunami(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+ mreg &= ~TSUNAMI_ITD;
+ mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
+ srmmu_set_mmureg(mreg);
}
-void init_tsunami(unsigned long mreg)
+void init_tsunami(void)
{
/* Tsunami's pretty sane, Sun and TI actually got it
* somewhat right this time. Fujitsu should have
* taken some lessons from them.
*/
- prom_printf("Tsunami MMU detected.\n");
+ srmmu_name = "TI Tsunami";
srmmu_modtype = Tsunami;
- tsunami_invalidate_icache();
- tsunami_invalidate_dcache();
- mreg &= ~TSUNAMI_ITD;
- mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
+ can_cache_ptables = 1;
+
+ flush_cache_all = tsunami_flush_cache_all;
+ flush_cache_mm = tsunami_flush_cache_mm;
+ flush_cache_page = tsunami_flush_cache_page;
+ flush_cache_range = tsunami_flush_cache_range;
+
+ flush_tlb_all = tsunami_flush_tlb_all;
+ flush_tlb_mm = tsunami_flush_tlb_mm;
+ flush_tlb_page = tsunami_flush_tlb_page;
+ flush_tlb_range = tsunami_flush_tlb_range;
+
+ flush_page_to_ram = tsunami_flush_page_to_ram;
+ flush_page_for_dma = tsunami_flush_page_for_dma;
+ flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
+ flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
+
+ poke_srmmu = poke_tsunami;
+}
+
+void poke_viking(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+ static int smp_catch = 0;
+
+ if(viking_mxcc_present) {
+ unsigned long mxcc_control;
+
+ __asm__ __volatile__("set -1, %%g2\n\t"
+ "set -1, %%g3\n\t"
+ "stda %%g2, [%1] %2\n\t"
+ "lda [%3] %2, %0\n\t" :
+ "=r" (mxcc_control) :
+ "r" (MXCC_EREG), "i" (ASI_M_MXCC),
+ "r" (MXCC_CREG) : "g2", "g3");
+ mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
+ mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
+ mreg &= ~(VIKING_PCENABLE);
+ __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
+ "r" (mxcc_control), "r" (MXCC_CREG),
+ "i" (ASI_M_MXCC));
+ srmmu_set_mmureg(mreg);
+ mreg |= VIKING_TCENABLE;
+ } else {
+ unsigned long bpreg;
+
+ mreg &= ~(VIKING_TCENABLE);
+ if(smp_catch++) {
+ /* Must disable mixed-cmd mode here for
+ * the other CPUs.
+ */
+ bpreg = viking_get_bpreg();
+ bpreg &= ~(VIKING_ACTION_MIX);
+ viking_set_bpreg(bpreg);
+
+ /* Just in case PROM does something funny. */
+ msi_set_sync();
+ }
+ }
+
+ viking_unlock_icache();
+ viking_flush_icache();
+#if 0
+ viking_unlock_dcache();
+ viking_flush_dcache();
+#endif
+ mreg |= VIKING_SPENABLE;
+ mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
+ mreg |= VIKING_SBENABLE;
+ mreg &= ~(VIKING_ACENABLE);
+#if CONFIG_AP1000
+ mreg &= ~(VIKING_SBENABLE);
+#endif
+#ifdef __SMP__
+ mreg &= ~(VIKING_SBENABLE);
+#endif
srmmu_set_mmureg(mreg);
- invalidate_all = tsunami_invalidate_all;
- invalidate_mm = tsunami_invalidate_mm;
- invalidate_page = tsunami_invalidate_page;
- invalidate_range = tsunami_invalidate_range;
}
-void init_viking(unsigned long psr_vers, unsigned long mod_rev)
+void init_viking(void)
{
unsigned long mreg = srmmu_get_mmureg();
/* Ahhh, the viking. SRMMU VLSI abortion number two... */
- prom_printf("Viking MMU detected.\n");
- if(!psr_vers && ! mod_rev) {
- srmmu_modtype = Viking_12;
- hwbug_bitmask |= (HWBUG_MODIFIED_BITROT | HWBUG_PC_BADFAULT_ADDR);
+ if(mreg & VIKING_MMODE) {
+ unsigned long bpreg;
- /* On a fault, the chip gets entirely confused. It will
- * do one of two things. Either it will set the modified
- * bit for a read-only page (!!!) or it will improperly
- * report a fault when a dcti/loadstore sequence is the
- * last two instructions on a page. Oh baby...
- */
+ srmmu_name = "TI Viking";
+ viking_mxcc_present = 0;
+ can_cache_ptables = 0;
+
+ bpreg = viking_get_bpreg();
+ bpreg &= ~(VIKING_ACTION_MIX);
+ viking_set_bpreg(bpreg);
+
+ msi_set_sync();
+
+ flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
} else {
- if(psr_vers) {
- srmmu_modtype = Viking_2x;
- hwbug_bitmask |= HWBUG_PC_BADFAULT_ADDR; /* see above */
- } else {
- if(mod_rev == 1) {
- srmmu_modtype = Viking_30;
- hwbug_bitmask |= HWBUG_PACINIT_BITROT;
-
- /* At boot time the physical cache
- * has cherry bombs in it, so you
- * have to scrape it by hand before
- * enabling it. Nice CAD tools guys.
- */
- } else {
- if(mod_rev < 8)
- srmmu_modtype = Viking_35;
- else
- srmmu_modtype = Viking_new;
- }
- }
+ srmmu_name = "TI Viking/MXCC";
+ viking_mxcc_present = 1;
+ can_cache_ptables = 1;
+ flush_cache_page_to_uncache = viking_mxcc_flush_page;
}
- /* XXX Dave, play with the MXCC you pinhead XXX */
- viking_flush_icache();
- viking_flush_dcache();
- mreg |= (VIKING_DCENABLE | VIKING_ICENABLE | VIKING_SBENABLE |
- VIKING_TCENABLE | VIKING_DPENABLE);
- srmmu_set_mmureg(mreg);
- invalidate_all = viking_invalidate_all;
- invalidate_mm = viking_invalidate_mm;
- invalidate_page = viking_invalidate_page;
- invalidate_range = viking_invalidate_range;
+
+ flush_cache_all = viking_flush_cache_all;
+ flush_cache_mm = viking_flush_cache_mm;
+ flush_cache_page = viking_flush_cache_page;
+ flush_cache_range = viking_flush_cache_range;
+
+ flush_tlb_all = viking_flush_tlb_all;
+ flush_tlb_mm = viking_flush_tlb_mm;
+ flush_tlb_page = viking_flush_tlb_page;
+ flush_tlb_range = viking_flush_tlb_range;
+
+ flush_page_to_ram = viking_flush_page_to_ram;
+ flush_page_for_dma = viking_flush_page_for_dma;
+ flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
+
+ poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
if(psr_typ == 4 &&
((psr_vers == 0) ||
((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
- init_viking(psr_vers, mod_rev);
+ init_viking();
return;
}
/* Finally the Tsunami. */
if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
- init_tsunami(mreg);
+ init_tsunami();
return;
}
extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
tsetup_srmmu_stackchk, srmmu_rett_stackchk;
+#ifdef __SMP__
+extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
+#endif
+
extern unsigned long srmmu_fault;
#define PATCH_BRANCH(insn, dest) do { \
PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
+#ifdef __SMP__
+ PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
+#endif
PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
+ PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}
+#ifdef __SMP__
+/* Local cross-calls. */
+static void smp_flush_page_for_dma(unsigned long page)
+{
+ xc1((smpfunc_t) local_flush_page_for_dma, page);
+}
+
+static void smp_flush_cache_page_to_uncache(unsigned long page)
+{
+ xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
+}
+
+static void smp_flush_tlb_page_for_cbit(unsigned long page)
+{
+ xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
+}
+#endif
+
/* Load up routines and constants for sun4m mmu */
void ld_mmu_srmmu(void)
{
- prom_printf("Loading srmmu MMU routines\n");
-
/* First the constants */
pmd_shift = SRMMU_PMD_SHIFT;
pmd_size = SRMMU_PMD_SIZE;
pte_none = srmmu_pte_none;
pte_present = srmmu_pte_present;
- pte_inuse = srmmu_pte_inuse;
pte_clear = srmmu_pte_clear;
- pte_reuse = srmmu_pte_reuse;
pmd_none = srmmu_pmd_none;
pmd_bad = srmmu_pmd_bad;
pmd_present = srmmu_pmd_present;
- pmd_inuse = srmmu_pmd_inuse;
pmd_clear = srmmu_pmd_clear;
- pmd_reuse = srmmu_pmd_reuse;
pgd_none = srmmu_pgd_none;
pgd_bad = srmmu_pgd_bad;
pgd_present = srmmu_pgd_present;
- pgd_inuse = srmmu_pgd_inuse;
pgd_clear = srmmu_pgd_clear;
- pgd_reuse = srmmu_pgd_reuse;
mk_pte = srmmu_mk_pte;
pgd_set = srmmu_pgd_set;
mmu_flush_hook = srmmu_flush_hook;
mmu_lockarea = srmmu_lockarea;
mmu_unlockarea = srmmu_unlockarea;
- mmu_get_scsi_buffer = srmmu_get_scsi_buffer;
- mmu_release_scsi_buffer = srmmu_release_scsi_buffer;
+
+ mmu_get_scsi_one = srmmu_get_scsi_one;
+ mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
+ mmu_release_scsi_one = srmmu_release_scsi_one;
+ mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
+
mmu_info = srmmu_mmu_info;
+ mmu_v2p = srmmu_v2p;
+ mmu_p2v = srmmu_p2v;
/* Task struct and kernel stack allocating/freeing. */
alloc_kernel_stack = srmmu_alloc_kernel_stack;
quick_kernel_fault = srmmu_quick_kernel_fault;
+ /* SRMMU specific. */
+ ctxd_set = srmmu_ctxd_set;
+ pmd_set = srmmu_pmd_set;
+
get_srmmu_type();
- if(!(srmmu_get_mmureg() & 0x800)) {
- srmmu_read_physical = msparc_read_physical;
- srmmu_write_physical = msparc_write_physical;
- } else {
- srmmu_read_physical = gensrmmu_read_physical;
- srmmu_write_physical = gensrmmu_write_physical;
- }
patch_window_trap_handlers();
+
+#ifdef __SMP__
+ /* El switcheroo... */
+
+ local_flush_cache_all = flush_cache_all;
+ local_flush_cache_mm = flush_cache_mm;
+ local_flush_cache_range = flush_cache_range;
+ local_flush_cache_page = flush_cache_page;
+ local_flush_tlb_all = flush_tlb_all;
+ local_flush_tlb_mm = flush_tlb_mm;
+ local_flush_tlb_range = flush_tlb_range;
+ local_flush_tlb_page = flush_tlb_page;
+ local_flush_page_to_ram = flush_page_to_ram;
+ local_flush_page_for_dma = flush_page_for_dma;
+ local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
+ local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
+
+ flush_cache_all = smp_flush_cache_all;
+ flush_cache_mm = smp_flush_cache_mm;
+ flush_cache_range = smp_flush_cache_range;
+ flush_cache_page = smp_flush_cache_page;
+ flush_tlb_all = smp_flush_tlb_all;
+ flush_tlb_mm = smp_flush_tlb_mm;
+ flush_tlb_range = smp_flush_tlb_range;
+ flush_tlb_page = smp_flush_tlb_page;
+ flush_page_to_ram = smp_flush_page_to_ram;
+ flush_page_for_dma = smp_flush_page_for_dma;
+ flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
+ flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
+#endif
}
--- /dev/null
+/* srmmu_mp.S: Low level invalidates for MP SRMMU modules.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+#include <asm/cprefix.h>
+#include <asm/asi.h>
+
+ /* Protocol is:
+ *
+ * %l0 --> %l3 NO TOUCH
+ * %l7 NO TOUCH
+ * %l6 return address - 0x8
+ * %l4, %l5 what we can use
+ */
+
+ .globl C_LABEL(hyper_invalidate_low)
+C_LABEL(hyper_invalidate_low):
+ /* First the on-chip cache. */
+ set C_LABEL(hyper_cache_size), %l5
+ ld [%l5], %l5
+1:
+ subcc %l5, 32, %l5
+ bne 1b
+ sta %g0, [%l5] ASI_M_FLUSH_CTX
+
+ /* Now the on-chip ICACHE. */
+ sta %g0, [%g0] ASI_M_FLUSH_IWHOLE
+
+ /* Flush the TLB and return. */
+ mov 0x400, %l4
+ sta %g0, [%l4] ASI_M_FLUSH_PROBE
+
+ jmpl %l6 + 0x8, %g0
+ nop
+
+ .globl C_LABEL(viking_invalidate_low)
+C_LABEL(viking_invalidate_low):
+ /* Flash clear the I/D caches. */
+ sta %g0, [%g0] ASI_M_IC_FLCLEAR
+ sta %g0, [%g0] ASI_M_DC_FLCLEAR
+
+ /* Flush the TLB and return. */
+ mov 0x400, %l4
+ sta %g0, [%l4] ASI_M_FLUSH_PROBE
+
+ jmpl %l6 + 0x8, %g0
+ nop
+
+ /* XXX Write the Cypress when I get access to
+ * XXX some modules, poke mossip real hard until
+ * XXX he takes care of the Nubis upgrade.
+ */
+++ /dev/null
-/* $Id: srmmuinv.c,v 1.2 1995/11/25 00:59:36 davem Exp $
- * srmmuinv.c: Invalidate routines for the various different
- * SRMMU implementations.
- *
- * Copyright (C) 1995 David S. Miller
- */
-
-/* HyperSparc */
-hyper_invalidate(void)
-{
- volatile unsigned int sfsr_clear;
-
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- /* Flush ICACHE */
- flush_whole_icache();
- sfsr_clear = srmmu_get_fstatus();
- return;
-}
-
-hyper_invalidate_mp(void)
-{
- volatile unsigned int sfsr_clear;
-
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- /* Flush ICACHE */
- flush_whole_icache();
-
- sfsr_clear = srmmu_get_fstatus();
-
- /* Tell other CPUS to each call the Uniprocessor
- * invalidate routine.
- */
-
- return;
-}
-
-/* Cypress */
-void
-cypress_invalidate(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-void
-cypress_invalidate_mp(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- /* Tell other CPUS to call the UP version */
-
- return;
-}
-
-void
-cypress_invalidate_asibad(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache w/o using ASIs */
-
- return;
-}
-
-void
-cypress_invalidate_asibad_mp(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache w/o using ASIs */
-
- /* Tell other CPUS to call the UP version */
-
- return;
-}
-
-/* Swift */
-void
-swift_invalidate(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-void
-swift_invalidate_poke_kernel_pageperms(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-void
-swift_invalidate_poke_kernel_pte_cbits(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-void
-swift_invalidate_poke_everything(void)
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-/* Tsunami */
-tsunami_invalidate()
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Flush Virtual Address Cache */
-
- return;
-}
-
-/* Viking */
-viking_invalidate()
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- return;
-}
-
-viking_invalidate_mp()
-{
- /* Flush TLB */
- srmmu_flush_whole_tlb();
-
- /* Make other CPUS call UP routine. */
-
- return;
-}
-
-/* That should be it */
struct sun4c_vac_props sun4c_vacinfo;
static int ctxflushes, segflushes, pageflushes;
+/* convert a virtual address to a physical address and vice
+ versa. Easy on the 4c */
+static unsigned long sun4c_v2p(unsigned long vaddr)
+{
+ return(vaddr - PAGE_OFFSET);
+}
+
+static unsigned long sun4c_p2v(unsigned long vaddr)
+{
+ return(vaddr + PAGE_OFFSET);
+}
+
+
/* Invalidate every sun4c cache line tag. */
void sun4c_flush_all(void)
{
}
/* Blow the entire current context out of the virtual cache. */
-/* static */ inline void sun4c_flush_context(void)
+static inline void sun4c_flush_context(void)
{
unsigned long vaddr;
}
/* Using this method to free up mmu entries eliminates a lot of
- * potential races since we have a kernel that incurs tlb
+ * potention races since we have a kernel that incurs tlb
* replacement faults. There may be performance penalties.
*/
static inline struct sun4c_mmu_entry *sun4c_user_strategy(void)
/* READ THIS: If you put any diagnostic printing code in any of the kernel
* fault handling code you will lose badly. This is the most
* delicate piece of code in the entire kernel, atomicity of
- * kernel tlb replacement must be guaranteed. This is why we
- * have separate user and kernel allocation rings to alleviate
+ * kernel tlb replacement must be guarenteed. This is why we
+ * have seperate user and kernel allocation rings to alleviate
* as many bad interactions as possible.
*
* XXX Someday make this into a fast in-window trap handler to avoid
struct task_bucket {
struct task_struct task;
char _unused1[PAGE_SIZE - sizeof(struct task_struct)];
- char _unused2[PAGE_SIZE];
- char kstack[(PAGE_SIZE<<1)];
+ char kstack[(PAGE_SIZE*3)];
};
struct task_bucket *sun4c_bucket[NR_TASKS];
static unsigned long sun4c_alloc_kernel_stack(struct task_struct *tsk)
{
unsigned long saddr = (unsigned long) tsk;
- unsigned long page[2];
+ unsigned long page[3];
if(!saddr)
return 0;
free_page(page[0]);
return 0;
}
- saddr += (PAGE_SIZE << 1);
- sun4c_put_pte(saddr - PAGE_SIZE, 0);
+ page[2] = get_free_page(GFP_KERNEL);
+ if(!page[2]) {
+ free_page(page[0]);
+ free_page(page[1]);
+ return 0;
+ }
+ saddr += PAGE_SIZE;
sun4c_put_pte(saddr, BUCKET_PTE(page[0]));
sun4c_put_pte(saddr + PAGE_SIZE, BUCKET_PTE(page[1]));
+ sun4c_put_pte(saddr + (PAGE_SIZE<<1), BUCKET_PTE(page[2]));
return saddr;
}
static void sun4c_free_kernel_stack(unsigned long stack)
{
- unsigned long page[2];
+ unsigned long page[3];
page[0] = BUCKET_PTE_PAGE(sun4c_get_pte(stack));
page[1] = BUCKET_PTE_PAGE(sun4c_get_pte(stack+PAGE_SIZE));
+ page[2] = BUCKET_PTE_PAGE(sun4c_get_pte(stack+(PAGE_SIZE<<1)));
sun4c_flush_segment(stack & SUN4C_REAL_PGDIR_MASK);
sun4c_put_pte(stack, 0);
sun4c_put_pte(stack + PAGE_SIZE, 0);
+ sun4c_put_pte(stack + (PAGE_SIZE<<1), 0);
free_page(page[0]);
free_page(page[1]);
+ free_page(page[2]);
}
static void sun4c_free_task_struct(struct task_struct *tsk)
static unsigned long *sun4c_iobuffer_map;
static int iobuffer_map_size;
-static char *sun4c_lockpage(char *vaddr, unsigned long _unused)
-{
- unsigned long vpage, voffset, search, pte;
- unsigned long npage;
+/*
+ * Alias our pages so they do not cause a trap.
+ * Also one page may be aliased into several I/O areas and we may
+ * finish these I/O separately.
+ */
+static char *sun4c_lockarea(char *vaddr, unsigned long size)
+{
+ unsigned long base, scan;
+ unsigned long npages;
+ unsigned long vpage;
+ unsigned long pte;
+ unsigned long apage;
+
+ npages = (((unsigned long)vaddr & ~PAGE_MASK) +
+ size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+ scan = 0;
+ for (;;) {
+ scan = find_next_zero_bit(sun4c_iobuffer_map,
+ iobuffer_map_size, scan);
+ if ((base = scan) + npages > iobuffer_map_size) goto abend;
+ for (;;) {
+ if (scan >= base + npages) goto found;
+ if (test_bit(scan, sun4c_iobuffer_map)) break;
+ scan++;
+ }
+ }
+found:
vpage = ((unsigned long) vaddr) & PAGE_MASK;
- voffset = ((unsigned long) vaddr) & ~PAGE_MASK;
- pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL);
- pte |= _SUN4C_PAGE_NOCACHE;
- search = find_first_zero_bit(sun4c_iobuffer_map, iobuffer_map_size);
- set_bit(search, sun4c_iobuffer_map);
- npage = (search << PAGE_SHIFT) + sun4c_iobuffer_start;
- sun4c_flush_page(vpage);
- sun4c_put_pte(npage, pte);
- return (char *) (npage + voffset);
+ for (scan = base; scan < base+npages; scan++) {
+ pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
+ pte |= pgprot_val(SUN4C_PAGE_KERNEL);
+ pte |= _SUN4C_PAGE_NOCACHE;
+ set_bit(scan, sun4c_iobuffer_map);
+ apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
+ sun4c_flush_page(vpage);
+ sun4c_put_pte(apage, pte);
+ vpage += PAGE_SIZE;
+ }
+ return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
+ (((unsigned long) vaddr) & ~PAGE_MASK));
+
+abend:
+ printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
+ panic("Out of iobuffer table");
+ return 0;
}
-static void sun4c_unlockpage(char *vaddr, unsigned long _unused)
+static void sun4c_unlockarea(char *vaddr, unsigned long size)
{
- unsigned long vpage, nr;
+ unsigned long vpage, npages;
- vpage = (unsigned long) vaddr;
- vpage &= PAGE_MASK;
- nr = (vpage - sun4c_iobuffer_start) >> PAGE_SHIFT;
- sun4c_put_pte(vpage, 0);
- clear_bit(nr, sun4c_iobuffer_map);
+ vpage = (unsigned long)vaddr & PAGE_MASK;
+ npages = (((unsigned long)vaddr & ~PAGE_MASK) +
+ size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ while (npages != 0) {
+ --npages;
+ sun4c_put_pte(vpage, 0);
+ clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
+ sun4c_iobuffer_map);
+ vpage += PAGE_SIZE;
+ }
}
/* Note the scsi code at init time passes to here buffers
* by implication and fool the page locking code above
* if passed to by mistake.
*/
-static char *sun4c_get_scsi_buffer(char *bufptr, unsigned long len, struct linux_sbus *sbus)
+static char *sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
{
- unsigned long page1, page2;
+ unsigned long page;
- page1 = ((unsigned long) bufptr) & PAGE_MASK;
- page2 = (((unsigned long) bufptr) + len - 1) & PAGE_MASK;
- if(page1 != page2) {
- printk("Problem, trying to lock multipage scsi buffer.\n");
- printk("page1<%08lx> page2<%08lx>\n", page1, page2);
- panic("Scsi buffer too big.");
- }
- if(page1 > high_memory)
+ page = ((unsigned long) bufptr) & PAGE_MASK;
+ if(page > high_memory)
return bufptr; /* already locked */
- return sun4c_lockpage(bufptr, PAGE_SIZE);
+ return sun4c_lockarea(bufptr, len);
+}
+
+static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ while(sz >= 0) {
+ sg[sz].alt_addr = sun4c_lockarea(sg[sz].addr, sg[sz].len);
+ sz--;
+ }
}
-static void sun4c_release_scsi_buffer(char *bufptr, unsigned long len, struct linux_sbus *sbus)
+static void sun4c_release_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
{
unsigned long page = (unsigned long) bufptr;
if(page < sun4c_iobuffer_start)
return; /* On kernel stack or similar, see above */
- sun4c_unlockpage(bufptr, PAGE_SIZE);
+ sun4c_unlockarea(bufptr, len);
}
-#define TASK_ENTRY_SIZE (3 * PAGE_SIZE)
+static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ while(sz >= 0) {
+ sun4c_unlockarea(sg[sz].alt_addr, sg[sz].len);
+ sg[sz].alt_addr = 0;
+ sz--;
+ }
+}
+
+#define TASK_ENTRY_SIZE BUCKET_SIZE /* see above */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
struct vm_area_struct sun4c_kstack_vma;
}
sun4c_iobuffer_start = SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
- sun4c_iobuffer_end = sun4c_iobuffer_start + SUN4C_REAL_PGDIR_SIZE;
+ sun4c_iobuffer_end = SUN4C_LOCK_END;
bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
bitmap_size = (bitmap_size + 7) >> 3;
bitmap_size = LONG_ALIGN(bitmap_size);
return start_mem;
}
-static void sun4c_invalidate_all(void)
+/* Cache flushing on the sun4c. */
+static void sun4c_flush_cache_all(void)
+{
+ unsigned long start, end;
+
+ /* Clear all tags in the sun4c cache.
+ * The cache is write through so this is safe.
+ */
+ start = AC_CACHETAGS;
+ end = start + sun4c_vacinfo.num_bytes;
+ flush_user_windows();
+ while(start < end) {
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (start), "i" (ASI_CONTROL));
+ start += sun4c_vacinfo.linesize;
+ }
+}
+
+static void sun4c_flush_cache_mm(struct mm_struct *mm)
+{
+ unsigned long flags;
+ int octx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = sun4c_get_context();
+ save_flags(flags); cli();
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+ sun4c_flush_context();
+ sun4c_set_context(octx);
+ restore_flags(flags);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void sun4c_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size, octx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ size = start - end;
+
+ flush_user_windows();
+
+ if(size >= sun4c_vacinfo.num_bytes)
+ goto flush_it_all;
+
+ save_flags(flags); cli();
+ octx = sun4c_get_context();
+ sun4c_set_context(mm->context);
+
+ if(size <= (PAGE_SIZE << 1)) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ sun4c_flush_page(start);
+ start += PAGE_SIZE;
+ };
+ } else {
+ start &= SUN4C_REAL_PGDIR_MASK;
+ while(start < end) {
+ sun4c_flush_segment(start);
+ start += SUN4C_REAL_PGDIR_SIZE;
+ }
+ }
+ sun4c_set_context(octx);
+ restore_flags(flags);
+#ifndef __SMP__
+ }
+#endif
+ return;
+
+flush_it_all:
+ /* Cache size bounded flushing, thank you. */
+ sun4c_flush_cache_all();
+}
+
+static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long flags;
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
+
+ /* Sun4c has no seperate I/D caches so cannot optimize for non
+ * text page flushes.
+ */
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = sun4c_get_context();
+ save_flags(flags); cli();
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+ sun4c_flush_page(page);
+ sun4c_set_context(octx);
+ restore_flags(flags);
+#ifndef __SMP__
+ }
+#endif
+}
+
+/* Sun4c cache is write-through, so no need to validate main memory
+ * during a page copy in kernel space.
+ */
+static void sun4c_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* TLB flushing on the sun4c. These routines count on the cache
+ * flushing code to flush the user register windows so that we need
+ * not do so when we get here.
+ */
+
+static void sun4c_flush_tlb_all(void)
{
struct sun4c_mmu_entry *this_entry, *next_entry;
+ unsigned long flags;
+ int savectx, ctx;
+ save_flags(flags); cli();
this_entry = sun4c_kernel_ring.ringhd.next;
+ savectx = sun4c_get_context();
while(sun4c_kernel_ring.num_entries) {
next_entry = this_entry->next;
- sun4c_kernel_unmap(this_entry);
+ for(ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(this_entry->vaddr, invalid_segment);
+ }
free_kernel_entry(this_entry, &sun4c_kernel_ring);
this_entry = next_entry;
}
+ sun4c_set_context(savectx);
+ restore_flags(flags);
}
-static void sun4c_invalidate_mm(struct mm_struct *mm)
+static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
- if(mm->context == NO_CONTEXT)
- return;
- sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
+ struct sun4c_mmu_entry *this_entry, *next_entry;
+ struct sun4c_mmu_ring *crp;
+ int savectx, ctx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ crp = &sun4c_context_ring[mm->context];
+ savectx = sun4c_get_context();
+ ctx = mm->context;
+ this_entry = crp->ringhd.next;
+ sun4c_set_context(mm->context);
+ while(crp->num_entries) {
+ next_entry = this_entry->next;
+ sun4c_user_unmap(this_entry);
+ free_user_entry(ctx, this_entry);
+ this_entry = next_entry;
+ }
+ sun4c_set_context(savectx);
+#ifndef __SMP__
+ }
+#endif
}
-static void sun4c_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void sun4c_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
struct sun4c_mmu_entry *this_entry;
unsigned char pseg, savectx;
+#ifndef __SMP__
if(mm->context == NO_CONTEXT)
return;
+#endif
flush_user_windows();
savectx = sun4c_get_context();
sun4c_set_context(mm->context);
if(pseg == invalid_segment)
goto next_one;
this_entry = &mmu_entry_pool[pseg];
- sun4c_user_unmap(this_entry);
+ sun4c_put_segmap(this_entry->vaddr, invalid_segment);
free_user_entry(mm->context, this_entry);
next_one:
start += SUN4C_REAL_PGDIR_SIZE;
sun4c_set_context(savectx);
}
-static void sun4c_invalidate_page(struct vm_area_struct *vma, unsigned long page)
+static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned char savectx;
+ int savectx;
- if(mm->context == NO_CONTEXT)
- return;
- flush_user_windows();
- savectx = sun4c_get_context();
- sun4c_set_context(mm->context);
- page &= PAGE_MASK;
- if(sun4c_get_pte(page) & _SUN4C_PAGE_VALID) {
- sun4c_flush_page(page);
- sun4c_put_pte(page, 0);
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ savectx = sun4c_get_context();
+ sun4c_set_context(mm->context);
+ page &= PAGE_MASK;
+ if(sun4c_get_pte(page) & _SUN4C_PAGE_VALID)
+ sun4c_put_pte(page, 0);
+ sun4c_set_context(savectx);
+#ifndef __SMP__
}
- sun4c_set_context(savectx);
+#endif
}
/* Sun4c mmu hardware doesn't update the dirty bit in the pte's
}
}
-void sun4c_test_wp(void)
-{
- wp_works_ok = -1;
-
- /* Let it rip... */
- sun4c_put_pte((unsigned long) 0x0, (_SUN4C_PAGE_VALID | _SUN4C_PAGE_PRIV));
- __asm__ __volatile__("st %%g0, [0x0]\n\t": : :"memory");
- sun4c_put_pte((unsigned long) 0x0, 0x0);
- if (wp_works_ok < 0)
- wp_works_ok = 0;
-}
-
static char s4cinfo[512];
static char *sun4c_mmu_info(void)
for(i=0; i < num_contexts; i++)
used_user_entries += sun4c_context_ring[i].num_entries;
- sprintf(s4cinfo, "vacsize: %d bytes\n"
+ sprintf(s4cinfo, "vacsize\t\t: %d bytes\n"
"vachwflush\t: %s\n"
"vaclinesize\t: %d bytes\n"
"mmuctxs\t\t: %d\n"
static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
static int sun4c_pte_present(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_VALID; }
-static int sun4c_pte_inuse(pte_t *ptep) { return mem_map[MAP_NR(ptep)].reserved || mem_map[MAP_NR(ptep)].count != 1; }
static void sun4c_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
-static void sun4c_pte_reuse(pte_t *ptep)
-{
- if(!mem_map[MAP_NR(ptep)].reserved)
- mem_map[MAP_NR(ptep)].count++;
-}
static int sun4c_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
static int sun4c_pmd_bad(pmd_t pmd)
}
static int sun4c_pmd_present(pmd_t pmd) { return pmd_val(pmd) & PGD_PRESENT; }
-static int sun4c_pmd_inuse(pmd_t *pmdp) { return 0; }
static void sun4c_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
-static void sun4c_pmd_reuse(pmd_t * pmdp) { }
static int sun4c_pgd_none(pgd_t pgd) { return 0; }
static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
static int sun4c_pgd_present(pgd_t pgd) { return 1; }
-static int sun4c_pgd_inuse(pgd_t *pgdp) { return mem_map[MAP_NR(pgdp)].reserved; }
static void sun4c_pgd_clear(pgd_t * pgdp) { }
/*
return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
-static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot)
+static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
*/
static void sun4c_pte_free_kernel(pte_t *pte)
{
- mem_map[MAP_NR(pte)].reserved = 0;
free_page((unsigned long) pte);
}
if (sun4c_pmd_none(*pmd)) {
if (page) {
pmd_val(*pmd) = PGD_TABLE | (unsigned long) page;
- mem_map[MAP_NR(page)].reserved = 1;
return page + address;
}
pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
| _SUN4C_PAGE_WRITE | _SUN4C_PAGE_DIRTY;
/* Functions */
- invalidate_all = sun4c_invalidate_all;
- invalidate_mm = sun4c_invalidate_mm;
- invalidate_range = sun4c_invalidate_range;
- invalidate_page = sun4c_invalidate_page;
+#ifndef __SMP__
+ flush_cache_all = sun4c_flush_cache_all;
+ flush_cache_mm = sun4c_flush_cache_mm;
+ flush_cache_range = sun4c_flush_cache_range;
+ flush_cache_page = sun4c_flush_cache_page;
+
+ flush_tlb_all = sun4c_flush_tlb_all;
+ flush_tlb_mm = sun4c_flush_tlb_mm;
+ flush_tlb_range = sun4c_flush_tlb_range;
+ flush_tlb_page = sun4c_flush_tlb_page;
+#else
+ local_flush_cache_all = sun4c_flush_cache_all;
+ local_flush_cache_mm = sun4c_flush_cache_mm;
+ local_flush_cache_range = sun4c_flush_cache_range;
+ local_flush_cache_page = sun4c_flush_cache_page;
+
+ local_flush_tlb_all = sun4c_flush_tlb_all;
+ local_flush_tlb_mm = sun4c_flush_tlb_mm;
+ local_flush_tlb_range = sun4c_flush_tlb_range;
+ local_flush_tlb_page = sun4c_flush_tlb_page;
+
+ flush_cache_all = smp_flush_cache_all;
+ flush_cache_mm = smp_flush_cache_mm;
+ flush_cache_range = smp_flush_cache_range;
+ flush_cache_page = smp_flush_cache_page;
+
+ flush_tlb_all = smp_flush_tlb_all;
+ flush_tlb_mm = smp_flush_tlb_mm;
+ flush_tlb_range = smp_flush_tlb_range;
+ flush_tlb_page = smp_flush_tlb_page;
+#endif
+
+ flush_page_to_ram = sun4c_flush_page_to_ram;
+
set_pte = sun4c_set_pte;
switch_to_context = sun4c_switch_to_context;
pmd_align = sun4c_pmd_align;
pte_none = sun4c_pte_none;
pte_present = sun4c_pte_present;
- pte_inuse = sun4c_pte_inuse;
pte_clear = sun4c_pte_clear;
- pte_reuse = sun4c_pte_reuse;
pmd_none = sun4c_pmd_none;
pmd_bad = sun4c_pmd_bad;
pmd_present = sun4c_pmd_present;
- pmd_inuse = sun4c_pmd_inuse;
pmd_clear = sun4c_pmd_clear;
- pmd_reuse = sun4c_pmd_reuse;
pgd_none = sun4c_pgd_none;
pgd_bad = sun4c_pgd_bad;
pgd_present = sun4c_pgd_present;
- pgd_inuse = sun4c_pgd_inuse;
pgd_clear = sun4c_pgd_clear;
mk_pte = sun4c_mk_pte;
update_mmu_cache = sun4c_update_mmu_cache;
mmu_exit_hook = sun4c_exit_hook;
mmu_flush_hook = sun4c_flush_hook;
- mmu_lockarea = sun4c_lockpage;
- mmu_unlockarea = sun4c_unlockpage;
- mmu_get_scsi_buffer = sun4c_get_scsi_buffer;
- mmu_release_scsi_buffer = sun4c_release_scsi_buffer;
+ mmu_lockarea = sun4c_lockarea;
+ mmu_unlockarea = sun4c_unlockarea;
+
+ mmu_get_scsi_one = sun4c_get_scsi_one;
+ mmu_get_scsi_sgl = sun4c_get_scsi_sgl;
+ mmu_release_scsi_one = sun4c_release_scsi_one;
+ mmu_release_scsi_sgl = sun4c_release_scsi_sgl;
+
+ mmu_v2p = sun4c_v2p;
+ mmu_p2v = sun4c_p2v;
/* Task struct and kernel stack allocating/freeing. */
alloc_kernel_stack = sun4c_alloc_kernel_stack;
/* These should _never_ get called with two level tables. */
pgd_set = 0;
- pgd_reuse = 0;
pgd_page = 0;
}
-/* $Id: bootstr.c,v 1.4 1996/02/08 07:06:43 zaitcev Exp $
+/* $Id: bootstr.c,v 1.5 1996/04/04 16:30:53 tridge Exp $
* bootstr.c: Boot string/argument acquisition from the PROM.
*
* Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <asm/oplib.h>
static char barg_buf[256];
cp += strlen(cp);
*cp = 0;
break;
+ case PROM_AP1000:
+ /*
+ * Get message from host boot process.
+ */
+#if CONFIG_AP1000
+ ap_getbootargs(barg_buf);
+#endif
+ break;
default:
barg_buf[0] = 0;
break;
-/* $Id: console.c,v 1.6 1996/01/01 02:46:27 davem Exp $
+/* $Id: console.c,v 1.8 1996/04/05 07:44:35 tridge Exp $
* console.c: Routines that deal with sending and receiving IO
* to/from the current console device using the PROM.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <linux/string.h>
return inc;
return -1;
break;
+ case PROM_AP1000:
+ return -1;
+ break;
};
return 0; /* Ugh, we could spin forever on unsupported proms ;( */
}
return 0;
return -1;
break;
+ case PROM_AP1000:
+#if CONFIG_AP1000
+ {
+ extern void ap_putchar(char );
+ ap_putchar(c);
+ return 0;
+ }
+#endif
+ break;
};
return 0; /* Ugh, we could spin forever on unsupported proms ;( */
}
return PROMDEV_ITTYB;
}
return PROMDEV_I_UNK;
+ case PROM_AP1000:
+ return PROMDEV_I_UNK;
};
}
};
}
break;
+ case PROM_AP1000:
+ return PROMDEV_I_UNK;
};
return PROMDEV_O_UNK;
}
-/* $Id: devops.c,v 1.3 1995/11/25 00:59:59 davem Exp $
+/* $Id: devops.c,v 1.4 1996/04/04 16:30:58 tridge Exp $
* devops.c: Device operations using the PROM.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr);
return handle;
break;
+ case PROM_AP1000:
+ break;
};
return -1;
case PROM_P1275:
(*(romvec->pv_v2devops.v2_dev_close))(dhandle);
return;
+ case PROM_AP1000:
+ return;
};
return;
}
case PROM_P1275:
(*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo);
break;
+ case PROM_AP1000:
+ break;
};
return;
+++ /dev/null
-/* devtree.c: Build a copy of the prom device tree in kernel
- * memory for easier access and cleaner interface.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-/* Add more as appropriate. */
-enum bus_t {
- OBIO_BUS,
- SBUS_BUS,
- PCI_BUS,
- PMEM_BUS,
- CPU_BUS,
-};
-
-struct sdevmapping {
- unsigned long physpage;
- int mapsz;
- enum bus_t where;
-};
-
-/* limitation of sparc arch. */
-#define NUM_SPARC_IRQS 15
-
-struct sdev_irqs {
- int level;
- int vector; /* For vme/sbus irq sharing methinks. */
-};
-
-struct sparcdev {
- struct sparcdev *next;
- struct sparcdev *prev;
- int node;
- char *name;
- int num_mappings;
- struct sdevmapping *maps;
- int num_irqs;
- struct sdev_irqs irqinfo[NUM_SPARC_IRQS];
-};
-
-struct sparcbus {
- struct sparcbus *next;
- enum bus_t type;
- struct sparcdev *device_list;
-};
-
-/* Add more as appropriate. */
-struct sparcbus obiobus_info = { 0, OBIO_BUS, { 0, 0}, };
-struct sparcbus sbusbus_info = { 0, SBUS_BUS, { 0, 0}, };
-struct sparcbus pcibus_info = { 0, PCI_BUS, { 0, 0}, };
-struct sparcbus pmembus_info = { 0, PMEM_BUS, { 0, 0}, };
-struct sparcbus cpubus_info = { 0, CPU_BUS, { 0, 0}, };
-
-struct sparcbus *sparcbus_list = 0;
-
-/* This is called at boot time to build the prom device tree. */
-int prom_build_devtree(unsigned long start_mem, unsigned long end_mem)
-{
-}
-
-/* Search the bus device list for a device which matches one of the
- * names in NAME_VECTOR which is an array or NUM_NAMES strings, given
- * the passed BUSTYPE. Return ptr to the matching sparcdev structure
- * or NULL if no matches found.
- */
-struct sparcdev *prom_find_dev_on_bus(bus_t bustype, char **name_vector, int num_names)
-{
- struct sparcdev *sdp;
- struct sparcbus *thebus;
- int niter;
-
- if(!num_names)
- return 0;
-
- if(!sparcbus_list) {
- prom_printf("prom_find_dev_on_bus: Device list not initted yet!\n");
- prom_halt();
- }
-
- while(thebus = sparcbus_list; thebus; thebus = thebus->next)
- if(thebus->type == bustype)
- break;
- if(!thebus || !thebus->device_list)
- return 0;
-
- for(sdp = thebus->device_list; sdp; sdp = sdp->next) {
- for(niter = 0; niter < num_names; niter++)
- if(!strcmp(sdp->name, name_vector[niter]))
- break;
- }
- return sdp;
-}
-
-/* Continue searching on a device list, starting at START_DEV for the next
- * instance whose name matches one of the elements of NAME_VECTOR which is
- * of length NUM_NAMES.
- */
-struct sparcdev *prom_find_next_dev(struct sparcdev *start_dev, char **name_vector, int num_names)
-{
- struct sparcdev *sdp;
- int niter;
-
- if(!start_dev->next || !num_names)
- return 0;
- for(sdp = start_dev->next; sdp; sdp = sdp->next) {
- for(niter = 0; niter < num_names; niter++)
- if(!strcmp(sdp->name, name_vector[niter]))
- break;
- }
- return sdp;
-}
-/* $Id: init.c,v 1.6 1995/11/25 01:00:01 davem Exp $
+/* $Id: init.c,v 1.7 1996/04/04 16:31:00 tridge Exp $
* init.c: Initialize internal variables used by the PROM
* library functions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/openprom.h>
{
romvec = rp;
+#if CONFIG_AP1000
+ prom_vers = PROM_AP1000;
+ prom_meminit();
+ prom_ranges_init();
+ return;
+#endif
switch(romvec->pv_romvers) {
case 0:
prom_vers = PROM_V0;
-/* $Id: memory.c,v 1.4 1995/11/25 01:00:02 davem Exp $
+/* $Id: memory.c,v 1.6 1996/04/08 09:02:27 davem Exp $
* memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/kernel.h>
+
#include <asm/openprom.h>
#include <asm/oplib.h>
-/* This routine, for consistency, returns the ram parameters in the
- * V0 prom memory descriptor format. I choose this format because I
+/* This routine, for consistancy, returns the ram parameters in the
+ * V0 prom memory descriptor format. I choose this format becuase I
* think it was the easiest to work with. I feel the religious
* arguments now... ;) Also, I return the linked lists sorted to
- * prevent paging_init() upset stomach as I have not yet written
- * the pepto-bismol kernel module yet.
+ * prevent paging_init() upset stomache as I have not yet written
+ * the pepto-bismal kernel module yet.
*/
struct linux_prom_registers prom_reg_memlist[64];
/* Sort the other two lists. */
prom_sortmemlist(prom_phys_total);
prom_sortmemlist(prom_phys_avail);
+ break;
+ case PROM_AP1000:
+ /* really simple memory map */
+ prom_phys_total[0].start_adr = 0x00000000;
+ prom_phys_total[0].num_bytes = 0x01000000; /* 16MB */
+ prom_phys_total[0].theres_more = 0x0;
+ prom_prom_taken[0].start_adr = 0x00000000;
+ prom_prom_taken[0].num_bytes = 0x00000000;
+ prom_prom_taken[0].theres_more = 0x0;
+ prom_phys_avail[0].start_adr = 0x00000000;
+ prom_phys_avail[0].num_bytes = 0x01000000; /* 16MB */
+ prom_phys_avail[0].theres_more = 0x0;
+ prom_sortmemlist(prom_phys_total);
+ prom_sortmemlist(prom_prom_taken);
+ prom_sortmemlist(prom_phys_avail);
+ printk("Initialised AP1000 memory lists (forced 16MB)\n");
+ break;
};
/* Link all the lists into the top-level descriptor. */
-/* $Id: misc.c,v 1.5 1996/02/02 03:37:44 davem Exp $
+/* $Id: misc.c,v 1.8 1996/04/17 23:03:23 davem Exp $
* misc.c: Miscellaneous prom functions that don't belong
* anywhere else.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
}
/* We want to do this more nicely some day. */
+#if CONFIG_SUN_CONSOLE
extern void console_restore_palette(void);
extern void set_palette(void);
+extern int serial_console;
+#endif
/* Drop into the prom, with the chance to continue with the 'go'
* prom command.
void
prom_halt(void)
{
- console_restore_palette ();
+ extern void kernel_enter_debugger(void);
+ extern void install_obp_ticker(void);
+ extern void install_linux_ticker(void);
+
+ kernel_enter_debugger();
+#if CONFIG_SUN_CONSOLE
+ if(!serial_console)
+ console_restore_palette ();
+#endif
+ install_obp_ticker();
(*(romvec->pv_abort))();
- set_palette ();
+ install_linux_ticker();
+#if CONFIG_SUN_CONSOLE
+ if(!serial_console)
+ set_palette ();
+#endif
return;
}
void
prom_setsync(sfunc_t funcp)
{
+#if CONFIG_AP1000
+ printk("not doing setsync\n");
+ return;
+#endif
if(!funcp) return;
*romvec->pv_synchook = funcp;
return;
-/* $Id: mp.c,v 1.4 1995/11/25 01:00:06 davem Exp $
+/* $Id: mp.c,v 1.5 1996/04/04 16:31:06 tridge Exp $
* mp.c: OpenBoot Prom Multiprocessor support routines. Don't call
* these on a UP or else you will halt and catch fire. ;)
*
switch(prom_vers) {
case PROM_V0:
case PROM_V2:
+ case PROM_AP1000:
break;
case PROM_V3:
case PROM_P1275:
switch(prom_vers) {
case PROM_V0:
case PROM_V2:
+ case PROM_AP1000:
break;
case PROM_V3:
case PROM_P1275:
switch(prom_vers) {
case PROM_V0:
case PROM_V2:
+ case PROM_AP1000:
break;
case PROM_V3:
case PROM_P1275:
switch(prom_vers) {
case PROM_V0:
case PROM_V2:
+ case PROM_AP1000:
break;
case PROM_V3:
case PROM_P1275:
/* Allocate a chunk of memory of size 'num_bytes' giving a suggestion
* of virtual_hint as the preferred virtual base address of this chunk.
- * There are no guarantees that you will get the allocation, or that
+ * There are no guarentees that you will get the allocation, or that
* the prom will abide by your "hint". So check your return value.
*/
char *
-/* $Id: printf.c,v 1.4 1995/11/25 01:00:10 davem Exp $
+/* $Id: printf.c,v 1.5 1996/04/04 16:31:07 tridge Exp $
* printf.c: Internal prom library printf facility.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* about or use it! It's simple and smelly anyway....
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/openprom.h>
bptr = ppbuf;
+#if CONFIG_AP1000
+ ap_write(1,bptr,strlen(bptr));
+#else
while((ch = *(bptr++)) != 0) {
if(ch == '\n')
prom_putchar('\r');
prom_putchar(ch);
}
-
+#endif
va_end(args);
return;
}
-/* $Id: tree.c,v 1.7 1996/01/01 02:46:24 davem Exp $
+/* $Id: tree.c,v 1.8 1996/04/04 16:31:09 tridge Exp $
* tree.c: Basic device tree traversal/scanning for the Linux
* prom library.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <linux/string.h>
#include <asm/openprom.h>
{
int cnode;
+#if CONFIG_AP1000
+ printk("prom_getchild -> 0\n");
+ return 0;
+#endif
if(node == -1) return 0;
cnode = prom_nodeops->no_child(node);
if((cnode == 0) || (cnode == -1)) return 0;
{
int sibnode;
+#if CONFIG_AP1000
+ printk("prom_getsibling -> 0\n");
+ return 0;
+#endif
if(node == -1) return 0;
sibnode = prom_nodeops->no_nextnode(node);
if((sibnode == 0) || (sibnode == -1)) return 0;
int
prom_getproplen(int node, char *prop)
{
+#if CONFIG_AP1000
+ printk("prom_getproplen(%s) -> -1\n",prop);
+ return -1;
+#endif
if((!node) || (!prop)) return -1;
return prom_nodeops->no_proplen(node, prop);
}
{
int plen;
+#if CONFIG_AP1000
+ printk("prom_getproperty(%s) -> -1\n",prop);
+ return -1;
+#endif
plen = prom_getproplen(node, prop);
if((plen > bufsize) || (plen == 0) || (plen == -1)) return -1;
{
static int intprop;
+#if CONFIG_AP1000
+ printk("prom_getint(%s) -> -1\n",prop);
+ return -1;
+#endif
if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
return intprop;
{
int retval;
+#if CONFIG_AP1000
+ printk("prom_getintdefault(%s) -> 0\n",property);
+ return 0;
+#endif
retval = prom_getint(node, property);
if(retval == -1) return deflt;
{
int retval;
+#if CONFIG_AP1000
+ printk("prom_getbool(%s) -> 0\n",prop);
+ return 0;
+#endif
retval = prom_getproplen(node, prop);
if(retval == -1) return 0;
return 1;
{
int len;
+#if CONFIG_AP1000
+ printk("prom_getstring(%s) -> .\n",prop);
+ return;
+#endif
len = prom_getproperty(node, prop, user_buf, ubuf_size);
if(len != -1) return;
user_buf[0] = 0;
break;
case 1:
dtr = 300;
+ if (FDCS->version >= FDC_82078) {
+ /* chose the default rate table, not the one
+ * where 1 = 2 Mbps */
+ output_byte(FD_DRIVESPEC);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(UNIT(current_drive));
+ output_byte(0xc0);
+ }
+ }
break;
case 2:
dtr = 250;
ptr->next = 0;
ptr->buffer_length = 0;
param += sizeof(struct floppy_raw_cmd);
- if (ptr->cmd_count > 16)
+ if (ptr->cmd_count > 33)
+ /* the command may now also take up the space
+ * initially intended for the reply & the
+ * reply count. Needed for long 82078 commands
+ * such as RESTORE, which takes ... 17 command
+ * bytes. Murphy's law #137: When you reserve
+ * 16 bytes for a structure, you'll one day
+ * discover that you really need 17...
+ */
return -EINVAL;
}
printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc);
return FDC_87306;
default:
- printk(KERN_INFO "FDC %d init: 82077 variant with PARTID=%d.\n",
+ printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
fdc, reply_buffer[0] >> 5);
- return FDC_82077_UNKN;
+ return FDC_82078_UNKN;
}
} /* get_fdc_version */
#define MD_DRIVER
#define MD_PERSONALITY
-#include <linux/blk.h>
-
static int linear_run (int minor, struct md_dev *mddev)
{
int cur=0, i, size, dev0_size, nb_zone;
}
-static int linear_map (int minor, struct md_dev *mddev, struct request *req)
+static int linear_map (struct md_dev *mddev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size)
{
struct linear_data *data=(struct linear_data *) mddev->private;
struct linear_hash *hash;
struct real_dev *tmp_dev;
- long block, rblock;
- struct buffer_head *bh, *bh2;
- int queue, nblk;
- static struct request pending[MAX_REAL]={{0, }, };
+ long block;
- while (req->nr_sectors)
+ block=*rsector >> 1;
+ hash=data->hash_table+(block/data->smallest->size);
+
+ if (block >= (hash->dev0->size + hash->dev0->offset))
{
- block=req->sector >> 1;
- hash=data->hash_table+(block/data->smallest->size);
-
- if (block >= (hash->dev0->size + hash->dev0->offset))
- {
- if (!hash->dev1)
- printk ("linear_map : hash->dev1==NULL for block %ld\n", block);
- tmp_dev=hash->dev1;
- }
- else
- tmp_dev=hash->dev0;
-
- if (block >= (tmp_dev->size + tmp_dev->offset) || block < tmp_dev->offset)
- printk ("Block %ld out of bounds on dev %04x size %d offset %d\n", block, tmp_dev->dev, tmp_dev->size, tmp_dev->offset);
-
- rblock=(block-(tmp_dev->offset));
-
- if (req->sem) /* This is a paging request */
+ if (!hash->dev1)
{
- req->rq_dev=tmp_dev->dev;
- req->sector=rblock << 1;
- add_request (blk_dev+MAJOR (tmp_dev->dev), req);
-
- return REDIRECTED_REQ;
+ printk ("linear_map : hash->dev1==NULL for block %ld\n", block);
+ return (-1);
}
-
- queue=tmp_dev - devices[minor];
-
- for (nblk=0, bh=bh2=req->bh;
- bh && rblock + nblk + (bh->b_size >> 10) <= tmp_dev->size;
- nblk+=bh->b_size >> 10, bh2=bh, bh=bh->b_reqnext)
- {
- if (!buffer_locked(bh))
- printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
-
- bh->b_rdev=tmp_dev->dev;
- }
-
- pending[queue].rq_dev=tmp_dev->dev;
- pending[queue].cmd=req->cmd;
- pending[queue].sector=rblock << 1;
- pending[queue].nr_sectors=nblk << 1;
- pending[queue].current_nr_sectors=req->bh->b_size >> 9;
- pending[queue].bh=req->bh;
- pending[queue].bhtail=bh2;
- bh2->b_reqnext=NULL;
- req->bh=bh;
- req->sector+=nblk << 1;
- req->nr_sectors-=nblk << 1;
+ tmp_dev=hash->dev1;
}
+ else
+ tmp_dev=hash->dev0;
+
+ if (block >= (tmp_dev->size + tmp_dev->offset) || block < tmp_dev->offset)
+ printk ("Block %ld out of bounds on dev %04x size %d offset %d\n", block, tmp_dev->dev, tmp_dev->size, tmp_dev->offset);
+
+ *rdev=tmp_dev->dev;
+ *rsector=(block-(tmp_dev->offset)) << 1;
- req->rq_status=RQ_INACTIVE;
- wake_up (&wait_for_request);
- make_md_request (pending, mddev->nb_dev);
- return REDIRECTED_REQ;
+ return (0);
}
-
static int linear_status (char *page, int minor, struct md_dev *mddev)
{
int sz=0;
* which is important for drive_stat_acct() above.
*/
-struct semaphore request_lock = MUTEX;
-
void add_request(struct blk_dev_struct * dev, struct request * req)
{
struct request * tmp;
short disk_index;
- down (&request_lock);
switch (MAJOR(req->rq_dev)) {
case SCSI_DISK_MAJOR:
disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
req->next = NULL;
cli();
- if (req->bh && req->bh->b_dev==req->bh->b_rdev)
+ if (req->bh)
mark_buffer_clean(req->bh);
if (!(tmp = dev->current_request)) {
dev->current_request = req;
- up (&request_lock);
(dev->request_fn)();
sti();
return;
req->next = tmp->next;
tmp->next = req;
- up (&request_lock);
/* for SCSI devices, call request_fn unconditionally */
- if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
+ if (scsi_major(MAJOR(req->rq_dev)))
(dev->request_fn)();
sti();
int rw_ahead, max_req;
count = bh->b_size >> 9;
- sector = bh->b_blocknr * count;
+ sector = bh->b_rsector;
if (blk_size[major])
- if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
+ if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
bh->b_state = 0;
printk("attempt to access beyond end of device\n");
- printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
- rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
+ printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_rdev),
+ rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_rdev)]);
return;
}
/* Uhhuh.. Nasty dead-lock possible here.. */
return;
}
kstat.pgpgin++;
- max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST; /* reads take precedence */
+ max_req = NR_REQUEST; /* reads take precedence */
break;
case WRITEA:
rw_ahead = 1;
* requests are only for reads.
*/
kstat.pgpgout++;
- max_req = (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
+ max_req = (NR_REQUEST * 2) / 3;
break;
default:
printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
}
/* look for a free request. */
- down (&request_lock);
/*
* Try to coalesce the new request with old requests
case SCSI_DISK_MAJOR:
case SCSI_CDROM_MAJOR:
- case MD_MAJOR:
do {
if (req->sem)
continue;
if (req->nr_sectors >= 244)
continue;
- if (req->rq_dev != bh->b_dev)
+ if (req->rq_dev != bh->b_rdev)
continue;
/* Can we add it to the end of this request? */
if (req->sector + req->nr_sectors == sector) {
req->nr_sectors += count;
mark_buffer_clean(bh);
- up (&request_lock);
sti();
return;
} while ((req = req->next) != NULL);
}
- up (&request_lock);
-
/* find an unused request. */
- req = get_request(max_req, bh->b_dev);
+ req = get_request(max_req, bh->b_rdev);
sti();
/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
unlock_buffer(bh);
return;
}
- req = __get_request_wait(max_req, bh->b_dev);
+ req = __get_request_wait(max_req, bh->b_rdev);
}
/* fill up the request-info, and add it to the queue */
add_request(major+blk_dev,req);
}
-#ifdef CONFIG_BLK_DEV_MD
-
-struct request *get_md_request (int max_req, kdev_t dev)
-{
- return (get_request_wait (max_req, dev));
-}
-
-#endif
-
/* This function can be used to request a number of buffers from a block
device. Currently the only restriction is that all buffers must belong to
the same device */
correct_size, bh[i]->b_size);
goto sorry;
}
+
+ /* Md remaps blocks now */
+ bh[i]->b_rdev = bh[i]->b_dev;
+ bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
+#ifdef CONFIG_BLK_DEV_MD
+ if (major==MD_MAJOR &&
+ md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
+ &bh[i]->b_rsector, bh[i]->b_size >> 9))
+ goto sorry;
+#endif
}
if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
if (bh[i]) {
set_bit(BH_Req, &bh[i]->b_state);
- /* Md needs this for error recovery */
- bh[i]->b_rdev = bh[i]->b_dev;
-
- make_request(major, rw, bh[i]);
+ make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
}
}
return;
{
int i, j;
int buffersize;
+ unsigned long rsector;
+ kdev_t rdev;
struct request * req[8];
unsigned int major = MAJOR(dev);
struct semaphore sem = MUTEX_LOCKED;
{
for (; j < 8 && i < nb; j++, i++, buf += buffersize)
{
+ rdev = dev;
+ rsector = (b[i] * buffersize) >> 9;
+#ifdef CONFIG_BLK_DEV_MD
+ if (major==MD_MAJOR &&
+ md_map (MINOR(dev), &rdev,
+ &rsector, buffersize >> 9)) {
+ printk ("Bad md_map in ll_rw_page_size\n");
+ return;
+ }
+#endif
+
if (j == 0) {
- req[j] = get_request_wait(NR_REQUEST, dev);
+ req[j] = get_request_wait(NR_REQUEST, rdev);
} else {
cli();
- req[j] = get_request(NR_REQUEST, dev);
+ req[j] = get_request(NR_REQUEST, rdev);
sti();
if (req[j] == NULL)
break;
}
req[j]->cmd = rw;
req[j]->errors = 0;
- req[j]->sector = (b[i] * buffersize) >> 9;
+ req[j]->sector = rsector;
req[j]->nr_sectors = buffersize >> 9;
req[j]->current_nr_sectors = buffersize >> 9;
req[j]->buffer = buf;
req[j]->sem = &sem;
req[j]->bh = NULL;
req[j]->next = NULL;
- add_request(major+blk_dev,req[j]);
+ add_request(MAJOR(rdev)+blk_dev,req[j]);
}
run_task_queue(&tq_disk);
while (j > 0) {
#include <linux/blk.h>
-#ifdef CONFIG_MD_SUPPORT_RAID5
-int support_for_raid5; /* So raid-5 module won't be inserted if support
- was not set in the kernel */
-#endif
-
-#ifdef CONFIG_MD_SUPPORT_RAID1
-int support_for_raid1; /* So raid-1 module won't be inserted if support
- was not set in the kernel */
-#endif
-
static struct hd_struct md_hd_struct[MAX_MD_DEV];
static int md_blocksizes[MAX_MD_DEV];
block_fsync
};
-
-static inline int remap_request (int minor, struct request *req)
+int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size)
{
+ if ((unsigned int) minor >= MAX_MD_DEV)
+ {
+ printk ("Bad md device %d\n", minor);
+ return (-1);
+ }
+
if (!md_dev[minor].pers)
{
printk ("Oops ! md%d not running, giving up !\n", minor);
- return -1;
+ return (-1);
}
- return (md_dev[minor].pers->map(minor, md_dev+minor, req));
+ return (md_dev[minor].pers->map(md_dev+minor, rdev, rsector, size));
}
+
static void do_md_request (void)
{
- int minor;
- long flags;
- struct request *req;
-
- while (1)
- {
-#ifdef MD_COUNT_SIZE
- int reqsize, chunksize;
-#endif
-
- save_flags (flags);
- cli ();
- req = blk_dev[MD_MAJOR].current_request;
- if (!req || (req->rq_status == RQ_INACTIVE))
- {
- restore_flags (flags);
- return;
- }
-
-#ifdef MD_COUNT_SIZE
- reqsize=req->nr_sectors>>1;
- chunksize=1 << FACTOR_SHIFT(FACTOR(md_dev+MINOR(req->rq_dev)));
- if (reqsize==chunksize) (md_dev+MINOR(req->rq_dev))->equal_count++;
- if (reqsize<chunksize) (md_dev+MINOR(req->rq_dev))->smallest_count++;
- if (reqsize>chunksize) (md_dev+MINOR(req->rq_dev))->biggest_count++;
-#endif
-
- blk_dev[MD_MAJOR].current_request = req->next;
- restore_flags (flags);
-
- minor = MINOR(req->rq_dev);
- if ((MAJOR(req->rq_dev) != MD_MAJOR) || (minor >= MAX_REAL))
- {
- printk("md: bad device: %s\n", kdevname(req->rq_dev));
- end_request(0, req);
- continue;
- }
-
- switch (remap_request (minor, req))
- {
- case REDIRECTED_BHREQ: /* All right, redirection was successful */
- req->rq_status=RQ_INACTIVE;
- wake_up (&wait_for_request);
- break;
-
- case REDIRECTED_REQ:
- break; /* Redirected whole request (for swapping) */
-
- case REDIRECT_FAILED: /* Swap redirection failed in RAID-[15] */
- end_request (0, req);
- break;
-
- default:
- printk ("remap_request returned strange value !\n");
- }
- }
-}
-
-extern struct semaphore request_lock;
-
-void make_md_request (struct request *pending, int n)
-{
- int i, j, max_req, major=0, rw, found;
- kdev_t dev;
- struct buffer_head *bh;
- struct request *req;
- long flags;
-
- down (&request_lock);
- save_flags (flags);
- cli();
-
- for (i=0; i<n; i++)
- {
- if (!pending[i].bh)
- continue;
-
- found=0;
- rw=pending[i].cmd;
- bh=pending[i].bh;
- major=MAJOR(dev=pending[i].rq_dev);
-
- max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
-
- if (( major == IDE0_MAJOR /* same as HD_MAJOR */
- || major == IDE1_MAJOR
- || major == SCSI_DISK_MAJOR
- || major == IDE2_MAJOR
- || major == IDE3_MAJOR)
- && (req = blk_dev[major].current_request))
- {
- /*
- * Thanx to Gadi Oxman <gadio@netvision.net.il>
- * (He reads my own code better than I do... ;-)
- */
- if (major != SCSI_DISK_MAJOR)
- req = req->next;
-
- while (req && !found)
- {
- if (req->rq_status!=RQ_ACTIVE && &blk_dev[major].plug!=req)
- printk ("Saw bad status request !\n");
-
- if (req->rq_dev == dev &&
- !req->sem &&
- req->cmd == rw &&
- req->sector + req->nr_sectors == pending[i].sector &&
- (req->nr_sectors + pending[i].nr_sectors) < 245)
- {
- req->bhtail->b_reqnext = bh;
- req->bhtail = pending[i].bhtail;
- req->nr_sectors += pending[i].nr_sectors;
- found=1;
- continue;
- }
-
- if (!found &&
- req->rq_dev == dev &&
- !req->sem &&
- req->cmd == rw &&
- req->sector - pending[i].nr_sectors == pending[i].sector &&
- (req->nr_sectors + pending[i].nr_sectors) < 245)
- {
- req->nr_sectors += pending[i].nr_sectors;
- pending[i].bhtail->b_reqnext = req->bh;
- req->buffer = bh->b_data;
- req->current_nr_sectors = bh->b_size >> 9;
- req->sector = pending[i].sector;
- req->bh = bh;
- found=1;
- continue;
- }
-
- req = req->next;
- }
- }
-
- if (found)
- continue;
-
- up (&request_lock);
- sti ();
- req=get_md_request (max_req, dev);
-
- /* Build it up... */
- req->cmd = rw;
- req->errors = 0;
-#if defined (CONFIG_MD_SUPPORT_RAID1)
- req->shared_count = 0;
-#endif
- req->sector = pending[i].sector;
- req->nr_sectors = pending[i].nr_sectors;
- req->current_nr_sectors = bh->b_size >> 9;
- req->buffer = bh->b_data;
- req->sem = NULL;
- req->bh = bh;
- req->bhtail = pending[i].bhtail;
- req->next = NULL;
-
- add_request (blk_dev + MAJOR(dev), req);
- down (&request_lock);
- cli ();
- }
-
- up (&request_lock);
- restore_flags (flags);
- for (j=0; j<n; j++)
- {
- if (!pending[j].bh)
- continue;
-
- pending[j].bh=NULL;
- }
-}
-
+ printk ("Got md request, not good...");
+ return;
+}
static struct symbol_table md_symbol_table=
{
X(devices),
X(md_size),
- X(add_request),
- X(make_md_request),
-
-#ifdef CONFIG_MD_SUPPORT_RAID1
- X(support_for_raid1),
-#endif
-
-#ifdef CONFIG_MD_SUPPORT_RAID5
- X(support_for_raid5),
-#endif
-
X(register_md_personality),
X(unregister_md_personality),
X(partition_name),
if (md_dev[i].pers != pers[(LINEAR>>PERSONALITY_SHIFT)])
{
sz+=sprintf (page+sz, " %dk chunks", 1<<FACTOR_SHIFT(FACTOR(md_dev+i)));
-#ifdef MD_COUNT_SIZES
- sz+=sprintf (page+sz, " (%d/%d/%d)",
- md_dev[i].smallest_count,
- md_dev[i].equal_count,
- md_dev[i].biggest_count);
-#endif
}
sz+=sprintf (page+sz, "\n");
sz+=md_dev[i].pers->status (page+sz, i, md_dev+i);
return (sz);
}
-#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
-
-int md_valid_device (int minor, kdev_t dev, int mode)
-{
- int i;
-
- for (i=0; i<md_dev[minor].nb_dev; i++)
- if (devices[minor][i].dev==dev)
- break;
-
- if (i>md_dev[minor].nb_dev)
- {
- printk ("Oops, dev %04x not found in md_valid_device\n", dev);
- return -EINVAL;
- }
-
- switch (mode)
- {
- case VALID:
- /* Don't consider INVALID_NEXT as a real invalidation.
- Maybe that's not the good way to treat such a thing,
- we'll see. */
- if (devices[minor][i].invalid==INVALID_ALWAYS)
- {
- devices[minor][i].fault_count=0; /* reset fault count */
- if (md_dev[minor].invalid_dev_count)
- md_dev[minor].invalid_dev_count--;
- }
- break;
-
- case INVALID:
- if (devices[minor][i].invalid != VALID )
- return 0; /* Don't invalidate twice */
-
- if (++devices[minor][i].fault_count > MAX_FAULT(md_dev+minor) &&
- MAX_FAULT(md_dev+minor)!=0xFF)
- {
- /* We cannot tolerate this fault.
- So sing a song, and say GoodBye to this device... */
-
- mode=INVALID_ALWAYS;
- md_dev[minor].invalid_dev_count++;
- }
- else
- /* FIXME :
- If we reached the max_invalid_dev count, doing one
- more invalidation will kill the md_dev. So we choose
- not to invalid the physical dev in such a case. But
- next access will probably fail... */
- if (md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev)
- mode=INVALID_NEXT;
- else
- mode=VALID;
- break;
-
- case INVALID_ALWAYS: /* Only used via MD_INVALID ioctl */
- md_dev[minor].invalid_dev_count++;
- }
-
- devices[minor][i].invalid=mode;
- return 0;
-}
-
-
-int md_can_reemit (int minor)
-{
- /* FIXME :
- If the device is raid-1 (md_dev[minor].pers->max_invalid_dev=-1),
- always pretend that we can reemit the request.
- Problem : if the 2 devices in the pair are dead, will loop
- forever. Maybe having a per-personality can_reemit function would
- help. */
-
- if (!md_dev[minor].pers)
- return (0);
-
- return(md_dev[minor].pers->max_invalid_dev &&
- ((md_dev[minor].pers->max_invalid_dev==-1) ?
- 1 :
- md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev));
-}
-
-#endif
-
int register_md_personality (int p_num, struct md_personality *p)
{
int i=(p_num >> PERSONALITY_SHIFT);
#ifdef CONFIG_MD_STRIPED
raid0_init ();
#endif
-#ifdef CONFIG_MD_RAID1
- raid1_init ();
-#endif
-#ifdef CONFIG_MD_RAID5
- raid5_init ();
-#endif
return (0);
}
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/md.h>
#include <linux/raid0.h>
#define MD_DRIVER
#define MD_PERSONALITY
-#include <linux/blk.h>
-
static void create_strip_zones (int minor, struct md_dev *mddev)
{
int i, j, c=0;
* Of course, those facts may not be valid anymore (and surely won't...)
* Hey guys, there's some work out there ;-)
*/
-static int raid0_map (int minor, struct md_dev *mddev, struct request *req)
+static int raid0_map (struct md_dev *mddev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size)
{
struct raid0_data *data=(struct raid0_data *) mddev->private;
static struct raid0_hash *hash;
struct strip_zone *zone;
struct real_dev *tmp_dev;
- int i, queue, blk_in_chunk, factor, chunk;
+ int blk_in_chunk, factor, chunk, chunk_size;
long block, rblock;
- struct buffer_head *bh;
- static struct request pending[MAX_REAL]={{0, }, };
factor=FACTOR(mddev);
+ chunk_size=(1UL << FACTOR_SHIFT(factor));
+ block=*rsector >> 1;
+ hash=data->hash_table+(block/data->smallest->size);
- while (req->bh || req->sem)
+ /* Sanity check */
+ if ((chunk_size*2)<(*rsector % (chunk_size*2))+size)
{
- block=req->sector >> 1;
- hash=data->hash_table+(block/data->smallest->size);
-
- if (block >= (hash->zone0->size +
- hash->zone0->zone_offset))
- {
- if (!hash->zone1)
- printk ("raid0_map : hash->zone1==NULL for block %ld\n", block);
- zone=hash->zone1;
- }
- else
- zone=hash->zone0;
-
- blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
- chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
- tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
- rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
-
- if (req->sem) /* This is a paging request */
+ printk ("raid0_convert : can't convert block across chunks or bigger than %dk %ld %ld\n", chunk_size, *rsector, size);
+ return (-1);
+ }
+
+ if (block >= (hash->zone0->size +
+ hash->zone0->zone_offset))
+ {
+ if (!hash->zone1)
{
- req->rq_dev=tmp_dev->dev;
- req->sector=rblock << 1;
- add_request (blk_dev+MAJOR (tmp_dev->dev), req);
-
- return REDIRECTED_REQ;
+ printk ("raid0_convert : hash->zone1==NULL for block %ld\n", block);
+ return (-1);
}
-
- queue=tmp_dev - devices[minor];
- /* This is a buffer request */
- for (i=blk_in_chunk;
- i<(1UL << FACTOR_SHIFT(factor)) && req->bh;
- i+=bh->b_size >> 10)
- {
- bh=req->bh;
- if (!buffer_locked(bh))
- printk("md%d: block %ld not locked\n", minor, bh->b_blocknr);
-
- bh->b_rdev=tmp_dev->dev;
-#if defined (CONFIG_MD_SUPPORT_RAID1)
- bh->b_reqshared=NULL;
- bh->b_sister_req=NULL;
-#endif
-
- if (!pending[queue].bh)
- {
- pending[queue].rq_dev=tmp_dev->dev;
- pending[queue].bhtail=pending[queue].bh=bh;
- pending[queue].sector=rblock << 1;
- pending[queue].cmd=req->cmd;
- pending[queue].current_nr_sectors=
- pending[queue].nr_sectors=bh->b_size >> 9;
- }
- else
- {
- pending[queue].bhtail->b_reqnext=bh;
- pending[queue].bhtail=bh;
- pending[queue].nr_sectors+=bh->b_size >> 9;
- }
-
- end_redirect (req); /* Separate bh from the request */
- }
+ zone=hash->zone1;
}
+ else
+ zone=hash->zone0;
+
+ blk_in_chunk=block & (chunk_size -1);
+ chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
+ tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
+ rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
- req->rq_status=RQ_INACTIVE;
- wake_up (&wait_for_request);
- make_md_request (pending, mddev->nb_dev);
- return REDIRECTED_REQ; /* Since we already set the request free */
-}
+ *rdev=tmp_dev->dev;
+ *rsector=rblock<<1;
+ return (0);
+}
+
static int raid0_status (char *page, int minor, struct md_dev *mddev)
{
int sz=0;
-NOTE: Earlier versions of the driver mapped ttyS0 to minor
-number 32, but this is changed in this distribution. Port ttyS0
+NOTE: Earlier versions of the driver mapped ttyC0 to minor
+number 32, but this is changed in this distribution. Port ttyC0
now maps to minor number 0.) The following patch should be
applied to /dev/MAKEDEV and the script should then be re-run
to create new entries for the ports.
--- /dev/null
+/* suncons.c: Sun SparcStation console support.
+ *
+ * Copyright (C) 1995 Peter Zaitcev (zaitcev@lab.ipmce.su)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Added font loading Nov/21, Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Added render_screen and faster scrolling Nov/27, miguel
+ * Added console palette code for cg6 Dec/13/95, miguel
+ * Added generic frame buffer support Dec/14/95, miguel
+ * Added cgsix and bwtwo drivers Jan/96, miguel
+ * Added 4m, and cg3 driver Feb/96, miguel
+ * Fixed the cursor on color displays Feb/96, miguel.
+ *
+ * Cleaned up the detection code, generic 8bit depth display
+ * code, Mar/96 miguel
+ *
+ * This file contains the frame buffer device drivers.
+ * Each driver is kept together in case we would like to
+ * split this file.
+ *
+ * Much of this driver is derived from the DEC TGA driver by
+ * Jay Estabrook who has done a nice job with the console
+ * driver abstraction btw.
+ *
+ * We try to make everything a power of two if possible to
+ * speed up the bit blit. Doing multiplies, divides, and
+ * remainder routines end up calling software library routines
+ * since not all Sparcs have the hardware to do it.
+ *
+ * TODO:
+ * do not use minor to index into instances of the frame buffer,
+ * since the numbers assigned to us are not consecutive.
+ *
+ * do not blank the screen when frame buffer is mapped.
+ *
+ * Change the detection loop to use more than one video card.
+ */
+
+
+/* Define this one if you are debugging something in X, it will not disable the console output */
+/* #define DEBUGGING_X */
+/* See also: sparc/keyboard.c: CODING_NEW_DRIVER */
+
+#define GRAPHDEV_MAJOR 29
+
+#define FRAME_BUFFERS 1
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kd.h>
+#include <linux/malloc.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/bitops.h>
+#include <asm/oplib.h>
+#include <asm/sbus.h>
+#include <asm/fbio.h>
+#include <asm/io.h>
+#include <asm/pgtsun4c.h> /* for the sun4c_nocache */
+
+#include "kbd_kern.h"
+#include "vt_kern.h"
+#include "consolemap.h"
+#include "selection.h"
+#include "console_struct.h"
+
+#define cmapsz 8192
+
+extern void register_console(void (*proc)(const char *));
+extern void console_print(const char *);
+extern unsigned char vga_font[];
+extern int graphics_on;
+extern int serial_console;
+
+/* Based upon what the PROM tells us, we can figure out where
+ * the console is currently located. The situation can be either
+ * of the following two scenarios:
+ *
+ * 1) Console i/o is done over the serial line, ttya or ttyb
+ * 2) Console output on frame buffer (video card) and input
+ * coming from the keyboard/mouse which each use a zilog8530
+ * serial channel a piece.
+ */
+
+/* The following variables describe a Sparc console. */
+
+/* From the PROM */
+static char con_name[40];
+
+/* Screen dimensions and color depth. */
+static int con_depth, con_width, con_height, con_type;
+
+static int con_linebytes;
+
+/* Base address of first line. */
+static unsigned char *con_fb_base;
+
+/* Screen parameters: we compute those at startup to make the code faster */
+static int chars_per_line; /* number of bytes per line */
+static int ints_per_line; /* number of ints per line */
+static int skip_bytes; /* number of bytes we skip for the y margin */
+static int x_margin, y_margin; /* the x and y margins */
+static int bytes_per_row; /* bytes used by one screen line (of 16 scan lines) */
+
+/* Functions used by the SPARC dependent console code
+ * to perform the restore_palette function.
+ */
+static void (*restore_palette)(void);
+void set_palette (void);
+
+
+ /* Our screen looks like this at 1152 X 900:
+ *
+ * 0,0
+ * ------------------------------------------------------------------
+ * | ^^^^^^^^^^^ |
+ * | 18 y-pixels |
+ * | ^^^^^^^^^^^ |
+ * 13 | <-64 pixels->| <-- 128 8x16 characters --> | <-64 pixels-> |
+ * ....
+ * 54 chars from top to bottom
+ * ....
+ * 888 | <-64 pixels->| <-- 128 8x16 characters --> | <-64 pixels-> |
+ * | ^^^^^^^^^^^ |
+ * | 18 y-pixels |
+ * | ^^^^^^^^^^^ |
+ * ------------------------------------------------------------------
+ */
+/* First for MONO displays. */
+#define SCREEN_WIDTH 1152 /* Screen width in pixels */
+#define SCREEN_HEIGHT 900 /* Screen height in pixels */
+#define CHARS_PER_LINE 144 /* Make this empirical for speed */
+#define NICE_Y_MARGIN 18 /* We skip 18 y-pixels at top/bottom */
+#define NICE_X_MARGIN 8 /* We skip 64 x-pixels at left/right */
+#define FBUF_TOP_SKIP 2592 /* Empirical, (CHARS_PER_LINE * NICE_Y_MARGIN) */
+#define CHAR_HEIGHT 16
+#define ONE_ROW 2304 /* CHARS_PER_LINE * CHAR_HEIGHT */
+
+/* Now we have this, to compute the base frame buffer position
+ * for a new character to be rendered. 1 and 8 bit depth.
+ */
+#define FBUF_OFFSET(cindex) \
+ (((FBUF_TOP_SKIP) + (((cindex)>>7) * ONE_ROW)) + \
+ ((NICE_X_MARGIN) + (((cindex)&127))))
+
+
+#define COLOR_FBUF_OFFSET(cindex) \
+ (((skip_bytes) + (((cindex)>>7) * bytes_per_row)) + \
+ ((x_margin) + (((cindex)&127) << 3)))
+
+void
+__set_origin(unsigned short offset)
+{
+ /*
+ * should not be called, but if so, do nothing...
+ */
+}
+
+/* For the cursor, we just invert the 8x16 block at the cursor
+ * location. Easy enough...
+ *
+ * Hide the cursor from view, during blanking, usually...
+ */
+static int cursor_pos = -1;
+void
+hide_cursor(void)
+{
+ unsigned long flags;
+ int j;
+
+ save_flags(flags); cli();
+
+ if(cursor_pos == -1) {
+ restore_flags (flags);
+ return;
+ }
+ /* We just zero out the area for now. Certain graphics
+ * cards like the cg6 have a hardware cursor that we could
+ * use, but this is an optimization for some time later.
+ */
+ switch (con_depth){
+ case 1: {
+ unsigned char *dst;
+ dst = (unsigned char *)((unsigned long)con_fb_base +
+ FBUF_OFFSET(cursor_pos));
+ for(j = 0; j < CHAR_HEIGHT; j++, dst += CHARS_PER_LINE)
+ *dst = ~(0);
+ break;
+ }
+ case 8: {
+ unsigned long *dst;
+ const int ipl = ints_per_line;
+
+ dst = (unsigned long *)((unsigned long)con_fb_base + COLOR_FBUF_OFFSET(cursor_pos));
+ for(j = 0; j < CHAR_HEIGHT; j++, dst += ipl) {
+ *dst = ~(0UL);
+ *(dst + 1) = ~(0UL);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ restore_flags(flags);
+}
+
+/* The idea is the following:
+ * we only use the colors in the range 0..15, and we only
+ * setup the palette on that range, so we better keep the
+ * pixel inversion using those colors, that's why we have
+ * those constants below.
+ *
+ * Invert a 2-long-wide (8 byte) column of `height' scan lines in
+ * place, masking with 0x0f so every pixel stays within palette
+ * entries 0..15.
+ */
+inline static void
+cursor_reverse (long *dst, int height, const int ints_on_line)
+{
+	long *row = dst;
+	int line;
+
+	for (line = height; line-- > 0; row += ints_on_line) {
+		row[0] = ~row[0] & 0x0f0f0f0f;
+		row[1] = ~row[1] & 0x0f0f0f0f;
+	}
+}
+
+/* Draw (or move) the software cursor for console `currcons' by
+ * inverting the 8x16 cell at the new position; the previous cell is
+ * restored by inverting it a second time. Runs with interrupts off.
+ * Does nothing for background, blanked, or graphics-mode consoles.
+ */
+void
+set_cursor(int currcons)
+{
+	int j, idx, oldpos;
+	unsigned long flags;
+
+	if (currcons != fg_console || console_blanked || vcmode == KD_GRAPHICS)
+		return;
+
+	if (__real_origin != __origin)
+		__set_origin(__real_origin);
+
+	save_flags(flags); cli();
+
+	idx = (pos - video_mem_base) >> 1;
+	oldpos = cursor_pos;
+	cursor_pos = idx;
+	if (!deccm) {
+		/* Cursor switched off (DECTCEM): just remove the old one. */
+		hide_cursor ();
+		restore_flags (flags);
+		return;
+	}
+	switch (con_depth){
+	case 1: {
+		unsigned char *dst, *opos;
+
+		dst = (unsigned char *)((unsigned long)con_fb_base + FBUF_OFFSET(idx));
+		opos = (unsigned char *)((unsigned long)con_fb_base + FBUF_OFFSET(oldpos));
+		if(oldpos != -1) {
+			/* Restore what was at the old position */
+			for(j=0; j < CHAR_HEIGHT; j++, opos += CHARS_PER_LINE) {
+				*opos = ~*opos;
+			}
+		}
+		/* Was a hard-coded 16; use CHAR_HEIGHT for consistency. */
+		for(j=0; j < CHAR_HEIGHT; j++, dst+=CHARS_PER_LINE) {
+			*dst = ~*dst;
+		}
+		break;
+	}
+	case 8: {
+		unsigned long *dst, *opos;
+		dst = (unsigned long *)((unsigned long)con_fb_base + COLOR_FBUF_OFFSET(idx));
+		opos = (unsigned long *)((unsigned long)con_fb_base + COLOR_FBUF_OFFSET(oldpos));
+
+		if(oldpos != -1)
+			cursor_reverse(opos, CHAR_HEIGHT, ints_per_line);
+		cursor_reverse (dst, CHAR_HEIGHT, ints_per_line);
+		break;
+	}
+	default:
+		/* A label must precede a statement; the original bare
+		 * "default:" right before the closing brace was not valid C. */
+		break;
+	}
+	restore_flags(flags);
+}
+
+/*
+ * Render the current screen
+ * Only used at startup to avoid the caching that is being done in selection.h
+ *
+ * Walks the whole shadow text buffer and blits every character cell.
+ */
+static void
+render_screen(void)
+{
+	unsigned short *cell = (unsigned short *) video_mem_base;
+	int remaining = video_num_columns * video_num_lines;
+
+	while (remaining-- > 0) {
+		sun_blitc (*cell, (unsigned long) cell);
+		cell++;
+	}
+}
+
+/* Console-type initialization: records the video type and, for a
+ * non-serial console, carves the "shadow" text screen out of
+ * boot-time memory and renders it. Returns the advanced kmem_start
+ * allocation cursor.
+ */
+unsigned long
+con_type_init(unsigned long kmem_start, const char **display_desc)
+{
+	can_do_color = (con_type != FBTYPE_SUN2BW);
+
+	video_type = VIDEO_TYPE_SUN;
+	*display_desc = "SUN";
+
+	if (!serial_console) {
+		/* If we fall back to PROM then our output has to remain readable. */
+		prom_putchar('\033'); prom_putchar('['); prom_putchar('H');
+
+		/*
+		 * fake the screen memory with some CPU memory
+		 */
+		video_mem_base = kmem_start;
+		kmem_start += video_screen_size;
+		video_mem_term = kmem_start;
+
+		render_screen();
+	}
+	return kmem_start;
+}
+
+/*
+ * NOTE: get_scrmem() and set_scrmem() are here only because
+ * the VGA version of set_scrmem() has some direct VGA references.
+ *
+ * Save the visible screen into the console's private buffer and
+ * redirect origin/pos bookkeeping at that buffer.
+ */
+void
+get_scrmem(int currcons)
+{
+	memcpyw((unsigned short *)vc_scrbuf[currcons],
+		(unsigned short *)origin, video_screen_size);
+	origin = video_mem_start = (unsigned long)vc_scrbuf[currcons];
+	scr_end = video_mem_end = video_mem_start + video_screen_size;
+	pos = origin + y*video_size_row + (x<<1);
+}
+
+/* Copy the console's private buffer back into the shared screen
+ * memory at `offset' (falling back to 0 if it would not fit) and
+ * repoint origin/pos bookkeeping at the shared memory.
+ */
+void
+set_scrmem(int currcons, long offset)
+{
+	if (video_mem_term - video_mem_base < offset + video_screen_size)
+		offset = 0;
+	memcpyw((unsigned short *)(video_mem_base + offset),
+		(unsigned short *) origin, video_screen_size);
+	video_mem_start = video_mem_base;
+	video_mem_end = video_mem_term;
+	origin = video_mem_base + offset;
+	scr_end = origin + video_screen_size;
+	pos = origin + y*video_size_row + (x<<1);
+}
+
+/*
+ * PIO_FONT support.
+ *
+ * Copy the 256-glyph console font between user space (`arg', VGA
+ * layout: 32 bytes reserved per glyph, only the first CHAR_HEIGHT
+ * used) and the kernel `vga_font' table. `set' non-zero loads a new
+ * font, zero reads the current one back; `ch512' only widens the
+ * verified user buffer. Returns 0 or -errno.
+ */
+int
+set_get_font(char * arg, int set, int ch512)
+{
+	int error, i, line;
+
+	if (!arg)
+		return -EINVAL;
+	error = verify_area (set ? VERIFY_READ : VERIFY_WRITE, (void *) arg,
+			     ch512 ? 2* cmapsz : cmapsz);
+	if (error)
+		return error;
+
+	/* download the current font */
+	if (!set){
+		memset (arg, 0, cmapsz);
+		for (i = 0; i < 256; i++)
+			for (line = 0; line < CHAR_HEIGHT; line++)
+				/* Was vga_font[i], which replayed the first
+				 * 256 bytes of the table instead of copying
+				 * each glyph row. */
+				put_user (vga_font [i*CHAR_HEIGHT + line], arg+(i*32+line));
+		return 0;
+	}
+
+	/* set the font */
+	for (i = 0; i < 256; i++)
+		for (line = 0; line < CHAR_HEIGHT; line++){
+			vga_font [i*CHAR_HEIGHT + line] = (get_user (arg + (i * 32 + line)));
+			if (con_depth == 1)
+				/* The mono font table is stored inverted (it
+				 * is negated at probe time), so invert loaded
+				 * glyphs too. The original line here was a
+				 * no-op self-assignment. */
+				vga_font [i*CHAR_HEIGHT + line] = ~vga_font [i*CHAR_HEIGHT + line];
+		}
+	return 0;
+}
+
+/*
+ * Adjust the screen to fit a font of a certain height
+ *
+ * Returns < 0 for error, 0 if nothing changed, and the number
+ * of lines on the adjusted console if changed.
+ *
+ * for now, we only support the built-in font...
+ */
+int
+con_adjust_height(unsigned long fontheight)
+{
+	/* Only the built-in 16-line font is supported. */
+	return -EINVAL;
+}
+
+/* Read (set == 0) or write (set != 0) the 16-entry default console
+ * palette from/to the user buffer `arg' (16 x 3 bytes, RGB). On a
+ * set, the palette of every allocated console is updated and the
+ * hardware color map reloaded. Returns 0 or -errno.
+ * Pre-2.1 user access: get_user/put_user after one verify_area().
+ */
+int
+set_get_cmap(unsigned char * arg, int set)
+{
+	int i;
+
+	i = verify_area(set ? VERIFY_READ : VERIFY_WRITE, (void *)arg, 16*3);
+	if (i)
+		return i;
+
+	for (i=0; i<16; i++) {
+		if (set) {
+			default_red[i] = get_user(arg++) ;
+			default_grn[i] = get_user(arg++) ;
+			default_blu[i] = get_user(arg++) ;
+		} else {
+			put_user (default_red[i], arg++) ;
+			put_user (default_grn[i], arg++) ;
+			put_user (default_blu[i], arg++) ;
+		}
+	}
+	if (set) {
+		for (i=0; i<MAX_NR_CONSOLES; i++)
+		    if (vc_cons_allocated(i)) {
+			int j, k ;
+			for (j=k=0; j<16; j++) {
+			    vc_cons[i].d->vc_palette[k++] = default_red[j];
+			    vc_cons[i].d->vc_palette[k++] = default_grn[j];
+			    vc_cons[i].d->vc_palette[k++] = default_blu[j];
+			}
+		    }
+		set_palette();
+	}
+
+	return 0;
+}
+
+
+/* Blank the physical frame buffer and the shadow text memory.
+ * On 1-bit depth the fill is ~0 because the mono font table is
+ * stored inverted, making all-ones the background value.
+ */
+void
+sun_clear_screen(void)
+{
+	memset (con_fb_base, (con_depth == 1 ? ~(0) : (0)),
+		(con_depth * con_height * con_width) / 8);
+	/* also clear out the "shadow" screen memory */
+	memset((char *)video_mem_base, 0, (video_mem_term - video_mem_base));
+}
+
+/*
+ * dummy routines for the VESA blanking code, which is VGA only,
+ * so we don't have to carry that stuff around for the Sparc...
+ */
+void vesa_blank(void)
+{
+}
+void vesa_unblank(void)
+{
+}
+void set_vesa_blanking(const unsigned long arg)
+{
+}
+
+void vesa_powerdown(void)
+{
+}
+
+#undef color
+/* cg6 cursor status, kernel tracked copy */
+struct cg6_cursor {
+	short	enable;		/* cursor is enabled */
+	struct	fbcurpos cpos;	/* position */
+	struct	fbcurpos chot;	/* hot-spot */
+	struct	fbcurpos size;	/* size of mask & image fields */
+	int	bits[2][32];	/* space for mask & image bits */
+	char	color [6];	/* cursor colors */
+};
+
+/* Per-card register pointers filled in by cg6_setup(). */
+struct cg6_info {
+	struct	bt_regs *bt;	/* color control */
+	void	*fbc;
+	struct	cg6_fhc *fhc;
+	struct	cg6_tec *tec;
+	struct	cg6_thc *thc;
+	struct	cg6_cursor cursor; /* cursor control */
+	void	*dhc;
+};
+
+struct bwtwo_info {
+        struct	bwtwo_regs *regs;
+};
+
+struct cg3_info {
+	struct	bt_regs *bt;	/* brooktree (color) registers */
+};
+
+/* Array holding the information for the frame buffers.
+ * The function pointers are the per-card driver hooks; a NULL hook
+ * means the operation is unsupported on that card.
+ */
+typedef struct {
+	union {
+		struct bwtwo_info bwtwo;
+		struct cg3_info   cg3;
+		struct cg6_info   cg6;
+	} info;			/* per frame information */
+	int    space;		/* I/O space this card resides in */
+	int    blanked;		/* true if video blanked */
+	int    open;		/* is this fb open? */
+	int    mmaped;		/* has this fb been mmapped? */
+	int    vtconsole;	/* virtual console where it is opened */
+	long   base;		/* frame buffer base */
+	struct fbtype type;	/* frame buffer type */
+	int    (*mmap)(struct inode *, struct file *, struct vm_area_struct *, long fb_base, void *);
+	void   (*loadcmap)(void *this, int index, int count);
+	void   (*blank)(void *this);
+	void   (*unblank)(void *this);
+	int    (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long, void *);
+} fbinfo_t;
+
+static fbinfo_t fbinfo [FRAME_BUFFERS];
+
+/* We need to keep a copy of the color map to answer ioctl requests */
+static union {
+	unsigned char map[256][3];	/* reasonable way to access */
+	unsigned int raw[256*3/4];	/* hardware wants it like this */
+} color_map;
+
+/* vma flags for fb mappings: shared and never paged out. */
+#define FB_MMAP_VM_FLAGS (VM_SHM| VM_LOCKED)
+
+/* Open a frame buffer minor: exactly one opener at a time.
+ * Returns -EBADF for an out-of-range minor, -EBUSY if already open.
+ */
+static int
+fb_open (struct inode * inode, struct file * file)
+{
+	int unit = MINOR (inode->i_rdev);
+	fbinfo_t *fb;
+
+	if (unit >= FRAME_BUFFERS)
+		return -EBADF;
+	fb = &fbinfo [unit];
+	if (fb->open)
+		return -EBUSY;
+	fb->open = 1;
+	fb->mmaped = 0;
+	return 0;
+}
+
+/* Generic frame buffer ioctl dispatcher. Handles the common FBIO*
+ * requests and forwards anything else to the per-card ioctl hook.
+ * Returns 0 or -errno. Pre-2.1 user access style: user pointers are
+ * dereferenced directly after verify_area().
+ */
+static int
+fb_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int minor = MINOR (inode->i_rdev);
+	fbinfo_t *fb;
+	struct fbcmap *cmap;
+	int i;
+
+	if (minor >= FRAME_BUFFERS)
+		return -EBADF;
+	fb = &fbinfo [minor];
+
+	switch (cmd){
+	case FBIOGTYPE:		/* return frame buffer type */
+		i = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct fbtype));
+		if (i) return i;
+		*(struct fbtype *)arg = (fb->type);
+		break;
+	case FBIOGATTR:{
+		struct fbgattr *fba = (struct fbgattr *) arg;
+
+		i = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct fbgattr));
+		if (i) return i;
+		fba->real_type = fb->type.fb_type;
+		fba->owner = 0;
+		fba->fbtype = fb->type;
+		fba->sattr.flags = 0;
+		fba->sattr.emu_type = fb->type.fb_type;
+		fba->sattr.dev_specific [0] = -1;
+		fba->emu_types [0] = fb->type.fb_type;
+		fba->emu_types [1] = -1;
+		break;
+	}
+	case FBIOSVIDEO:
+		i = verify_area(VERIFY_READ, (void *)arg, sizeof(int));
+		if (i) return i;
+
+		if (*(int *)arg){
+			if (!fb->blanked || !fb->unblank)
+				break;
+			(*fb->unblank)(fb);
+			fb->blanked = 0;
+		} else {
+			if (fb->blanked || !fb->blank)
+				break;
+			(*fb->blank)(fb);
+			fb->blanked = 1;
+		}
+		break;
+	case FBIOGVIDEO:
+		i = verify_area (VERIFY_WRITE, (void *) arg, sizeof (int));
+		if (i) return i;
+		*(int *) arg = fb->blanked;
+		break;
+	case FBIOPUTCMAP: {	/* load color map entries */
+		char *rp, *gp, *bp;
+		int end, count;	/* was "count;;" -- stray semicolon */
+
+		if (!fb->loadcmap)
+			return -EINVAL;
+		i = verify_area (VERIFY_READ, (void *) arg, sizeof (struct fbcmap));
+		if (i) return i;
+		cmap = (struct fbcmap *) arg;
+		count = cmap->count;
+		/* A negative count would make the loadcmap hook's
+		 * "while (count--)"-style loop run ~2^32 times. */
+		if (count < 0)
+			return -EINVAL;
+		if ((cmap->index < 0) || (cmap->index > 255))
+			return -EINVAL;
+		if (cmap->index + count > 256)
+			count = 256 - cmap->index;
+		i = verify_area (VERIFY_READ, rp = cmap->red, cmap->count);
+		if (i) return i;
+		i = verify_area (VERIFY_READ, gp = cmap->green, cmap->count);
+		if (i) return i;
+		i = verify_area (VERIFY_READ, bp = cmap->blue, cmap->count);
+		if (i) return i;
+
+		end = cmap->index + count;
+		for (i = cmap->index; i < end; i++){
+			color_map.map [i][0] = *rp++;
+			color_map.map [i][1] = *gp++;
+			color_map.map [i][2] = *bp++;
+		}
+		(*fb->loadcmap)(fb, cmap->index, count);
+		break;
+	}
+
+	default:
+		if (fb->ioctl){
+			i = fb->ioctl (inode, file, cmd, arg, fb);
+			if (i == -EINVAL)
+				printk ("[[FBIO: %8.8x]]\n", cmd);
+			return i;
+		}
+		printk ("[[FBIO: %8.8x]]\n", cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Release a frame buffer minor: drop the open flag, force the
+ * console it was opened on back to text mode, switch the software
+ * cursor machinery back on, and repaint the text screen.
+ */
+static void
+fb_close (struct inode * inode, struct file *filp)
+{
+	int minor = MINOR(inode->i_rdev);
+	struct fbcursor cursor;
+
+	if (minor >= FRAME_BUFFERS)
+		return;
+	if (fbinfo [minor].open)
+		fbinfo [minor].open = 0;
+	vt_cons [fbinfo [minor].vtconsole]->vc_mode = KD_TEXT;
+
+	/* Leaving graphics mode, turn off the cursor */
+	graphics_on = 0;
+	if (fbinfo [minor].mmaped)
+		sun_clear_screen ();
+	/* Disable any hardware cursor via the card's ioctl hook;
+	 * note this passes a kernel pointer through fb_ioctl(). */
+	cursor.set = FB_CUR_SETCUR;
+	cursor.enable = 0;
+	fb_ioctl (inode, filp, FBIOSCURPOS, (unsigned long) &cursor);
+	set_palette ();
+	render_screen ();
+	return;
+}
+
+/* mmap() entry point: delegate to the per-card mmap hook and, on
+ * success, put the foreground console into graphics mode.
+ * Returns -ENXIO for a bad minor, a console already in graphics
+ * mode, or a card without an mmap hook.
+ */
+static int
+fb_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma)
+{
+	int minor = MINOR (inode->i_rdev);
+	fbinfo_t *fb;
+
+	if (minor >= FRAME_BUFFERS)
+		return -ENXIO;
+	/* FIXME: the fg_console below should actually be the
+	 * console on which the invoking process is running
+	 */
+	if (vt_cons [fg_console]->vc_mode == KD_GRAPHICS)
+		return -ENXIO;
+	fbinfo [minor].vtconsole = fg_console;
+	fb = &fbinfo [minor];
+
+	if (fb->mmap){
+		int v;
+
+		v = (*fb->mmap)(inode, file, vma, fb->base, fb);
+		if (v) return v;
+		fbinfo [minor].mmaped = 1;
+		vt_cons [fg_console]->vc_mode = KD_GRAPHICS;
+		graphics_on = 1;
+		return 0;
+	} else
+		return -ENXIO;
+}
+
+/* File operations for the graphics (frame buffer) character device. */
+static struct file_operations graphdev_fops =
+{
+	NULL,			/* lseek */
+	NULL,			/* read */
+	NULL,			/* write */
+	NULL,			/* readdir */
+	NULL,			/* select */
+	fb_ioctl,
+	fb_mmap,
+	fb_open,		/* open */
+	fb_close,		/* close */
+};
+
+/* Call the frame buffer routine for setting the palette.
+ * Refreshes color_map entries 0..15 from the default palette (via
+ * the VGA color_table permutation) and pushes them to the hardware.
+ * No-op while blanked or in graphics mode, or without a cmap hook.
+ */
+void
+set_palette (void)
+{
+	if (console_blanked || vt_cons [fg_console]->vc_mode == KD_GRAPHICS)
+		return;
+
+	if (fbinfo [0].loadcmap){
+		int i, j;
+
+		/* First keep color_map with the palette colors */
+		for (i = 0; i < 16; i++){
+			j = color_table [i];
+			color_map.map [i][0] = default_red [j];
+			color_map.map [i][1] = default_grn [j];
+			color_map.map [i][2] = default_blu [j];
+		}
+		(*fbinfo [0].loadcmap)(&fbinfo [0], 0, 16);
+	}
+}
+
+/* Called when returning to prom.
+ * Invokes the per-card restore hook, if one was registered.
+ */
+void
+console_restore_palette (void)
+{
+	if (restore_palette)
+		(*restore_palette) ();
+}
+
+/* This routine should be moved to srmmu.c */
+/* Probe the SRMMU for the PTE of `addr' using an lda from the
+ * flush/probe ASI (entire-probe, the 0x400 in the address).
+ */
+static __inline__ unsigned int
+srmmu_get_pte (unsigned long addr)
+{
+	register unsigned long entry;
+
+	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
+			     "=r" (entry):
+			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
+	return entry;
+}
+
+/* Translate a kernel virtual address to a physical address,
+ * dispatching on MMU type (sun4c vs. SRMMU). Panics on other CPUs.
+ */
+unsigned int
+get_phys (unsigned int addr)
+{
+	switch (sparc_cpu_model){
+	case sun4c:
+		return sun4c_get_pte (addr) << PAGE_SHIFT;
+	case sun4m:
+		/* SRMMU PTE holds PPN in bits 8..31, shifted by 8 - 12. */
+		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
+	default:
+		panic ("get_phys called for unsupported cpu model\n");
+		return 0;
+	}
+}
+
+/* CG6 support code */
+
+/* Offset of interesting structures in the OBIO space */
+/*
+ * Brooktree is the video dac and is funny to program on the cg6.
+ * (it's even funnier on the cg3)
+ * The FBC could be the frame buffer control
+ * The FHC could be the frame buffer hardware control.
+ */
+#define CG6_ROM_OFFSET       0x0
+#define CG6_BROOKTREE_OFFSET 0x200000
+#define CG6_DHC_OFFSET       0x240000
+#define CG6_ALT_OFFSET       0x280000
+#define CG6_FHC_OFFSET       0x300000
+#define CG6_THC_OFFSET       0x301000
+#define CG6_FBC_OFFSET       0x700000
+#define CG6_TEC_OFFSET       0x701000
+#define CG6_RAM_OFFSET       0x800000
+
+/* Brooktree DAC register block (each register is word-wide). */
+struct bt_regs {
+	unsigned int  addr;	/* address register */
+	unsigned int  color_map; /* color map */
+	unsigned int  control;	/* control register */
+	unsigned int  cursor;	/* cursor map register */
+};
+
+/* The contents are unknown */
+struct cg6_tec {
+	int tec_matrix;
+	int tec_clip;
+	int tec_vdc;
+};
+
+/* TEC/THC: timing, hardware cursor registers. */
+struct cg6_thc {
+        unsigned int thc_xxx0[512];  /* ??? */
+        unsigned int thc_hsync1;     /* hsync timing */
+        unsigned int thc_hsync2;
+        unsigned int thc_hsync3;
+        unsigned int thc_vsync1;     /* vsync timing */
+        unsigned int thc_vsync2;
+        unsigned int thc_refresh;
+        unsigned int thc_misc;
+        unsigned int thc_xxx1[56];
+        unsigned int thc_cursxy;     /* cursor x,y position (16 bits each) */
+        unsigned int thc_cursmask[32];   /* cursor mask bits */
+        unsigned int thc_cursbits[32];   /* what to show where mask enabled */
+};
+
+/* Reset DAC entry 0 to white before handing control back to the
+ * PROM, so its output stays visible.
+ * NOTE(review): the three back-to-back color_map writes presumably
+ * supply the R, G, B components in sequence -- confirm against the
+ * Brooktree DAC documentation.
+ */
+static void
+cg6_restore_palette (void)
+{
+	volatile struct bt_regs *bt;
+
+	bt = fbinfo [0].info.cg6.bt;
+	bt->addr = 0;
+	bt->color_map = 0xffffffff;
+	bt->color_map = 0xffffffff;
+	bt->color_map = 0xffffffff;
+}
+
+/* Ugh: X wants to mmap a bunch of cute stuff at the same time :-( */
+/* So, we just mmap the things that are being asked for.
+ * Walks the requested range page by page, mapping each known CG6
+ * register block (or the VRAM) at its well-known pseudo-offset.
+ * Returns 0, -ENXIO for a misaligned offset, -EAGAIN on map failure.
+ */
+static int
+cg6_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
+{
+	unsigned int size, page, r, map_size;
+	unsigned int map_offset = 0;
+	fbinfo_t *fb = (fbinfo_t *) xx;
+
+	size = vma->vm_end - vma->vm_start;
+	if (vma->vm_offset & ~PAGE_MASK)
+		return -ENXIO;
+
+	/* To stop the swapper from even considering these pages */
+	vma->vm_flags |= FB_MMAP_VM_FLAGS;
+
+	/* Each page, see which map applies */
+	for (page = 0; page < size; ){
+		switch (vma->vm_offset+page){
+		case CG6_TEC:
+			map_size = PAGE_SIZE;
+			map_offset = get_phys ((uint)fb->info.cg6.tec);
+			break;
+		case CG6_FBC:
+			map_size = PAGE_SIZE;
+			map_offset = get_phys ((uint)fb->info.cg6.fbc);
+			break;
+		case CG6_FHC:
+			map_size = PAGE_SIZE;
+			map_offset = get_phys ((uint)fb->info.cg6.fhc);
+			break;
+		case CG6_THC:
+			map_size = PAGE_SIZE;
+			map_offset = get_phys ((uint)fb->info.cg6.thc);
+			break;
+		case CG6_BTREGS:
+			map_size = PAGE_SIZE;
+			map_offset = get_phys ((uint)fb->info.cg6.bt);
+			break;
+
+		case CG6_DHC:
+			map_size = PAGE_SIZE * 40;
+			map_offset = get_phys ((uint)fb->info.cg6.dhc);
+			break;
+
+		case CG6_ROM:
+			map_size = 0;
+			break;
+
+		case CG6_RAM:
+			map_size = size-page;
+			map_offset = get_phys ((uint) con_fb_base);
+			/* Clamp to the actual VRAM size; the original test
+			 * was inverted ("<"), which could grow map_size past
+			 * both the frame buffer and the vma itself
+			 * (cf. the same clamp in cg3_mmap). */
+			if (map_size > fb->type.fb_size)
+				map_size = fb->type.fb_size;
+			break;
+		default:
+			map_size = 0;
+			break;
+		}
+		if (!map_size){
+			page += PAGE_SIZE;
+			continue;
+		}
+		r = io_remap_page_range (vma->vm_start+page,
+					 map_offset,
+					 map_size, vma->vm_page_prot,
+					 fb->space);
+		if (r) return -EAGAIN;
+		page += map_size;
+	}
+	vma->vm_inode = inode;
+	inode->i_count++;
+	return 0;
+}
+
+#define BT_D4M3(x) ((((x) >> 2) << 1) + ((x) >> 2))	/* (x / 4) * 3 */
+#define BT_D4M4(x) ((x) & ~3)				/* (x / 4) * 4 */
+
+/* Push `count' color_map entries starting at `index' into the CG6's
+ * Brooktree DAC: write the start index to the address register, then
+ * stream R, G, B bytes (each in the top byte of the word).
+ */
+static void
+cg6_loadcmap (void *fbinfo, int index, int count)
+{
+	fbinfo_t *fb = (fbinfo_t *) fbinfo;
+	struct bt_regs *bt = fb->info.cg6.bt;
+	int i;
+
+	bt->addr = index << 24;
+	for (i = index; count--; i++){
+		bt->color_map = color_map.map [i][0] << 24;
+		bt->color_map = color_map.map [i][1] << 24;
+		bt->color_map = color_map.map [i][2] << 24;
+	}
+}
+
+/* Load cursor information.
+ * Programs the THC cursor x/y register from the tracked cursor
+ * state; a disabled cursor is parked off-screen.
+ */
+static void
+cg6_setcursor (struct cg6_info *info)
+{
+	unsigned int v;
+	struct cg6_cursor *c = &info->cursor;
+
+	if (c->enable){
+		v = ((c->cpos.fbx - c->chot.fbx) << 16)
+		   |((c->cpos.fby - c->chot.fby) & 0xffff);
+	} else {
+		/* Magic constant to turn off the cursor */
+		v = ((65536-32) << 16) | (65536-32);
+	}
+	info->thc->thc_cursxy = v;
+}
+
+#undef pos
+/* FBIOSCURSOR backend: update the CG6 hardware cursor from the
+ * user-supplied struct fbcursor. Handles enable/position/hot-spot
+ * and (for FB_CUR_SETSHAPE) a new 32x32-max mask/image pair, which
+ * is cached in the kernel copy and loaded into the THC registers.
+ * Returns 0 or -errno.
+ */
+static int
+cg6_scursor (struct fbcursor *cursor, fbinfo_t *fb)
+{
+	int op = cursor->set;
+	volatile struct cg6_thc *thc = fb->info.cg6.thc;
+	struct cg6_cursor *cursor_info = &fb->info.cg6.cursor;
+	int i, bytes = 0;
+
+	if (op & FB_CUR_SETSHAPE){
+		if ((unsigned int) cursor->size.fbx > 32)
+			return -EINVAL;
+		if ((unsigned int) cursor->size.fby > 32)
+			return -EINVAL;
+		bytes = (cursor->size.fby * 32)/8;
+		i = verify_area (VERIFY_READ, cursor->image, bytes);
+		if (i) return i;
+		i = verify_area (VERIFY_READ, cursor->mask, bytes);
+		if (i) return i;
+	}
+	if (op & (FB_CUR_SETCUR | FB_CUR_SETPOS | FB_CUR_SETHOT)){
+		if (op & FB_CUR_SETCUR)
+			cursor_info->enable = cursor->enable;
+		if (op & FB_CUR_SETPOS)
+			cursor_info->cpos = cursor->pos;
+		if (op & FB_CUR_SETHOT)
+			cursor_info->chot = cursor->hot;
+		cg6_setcursor (&fb->info.cg6);
+	}
+	if (op & FB_CUR_SETSHAPE){
+		unsigned int u;
+
+		cursor_info->size = cursor->size;
+		/* Clear the whole mask/image cache; the original used
+		 * sizeof(cursor_info->size) and zeroed only 8 of the
+		 * 256 bytes, leaving stale bits from larger cursors. */
+		memset ((void *)&cursor_info->bits, 0, sizeof (cursor_info->bits));
+		memcpy (cursor_info->bits [0], cursor->mask, bytes);
+		memcpy (cursor_info->bits [1], cursor->image, bytes);
+		u = ~0;
+		if (cursor_info->size.fbx < 32)
+			u = ~(u >> cursor_info->size.fbx);
+		for (i = 0; i < 32; i++){
+			int m = cursor_info->bits [0][i] & u;
+			thc->thc_cursmask [i] = m;
+			thc->thc_cursbits [i] = m & cursor_info->bits [1][i];
+		}
+	}
+	return 0;
+}
+
+/* Handle cg6-specific ioctls */
+static int
+cg6_ioctl (struct inode *inode, struct file *file, unsigned cmd, unsigned long arg, fbinfo_t *fb)
+{
+	int i;
+
+	switch (cmd){
+	case FBIOGCURMAX:
+		/* Report the maximum hardware cursor size (32x32). */
+		i = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct fbcurpos));
+		if (i) return i;
+		((struct fbcurpos *) arg)->fbx = 32;
+		((struct fbcurpos *) arg)->fby = 32;
+		break;
+
+	case FBIOSVIDEO:
+		/* vesa_blank and vesa_unblank could do the job on fb [0] */
+		break;
+
+	case FBIOSCURSOR:
+		return cg6_scursor ((struct fbcursor *) arg, fb);
+
+	case FBIOSCURPOS:
+		/* The verify_area below is deliberately disabled:
+		 * fb_close() calls this path with a *kernel* pointer,
+		 * which a user-space check would reject.
+		i= verify_area (VERIFY_READ, (void *) arg, sizeof (struct fbcurpos));
+		if (i) return i;
+		*/
+		fb->info.cg6.cursor.cpos = *(struct fbcurpos *)arg;
+		cg6_setcursor (&fb->info.cg6);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Probe-time setup for a CG6: install the driver hooks in
+ * fbinfo[slot] and map every register block into kernel space.
+ * The first card also registers the palette-restore hook used when
+ * dropping back to the PROM.
+ */
+static void
+cg6_setup (int slot, unsigned int cg6, int cg6_io)
+{
+	struct cg6_info *cg6info;
+
+	printk ("cgsix%d at 0x%8.8x\n", slot, (unsigned int) cg6);
+
+	/* Fill in parameters we left out */
+	fbinfo [slot].type.fb_cmsize = 256;
+	fbinfo [slot].mmap = cg6_mmap;
+	fbinfo [slot].loadcmap = cg6_loadcmap;
+	fbinfo [slot].ioctl = (void *) cg6_ioctl;
+	fbinfo [slot].blank = 0;
+	fbinfo [slot].unblank = 0;
+
+	cg6info = (struct cg6_info *) &fbinfo [slot].info.cg6;
+
+	/* Map the hardware registers */
+	cg6info->bt = sparc_alloc_io ((void *) cg6+CG6_BROOKTREE_OFFSET, 0,
+		 sizeof (struct bt_regs),"cgsix_dac", cg6_io, 0);
+	cg6info->fhc = sparc_alloc_io ((void *) cg6+CG6_FHC_OFFSET, 0,
+		 sizeof (int), "cgsix_fhc", cg6_io, 0);
+	cg6info->thc = sparc_alloc_io ((void *) cg6+CG6_THC_OFFSET, 0,
+		 sizeof (struct cg6_thc), "cgsix_thc", cg6_io, 0);
+	cg6info->tec = sparc_alloc_io ((void *) cg6+CG6_TEC_OFFSET, 0,
+		 sizeof (struct cg6_tec), "cgsix_tec", cg6_io, 0);
+	cg6info->dhc = sparc_alloc_io ((void *) cg6+CG6_DHC_OFFSET, 0,
+		 0x40000, "cgsix_dhc", cg6_io, 0);
+	cg6info->fbc = sparc_alloc_io ((void *) cg6+CG6_FBC_OFFSET, 0,
+		 0x1000, "cgsix_fbc", cg6_io, 0);
+	if (!con_fb_base){
+		con_fb_base = sparc_alloc_io ((void *) cg6+CG6_RAM_OFFSET, 0,
+		     fbinfo [slot].type.fb_size, "cgsix_ram", cg6_io, 0);
+	}
+	if (!slot)
+		restore_palette = cg6_restore_palette;
+}
+
+/* The cg3 driver, obio space addresses for mapping the cg3 stuff */
+#define CG3_REGS 0x400000
+#define CG3_RAM  0x800000
+#define D4M3(x) ((((x)>>2)<<1) + ((x)>>2))	/* (x/4)*3 */
+#define D4M4(x) ((x)&~0x3)			/* (x/4)*4 */
+
+/* The cg3 palette is loaded with 4 color values at each time */
+/* so you end up with: (rgb)(r), (gb)(rg), (b)(rgb), and so on.
+ * We therefore stream whole words from the packed color_map.raw
+ * view, rounding the range out to word boundaries.
+ */
+static void
+cg3_loadcmap (void *fbinfo, int index, int count)
+{
+	fbinfo_t *fb = (fbinfo_t *) fbinfo;
+	struct bt_regs *bt = fb->info.cg3.bt;
+	int *i, steps;
+
+	i = &color_map.raw [D4M3(index)];
+	steps = D4M3(index+count-1) - D4M3(index)+3;
+	bt->addr = D4M4(index);
+	while (steps--)
+		bt->color_map = *i++;
+}
+
+/* The cg3 is presumed to emulate a cg4, I guess older programs will want that */
+/* addresses above 0x4000000 are for cg3, below that it's cg4 emulation.
+ * Maps only the frame buffer RAM (at CG3_MMAP_OFFSET), clamped to
+ * the actual VRAM size; other offsets are skipped a page at a time.
+ */
+static int
+cg3_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
+{
+	unsigned int size, page, r, map_size;
+	unsigned int map_offset = 0;
+	fbinfo_t *fb = (fbinfo_t *) xx;
+
+	size = vma->vm_end - vma->vm_start;
+	if (vma->vm_offset & ~PAGE_MASK)
+		return -ENXIO;
+
+	/* To stop the swapper from even considering these pages */
+	vma->vm_flags |= FB_MMAP_VM_FLAGS;
+
+	/* Each page, see which map applies */
+	for (page = 0; page < size; ){
+		switch (vma->vm_offset+page){
+		case CG3_MMAP_OFFSET:
+			map_size = size-page;
+			map_offset = get_phys ((uint) con_fb_base);
+			if (map_size > fb->type.fb_size)
+				map_size = fb->type.fb_size;
+			break;
+		default:
+			map_size = 0;
+			break;
+		}
+		if (!map_size){
+			page += PAGE_SIZE;
+			continue;
+		}
+		r = io_remap_page_range (vma->vm_start+page,
+					 map_offset,
+					 map_size, vma->vm_page_prot,
+					 fb->space);
+		if (r) return -EAGAIN;
+		page += map_size;
+	}
+	vma->vm_inode = inode;
+	inode->i_count++;
+	return 0;
+}
+
+/* Probe-time setup for a CG3: install the mmap/cmap hooks and map
+ * the Brooktree registers (and the VRAM if the PROM gave us no
+ * mapping).
+ */
+static void
+cg3_setup (int slot, unsigned int cg3, int cg3_io)
+{
+	struct cg3_info *cg3info;
+
+	printk ("cgthree%d at 0x%8.8x\n", slot, cg3);
+
+	/* Fill in parameters we left out */
+	fbinfo [slot].type.fb_cmsize = 256;
+	fbinfo [slot].mmap = cg3_mmap;
+	fbinfo [slot].loadcmap = cg3_loadcmap;
+	fbinfo [slot].ioctl = 0; /* no special ioctls */
+
+	cg3info = (struct cg3_info *) &fbinfo [slot].info.cg3;
+
+	/* Map the card registers */
+	cg3info->bt = sparc_alloc_io ((void *) cg3+CG3_REGS, 0,
+		 sizeof (struct bt_regs),"cg3_bt", cg3_io, 0);
+
+	if (!con_fb_base){
+		con_fb_base=sparc_alloc_io ((void*) cg3+CG3_RAM, 0,
+		    fbinfo [slot].type.fb_size, "cg3_ram", cg3_io, 0);
+	}
+}
+
+/* OBio addresses for the bwtwo registers */
+#define BWTWO_REGISTER_OFFSET 0x400000
+
+/* Only the control register at byte 16 is used (video enable bit). */
+struct bwtwo_regs {
+	char unknown [16];
+#define BWTWO_ENABLE_VIDEO 0x40
+	unsigned char control;
+	char unknown2 [15];
+};
+
+/* mmap the bwtwo frame buffer RAM in one piece.
+ * Returns 0, -ENXIO for a misaligned offset, -EAGAIN on map failure.
+ */
+static int
+bwtwo_mmap (struct inode *inode, struct file *file, struct vm_area_struct *vma, long base, void *xx)
+{
+	unsigned int size, map_offset, r;
+	fbinfo_t *fb = (fbinfo_t *) xx;
+	int map_size;
+
+	map_size = size = vma->vm_end - vma->vm_start;
+
+	if (vma->vm_offset & ~PAGE_MASK)
+		return -ENXIO;
+
+	/* To stop the swapper from even considering these pages */
+	vma->vm_flags |= FB_MMAP_VM_FLAGS;
+	/* Leftover debug output; the original format string had a
+	 * stray 'l' after every %8.8x conversion. */
+	printk ("base=%8.8x start=%8.8x size=%x offset=%8.8x\n",
+		(unsigned int) base,
+		(unsigned int) vma->vm_start, size,
+		(unsigned int) vma->vm_offset);
+
+	/* This routine should also map the register if asked for, but we don't do that yet */
+	map_offset = get_phys ((uint) con_fb_base);
+	r = io_remap_page_range (vma->vm_start, map_offset, map_size, vma->vm_page_prot,
+				 fb->space);
+	if (r) return -EAGAIN;
+	vma->vm_inode = inode;
+	inode->i_count++;
+	return 0;
+}
+
+/* Blank the bwtwo by clearing the video-enable bit. */
+static void
+bwtwo_blank (void *xx)
+{
+	fbinfo_t *fb = (fbinfo_t *) xx;
+
+	fb->info.bwtwo.regs->control &= ~BWTWO_ENABLE_VIDEO;
+}
+
+/* Unblank the bwtwo by setting the video-enable bit. */
+static void
+bwtwo_unblank (void *xx)
+{
+	fbinfo_t *fb = (fbinfo_t *) xx;
+	fb->info.bwtwo.regs->control |= BWTWO_ENABLE_VIDEO;
+}
+
+/* Probe-time setup for a bwtwo: install mmap/blank hooks (it has no
+ * color map) and map its control registers.
+ */
+static void
+bwtwo_setup (int slot, unsigned int bwtwo, int bw2_io)
+{
+	printk ("bwtwo%d at 0x%8.8x\n", slot, bwtwo);
+	fbinfo [slot].type.fb_cmsize = 2;
+	fbinfo [slot].mmap = bwtwo_mmap;
+	fbinfo [slot].loadcmap = 0;
+	fbinfo [slot].ioctl = 0;
+	fbinfo [slot].blank = bwtwo_blank;
+	fbinfo [slot].unblank = bwtwo_unblank;
+	fbinfo [slot].info.bwtwo.regs = sparc_alloc_io ((void *) bwtwo+BWTWO_REGISTER_OFFSET,
+	    0, sizeof (struct bwtwo_regs), "bwtwo_regs", bw2_io, 0);
+}
+
+/* PROM names of the frame buffer cards this driver can drive. */
+static char *known_cards [] = {
+	"cgsix", "cgthree", "bwtwo", "SUNW,tcx", 0
+};
+
+/* Return non-zero when `name' exactly matches one of the names above. */
+static int
+known_card (char *name)
+{
+	char **entry;
+
+	for (entry = known_cards; *entry; entry++)
+		if (!strcmp (name, *entry))
+			return 1;
+	return 0;
+}
+
+/* Known screen geometries and the margins we use to center the
+ * 128x54 text area on them; terminated by a zero depth.
+ */
+static struct {
+	int depth;
+	int resx, resy;
+	int x_margin, y_margin;
+} scr_def [] = {
+	{ 1, 1152, 900,  8, 18 },
+	{ 8, 1152, 900, 64, 18 },
+	{ 8, 1280, 1024, 96, 80 },
+	{ 8, 1024, 768, 0, 0 },
+	{ 0 },
+};
+
+/* Probe the PROM/SBUS for the console frame buffer, fill in the
+ * global console geometry and fbinfo[0], run the per-card setup, and
+ * register the graphics character device.
+ * Returns 0 on success, -1 when no usable console was found, -EIO
+ * when the device could not be registered; halts back to the PROM on
+ * unrecoverable configurations.
+ */
+static int
+sparc_console_probe(void)
+{
+	int propl, con_node, i;
+	struct linux_sbus_device *sbdp;
+	unsigned int fbbase = 0xb001b001;	/* sentinel: no base found */
+	int fbiospace = 0;
+
+	/* XXX The detection code needs to support multiple video cards in one system */
+	con_node = 0;
+	switch(prom_vers) {
+	case PROM_V0:
+		/* V0 proms are at sun4c only. Can skip many checks. */
+		con_type = FBTYPE_NOTYPE;
+		if(SBus_chain == 0) {
+			prom_printf("SBUS chain is NULL, bailing out...\n");
+			prom_halt();
+		}
+		for_each_sbusdev(sbdp, SBus_chain) {
+			con_node = sbdp->prom_node;
+
+			/* If no "address" than it is not the PROM console. */
+			if(sbdp->num_vaddrs) {
+				if(!strncmp(sbdp->prom_name, "cgsix", 5)) {
+					con_type = FBTYPE_SUNFAST_COLOR;
+					fbbase = (uint) sbdp->reg_addrs [0].phys_addr;
+					fbiospace = sbdp->reg_addrs[0].which_io;
+					break;
+				} else if(!strncmp(sbdp->prom_name, "cgthree", 7)) {
+					con_type = FBTYPE_SUN3COLOR;
+					fbbase = (uint) sbdp->reg_addrs [0].phys_addr;
+					fbiospace = sbdp->reg_addrs[0].which_io;
+					break;
+				} else if (!strncmp(sbdp->prom_name, "bwtwo", 5)) {
+					con_type = FBTYPE_SUN2BW;
+					fbbase = (uint) sbdp->reg_addrs [0].phys_addr;
+					fbiospace = sbdp->reg_addrs[0].which_io;
+					break;
+				}
+			}
+		}
+		if(con_type == FBTYPE_NOTYPE) return -1;
+		con_fb_base = (unsigned char *) sbdp->sbus_vaddrs[0];
+		strncpy(con_name, sbdp->prom_name, sizeof (con_name));
+		break;
+	case PROM_V2:
+	case PROM_V3:
+	case PROM_P1275:
+		for_each_sbusdev(sbdp, SBus_chain) {
+			if (known_card (sbdp->prom_name))
+				break;
+		}
+		if (!sbdp){
+			/* Was "a know video card" in the original. */
+			prom_printf ("Could not find a known video card on this machine\n");
+			prom_halt ();
+		}
+		prom_apply_sbus_ranges (&sbdp->reg_addrs [0], sbdp->num_registers);
+		fbbase = (long) sbdp->reg_addrs [0].phys_addr;
+		fbiospace = sbdp->reg_addrs[0].which_io;
+		con_node = (*romvec->pv_v2devops.v2_inst2pkg)
+			(*romvec->pv_v2bootargs.fd_stdout);
+		/*
+		 * Determine the type of hardware accelerator.
+		 */
+		propl = prom_getproperty(con_node, "emulation", con_name, sizeof (con_name));
+		if (propl < 0 || propl >= sizeof (con_name)) {
+			/* Early cg3s had no "emulation". */
+			propl = prom_getproperty(con_node, "name", con_name, sizeof (con_name));
+			if (propl < 0) {
+				prom_printf("console: no device name!!\n");
+				return -1;
+			}
+		}
+		if(!strncmp(con_name, "cgsix", sizeof (con_name))) {
+			con_type = FBTYPE_SUNFAST_COLOR;
+		} else if(!strncmp(con_name, "cgthree", sizeof (con_name))) {
+			con_type = FBTYPE_SUN3COLOR;
+		} else if(!strncmp(con_name, "cgfourteen", sizeof (con_name))) {
+			con_type = FBTYPE_MDICOLOR;
+		} else if(!strncmp(con_name, "bwtwo", sizeof (con_name))) {
+			con_type = FBTYPE_SUN2BW;
+		} else if(!strncmp(con_name,"SUNW,tcx", sizeof (con_name))){
+			/* TCX is driven through the cg3 code for now. */
+			con_type = FBTYPE_SUN3COLOR;
+		} else {
+			prom_printf("console: \"%s\" is unsupported\n", con_name);
+			return -1;
+		}
+		propl = prom_getproperty(con_node, "address", (char *) &con_fb_base, 4);
+		if (propl != 4) {
+			con_fb_base = 0;
+		}
+		break;
+	default:
+		return -1;
+	};
+
+	/* Get the device geometry */
+	con_linebytes = prom_getintdefault(con_node, "linebytes", 1152);
+	con_width = prom_getintdefault(con_node, "width", 1152);
+	con_height = prom_getintdefault(con_node, "height", 900);
+
+	/* Currently we just support 1-bit and 8-bit depth displays */
+	if (con_type == FBTYPE_SUN2BW) {
+		con_depth = 1;
+	} else {
+		con_depth = 8;
+	}
+	for (i = 0; scr_def [i].depth; i++){
+		if (scr_def [i].resx != con_width || scr_def [i].resy != con_height)
+			continue;
+		if (scr_def [i].depth != con_depth)
+			continue;
+		x_margin = scr_def [i].x_margin;
+		y_margin = scr_def [i].y_margin;
+		chars_per_line = (con_width * con_depth) / 8;
+		skip_bytes = chars_per_line * y_margin;
+		ints_per_line = chars_per_line / 4;
+		bytes_per_row = CHAR_HEIGHT * chars_per_line;
+		break;
+	}
+	if (!scr_def [i].depth){
+		/* NOTE(review): the message promises "may be slow" but we
+		 * halt anyway -- message and behavior disagree. */
+		x_margin = y_margin = 0;
+		prom_printf ("PenguinCon: unknown video resolution %dx%d may be slow\n", con_width, con_height);
+		prom_halt ();
+	}
+	/* P3: I fear this strips 15inch 1024/768 PC-like monitors out. */
+	if ((con_linebytes*8) / con_depth != con_width) {
+		prom_printf("console: UNUSUAL VIDEO, linebytes=%d, width=%d, depth=%d\n",
+			    con_linebytes, con_width, con_depth);
+		return -1;
+	}
+
+	/* Negate the font table on 1 bit depth cards so we have white on black */
+	if (con_depth == 1)
+		for(i=0; i<(16 * 256); i++)
+			vga_font[i] = ~vga_font[i];
+
+	/* Fill in common fb information */
+	fbinfo [0].type.fb_type = con_type;
+	fbinfo [0].type.fb_height = con_height;
+	fbinfo [0].type.fb_width = con_width;
+	fbinfo [0].type.fb_depth = con_depth;
+	fbinfo [0].type.fb_size = PAGE_ALIGN((con_linebytes) * (con_height));
+	fbinfo [0].space = fbiospace;
+	fbinfo [0].blanked = 0;
+
+	/* Should be filled in for supported video cards */
+	fbinfo [0].mmap = 0;
+	fbinfo [0].loadcmap = 0;
+	fbinfo [0].ioctl = 0;
+	fbinfo [0].blank = 0;
+	fbinfo [0].unblank = 0;
+
+	if (fbbase == 0xb001b001){
+		printk ("Mail miguel@nuclecu.unam.mx video_card=%d (%s)\n", con_type, con_name);
+	}
+
+	/* Per card setup */
+	switch (con_type){
+	case FBTYPE_SUN3COLOR:
+		cg3_setup (0, fbbase, fbiospace);
+		break;
+	case FBTYPE_SUNFAST_COLOR:
+		cg6_setup (0, fbbase, fbiospace);
+		break;
+	case FBTYPE_SUN2BW:
+		bwtwo_setup (0, fbbase, fbiospace);
+		break;
+	default:
+		break;
+	}
+	if (!con_fb_base){
+		prom_printf ("PROM does not have an 'address' property for this\n"
+			     "frame buffer and the Linux drivers do not know how\n"
+			     "to map the video of this device\n");
+		prom_halt ();
+	}
+	fbinfo [0].base = (long) con_fb_base;
+
+	/* Register the frame buffer device */
+	if (register_chrdev (GRAPHDEV_MAJOR, "graphics", &graphdev_fops)){
+		printk ("Could not register graphics device\n");
+		return -EIO;
+	}
+	return 0; /* success */
+}
+
+/* video init code, called from within the SBUS bus scanner at
+ * boot time.
+ */
+void
+sun_console_init(void)
+{
+	if(serial_console)
+		return;
+
+	if(sparc_console_probe()) {
+		prom_printf("Could not probe console, bailing out...\n");
+		prom_halt();
+	}
+	sun_clear_screen();
+}
+
+/*
+ * sun_blitc
+ *
+ * Displays an ASCII character at a specified character cell
+ * position.
+ *
+ * Called from scr_writew() when the destination is
+ * the "shadow" screen
+ */
+/* Expands a 4-bit font nibble into 4 bytes: each set bit becomes a
+ * 0xff byte, so a whole-word AND/ANDN against the fg/bg masks paints
+ * four pixels at once.
+ */
+static unsigned int
+fontmask_bits[16] = {
+	0x00000000,
+	0x000000ff,
+	0x0000ff00,
+	0x0000ffff,
+	0x00ff0000,
+	0x00ff00ff,
+	0x00ffff00,
+	0x00ffffff,
+	0xff000000,
+	0xff0000ff,
+	0xff00ff00,
+	0xff00ffff,
+	0xffff0000,
+	0xffff00ff,
+	0xffffff00,
+	0xffffffff
+};
+
+int
+sun_blitc(unsigned int charattr, unsigned long addr)
+{
+ int j, idx;
+ unsigned char *font_row;
+
+#ifndef DEBUGGING_X
+ if (graphics_on)
+ return 0;
+#endif
+ idx = (addr - video_mem_base) >> 1;
+
+ /* Invalidate the cursor position if necessary. */
+ if(idx == cursor_pos)
+ cursor_pos = -1;
+ font_row = &vga_font[(charattr & 0xff) << 4];
+
+ switch (con_depth){
+ case 1: {
+ register unsigned char *dst;
+
+ dst = (unsigned char *)(((unsigned long)con_fb_base) + FBUF_OFFSET(idx));
+ for(j = 0; j < CHAR_HEIGHT; j++, font_row++, dst+=CHARS_PER_LINE)
+ *dst = *font_row;
+ break;
+ }
+ case 8: {
+ register unsigned long *dst;
+ unsigned long fgmask, bgmask, data, rowbits, attrib;
+ const int ipl = ints_per_line;
+
+ dst = (unsigned long *)(((unsigned long)con_fb_base) + COLOR_FBUF_OFFSET(idx));
+ attrib = (charattr >> 8) & 0x0ff;
+ fgmask = attrib & 0x0f;
+ bgmask = (attrib >> 4) & 0x0f;
+ fgmask = fgmask << 8 | fgmask;
+ fgmask |= fgmask << 16;
+ bgmask = bgmask << 8 | bgmask;
+ bgmask |= bgmask << 16;
+
+ for(j = 0; j < CHAR_HEIGHT; j++, font_row++, dst += ipl) {
+ rowbits = *font_row;
+ data = fontmask_bits[(rowbits>>4)&0xf];
+ data = (data & fgmask) | (~data & bgmask);
+ *dst = data;
+ data = fontmask_bits[rowbits&0xf];
+ data = (data & fgmask) | (~data & bgmask);
+ *(dst+1) = data;
+ }
+ break;
+ } /* case */
+ } /* switch */
+ return (0);
+}
+
+unsigned char vga_font[cmapsz] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
+0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
+0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
+0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
+0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
+0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
+0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
+0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
+0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
+0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
+0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
+0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
+0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
+0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
+0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
+0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
+0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
+0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
+0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
+0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
+0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
+0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
+0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
+0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
+0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
+0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
+0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
+0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
+0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
+0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
+0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
+0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
+0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
+0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
+0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
+0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
+0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
+0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
+0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
+0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
+0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
+0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
+0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
+0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
+0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
+0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
+0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
+0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
+0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
+0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
+0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
+0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
+0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
+0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
+0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
+0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
+0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
+0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
+0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
+0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
+0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
+0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
+0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
+0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
+0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
+0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
+0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
+0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
+0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
+0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
+0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
+0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
+0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
+0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
+0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
+0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
+0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
+0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
+0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
+0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
+0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
+0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
+0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
+0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
+0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
+0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
+0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
+0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
+0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
+0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
+0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
+0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
+0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
+0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
+0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
+0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
+0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
+0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
+0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
+0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
+0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
+0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
+0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
+0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
+0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
+0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
+0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
+0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
+0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
+0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
+0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
+0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
+0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
+0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
+0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
+0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
+0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
+0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
+0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
--- /dev/null
+/* sunmouse.c: Sun mouse driver for the Sparc
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Parts based on the psaux.c driver written by:
+ * Johan Myreen.
+ *
+ * Dec/19/95 Added SunOS mouse ioctls - miguel.
+ * Jan/5/96 Added VUID support, SIGIO support - miguel.
+ * Mar/5/96 Added proper mouse stream support - miguel.
+ */
+
+/* The mouse is run off of one of the Zilog serial ports. On
+ * that port is the mouse and the keyboard, each gets a zs channel.
+ * The mouse itself is mouse-systems in nature. So the protocol is:
+ *
+ * Byte 1) Button state which is bit-encoded as
+ * 0x4 == left-button down, else up
+ * 0x2 == middle-button down, else up
+ * 0x1 == right-button down, else up
+ *
+ * Byte 2) Delta-x
+ * Byte 3) Delta-y
+ * Byte 4) Delta-x again
+ * Byte 5) Delta-y again
+ *
+ * One day this driver will have to support more than one mouse in the system.
+ *
+ * This driver has two modes of operation: the default VUID_NATIVE is
+ * set when the device is opened and allows the application to see the
+ * mouse character stream as we get it from the serial (for gpm for
+ * example). The second method, VUID_FIRM_EVENT, will provide cooked
+ * events in Firm_event records.
+ * */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fcntl.h>
+#include <linux/signal.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/vuid_event.h>
+#include <linux/random.h>
+/* The following keeps track of software state for the Sun
+ * mouse.
+ */
+#define STREAM_SIZE 2048
+#define EV_SIZE (STREAM_SIZE/sizeof (Firm_event))
+#define BUTTON_LEFT 4
+#define BUTTON_MIDDLE 2
+#define BUTTON_RIGHT 1
+
+struct sun_mouse {
+ unsigned char transaction[5]; /* Each protocol transaction */
+ unsigned char byte; /* Counter, starts at 0 */
+ unsigned char button_state; /* Current button state */
+ unsigned char prev_state; /* Previous button state */
+ int delta_x; /* Current delta-x */
+ int delta_y; /* Current delta-y */
+ int present;
+ int ready; /* set if data is available */
+ int active; /* set if device is open */
+ int vuid_mode; /* VUID_NATIVE or VUID_FIRM_EVENT */
+ struct wait_queue *proc_list;
+ struct fasync_struct *fasync;
+
+ /* The event/stream queue */
+ unsigned int head;
+ unsigned int tail;
+ union {
+ char stream [STREAM_SIZE];
+ Firm_event ev [0];
+ } queue;
+};
+
+static struct sun_mouse sunmouse;
+#define gen_events (sunmouse.vuid_mode != VUID_NATIVE)
+#define bstate sunmouse.button_state
+#define pstate sunmouse.prev_state
+
+extern void mouse_put_char(char ch);
+
+/* #define SMOUSE_DEBUG */
+
+static void
+push_event (Firm_event *ev)
+{
+ int next = (sunmouse.head + 1) % EV_SIZE;
+
+ if (next != sunmouse.tail){
+ sunmouse.queue.ev [sunmouse.head] = *ev;
+ sunmouse.head = next;
+ }
+}
+
+static int
+queue_empty (void)
+{
+ return sunmouse.head == sunmouse.tail;
+}
+
+static Firm_event *
+get_from_queue (void)
+{
+ Firm_event *result;
+
+ result = &sunmouse.queue.ev [sunmouse.tail];
+ sunmouse.tail = (sunmouse.tail + 1) % EV_SIZE;
+ return result;
+}
+
+static void
+push_char (char c)
+{
+ int next = (sunmouse.head + 1) % STREAM_SIZE;
+
+ if (next != sunmouse.tail){
+ sunmouse.queue.stream [sunmouse.head] = c;
+ sunmouse.head = next;
+ }
+ sunmouse.ready = 1;
+ if (sunmouse.fasync)
+ kill_fasync (sunmouse.fasync, SIGIO);
+ wake_up_interruptible (&sunmouse.proc_list);
+}
+
+/* The following is called from the zs driver when bytes are received on
+ * the Mouse zs8530 channel.
+ */
+void
+sun_mouse_inbyte(unsigned char byte, unsigned char status)
+{
+ signed char mvalue;
+ int d;
+ Firm_event ev;
+
+ add_mouse_randomness (byte);
+ if(!sunmouse.active)
+ return;
+
+ if (!gen_events){
+ push_char (byte);
+ return;
+ }
+ /* Check for framing errors and parity errors */
+ /* XXX TODO XXX */
+
+ /* If the mouse sends us a byte from 0x80 to 0x87
+ * we are starting at byte zero in the transaction
+ * protocol.
+ */
+ if(byte >= 0x80 && byte <= 0x87)
+ sunmouse.byte = 0;
+
+ mvalue = (signed char) byte;
+ switch(sunmouse.byte) {
+ case 0:
+ /* Button state */
+ sunmouse.button_state = (~byte) & 0x7;
+#ifdef SMOUSE_DEBUG
+ printk("B<Left %s, Middle %s, Right %s>",
+ ((sunmouse.button_state & 0x4) ? "DOWN" : "UP"),
+ ((sunmouse.button_state & 0x2) ? "DOWN" : "UP"),
+ ((sunmouse.button_state & 0x1) ? "DOWN" : "UP"));
+#endif
+ sunmouse.byte++;
+ return;
+ case 1:
+ /* Delta-x 1 */
+#ifdef SMOUSE_DEBUG
+ printk("DX1<%d>", mvalue);
+#endif
+ sunmouse.delta_x = mvalue;
+ sunmouse.byte++;
+ return;
+ case 2:
+ /* Delta-y 1 */
+#ifdef SMOUSE_DEBUG
+ printk("DY1<%d>", mvalue);
+#endif
+ sunmouse.delta_y = mvalue;
+ sunmouse.byte++;
+ return;
+ case 3:
+ /* Delta-x 2 */
+#ifdef SMOUSE_DEBUG
+ printk("DX2<%d>", mvalue);
+#endif
+ sunmouse.delta_x += mvalue;
+ sunmouse.byte++;
+ return;
+ case 4:
+ /* Last byte, Delta-y 2 */
+#ifdef SMOUSE_DEBUG
+ printk("DY2<%d>", mvalue);
+#endif
+ sunmouse.delta_y += mvalue;
+ sunmouse.byte = 69; /* Some ridiculous value */
+ break;
+ case 69:
+ /* Until we get the (0x80 -> 0x87) value we aren't
+ * in the middle of a real transaction, so just
+ * return.
+ */
+ return;
+ default:
+ printk("sunmouse: bogon transaction state\n");
+ sunmouse.byte = 69; /* What could cause this? */
+ return;
+ };
+ d = bstate ^ pstate;
+ pstate = bstate;
+ if (d){
+ if (d & BUTTON_LEFT){
+ ev.id = MS_LEFT;
+ ev.value = bstate & BUTTON_LEFT;
+ }
+ if (d & BUTTON_RIGHT){
+ ev.id = MS_RIGHT;
+ ev.value = bstate & BUTTON_RIGHT;
+ }
+ if (d & BUTTON_MIDDLE){
+ ev.id = MS_MIDDLE;
+ ev.value = bstate & BUTTON_MIDDLE;
+ }
+ ev.time = xtime;
+ ev.value = ev.value ? VKEY_DOWN : VKEY_UP;
+ push_event (&ev);
+ }
+ if (sunmouse.delta_x){
+ ev.id = LOC_X_DELTA;
+ ev.time = xtime;
+ ev.value = sunmouse.delta_x;
+ push_event (&ev);
+ sunmouse.delta_x = 0;
+ }
+ if (sunmouse.delta_y){
+ ev.id = LOC_Y_DELTA;
+ ev.time = xtime;
+ ev.value = sunmouse.delta_y;
+ push_event (&ev);
+ }
+
+ /* We just completed a transaction, wake up whoever is awaiting
+ * this event.
+ */
+ sunmouse.ready = 1;
+ if (sunmouse.fasync)
+ kill_fasync (sunmouse.fasync, SIGIO);
+ wake_up_interruptible(&sunmouse.proc_list);
+ return;
+}
+
+static int
+sun_mouse_open(struct inode * inode, struct file * file)
+{
+ if(!sunmouse.present)
+ return -EINVAL;
+ if(sunmouse.active)
+ return -EBUSY;
+ sunmouse.active = 1;
+ sunmouse.ready = sunmouse.delta_x = sunmouse.delta_y = 0;
+ sunmouse.button_state = 0x80;
+ sunmouse.vuid_mode = VUID_NATIVE;
+ return 0;
+}
+
+static int
+sun_mouse_fasync (struct inode *inode, struct file *filp, int on)
+{
+ int retval;
+
+ retval = fasync_helper (inode, filp, on, &sunmouse.fasync);
+ if (retval < 0)
+ return retval;
+ return 0;
+}
+
+static void
+sun_mouse_close(struct inode *inode, struct file *file)
+{
+ sunmouse.active = sunmouse.ready = 0;
+ sun_mouse_fasync (inode, file, 0);
+}
+
+static int
+sun_mouse_write(struct inode *inode, struct file *file, const char *buffer,
+ int count)
+{
+ return -EINVAL; /* foo on you */
+}
+
+static int
+sun_mouse_read(struct inode *inode, struct file *file, char *buffer,
+ int count)
+{
+ struct wait_queue wait = { current, NULL };
+
+ if (queue_empty ()){
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+ add_wait_queue (&sunmouse.proc_list, &wait);
+ while (queue_empty () && !(current->signal & ~current->blocked)){
+ current->state = TASK_INTERRUPTIBLE;
+ schedule ();
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue (&sunmouse.proc_list, &wait);
+ }
+ if (gen_events){
+ char *p = buffer, *end = buffer+count;
+
+ while (p < end && !queue_empty ()){
+ *(Firm_event *)p = *get_from_queue ();
+ p += sizeof (Firm_event);
+ }
+ sunmouse.ready = !queue_empty ();
+ inode->i_atime = CURRENT_TIME;
+ return p-buffer;
+ } else {
+ int c;
+
+ for (c = count; !queue_empty () && c; c--){
+ *buffer++ = sunmouse.queue.stream [sunmouse.tail];
+ sunmouse.tail = (sunmouse.tail + 1) % STREAM_SIZE;
+ }
+ sunmouse.ready = !queue_empty ();
+ inode->i_atime = CURRENT_TIME;
+ return count-c;
+ }
+ /* Only called if nothing was sent */
+ if (current->signal & ~current->blocked)
+ return -ERESTARTSYS;
+ return 0;
+}
+
+static int
+sun_mouse_select(struct inode *inode, struct file *file, int sel_type,
+ select_table *wait)
+{
+ if(sel_type != SEL_IN)
+ return 0;
+ if(sunmouse.ready)
+ return 1;
+ select_wait(&sunmouse.proc_list, wait);
+ return 0;
+}
+int
+sun_mouse_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int i;
+
+ switch (cmd){
+ /* VUIDGFORMAT - Get input device byte stream format */
+ case _IOR('v', 2, int):
+ i = verify_area (VERIFY_WRITE, (void *)arg, sizeof (int));
+ if (i) return i;
+ *(int *)arg = sunmouse.vuid_mode;
+ break;
+
+ /* VUIDSFORMAT - Set input device byte stream format*/
+ case _IOW('v', 1, int):
+ i = verify_area (VERIFY_READ, (void *)arg, sizeof (int));
+ if (i) return i;
+ i = *(int *) arg;
+ if (i == VUID_NATIVE || i == VUID_FIRM_EVENT){
+ sunmouse.vuid_mode = *(int *)arg;
+ sunmouse.head = sunmouse.tail = 0;
+ } else
+ return -EINVAL;
+ break;
+
+ default:
+ printk ("[MOUSE-ioctl: %8.8x]\n", cmd);
+ return -1;
+ }
+ return 0;
+}
+
+struct file_operations sun_mouse_fops = {
+ NULL,
+ sun_mouse_read,
+ sun_mouse_write,
+ NULL,
+ sun_mouse_select,
+ sun_mouse_ioctl,
+ NULL,
+ sun_mouse_open,
+ sun_mouse_close,
+ NULL,
+ sun_mouse_fasync,
+};
+
+static struct miscdevice sun_mouse_mouse = {
+ SUN_MOUSE_MINOR, "sunmouse", &sun_mouse_fops
+};
+
+int
+sun_mouse_init(void)
+{
+ printk("Sun Mouse-Systems mouse driver version 1.00\n");
+ sunmouse.present = 1;
+ sunmouse.ready = sunmouse.active = 0;
+ misc_register (&sun_mouse_mouse);
+ sunmouse.delta_x = sunmouse.delta_y = 0;
+ sunmouse.button_state = 0x80;
+ sunmouse.proc_list = NULL;
+ return 0;
+}
+
+void
+sun_mouse_zsinit(void)
+{
+ sunmouse.ready = 1;
+}
fi
dep_tristate 'Teles/NICCY1016PC/Creatix support' CONFIG_ISDN_DRV_TELES $CONFIG_ISDN
dep_tristate 'ICN B1 and B2 support' CONFIG_ISDN_DRV_ICN $CONFIG_ISDN
+dep_tristate 'PCBIT-D support' CONFIG_ISDN_DRV_PCBIT $CONFIG_ISDN
SUB_DIRS :=
MOD_SUB_DIRS :=
-ALL_SUB_DIRS := icn teles
+ALL_SUB_DIRS := icn teles pcbit
L_OBJS :=
LX_OBJS :=
endif
endif
+ifeq ($(CONFIG_ISDN_DRV_PCBIT),y)
+ L_OBJS += pcbit/pcbit.o
+ SUB_DIRS += pcbit
+ MOD_SUB_DIRS += pcbit
+else
+ ifeq ($(CONFIG_ISDN_DRV_PCBIT),m)
+ MOD_SUB_DIRS += pcbit
+ endif
+endif
+
include $(TOPDIR)/Rules.make
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: icn.c,v $
+ * Revision 1.18 1996/04/20 16:50:26 fritz
+ * Fixed status-buffer overrun.
+ * Misc. typos
+ *
* Revision 1.17 1996/02/11 02:39:04 fritz
* Increased Buffer for status-messages.
* Removed conditionals for HDLC-firmware.
#undef LOADEXTERN
static char
-*revision = "$Revision: 1.17 $";
+*revision = "$Revision: 1.18 $";
static void icn_pollcard(unsigned long dummy);
save_flags(flags);
cli();
*dev->msg_buf_write++ = (c == 0xff) ? '\n' : c;
- /* No checks for buffer overflow for raw-status-device */
+ if (dev->msg_buf_write == dev->msg_buf_read) {
+ if (++dev->msg_buf_read > dev->msg_buf_end)
+ dev->msg_buf_read = dev->msg_buf;
+ }
if (dev->msg_buf_write > dev->msg_buf_end)
dev->msg_buf_write = dev->msg_buf;
restore_flags(flags);
}
restore_flags(flags);
OUTB_P(0, ICN_RUN); /* Reset Controller */
- OUTB_P(0, ICN_MAPRAM); /* Disable RAM */
+ OUTB_P(0, ICN_MAPRAM); /* Disable RAM */
icn_shiftout(ICN_CFG, 0x0f, 3, 4); /* Windowsize= 16k */
icn_shiftout(ICN_CFG, (unsigned long) dev->shmem, 23, 10); /* Set RAM-Addr. */
#ifdef BOOT_DEBUG
u_char *p;
for (p = buf, count = 0; count < len; p++, count++) {
+ if (dev->msg_buf_read == dev->msg_buf_write)
+ return count;
if (user)
put_fs_byte(*dev->msg_buf_read++, p);
else
-/* $Id: icn.h,v 1.12 1996/01/22 05:01:22 fritz Exp fritz $
+/* $Id: icn.h,v 1.13 1996/04/20 16:51:41 fritz Exp $
*
* ISDN lowlevel-module for the ICN active ISDN-Card.
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: icn.h,v $
+ * Revision 1.13 1996/04/20 16:51:41 fritz
+ * Increased status buffer.
+ * Misc. typos
+ *
* Revision 1.12 1996/01/22 05:01:22 fritz
* Revert to GPL.
*
} pqueue;
typedef struct {
- unsigned short port; /* Base-port-address */
+ unsigned short port; /* Base-port-address */
icn_shmem *shmem; /* Pointer to memory-mapped-buffers */
int myid; /* Driver-Nr. assigned by linklevel */
int rvalid; /* IO-portregion has been requested */
unsigned short flags; /* Statusflags */
int doubleS0; /* Flag: Double-S0-Card */
int secondhalf; /* Flag: Second half of a doubleS0 */
- int ptype; /* Protocol type (1TR6 or Euro) */
+ int ptype; /* Protocol type (1TR6 or Euro) */
struct timer_list st_timer; /* Timer for Status-Polls */
struct timer_list rb_timer; /* Timer for B-Channel-Polls */
int channel; /* Currently mapped Channel */
+/* $Id: isdn_cards.c,v 1.1 1996/04/20 16:04:36 fritz Exp $
+ *
+ * Linux ISDN subsystem, initialization for non-modularized drivers.
+ *
+ * Copyright 1994,95,96 by Fritz Elfert (fritz@wuemaus.franken.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Log: isdn_cards.c,v $
+ * Revision 1.1 1996/04/20 16:04:36 fritz
+ * Initial revision
+ *
+ */
#include <linux/config.h>
extern void teles_init(void);
#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+extern void pcbit_init(void);
+#endif
+
void isdn_cards_init(void)
{
#if CONFIG_ISDN_DRV_ICN
#if CONFIG_ISDN_DRV_TELES
teles_init();
#endif
+#if CONFIG_ISDN_DRV_PCBIT
+ pcbit_init();
+#endif
}
+/* $Id: isdn_cards.h,v 1.1 1996/04/20 16:04:03 fritz Exp $
+ *
+ * Linux ISDN subsystem, initialization for non-modularized drivers.
+ *
+ * Copyright 1994,95,96 by Fritz Elfert (fritz@wuemaus.franken.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Log: isdn_cards.h,v $
+ * Revision 1.1 1996/04/20 16:04:03 fritz
+ * Initial revision
+ *
+ */
extern void isdn_cards_init(void);
-/* $Id: isdn_common.c,v 1.4 1996/02/11 02:33:26 fritz Exp fritz $
+/* $Id: isdn_common.c,v 1.5 1996/04/20 16:19:07 fritz Exp $
*
* Linux ISDN subsystem, common used functions (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_common.c,v $
+ * Revision 1.5 1996/04/20 16:19:07 fritz
+ * Changed slow timer handlers to increase accuracy.
+ * Added statistic information for usage by xisdnload.
+ * Fixed behaviour of isdnctrl-device on non-blocked io.
+ * Fixed all io to go through generic writebuf-function without
+ * bypassing. Same for incoming data.
+ * Fixed bug: Last channel had been unusable.
+ * Fixed kfree of tty xmit_buf on ppp initialization failure.
+ *
* Revision 1.4 1996/02/11 02:33:26 fritz
* Fixed bug in main timer-dispatcher.
* Bugfix: Lot of tty-callbacks got called regardless of the events already
isdn_dev *dev = (isdn_dev *) 0;
static int has_exported = 0;
-static char *isdn_revision = "$Revision: 1.4 $";
+static char *isdn_revision = "$Revision: 1.5 $";
extern char *isdn_net_revision;
extern char *isdn_tty_revision;
isdn_tty_modem_xmit();
}
if (tf & ISDN_TIMER_SLOW) {
- if (++isdn_timer_cnt1 > ISDN_TIMER_02SEC) {
+ if (++isdn_timer_cnt1 >= ISDN_TIMER_02SEC) {
isdn_timer_cnt1 = 0;
if (tf & ISDN_TIMER_NETDIAL)
isdn_net_dial();
}
- if (++isdn_timer_cnt2 > ISDN_TIMER_1SEC) {
+ if (++isdn_timer_cnt2 >= ISDN_TIMER_1SEC) {
isdn_timer_cnt2 = 0;
if (tf & ISDN_TIMER_NETHANGUP)
isdn_net_autohup();
return;
if ((i = isdn_dc2minor(di,channel))==-1)
return;
+ /* Update statistics */
+ dev->ibytes[i] += len;
/* First, try to deliver data to network-device */
if (isdn_net_receive_callback(i, buf, len))
return;
break;
case 2: /* For calling back, first reject incoming call ... */
case 3: /* Interface found, but down, reject call actively */
+ printk(KERN_INFO "isdn: Rejecting Call\n");
cmd.driver = di;
cmd.arg = c->arg;
cmd.command = ISDN_CMD_HANGUP;
dev->drv[di]->interface->command(&cmd);
- if (r == 2)
- /* ... then start dialing. */
- isdn_net_dial();
- break;
+ if (r == 3)
+ break;
+ /* Fall through */
+ case 4:
+ /* ... then start callback. */
+ isdn_net_dial();
}
return 0;
break;
if (minor == ISDN_MINOR_STATUS) {
char *p;
- if (!file->private_data)
+ if (!file->private_data) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
interruptible_sleep_on(&(dev->info_waitq));
+ }
save_flags(flags);
p = isdn_statstr();
restore_flags(flags);
drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
if (drvidx < 0)
return -ENODEV;
- if (!dev->drv[drvidx]->stavail)
+ if (!dev->drv[drvidx]->stavail) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq));
+ }
if (dev->drv[drvidx]->interface->readstat)
len = dev->drv[drvidx]->interface->
readstat(buf, MIN(count, dev->drv[drvidx]->stavail), 1);
len = 0;
save_flags(flags);
cli();
- dev->drv[drvidx]->stavail -= len;
+ if (len)
+ dev->drv[drvidx]->stavail -= len;
+ else
+ dev->drv[drvidx]->stavail = 0;
restore_flags(flags);
file->f_pos += len;
return len;
if (!dev->drv[drvidx]->running)
return -ENODEV;
chidx = isdn_minor2chan(minor);
- while (dev->drv[drvidx]->interface->writebuf(drvidx, chidx, buf, count, 1) != count)
+ dev->obytes[minor] += count;
+ while (isdn_writebuf_stub(drvidx, chidx, buf, count, 1) != count)
interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
return count;
}
isdn_net_ioctl_phone phone;
isdn_net_ioctl_cfg cfg;
- if (minor == ISDN_MINOR_STATUS)
- return -EPERM;
+ if (minor == ISDN_MINOR_STATUS) {
+ switch (cmd) {
+ case IIOCGETCPS:
+ if (arg) {
+ ulong *p = (ulong *)arg;
+ int i;
+ if ((ret = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(ulong)*ISDN_MAX_CHANNELS*2)))
+ return ret;
+ for (i = 0;i<ISDN_MAX_CHANNELS;i++) {
+ put_fs_long(dev->ibytes[i],p++);
+ put_fs_long(dev->obytes[i],p++);
+ }
+ return 0;
+ } else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
if (!dev->drivers)
return -ENODEV;
if (minor < ISDN_MINOR_CTRL) {
if (minor == ISDN_MINOR_STATUS) {
infostruct *p;
-
if ((p = (infostruct *) kmalloc(sizeof(infostruct), GFP_KERNEL))) {
MOD_INC_USE_COUNT;
p->next = (char *) dev->infochain;
int drvidx;
isdn_ctrl c;
- if (!dev->channels)
- return;
- MOD_DEC_USE_COUNT;
if (minor == ISDN_MINOR_STATUS) {
infostruct *p = dev->infochain;
infostruct *q = NULL;
+ MOD_DEC_USE_COUNT;
while (p) {
if (p->private == (char *) &(filep->private_data)) {
if (q)
p = (infostruct *) (p->next);
}
printk(KERN_WARNING "isdn: No private data while closing isdnctrl\n");
+ return;
}
+ if (!dev->channels)
+ return;
+ MOD_DEC_USE_COUNT;
if (minor < ISDN_MINOR_CTRL) {
drvidx = isdn_minor2drv(minor);
if (drvidx < 0)
return;
}
#ifdef CONFIG_ISDN_PPP
- if (minor <= ISDN_MINOR_PPPMAX) {
+ if (minor <= ISDN_MINOR_PPPMAX)
isdn_ppp_release(minor - ISDN_MINOR_PPP, filep);
- }
#endif
}
(dev->chanmap[i] == ch)) {
dev->usage[i] &= (ISDN_USAGE_NONE | ISDN_USAGE_EXCLUSIVE);
strcpy(dev->num[i], "???");
+ dev->ibytes[i] = 0;
+ dev->obytes[i] = 0;
isdn_info_update();
restore_flags(flags);
return;
void isdn_receive_skb_callback(int drvidx, int chan, struct sk_buff *skb)
{
- int i;
+ int i, len;
if (dev->global_flags & ISDN_GLOBAL_STOPPED)
return;
if ((i = isdn_dc2minor(drvidx,chan))==-1)
return;
+ len = skb->len;
if (isdn_net_rcv_skb(i, skb) == 0) {
isdn_receive_callback(drvidx, chan, skb->data, skb->len);
skb->free = 1;
kfree_skb(skb, FREE_READ);
- }
+ } else
+ /* Update statistics */
+ dev->ibytes[i] += len;
}
/*
int isdn_writebuf_stub(int drvidx, int chan, const u_char *buf, int len,
int user)
{
- struct sk_buff * skb;
-
- skb = alloc_skb(dev->drv[drvidx]->interface->hl_hdrlen + len, GFP_ATOMIC);
- if (skb == NULL)
- return 0;
+ if (dev->drv[drvidx]->interface->writebuf)
+ return dev->drv[drvidx]->interface->writebuf(drvidx, chan, buf,
+ len, user);
+ else {
+ struct sk_buff * skb;
+
+ skb = alloc_skb(dev->drv[drvidx]->interface->hl_hdrlen + len, GFP_ATOMIC);
+ if (skb == NULL)
+ return 0;
- skb_reserve(skb, dev->drv[drvidx]->interface->hl_hdrlen);
- skb->free = 1;
+ skb_reserve(skb, dev->drv[drvidx]->interface->hl_hdrlen);
+ skb->free = 1;
- if (user)
- memcpy_fromfs(skb_put(skb, len), buf, len);
- else
- memcpy(skb_put(skb, len), buf, len);
+ if (user)
+ memcpy_fromfs(skb_put(skb, len), buf, len);
+ else
+ memcpy(skb_put(skb, len), buf, len);
- return dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, skb);
+ return dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, skb);
+ }
}
-
+
/*
- * Low-level-driver registration
+ * writebuf_skb replacement for NON SKB_ABLE drivers
+ * If lowlevel-device does not support supports skbufs, use
+ * standard send-routine, else sind directly.
+ *
+ * Return: length of data on success, -ERRcode on failure.
*/
+int isdn_writebuf_skb_stub(int drvidx, int chan, struct sk_buff * skb)
+{
+ int ret;
+
+ if (dev->drv[drvidx]->interface->writebuf_skb)
+ ret = dev->drv[drvidx]->interface->
+ writebuf_skb(drvidx, chan, skb);
+ else {
+ if ((ret = dev->drv[drvidx]->interface->
+ writebuf(drvidx,chan,skb->data,skb->len,0))==skb->len)
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ return ret;
+}
+
+/*
+ * Low-level-driver registration
+ */
int register_isdn(isdn_if * i)
{
return 0;
}
n = i->channels;
- if (dev->channels + n >= ISDN_MAX_CHANNELS) {
+ if (dev->channels + n > ISDN_MAX_CHANNELS) {
printk(KERN_WARNING "register_isdn: Max. %d channels supported\n",
ISDN_MAX_CHANNELS);
return 0;
}
+ if ((!i->writebuf_skb) && (!i->writebuf)) {
+ printk(KERN_WARNING "register_isdn: No write routine given.\n");
+ return 0;
+ }
if (!(d = (driver *) kmalloc(sizeof(driver), GFP_KERNEL))) {
printk(KERN_WARNING "register_isdn: Could not alloc driver-struct\n");
return 0;
if (!dev->drv[drvidx])
break;
i->channels = drvidx;
-
- if (i->writebuf_skb && (!i->writebuf))
- i->writebuf = isdn_writebuf_stub;
i->rcvcallb_skb = isdn_receive_skb_callback;
i->rcvcallb = isdn_receive_callback;
return -EIO;
}
memset((char *) dev, 0, sizeof(isdn_dev));
- for (i = 0; i < ISDN_MAX_DRIVERS; i++)
+ for (i = 0; i < ISDN_MAX_CHANNELS; i++)
dev->drvmap[i] = -1;
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
dev->chanmap[i] = -1;
tty_unregister_driver(&dev->mdm.tty_modem);
tty_unregister_driver(&dev->mdm.cua_modem);
for (i = 0; i < ISDN_MAX_CHANNELS; i++)
- kfree(dev->mdm.info[i].xmit_buf);
+ kfree(dev->mdm.info[i].xmit_buf - 4);
unregister_chrdev(ISDN_MAJOR, "isdn");
kfree(dev);
return -EIO;
-/* $Id: isdn_common.h,v 1.1 1996/01/10 21:37:19 fritz Exp fritz $
+/* $Id: isdn_common.h,v 1.2 1996/04/20 16:20:40 fritz Exp $
*
* header for Linux ISDN subsystem, common used functions and debugging-switches (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_common.h,v $
+ * Revision 1.2 1996/04/20 16:20:40 fritz
+ * Misc. typos.
+ *
* Revision 1.1 1996/01/10 21:37:19 fritz
* Initial revision
*
extern void isdn_timer_ctrl(int tf, int onoff);
extern void isdn_unexclusive_channel(int di, int ch);
extern int isdn_getnum(char **);
-extern int isdn_readbchan (int di, int channel, u_char *buf,
- u_char *fp, int len, int user);
-extern int isdn_get_free_channel(int usage, int l2_proto, int l3_proto,
- int pre_dev, int pre_chan);
+extern int isdn_readbchan (int, int, u_char *, u_char *, int, int);
+extern int isdn_get_free_channel(int, int, int, int, int);
+extern int isdn_writebuf_stub(int, int, const u_char *, int, int);
+extern int isdn_writebuf_skb_stub(int, int, struct sk_buff *);
#if defined(ISDN_DEBUG_NET_DUMP) || defined(ISDN_DEBUG_MODEM_DUMP)
extern void isdn_dumppkt(char *, u_char *, int, int);
#endif
-/* $Id: isdn_net.c,v 1.4 1996/02/19 15:23:38 fritz Exp fritz $
+/* $Id: isdn_net.c,v 1.5 1996/04/20 16:28:38 fritz Exp $
*
* Linux ISDN subsystem, network interfaces and related functions (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_net.c,v $
+ * Revision 1.5 1996/04/20 16:28:38 fritz
+ * Made more parameters of the dial statemachine user-configurable and
+ * added hangup after dial for more reliability using callback.
+ * Changed all io going through generic routines in isdn_common.c
+ * Added missing call to dev_free_skb on failed dialing.
+ * Added uihdlc encapsulation.
+ * Fixed isdn_net_setcfg not to destroy interface-flags anymore.
+ * Misc. typos.
+ *
* Revision 1.4 1996/02/19 15:23:38 fritz
* Bugfix: Sync-PPP packets got compressed twice, when resent due to
* send-queue-full reject.
static int isdn_net_start_xmit(struct sk_buff *, struct device *);
static int isdn_net_xmit(struct device *, isdn_net_local *, struct sk_buff *);
-char *isdn_net_revision = "$Revision: 1.4 $";
+char *isdn_net_revision = "$Revision: 1.5 $";
/*
* Code for raw-networking over ISDN
return 1;
case ISDN_STAT_DCONN:
/* D-Channel is up */
- if (lp->dialstate == 4 || lp->dialstate == 7
- || lp->dialstate == 8) {
- lp->dialstate++;
- return 1;
+ switch (lp->dialstate) {
+ case 4:
+ case 7:
+ case 8:
+ lp->dialstate++;
+ return 1;
+ case 12:
+ lp->dialstate = 5;
+ return 1;
}
break;
case ISDN_STAT_DHUP:
break;
case ISDN_STAT_BCONN:
/* B-Channel is up */
- if (lp->dialstate >= 5 && lp->dialstate <= 10) {
- if (lp->dialstate <= 6) {
- dev->usage[idx] |= ISDN_USAGE_OUTGOING;
- isdn_info_update();
- } else
- dev->rx_netdev[idx] = p;
- lp->dialstate = 0;
- isdn_timer_ctrl(ISDN_TIMER_NETHANGUP,1);
- printk(KERN_INFO "isdn_net: %s connected\n", lp->name);
- /* If first Chargeinfo comes before B-Channel connect,
- * we correct the timestamp here.
- */
- lp->chargetime = jiffies;
- /* Immediately send first skb to speed up arp */
- if (lp->first_skb) {
- if (!(isdn_net_xmit(&p->dev,lp,lp->first_skb)))
- lp->first_skb = NULL;
- }
- return 1;
+ switch (lp->dialstate) {
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 9:
+ case 10:
+ case 12:
+ if (lp->dialstate <= 6) {
+ dev->usage[idx] |= ISDN_USAGE_OUTGOING;
+ isdn_info_update();
+ } else
+ dev->rx_netdev[idx] = p;
+ lp->dialstate = 0;
+ isdn_timer_ctrl(ISDN_TIMER_NETHANGUP,1);
+ printk(KERN_INFO "isdn_net: %s connected\n", lp->name);
+ /* If first Chargeinfo comes before B-Channel connect,
+ * we correct the timestamp here.
+ */
+ lp->chargetime = jiffies;
+ /* Immediately send first skb to speed up arp */
+ if (lp->first_skb) {
+ if (!(isdn_net_xmit(&p->dev,lp,lp->first_skb)))
+ lp->first_skb = NULL;
+ }
+ return 1;
}
break;
case ISDN_STAT_NODCH:
isdn_ctrl cmd;
while (p) {
+#ifdef ISDN_DEBUG_NET_DIAL
+ if (p->local.dialstate)
+ printk(KERN_DEBUG "%s: dialstate=%d\n", p->local.name,p->local.dialstate);
+#endif
switch (p->local.dialstate) {
case 0:
/* Nothing to do for this interface */
p->local.huptimer = 0;
p->local.outgoing = 1;
p->local.hupflags |= 1;
+ p->local.hupflags &= ~2;
if (!strcmp(p->local.dial->num, "LEASED")) {
p->local.dialstate = 4;
printk(KERN_INFO "%s: Open leased line ...\n", p->local.name);
dev->drv[p->local.isdn_device]->interface->command(&cmd);
}
anymore = 1;
- p->local.dialstate++;
+ p->local.dialstate =
+ (p->local.cbdelay &&
+ (p->local.flags & ISDN_NET_CBOUT))?12:4;
break;
case 4:
- /* Wait for D-Channel-connect or incoming call, if passive
- * callback configured. If timeout and max retries not
+ /* Wait for D-Channel-connect.
+ * If timeout and max retries not
* reached, switch back to state 3.
*/
if (p->local.dtimer++ > ISDN_TIMER_DTIMEOUT10)
if (p->local.dialretry < p->local.dialmax) {
p->local.dialstate = 3;
} else
- isdn_net_hangup(&p->dev);
+ isdn_net_hangup(&p->dev);
anymore = 1;
break;
case 5:
p->local.dialstate++;
break;
case 8:
- case 10:
+ case 10:
/* Wait for B- or D-channel-connect */
#ifdef ISDN_DEBUG_NET_DIAL
printk(KERN_DEBUG "dialtimer4: %d\n", p->local.dtimer);
else
anymore = 1;
break;
+ case 11:
+ /* Callback Delay */
+ if (p->local.dtimer++ > p->local.cbdelay)
+ p->local.dialstate = 1;
+ anymore = 1;
+ break;
+ case 12:
+ /* Remote does callback. Hangup after cbdelay, then wait for incoming
+ * call (in state 4).
+ */
+ if (p->local.dtimer++ > p->local.cbdelay) {
+ printk(KERN_INFO "%s: hangup waiting for callback ...\n", p->local.name);
+ p->local.dtimer = 0;
+ p->local.dialstate = 4;
+ cmd.driver = p->local.isdn_device;
+ cmd.command = ISDN_CMD_HANGUP;
+ cmd.arg = p->local.isdn_channel;
+ (void) dev->drv[cmd.driver]->interface->command(&cmd);
+ isdn_all_eaz(p->local.isdn_device, p->local.isdn_channel);
+ }
+ anymore = 1;
+ break;
default:
printk(KERN_WARNING "isdn_net: Illegal dialstate %d for device %s\n",
p->local.dialstate, p->local.name);
isdn_timer_ctrl(ISDN_TIMER_NETDIAL, anymore);
}
-/*
- * Send-data-helpfunction for net-interfaces
- */
-int
-isdn_net_send(u_char * buf, int di, int ch, int len)
-{
- int l;
-
- if ((l = dev->drv[di]->interface->writebuf(di, ch, buf, len, 0)) == len)
- return 1;
- /* Device driver queue full (or packet > 4000 bytes, should never
- * happen)
- */
- if (l == -EINVAL)
- printk(KERN_ERR "isdn_net: Huh, sending pkt too big!\n");
- return 0;
-}
-
/*
* Perform hangup for a net-interface.
*/
save_flags(flags);
cli();
+ if (lp->first_skb) {
+ dev_kfree_skb(lp->first_skb,FREE_WRITE);
+ lp->first_skb = NULL;
+ }
if (lp->flags & ISDN_NET_CONNECTED) {
printk(KERN_INFO "isdn_net: local hangup %s\n", lp->name);
lp->dialstate = 0;
len = 4;
#ifdef CONFIG_ISDN_MPP
if (lp->ppp_minor!=-1) {
- if (ippp_table[lp->ppp_minor].mpppcfg &
+ if (ippp_table[lp->ppp_minor]->mpppcfg &
SC_MP_PROT) {
- if (ippp_table[lp->ppp_minor].mpppcfg &
+ if (ippp_table[lp->ppp_minor]->mpppcfg &
SC_OUT_SHORT_SEQ)
len = 7;
else
int ret;
lp->transcount += skb->len;
- if (dev->drv[lp->isdn_device]->interface->writebuf_skb)
- ret = dev->drv[lp->isdn_device]->interface->
- writebuf_skb(lp->isdn_device, lp->isdn_channel, skb);
- else {
- if ((ret = isdn_net_send(skb->data, lp->isdn_device,
- lp->isdn_channel, skb->len)))
- dev_kfree_skb(skb, FREE_WRITE);
- }
-
- if (ret)
+ ret = isdn_writebuf_skb_stub(lp->isdn_device, lp->isdn_channel, skb);
+ if (ret == skb->len)
clear_bit(0, (void *)&(ndev->tbusy));
return (!ret);
}
/* remember first skb to speed up arp
* when using encap ETHER
*/
+ if (lp->first_skb) {
+ printk(KERN_WARNING "isdn_net_start_xmit: First skb already set!\n");
+ dev_kfree_skb(lp->first_skb,FREE_WRITE);
+ lp->first_skb = NULL;
+ }
lp->first_skb = skb;
/* Initiate dialing */
isdn_net_dial();
isdn_dumppkt("R:", skb->data, skb->len, 40);
#endif
switch (lp->p_encap) {
- case ISDN_NET_ENCAP_ETHER:
- /* Ethernet over ISDN */
- skb->protocol = isdn_net_type_trans(skb,ndev);
- break;
- case ISDN_NET_ENCAP_RAWIP:
- /* RAW-IP without MAC-Header */
- skb->protocol = htons(ETH_P_IP);
- break;
- case ISDN_NET_ENCAP_CISCOHDLC:
- /* CISCO-HDLC IP with type field and fake I-frame-header */
- skb_pull(skb, 2);
- /* Fall through */
- case ISDN_NET_ENCAP_IPTYP:
- /* IP with type field */
- skb->protocol = *(unsigned short *)&(skb->data[0]);
- skb_pull(skb, 2);
- if (*(unsigned short *)skb->data == 0xFFFF)
- skb->protocol = htons(ETH_P_802_3);
- break;
+ case ISDN_NET_ENCAP_ETHER:
+ /* Ethernet over ISDN */
+ skb->protocol = isdn_net_type_trans(skb,ndev);
+ break;
+ case ISDN_NET_ENCAP_UIHDLC:
+ /* HDLC with UI-frame (for ispa with -h1 option) */
+ skb_pull(skb,2);
+ /* Fall through */
+ case ISDN_NET_ENCAP_RAWIP:
+ /* RAW-IP without MAC-Header */
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case ISDN_NET_ENCAP_CISCOHDLC:
+ /* CISCO-HDLC IP with type field and fake I-frame-header */
+ skb_pull(skb, 2);
+ /* Fall through */
+ case ISDN_NET_ENCAP_IPTYP:
+ /* IP with type field */
+ skb->protocol = *(unsigned short *)&(skb->data[0]);
+ skb_pull(skb, 2);
+ if (*(unsigned short *)skb->data == 0xFFFF)
+ skb->protocol = htons(ETH_P_802_3);
+ break;
#ifdef CONFIG_ISDN_PPP
- case ISDN_NET_ENCAP_SYNCPPP:
- isdn_ppp_receive(lp->netdev, olp, skb);
- return;
+ case ISDN_NET_ENCAP_SYNCPPP:
+ isdn_ppp_receive(lp->netdev, olp, skb);
+ return;
#endif
- default:
- printk(KERN_WARNING "%s: unknown encapsulation, dropping\n",
- lp->name);
- kfree_skb(skb,FREE_READ);
- return;
+ default:
+ printk(KERN_WARNING "%s: unknown encapsulation, dropping\n",
+ lp->name);
+ kfree_skb(skb,FREE_READ);
+ return;
}
netif_rx(skb);
return;
*((ushort*) skb_push(skb, 2)) = htons(type);
len = 2;
break;
+ case ISDN_NET_ENCAP_UIHDLC:
+ /* HDLC with UI-Frames (for ispa with -h1 option) */
+ *((ushort*) skb_push(skb, 2)) = htons(0x0103);
+ len = 2;
+ break;
case ISDN_NET_ENCAP_CISCOHDLC:
skb_push(skb, 4);
skb->data[0] = 0x0f;
len = 4;
#ifdef CONFIG_ISDN_MPP
if (lp->ppp_minor!=-1) {
- if (ippp_table[lp->ppp_minor].mpppcfg &
+ if (ippp_table[lp->ppp_minor]->mpppcfg &
SC_MP_PROT) {
- if (ippp_table[lp->ppp_minor].mpppcfg &
+ if (ippp_table[lp->ppp_minor]->mpppcfg &
SC_OUT_SHORT_SEQ)
len = 7;
else
*
* Return-Value: 0 = No appropriate interface for this call.
* 1 = Call accepted
- * 2 = Do callback
+ * 2 = Reject call, wait cbdelay, then call back
+ * 3 = Reject call
+ * 4 = Wait cbdelay, then call back
*/
int
isdn_net_find_icall(int di, int ch, int idx, char *num)
printk(KERN_DEBUG "n_fi: if='%s', l.msn=%s, l.flags=%d, l.dstate=%d\n",
p->local.name, p->local.msn, p->local.flags, p->local.dialstate);
#endif
- if ((!strcmp(isdn_map_eaz2msn(p->local.msn, di), eaz)) && /* EAZ is matching */
- (((!(p->local.flags & ISDN_NET_CONNECTED)) && /* but not connected */
- (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */
- (((p->local.dialstate == 4) && /* if dialing */
- (!(p->local.flags & ISDN_NET_CALLBACK))) /* but no callback */
+ if ((!strcmp(isdn_map_eaz2msn(p->local.msn, di), eaz)) && /* EAZ is matching */
+ (((!(p->local.flags & ISDN_NET_CONNECTED)) && /* but not connected */
+ (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */
+ ((((p->local.dialstate == 4) || (p->local.dialstate == 12)) && /* if dialing */
+ (!(p->local.flags & ISDN_NET_CALLBACK))) /* but no callback */
))) {
#ifdef ISDN_DEBUG_NET_ICALL
printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n",
continue;
}
}
- }
+ } /* if (dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) */
#ifdef ISDN_DEBUG_NET_ICALL
printk(KERN_DEBUG "n_fi: match2\n");
#endif
return 0;
}
/* Setup dialstate. */
- lp->dialstate = 1;
+ lp->dtimer = 0;
+ lp->dialstate = 11;
lp->flags |= ISDN_NET_CONNECTED;
/* Connect interface with channel */
isdn_net_bind_channel(lp, chi);
return 0;
}
#endif
- /* Initiate dialing by returning 2 */
+ /* Initiate dialing by returning 2 or 4 */
restore_flags(flags);
- return 2;
+ return (lp->flags & ISDN_NET_CBHUP)?2:4;
} else
printk(KERN_WARNING "isdn_net: %s: No phone number\n", lp->name);
restore_flags(flags);
#endif
/* if this interface is dialing, it does it probably on a different
device, so free this device */
- if (p->local.dialstate == 4)
+ if ((p->local.dialstate == 4) || (p->local.dialstate == 12))
isdn_free_channel(p->local.isdn_device, p->local.isdn_channel,
ISDN_USAGE_NET);
dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE;
p->local.outgoing = 0;
p->local.huptimer = 0;
p->local.hupflags |= 1;
+ p->local.hupflags &= ~2;
#ifdef CONFIG_ISDN_PPP
if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
if (isdn_ppp_bind(lp) < 0) {
* Allocate a new network-interface and initialize it's data structures.
*/
char *
- isdn_net_new(char *name, struct device *master)
+isdn_net_new(char *name, struct device *master)
{
isdn_net_dev *netdev;
netdev->local.hupflags = 8; /* Do hangup even on incoming calls */
netdev->local.onhtime = 10; /* Default hangup-time for saving costs
of those who forget configuring this */
- /* The following should be configurable via ioctl */
netdev->local.dialmax = 1;
+ netdev->local.flags = ISDN_NET_CBHUP; /* Hangup before Callback */
+ netdev->local.cbdelay = 25; /* Wait 5 secs before Callback */
/* Put into to netdev-chain */
netdev->next = (void *) dev->netdev;
dev->netdev = netdev;
}
char *
- isdn_net_newslave(char *parm)
+isdn_net_newslave(char *parm)
{
char *p = strchr(parm, ',');
isdn_net_dev *n;
printk(KERN_WARNING "isdn_net: No driver with selected features\n");
return -ENODEV;
}
- if ((p->local.p_encap != cfg->p_encap) &&
- ((p->local.p_encap == ISDN_NET_ENCAP_RAWIP) ||
- (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) ))
+ if (p->local.p_encap != cfg->p_encap)
if (p->dev.start) {
printk(KERN_WARNING
"%s: cannot change encap when if is up\n",
p->local.charge = cfg->charge;
p->local.l2_proto = cfg->l2_proto;
p->local.l3_proto = cfg->l3_proto;
+ p->local.cbdelay = cfg->cbdelay;
+ p->local.dialmax = cfg->dialmax;
p->local.slavedelay = cfg->slavedelay * HZ;
- p->local.p_encap = cfg->p_encap;
if (cfg->secure)
p->local.flags |= ISDN_NET_SECURE;
else
p->local.flags &= ~ISDN_NET_SECURE;
- if (cfg->callback)
- p->local.flags |= ISDN_NET_CALLBACK;
+ if (cfg->cbhup)
+ p->local.flags |= ISDN_NET_CBHUP;
else
- p->local.flags &= ~ISDN_NET_CALLBACK;
+ p->local.flags &= ~ISDN_NET_CBHUP;
+ switch (cfg->callback) {
+ case 0:
+ p->local.flags &= ~(ISDN_NET_CALLBACK|ISDN_NET_CBOUT);
+ break;
+ case 1:
+ p->local.flags |= ISDN_NET_CALLBACK;
+ p->local.flags &= ~ISDN_NET_CBOUT;
+ break;
+ case 2:
+ p->local.flags |= ISDN_NET_CBOUT;
+ p->local.flags &= ~ISDN_NET_CALLBACK;
+ break;
+ }
if (cfg->chargehup)
p->local.hupflags |= 4;
else
p->local.hupflags |= 8;
else
p->local.hupflags &= ~8;
- if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) {
- p->dev.hard_header = NULL;
- p->dev.header_cache_bind = NULL;
- p->dev.header_cache_update = NULL;
- p->dev.flags = IFF_NOARP;
- } else {
- p->dev.hard_header = isdn_net_header;
- if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) {
- p->dev.header_cache_bind = p->local.org_hcb;
- p->dev.header_cache_update = p->local.org_hcu;
- p->dev.flags = IFF_BROADCAST | IFF_MULTICAST;
- } else {
+ if (cfg->p_encap != p->local.p_encap) {
+ if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) {
+ p->dev.hard_header = NULL;
p->dev.header_cache_bind = NULL;
p->dev.header_cache_update = NULL;
p->dev.flags = IFF_NOARP;
+ } else {
+ p->dev.hard_header = isdn_net_header;
+ if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) {
+ p->dev.header_cache_bind = p->local.org_hcb;
+ p->dev.header_cache_update = p->local.org_hcu;
+ p->dev.flags = IFF_BROADCAST | IFF_MULTICAST;
+ } else {
+ p->dev.header_cache_bind = NULL;
+ p->dev.header_cache_update = NULL;
+ p->dev.flags = IFF_NOARP;
+ }
}
}
+ p->local.p_encap = cfg->p_encap;
return 0;
}
return -ENODEV;
cfg->p_encap = p->local.p_encap;
cfg->secure = (p->local.flags & ISDN_NET_SECURE) ? 1 : 0;
cfg->callback = (p->local.flags & ISDN_NET_CALLBACK) ? 1 : 0;
+ if (p->local.flags & ISDN_NET_CBOUT)
+ cfg->callback = 2;
+ cfg->cbhup = (p->local.flags & ISDN_NET_CBHUP) ? 1 : 0;
cfg->chargehup = (p->local.hupflags & 4) ? 1 : 0;
cfg->ihup = (p->local.hupflags & 8) ? 1 : 0;
+ cfg->cbdelay = p->local.cbdelay;
+ cfg->dialmax = p->local.dialmax;
cfg->slavedelay = p->local.slavedelay / HZ;
if (p->local.slave)
strcpy(cfg->slave, ((isdn_net_local *) p->local.slave->priv)->name);
n = n->next;
more = 1;
}
+ put_fs_byte(0,phones);
restore_flags(flags);
return count;
}
-/* $Id: isdn_net.h,v 1.1 1996/02/11 02:35:13 fritz Exp fritz $
+/* $Id: isdn_net.h,v 1.2 1996/04/20 16:29:43 fritz Exp $
*
* header for Linux ISDN subsystem, network related functions (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_net.h,v $
+ * Revision 1.2 1996/04/20 16:29:43 fritz
+ * Misc. typos
+ *
* Revision 1.1 1996/02/11 02:35:13 fritz
* Initial revision
*
-/* $Id: isdn_ppp.c,v 1.4 1996/02/19 15:25:50 fritz Exp fritz $
+/* $Id: isdn_ppp.c,v 1.5 1996/04/20 16:32:32 fritz Exp $
*
* Linux ISDN subsystem, functions for synchronous PPP (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_ppp.c,v $
+ * Revision 1.5 1996/04/20 16:32:32 fritz
+ * Changed ippp_table to an array of pointers, allocating each part
+ * separately.
+ *
* Revision 1.4 1996/02/19 15:25:50 fritz
* Bugfix: Sync-PPP packets got compressed twice, when resent due to
* send-queue-full reject.
int BEbyte, int *sqno, int min_sqno);
#endif
-char *isdn_ppp_revision = "$Revision: 1.4 $";
-struct ippp_struct *ippp_table = (struct ippp_struct *) 0;
+char *isdn_ppp_revision = "$Revision: 1.5 $";
+struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];
extern int isdn_net_force_dial_lp(isdn_net_local *);
isdn_ppp_hangup(lp->ppp_minor);
#if 0
- printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_minor, (long) lp,(long) ippp_table[lp->ppp_minor].lp);
+ printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n", lp->ppp_minor, (long) lp,(long) ippp_table[lp->ppp_minor]->lp);
#endif
- ippp_table[lp->ppp_minor].lp = NULL;
+ ippp_table[lp->ppp_minor]->lp = NULL;
return 0;
}
* search a free device
*/
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- if (ippp_table[i].state == IPPP_OPEN) { /* OPEN, but not connected! */
+ if (ippp_table[i]->state == IPPP_OPEN) { /* OPEN, but not connected! */
#if 0
printk(KERN_DEBUG "find_minor, %d lp: %08lx\n", i, (long) lp);
#endif
return -1;
}
lp->ppp_minor = i;
- ippp_table[lp->ppp_minor].lp = lp;
+ ippp_table[lp->ppp_minor]->lp = lp;
name = lp->name;
unit = isdn_ppp_if_get_unit(&name); /* get unit number from interface name .. ugly! */
- ippp_table[lp->ppp_minor].unit = unit;
+ ippp_table[lp->ppp_minor]->unit = unit;
- ippp_table[lp->ppp_minor].state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
+ ippp_table[lp->ppp_minor]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
restore_flags(flags);
/*
* kick the ipppd on the new device
*/
- if (ippp_table[lp->ppp_minor].wq)
- wake_up_interruptible(&ippp_table[lp->ppp_minor].wq);
+ if (ippp_table[lp->ppp_minor]->wq)
+ wake_up_interruptible(&ippp_table[lp->ppp_minor]->wq);
return lp->ppp_minor;
}
if (minor < 0 || minor >= ISDN_MAX_CHANNELS)
return 0;
- if (ippp_table[minor].state && ippp_table[minor].wq)
- wake_up_interruptible(&ippp_table[minor].wq);
+ if (ippp_table[minor]->state && ippp_table[minor]->wq)
+ wake_up_interruptible(&ippp_table[minor]->wq);
- ippp_table[minor].state = IPPP_CLOSEWAIT;
+ ippp_table[minor]->state = IPPP_CLOSEWAIT;
return 1;
}
int isdn_ppp_open(int minor, struct file *file)
{
#if 0
- printk(KERN_DEBUG "ippp, open, minor: %d state: %04x\n", minor,ippp_table[minor].state);
+ printk(KERN_DEBUG "ippp, open, minor: %d state: %04x\n", minor,ippp_table[minor]->state);
#endif
- if (ippp_table[minor].state)
+ if (ippp_table[minor]->state)
return -EBUSY;
- ippp_table[minor].lp = 0;
- ippp_table[minor].mp_seqno = 0; /* MP sequence number */
- ippp_table[minor].pppcfg = 0; /* ppp configuration */
- ippp_table[minor].mpppcfg = 0; /* mppp configuration */
- ippp_table[minor].range = 0x1000000; /* MP: 24 bit range */
- ippp_table[minor].last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */
- ippp_table[minor].unit = -1; /* set, when we have our interface */
- ippp_table[minor].mru = 1524; /* MRU, default 1524 */
- ippp_table[minor].maxcid = 16; /* VJ: maxcid */
- ippp_table[minor].tk = current;
- ippp_table[minor].wq = NULL; /* read() wait queue */
- ippp_table[minor].wq1 = NULL; /* select() wait queue */
- ippp_table[minor].first = ippp_table[minor].rq + NUM_RCV_BUFFS - 1; /* receive queue */
- ippp_table[minor].last = ippp_table[minor].rq;
+ ippp_table[minor]->lp = 0;
+ ippp_table[minor]->mp_seqno = 0; /* MP sequence number */
+ ippp_table[minor]->pppcfg = 0; /* ppp configuration */
+ ippp_table[minor]->mpppcfg = 0; /* mppp configuration */
+ ippp_table[minor]->range = 0x1000000; /* MP: 24 bit range */
+ ippp_table[minor]->last_link_seqno = -1; /* MP: maybe set to Bundle-MIN, when joining a bundle ?? */
+ ippp_table[minor]->unit = -1; /* set, when we have our interface */
+ ippp_table[minor]->mru = 1524; /* MRU, default 1524 */
+ ippp_table[minor]->maxcid = 16; /* VJ: maxcid */
+ ippp_table[minor]->tk = current;
+ ippp_table[minor]->wq = NULL; /* read() wait queue */
+ ippp_table[minor]->wq1 = NULL; /* select() wait queue */
+ ippp_table[minor]->first = ippp_table[minor]->rq + NUM_RCV_BUFFS - 1; /* receive queue */
+ ippp_table[minor]->last = ippp_table[minor]->rq;
#ifdef CONFIG_ISDN_PPP_VJ
/*
* VJ header compression init
*/
- ippp_table[minor].cbuf = kmalloc(ippp_table[minor].mru + PPP_HARD_HDR_LEN + 2, GFP_KERNEL);
+ ippp_table[minor]->cbuf = kmalloc(ippp_table[minor]->mru + PPP_HARD_HDR_LEN + 2, GFP_KERNEL);
- if (ippp_table[minor].cbuf == NULL) {
+ if (ippp_table[minor]->cbuf == NULL) {
printk(KERN_DEBUG "ippp: Can't allocate memory buffer for VJ compression.\n");
return -ENOMEM;
}
- ippp_table[minor].slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
+ ippp_table[minor]->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
#endif
- ippp_table[minor].state = IPPP_OPEN;
+ ippp_table[minor]->state = IPPP_OPEN;
return 0;
}
return;
#if 0
- printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", minor, (long) ippp_table[minor].lp);
+ printk(KERN_DEBUG "ippp: release, minor: %d %lx\n", minor, (long) ippp_table[minor]->lp);
#endif
- if (ippp_table[minor].lp) { /* a lp address says: this link is still up */
+ if (ippp_table[minor]->lp) { /* a lp address says: this link is still up */
isdn_net_dev *p = dev->netdev;
while(p) { /* find interface for our lp; */
- if(&p->local == ippp_table[minor].lp)
+ if(&p->local == ippp_table[minor]->lp)
break;
p = p->next;
}
if(!p) {
printk(KERN_ERR "isdn_ppp_release: Can't find device for net_local\n");
- p = ippp_table[minor].lp->netdev;
+ p = ippp_table[minor]->lp->netdev;
}
- ippp_table[minor].lp->ppp_minor = -1;
+ ippp_table[minor]->lp->ppp_minor = -1;
isdn_net_hangup(&p->dev); /* lp->ppp_minor==-1 => no calling of isdn_ppp_hangup() */
- ippp_table[minor].lp = NULL;
+ ippp_table[minor]->lp = NULL;
}
for (i = 0; i < NUM_RCV_BUFFS; i++) {
- if (ippp_table[minor].rq[i].buf)
- kfree(ippp_table[minor].rq[i].buf);
+ if (ippp_table[minor]->rq[i].buf)
+ kfree(ippp_table[minor]->rq[i].buf);
}
#ifdef CONFIG_ISDN_PPP_VJ
- slhc_free(ippp_table[minor].slcomp);
- kfree(ippp_table[minor].cbuf);
+ slhc_free(ippp_table[minor]->slcomp);
+ kfree(ippp_table[minor]->cbuf);
#endif
- ippp_table[minor].state = 0;
+ ippp_table[minor]->state = 0;
}
static int get_arg(void *b, unsigned long *val)
#if 0
printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x",minor,cmd);
- printk(KERN_DEBUG " state: %x\n",ippp_table[minor].state);
+ printk(KERN_DEBUG " state: %x\n",ippp_table[minor]->state);
#endif
- if (!(ippp_table[minor].state & IPPP_OPEN))
+ if (!(ippp_table[minor]->state & IPPP_OPEN))
return -EINVAL;
switch (cmd) {
if ((r = get_arg((void *) arg, &val)))
return r;
printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n",
- (int) minor, (int) ippp_table[minor].unit, (int) val);
+ (int) minor, (int) ippp_table[minor]->unit, (int) val);
return isdn_ppp_bundle(minor, val);
#else
return -1;
#endif
break;
case PPPIOCGUNIT: /* get ppp/isdn unit number */
- if ((r = set_arg((void *) arg, ippp_table[minor].unit)))
+ if ((r = set_arg((void *) arg, ippp_table[minor]->unit)))
return r;
break;
case PPPIOCGMPFLAGS: /* get configuration flags */
- if ((r = set_arg((void *) arg, ippp_table[minor].mpppcfg)))
+ if ((r = set_arg((void *) arg, ippp_table[minor]->mpppcfg)))
return r;
break;
case PPPIOCSMPFLAGS: /* set configuration flags */
if ((r = get_arg((void *) arg, &val)))
return r;
- ippp_table[minor].mpppcfg = val;
+ ippp_table[minor]->mpppcfg = val;
break;
case PPPIOCGFLAGS: /* get configuration flags */
- if ((r = set_arg((void *) arg, ippp_table[minor].pppcfg)))
+ if ((r = set_arg((void *) arg, ippp_table[minor]->pppcfg)))
return r;
break;
case PPPIOCSFLAGS: /* set configuration flags */
if ((r = get_arg((void *) arg, &val))) {
return r;
}
- if (val & SC_ENABLE_IP && !(ippp_table[minor].pppcfg & SC_ENABLE_IP)) {
- ippp_table[minor].lp->netdev->dev.tbusy = 0;
+ if (val & SC_ENABLE_IP && !(ippp_table[minor]->pppcfg & SC_ENABLE_IP)) {
+ ippp_table[minor]->lp->netdev->dev.tbusy = 0;
mark_bh(NET_BH); /* OK .. we are ready to send the first buffer */
}
- ippp_table[minor].pppcfg = val;
+ ippp_table[minor]->pppcfg = val;
break;
#if 0
case PPPIOCGSTAT: /* read PPP statistic information */
case PPPIOCSMRU: /* set receive unit size for PPP */
if ((r = get_arg((void *) arg, &val)))
return r;
- ippp_table[minor].mru = val;
+ ippp_table[minor]->mru = val;
break;
case PPPIOCSMPMRU:
break;
case PPPIOCSMAXCID: /* set the maximum compression slot id */
if ((r = get_arg((void *) arg, &val)))
return r;
- ippp_table[minor].maxcid = val;
+ ippp_table[minor]->maxcid = val;
break;
case PPPIOCGDEBUG:
break;
printk(KERN_DEBUG "isdn_ppp_select: minor: %d, type: %d \n",minor,type);
#endif
- if (!(ippp_table[minor].state & IPPP_OPEN))
+ if (!(ippp_table[minor]->state & IPPP_OPEN))
return -EINVAL;
switch (type) {
case SEL_IN:
save_flags(flags);
cli();
- bl = ippp_table[minor].last;
- bf = ippp_table[minor].first;
- if (bf->next == bl && !(ippp_table[minor].state & IPPP_NOBLOCK)) {
- select_wait(&ippp_table[minor].wq, st);
+ bl = ippp_table[minor]->last;
+ bf = ippp_table[minor]->first;
+ if (bf->next == bl && !(ippp_table[minor]->state & IPPP_NOBLOCK)) {
+ select_wait(&ippp_table[minor]->wq, st);
restore_flags(flags);
return 0;
}
- ippp_table[minor].state &= ~IPPP_NOBLOCK;
+ ippp_table[minor]->state &= ~IPPP_NOBLOCK;
restore_flags(flags);
return 1;
case SEL_OUT:
/* we're always ready to send .. */
return 1;
case SEL_EX:
- select_wait(&ippp_table[minor].wq1, st);
+ select_wait(&ippp_table[minor]->wq1, st);
return 0;
}
return 1;
printk(KERN_WARNING "ippp: illegal minor.\n");
return 0;
}
- if (!(ippp_table[minor].state & IPPP_CONNECT)) {
+ if (!(ippp_table[minor]->state & IPPP_CONNECT)) {
printk(KERN_DEBUG "ippp: device not activated.\n");
return 0;
}
save_flags(flags);
cli();
- bf = ippp_table[minor].first;
- bl = ippp_table[minor].last;
+ bf = ippp_table[minor]->first;
+ bl = ippp_table[minor]->last;
if (bf == bl) {
printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n");
bf = bf->next;
kfree(bf->buf);
- ippp_table[minor].first = bf;
+ ippp_table[minor]->first = bf;
}
bl->buf = (char *) kmalloc(len, GFP_ATOMIC);
if (!bl->buf) {
memcpy(bl->buf, buf, len);
- ippp_table[minor].last = bl->next;
+ ippp_table[minor]->last = bl->next;
restore_flags(flags);
- if (ippp_table[minor].wq)
- wake_up_interruptible(&ippp_table[minor].wq);
+ if (ippp_table[minor]->wq)
+ wake_up_interruptible(&ippp_table[minor]->wq);
return len;
}
int isdn_ppp_read(int minor, struct file *file, char *buf, int count)
{
- struct ippp_struct *c = &ippp_table[minor];
+ struct ippp_struct *c = ippp_table[minor];
struct ippp_buf_queue *b;
int r;
unsigned long flags;
- if (!(ippp_table[minor].state & IPPP_OPEN))
+ if (!(ippp_table[minor]->state & IPPP_OPEN))
return 0;
if ((r = verify_area(VERIFY_WRITE, (void *) buf, count)))
{
isdn_net_local *lp;
- if (!(ippp_table[minor].state & IPPP_CONNECT))
+ if (!(ippp_table[minor]->state & IPPP_CONNECT))
return 0;
- lp = ippp_table[minor].lp;
+ lp = ippp_table[minor]->lp;
/* -> push it directly to the lowlevel interface */
if (dev->drv[lp->isdn_device]->running && lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED))
- dev->drv[lp->isdn_device]->interface->writebuf(
- lp->isdn_device,lp->isdn_channel, buf, count, 1);
+ isdn_writebuf_stub(lp->isdn_device,lp->isdn_channel,
+ buf, count, 1);
}
return count;
{
int i, j;
- if (!(ippp_table = (struct ippp_struct *)
- kmalloc(sizeof(struct ippp_struct) * ISDN_MAX_CHANNELS, GFP_KERNEL))) {
- printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
- return -1;
- }
- memset((char *) ippp_table, 0, sizeof(struct ippp_struct) * ISDN_MAX_CHANNELS);
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
- ippp_table[i].state = 0;
- ippp_table[i].first = ippp_table[i].rq + NUM_RCV_BUFFS - 1;
- ippp_table[i].last = ippp_table[i].rq;
+ if (!(ippp_table[i] = (struct ippp_struct *)
+ kmalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
+ printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
+ for (j = 0; j < i; j++)
+ kfree(ippp_table[j]);
+ return -1;
+ }
+ memset((char *) ippp_table[i], 0, sizeof(struct ippp_struct));
+ ippp_table[i]->state = 0;
+ ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
+ ippp_table[i]->last = ippp_table[i]->rq;
for (j = 0; j < NUM_RCV_BUFFS; j++) {
- ippp_table[i].rq[j].buf = NULL;
- ippp_table[i].rq[j].last = ippp_table[i].rq +
+ ippp_table[i]->rq[j].buf = NULL;
+ ippp_table[i]->rq[j].last = ippp_table[i]->rq +
(NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
- ippp_table[i].rq[j].next = ippp_table[i].rq + (j + 1) % NUM_RCV_BUFFS;
+ ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
}
}
return 0;
void isdn_ppp_cleanup(void)
{
- kfree(ippp_table);
+ int i;
+
+ for (i = 0; i < ISDN_MAX_CHANNELS; i++)
+ kfree(ippp_table[i]);
}
/*
if(skb->data[0] == 0xff && skb->data[1] == 0x03)
skb_pull(skb,2);
- else if (ippp_table[lp->ppp_minor].pppcfg & SC_REJ_COMP_AC)
+ else if (ippp_table[lp->ppp_minor]->pppcfg & SC_REJ_COMP_AC)
return; /* discard it silently */
#ifdef CONFIG_ISDN_MPP
- if (!(ippp_table[lp->ppp_minor].mpppcfg & SC_REJ_MP_PROT)) {
+ if (!(ippp_table[lp->ppp_minor]->mpppcfg & SC_REJ_MP_PROT)) {
int proto;
int sqno_end;
if (skb->data[0] & 0x1) {
(int) skb->len, (int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
(int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
#endif
- if (!(ippp_table[lp->ppp_minor].mpppcfg & SC_IN_SHORT_SEQ)) {
+ if (!(ippp_table[lp->ppp_minor]->mpppcfg & SC_IN_SHORT_SEQ)) {
sqno = ((int) skb->data[1] << 16) + ((int) skb->data[2] << 8) + (int) skb->data[3];
skb_pull(skb,4);
} else {
skb_pull(skb,2);
}
- if ((tseq = ippp_table[lp->ppp_minor].last_link_seqno) >= sqno) {
- int range = ippp_table[lp->ppp_minor].range;
+ if ((tseq = ippp_table[lp->ppp_minor]->last_link_seqno) >= sqno) {
+ int range = ippp_table[lp->ppp_minor]->range;
if (tseq + 1024 < range + sqno) /* redundancy check .. not MP conform */
printk(KERN_WARNING "isdn_ppp_receive, MP, detected overflow with sqno: %d, last: %d !!!\n", sqno, tseq);
else {
sqno += range;
- ippp_table[lp->ppp_minor].last_link_seqno = sqno;
+ ippp_table[lp->ppp_minor]->last_link_seqno = sqno;
}
} else
- ippp_table[lp->ppp_minor].last_link_seqno = sqno;
+ ippp_table[lp->ppp_minor]->last_link_seqno = sqno;
for (min_sqno = 0, lpq = net_dev->queue;;) {
- if (ippp_table[lpq->ppp_minor].last_link_seqno > min_sqno)
- min_sqno = ippp_table[lpq->ppp_minor].last_link_seqno;
+ if (ippp_table[lpq->ppp_minor]->last_link_seqno > min_sqno)
+ min_sqno = ippp_table[lpq->ppp_minor]->last_link_seqno;
lpq = lpq->next;
if (lpq == net_dev->queue)
break;
}
- if (min_sqno >= ippp_table[lpq->ppp_minor].range) { /* OK, every link overflowed */
- int mask = ippp_table[lpq->ppp_minor].range - 1; /* range is a power of 2 */
+ if (min_sqno >= ippp_table[lpq->ppp_minor]->range) { /* OK, every link overflowed */
+ int mask = ippp_table[lpq->ppp_minor]->range - 1; /* range is a power of 2 */
isdn_ppp_cleanup_queue(net_dev, min_sqno);
isdn_ppp_mask_queue(net_dev, mask);
net_dev->ib.next_num &= mask;
}
min_sqno &= mask;
for (lpq = net_dev->queue;;) {
- ippp_table[lpq->ppp_minor].last_link_seqno &= mask;
+ ippp_table[lpq->ppp_minor]->last_link_seqno &= mask;
lpq = lpq->next;
if (lpq == net_dev->queue)
break;
break;
#ifdef CONFIG_ISDN_PPP_VJ
case PPP_VJC_UNCOMP:
- slhc_remember(ippp_table[net_dev->local.ppp_minor].slcomp, skb->data, skb->len);
+ slhc_remember(ippp_table[net_dev->local.ppp_minor]->slcomp, skb->data, skb->len);
#endif
case PPP_IP:
skb->dev = dev;
skb_put(skb,skb_old->len + 40);
memcpy(skb->data, skb_old->data, skb_old->len);
skb->mac.raw = skb->data;
- pkt_len = slhc_uncompress(ippp_table[net_dev->local.ppp_minor].slcomp,
+ pkt_len = slhc_uncompress(ippp_table[net_dev->local.ppp_minor]->slcomp,
skb->data, skb_old->len);
skb_trim(skb, pkt_len);
dev_kfree_skb(skb_old,FREE_WRITE);
isdn_net_dev *nd = ((isdn_net_local *) dev->priv)->netdev;
isdn_net_local *lp = nd->queue;
int proto = PPP_IP; /* 0x21 */
- struct ippp_struct *ipt = ippp_table + lp->ppp_minor;
- struct ippp_struct *ipts = ippp_table + lp->netdev->local.ppp_minor;
+ struct ippp_struct *ipt = ippp_table[lp->ppp_minor];
+ struct ippp_struct *ipts = ippp_table[lp->netdev->local.ppp_minor];
/* If packet is to be resent, it has already been processed and
* therefore it's first bytes are already initialized. In this case
save_flags(flags);
cli();
- nlp = ippp_table[minor].lp;
+ nlp = ippp_table[minor]->lp;
lp = p->queue;
p->ib.bundled = 1;
nlp->netdev = lp->netdev;
- ippp_table[nlp->ppp_minor].unit = ippp_table[lp->ppp_minor].unit;
+ ippp_table[nlp->ppp_minor]->unit = ippp_table[lp->ppp_minor]->unit;
/* maybe also SC_CCP stuff */
- ippp_table[nlp->ppp_minor].pppcfg |= ippp_table[lp->ppp_minor].pppcfg &
+ ippp_table[nlp->ppp_minor]->pppcfg |= ippp_table[lp->ppp_minor]->pppcfg &
(SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
- ippp_table[nlp->ppp_minor].mpppcfg |= ippp_table[lp->ppp_minor].mpppcfg &
+ ippp_table[nlp->ppp_minor]->mpppcfg |= ippp_table[lp->ppp_minor]->mpppcfg &
(SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ | SC_IN_SHORT_SEQ);
#if 0
- if (ippp_table[nlp->ppp_minor].mpppcfg != ippp_table[lp->ppp_minor].mpppcfg) {
+ if (ippp_table[nlp->ppp_minor]->mpppcfg != ippp_table[lp->ppp_minor]->mpppcfg) {
printk(KERN_WARNING "isdn_ppp_bundle: different MP options %04x and %04x\n",
- ippp_table[nlp->ppp_minor].mpppcfg, ippp_table[lp->ppp_minor].mpppcfg);
+ ippp_table[nlp->ppp_minor]->mpppcfg, ippp_table[lp->ppp_minor]->mpppcfg);
}
#endif
-/* $Id: isdn_ppp.h,v 1.1 1996/01/10 21:39:10 fritz Exp fritz $
+/* $Id: isdn_ppp.h,v 1.2 1996/04/20 16:35:11 fritz Exp $
*
* header for Linux ISDN subsystem, functions for synchronous PPP (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_ppp.h,v $
+ * Revision 1.2 1996/04/20 16:35:11 fritz
+ * Changed isdn_ppp_receive to use sk_buff as parameter.
+ * Added definition of isdn_ppp_dial_slave and ippp_table.
+ *
* Revision 1.1 1996/01/10 21:39:10 fritz
* Initial revision
*
extern void isdn_ppp_release(int, struct file *);
extern int isdn_ppp_dial_slave(char *);
-extern struct ippp_struct *ippp_table;
+extern struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];
-/* $Id: isdn_tty.c,v 1.3 1996/02/11 02:12:32 fritz Exp fritz $
+/* $Id: isdn_tty.c,v 1.4 1996/04/20 16:39:54 fritz Exp $
*
* Linux ISDN subsystem, tty functions and AT-command emulator (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn_tty.c,v $
+ * Revision 1.4 1996/04/20 16:39:54 fritz
+ * Changed all io to go through generic routines in isdn_common.c
+ * Fixed a real ugly bug in modem-emulator: 'ATA' had been accepted
+ * even when a call has been cancelled from the remote machine.
+ *
* Revision 1.3 1996/02/11 02:12:32 fritz
* Bugfixes according to similar fixes in standard serial.c of kernel.
*
static char *isdn_ttyname_ttyI = "ttyI";
static char *isdn_ttyname_cui = "cui";
-char *isdn_tty_revision = "$Revision: 1.3 $";
+char *isdn_tty_revision = "$Revision: 1.4 $";
int isdn_tty_try_read(int i, u_char * buf, int len)
{
{
isdn_ctrl cmd;
- dev->mdm.rcvsched[info->line] = 0;
- dev->mdm.online[info->line] = 0;
+ if (!info)
+ return;
+ dev->mdm.rcvsched[info->line] = 0;
+ dev->mdm.online[info->line] = 0;
if (info->isdn_driver >= 0) {
cmd.driver = info->isdn_driver;
cmd.command = ISDN_CMD_HANGUP;
isdn_all_eaz(info->isdn_driver, info->isdn_channel);
isdn_free_channel(info->isdn_driver, info->isdn_channel, ISDN_USAGE_MODEM);
}
- dev->m_idx[info->drv_index] = -1;
info->isdn_driver = -1;
info->isdn_channel = -1;
- info->drv_index = -1;
+ if (info->drv_index >= 0) {
+ dev->m_idx[info->drv_index] = -1;
+ info->drv_index = -1;
+ }
}
static inline int isdn_tty_paranoia_check(modem_info * info, dev_t device, const char *routine)
{
#ifdef MODEM_PARANOIA_CHECK
if (!info) {
- printk(KERN_WARNING "isdn: null info_struct for (%d, %d) in %s\n",
+ printk(KERN_WARNING "isdn_tty: null info_struct for (%d, %d) in %s\n",
MAJOR(device), MINOR(device), routine);
return 1;
}
if (info->magic != ISDN_ASYNC_MAGIC) {
- printk(KERN_WARNING "isdn: bad magic for modem struct (%d, %d) in %s\n",
+ printk(KERN_WARNING "isdn_tty: bad magic for modem struct (%d, %d) in %s\n",
MAJOR(device), MINOR(device), routine);
return 1;
}
} else {
info->MCR &= ~UART_MCR_DTR;
isdn_tty_modem_reset_regs(&dev->mdm.atmodem[info->line], 0);
- if (dev->mdm.online[info->line]) {
#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in changespeed\n");
+ printk(KERN_DEBUG "Mhup in changespeed\n");
#endif
- isdn_tty_modem_hup(info);
+ isdn_tty_modem_hup(info);
+ if (dev->mdm.online[info->line])
isdn_tty_modem_result(3, info);
- }
return;
}
/* byte size and parity */
if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) {
info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
isdn_tty_modem_reset_regs(&dev->mdm.atmodem[info->line], 0);
- if (dev->mdm.online[info->line]) {
#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in isdn_tty_shutdown\n");
+ printk(KERN_DEBUG "Mhup in isdn_tty_shutdown\n");
#endif
- isdn_tty_modem_hup(info);
- }
+ isdn_tty_modem_hup(info);
}
if (info->tty)
set_bit(TTY_IO_ERROR, &info->tty->flags);
isdn_dumppkt("T70pack2:", bufptr, buflen, 40);
#endif
}
- if (dev->drv[info->isdn_driver]->interface->
- writebuf(info->isdn_driver, info->isdn_channel, bufptr, buflen, 0) > 0) {
+ if (isdn_writebuf_stub(info->isdn_driver, info->isdn_channel, bufptr,
+ buflen, 0) > 0) {
info->xmit_count = 0;
info->xmit_size = dev->mdm.atmodem[i].mdmreg[16] * 16;
#if FUTURE
if (arg & TIOCM_DTR) {
info->MCR &= ~UART_MCR_DTR;
isdn_tty_modem_reset_regs(&dev->mdm.atmodem[info->line], 0);
- if (dev->mdm.online[info->line]) {
#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in TIOCMBIC\n");
+ printk(KERN_DEBUG "Mhup in TIOCMBIC\n");
#endif
- isdn_tty_modem_hup(info);
+ isdn_tty_modem_hup(info);
+ if (dev->mdm.online[info->line])
isdn_tty_modem_result(3, info);
- }
}
break;
case TIOCMSET:
| ((arg & TIOCM_DTR) ? UART_MCR_DTR : 0));
if (!(info->MCR & UART_MCR_DTR)) {
isdn_tty_modem_reset_regs(&dev->mdm.atmodem[info->line], 0);
- if (dev->mdm.online[info->line]) {
#ifdef ISDN_DEBUG_MODEM_HUP
- printk(KERN_DEBUG "Mhup in TIOCMSET\n");
+ printk(KERN_DEBUG "Mhup in TIOCMSET\n");
#endif
- isdn_tty_modem_hup(info);
+ isdn_tty_modem_hup(info);
+ if (dev->mdm.online[info->line])
isdn_tty_modem_result(3, info);
- }
}
break;
default:
*/
if (tty_hung_up_p(filp) ||
(info->flags & ISDN_ASYNC_CLOSING)) {
- if (info->flags & ISDN_ASYNC_CLOSING)
+ if (info->flags & ISDN_ASYNC_CLOSING)
interruptible_sleep_on(&info->close_wait);
#ifdef MODEM_DO_RESTART
if (info->flags & ISDN_ASYNC_HUP_NOTIFY)
m->cua_modem.subtype = ISDN_SERIAL_TYPE_CALLOUT;
if (tty_register_driver(&m->tty_modem)) {
- printk(KERN_WARNING "isdn: Unable to register modem-device\n");
+ printk(KERN_WARNING "isdn_tty: Couldn't register modem-device\n");
return -1;
}
if (tty_register_driver(&m->cua_modem)) {
- printk(KERN_WARNING "Couldn't register modem-callout-device\n");
+ printk(KERN_WARNING "isdn_tty: Couldn't register modem-callout-device\n");
return -2;
}
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
if (num[0] == ',') {
nr[0] = '0';
strncpy(&nr[1], num, 29);
- printk(KERN_WARNING "isdn: Incoming call without OAD, assuming '0'\n");
+ printk(KERN_WARNING "isdn_tty: Incoming call without OAD, assuming '0'\n");
} else
strncpy(nr, num, 30);
s = strtok(nr, ",");
s = strtok(NULL, ",");
if (!s) {
- printk(KERN_WARNING "isdn: Incoming callinfo garbled, ignored: %s\n",
+ printk(KERN_WARNING "isdn_tty: Incoming callinfo garbled, ignored: %s\n",
num);
restore_flags(flags);
return -1;
si1 = (int)simple_strtoul(s,NULL,10);
s = strtok(NULL, ",");
if (!s) {
- printk(KERN_WARNING "isdn: Incoming callinfo garbled, ignored: %s\n",
+ printk(KERN_WARNING "isdn_tty: Incoming callinfo garbled, ignored: %s\n",
num);
restore_flags(flags);
return -1;
si2 = (int)simple_strtoul(s,NULL,10);
eaz = strtok(NULL, ",");
if (!eaz) {
- printk(KERN_WARNING "isdn: Incoming call without CPN, assuming '0'\n");
+ printk(KERN_WARNING "isdn_tty: Incoming call without CPN, assuming '0'\n");
eaz = "0";
}
#ifdef ISDN_DEBUG_MODEM_ICALL
ulong flags;
if (!msg) {
- printk(KERN_WARNING "isdn: Null-Message in isdn_tty_at_cout\n");
+ printk(KERN_WARNING "isdn_tty: Null-Message in isdn_tty_at_cout\n");
return;
}
save_flags(flags);
case 'A':
/* A - Accept incoming call */
p++;
- if (m->mdmreg[1]) {
+ if (dev->mdm.msr[info->line] & UART_MSR_RI) {
#define FIDOBUG
#ifdef FIDOBUG
/* Variables fido... defined temporarily for finding a strange bug */
isdn_dumppkt("T70pack4:", bufptr, buflen, 40);
#endif
}
- if (dev->drv[info->isdn_driver]->interface->
- writebuf(info->isdn_driver, info->isdn_channel, bufptr, buflen, 0) > 0) {
+ if (isdn_writebuf_stub(info->isdn_driver, info->isdn_channel,
+ bufptr, buflen, 0) > 0) {
info->xmit_count = 0;
info->xmit_size = dev->mdm.atmodem[midx].mdmreg[16] * 16;
#if FUTURE
--- /dev/null
+L_OBJS :=
+M_OBJS :=
+O_OBJS := module.o edss1.o drv.o layer2.o capi.o callbacks.o
+
+O_TARGET :=
+ifeq ($(CONFIG_ISDN_DRV_PCBIT),y)
+ O_TARGET += pcbit.o
+else
+ ifeq ($(CONFIG_ISDN_DRV_PCBIT),m)
+ O_TARGET += pcbit.o
+ M_OBJS += pcbit.o
+ endif
+endif
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * callbacks for the FSM
+ */
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/tqueue.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include <linux/isdnif.h>
+
+#include "pcbit.h"
+#include "layer2.h"
+#include "edss1.h"
+#include "callbacks.h"
+#include "capi.h"
+
+ushort last_ref_num = 1;
+
+/*
+ * send_conn_req
+ *
+ */
+
+void cb_out_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *cbdata)
+{
+ struct sk_buff *skb;
+ int len;
+ ushort refnum;
+
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "Called Party Number: %s\n",
+ cbdata->data.setup.CalledPN);
+#endif
+ /*
+ * hdr - kmalloc in capi_conn_req
+ * - kfree when msg has been sent
+ */
+
+ if ((len = capi_conn_req(cbdata->data.setup.CalledPN, &skb)) < 0)
+ {
+ printk("capi_conn_req failed\n");
+ return;
+ }
+
+
+ refnum = last_ref_num++ & 0x7fffU;
+
+ chan->callref = 0;
+ chan->layer2link = 0;
+ chan->snum = 0;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_CONN_REQ, refnum, skb, len);
+}
+
+/*
+ * rcv CONNECT
+ * will go into ACTIVE state
+ * send CONN_ACTIVE_RESP
+ * send Select protocol request
+ */
+
+void cb_out_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ isdn_ctrl ictl;
+ struct sk_buff *skb;
+ int len;
+ ushort refnum;
+
+ if ((len=capi_conn_active_resp(chan, &skb)) < 0)
+ {
+ printk("capi_conn_active_req failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_CONN_ACTV_RESP, refnum, skb, len);
+
+
+ ictl.command = ISDN_STAT_DCONN;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+ dev->dev_if->statcallb(&ictl);
+
+ /* ACTIVE D-channel */
+
+ /* Select protocol */
+
+ if ((len=capi_select_proto_req(chan, &skb, 1 /*outgoing*/)) < 0) {
+ printk("capi_select_proto_req failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_SELP_REQ, refnum, skb, len);
+}
+
+
+/*
+ * Disconnect received (actually RELEASE COMPLETE)
+ * This means we were not able to establish connection with remote
+ * Inform the big boss above
+ */
+void cb_out_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ isdn_ctrl ictl;
+
+ ictl.command = ISDN_STAT_DHUP;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+ dev->dev_if->statcallb(&ictl);
+}
+
+
+/*
+ * Incoming call received
+ * inform user
+ */
+
+void cb_in_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *cbdata)
+{
+ isdn_ctrl ictl;
+ unsigned short refnum;
+ struct sk_buff *skb;
+ int len;
+
+
+ ictl.command = ISDN_STAT_ICALL;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+
+ /*
+ * ictl.num >= strlen() + strlen() + 5
+ */
+
+ if (cbdata->data.setup.CalledPN)
+ sprintf(ictl.num, "%s,%d,%d,%s",
+ cbdata->data.setup.CallingPN,
+ 7, 0,
+ cbdata->data.setup.CalledPN);
+
+ else
+ sprintf(ictl.num, "%s,%d,%d,%s",
+ cbdata->data.setup.CallingPN,
+ 7, 0,
+ "0");
+
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "statstr: %s\n", ictl.num);
+#endif
+
+ dev->dev_if->statcallb(&ictl);
+
+
+ if ((len=capi_conn_resp(chan, &skb)) < 0) {
+ printk(KERN_DEBUG "capi_conn_resp failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_CONN_RESP, refnum, skb, len);
+}
+
+/*
+ * user has replied
+ * open the channel
+ * send CONNECT message CONNECT_ACTIVE_REQ in CAPI
+ */
+
+void cb_in_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ unsigned short refnum;
+ struct sk_buff *skb;
+ int len;
+
+ if ((len = capi_conn_active_req(chan, &skb)) < 0) {
+ printk(KERN_DEBUG "capi_conn_active_req failed\n");
+ return;
+ }
+
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ printk(KERN_DEBUG "sending MSG_CONN_ACTV_REQ\n");
+ pcbit_l2_write(dev, MSG_CONN_ACTV_REQ, refnum, skb, len);
+}
+
+/*
+ * CONN_ACK arrived
+ * start b-proto selection
+ *
+ */
+
+void cb_in_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ unsigned short refnum;
+ struct sk_buff *skb;
+ int len;
+
+ if ((len = capi_select_proto_req(chan, &skb, 0 /*incoming*/)) < 0)
+ {
+ printk("capi_select_proto_req failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_SELP_REQ, refnum, skb, len);
+
+}
+
+
+/*
+ * Received disconnect ind on active state
+ * send disconnect resp
+ * send msg to user
+ */
+void cb_disc_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ struct sk_buff *skb;
+ int len;
+ ushort refnum;
+ isdn_ctrl ictl;
+
+ if ((len = capi_disc_resp(chan, &skb)) < 0) {
+ printk("capi_disc_resp failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_DISC_RESP, refnum, skb, len);
+
+ ictl.command = ISDN_STAT_BHUP;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+ dev->dev_if->statcallb(&ictl);
+}
+
+
+/*
+ * User HANGUP on active/call proceeding state
+ * send disc.req
+ */
+void cb_disc_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ struct sk_buff *skb;
+ int len;
+ ushort refnum;
+
+ if ((len = capi_disc_req(chan->callref, &skb, CAUSE_NORMAL)) < 0)
+ {
+ printk("capi_disc_req failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_DISC_REQ, refnum, skb, len);
+}
+
+/*
+ * Disc confirm received send BHUP
+ * Problem: when the HL driver sends the disc req itself
+ * LL receives BHUP
+ */
+void cb_disc_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ isdn_ctrl ictl;
+
+ ictl.command = ISDN_STAT_BHUP;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+ dev->dev_if->statcallb(&ictl);
+}
+
+void cb_notdone(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+}
+
+/*
+ * send activate b-chan protocol
+ */
+void cb_selp_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ struct sk_buff *skb;
+ int len;
+ ushort refnum;
+
+ if ((len = capi_activate_transp_req(chan, &skb)) < 0)
+ {
+ printk("capi_conn_activate_transp_req failed\n");
+ return;
+ }
+
+ refnum = last_ref_num++ & 0x7fffU;
+ chan->s_refnum = refnum;
+
+ pcbit_l2_write(dev, MSG_ACT_TRANSP_REQ, refnum, skb, len);
+}
+
+/*
+ * Inform User that the B-channel is available
+ */
+void cb_open(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data)
+{
+ isdn_ctrl ictl;
+
+ ictl.command = ISDN_STAT_BCONN;
+ ictl.driver=dev->id;
+ ictl.arg=chan->id;
+ dev->dev_if->statcallb(&ictl);
+}
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * Callbacks prototypes for FSM
+ *
+ */
+
+#ifndef CALLBACKS_H
+#define CALLBACKS_H
+
+
+extern void cb_out_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_out_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_out_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_in_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+extern void cb_in_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+extern void cb_in_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_disc_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+extern void cb_disc_2(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+extern void cb_disc_3(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_notdone(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+extern void cb_selp_1(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+extern void cb_open(struct pcbit_dev * dev, struct pcbit_chan* chan,
+ struct callb_data *data);
+
+#endif
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Writen by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * CAPI encoder/decoder for
+ * Portugal Telecom CAPI 2.0
+ *
+ * Not compatible with the AVM Gmbh. CAPI 2.0
+ */
+
+/*
+ * Documentation:
+ * - "Common ISDN API - Perfil Português - Versão 2.1",
+ * Telecom Portugal, Fev 1992.
+ * - "Common ISDN API - Especificação de protocolos para
+ * acesso aos canais B", Inesc, Jan 1994.
+ */
+
+/*
+ * TODO: better decoding of Information Elements
+ * for debug purposes mainly
+ * encode our number in CallerPN and ConnectedPN
+ */
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+
+#include <linux/tqueue.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/string.h>
+
+#include <linux/isdnif.h>
+
+#include "pcbit.h"
+#include "edss1.h"
+#include "capi.h"
+
+
+/*
+ * Encoding of CAPI messages
+ *
+ */
+
+int capi_conn_req(const char * calledPN, struct sk_buff **skb)
+{
+ ushort len;
+
+ /*
+ * length
+ * AppInfoMask - 2
+ * BC0 - 3
+ * BC1 - 1
+ * Chan - 2
+ * Keypad - 1
+ * CPN - 1
+ * CPSA - 1
+ * CalledPN - 2 + strlen
+ * CalledPSA - 1
+ * rest... - 4
+ * ----------------
+ * Total 18 + strlen
+ */
+
+ len = 18 + strlen(calledPN);
+
+ if ((*skb = dev_alloc_skb(len)) == NULL) {
+
+ printk(KERN_WARNING "capi_conn_req: alloc_skb failed\n");
+ return -1;
+ }
+
+ /* InfoElmMask */
+ *((ushort*) skb_put(*skb, 2)) = AppInfoMask;
+
+
+	/* Bearer Capability - Mandatory */
+ *(skb_put(*skb, 1)) = 2; /* BC0.Length */
+ *(skb_put(*skb, 1)) = 0x88; /* BC0.Octect3 - Digital Information */
+ *(skb_put(*skb, 1)) = 0x90; /* BC0.Octect4 - */
+
+	/* Bearer Capability - Optional */
+ *(skb_put(*skb, 1)) = 0; /* BC1.Length = 0 */
+
+ *(skb_put(*skb, 1)) = 1; /* ChannelID.Length = 1 */
+ *(skb_put(*skb, 1)) = 0x83; /* Basic Interface - Any Channel */
+
+ *(skb_put(*skb, 1)) = 0; /* Keypad.Length = 0 */
+
+
+ *(skb_put(*skb, 1)) = 0; /* CallingPN.Length = 0 */
+ *(skb_put(*skb, 1)) = 0; /* CallingPSA.Length = 0 */
+
+ /* Called Party Number */
+ *(skb_put(*skb, 1)) = strlen(calledPN) + 1;
+ *(skb_put(*skb, 1)) = 0x81;
+ memcpy(skb_put(*skb, strlen(calledPN)), calledPN, strlen(calledPN));
+
+ /* '#' */
+
+ *(skb_put(*skb, 1)) = 0; /* CalledPSA.Length = 0 */
+
+ /* LLC.Length = 0; */
+ /* HLC0.Length = 0; */
+ /* HLC1.Length = 0; */
+ /* UTUS.Length = 0; */
+ memset(skb_put(*skb, 4), 0, 4);
+
+ return len;
+}
+
+int capi_conn_resp(struct pcbit_chan* chan, struct sk_buff **skb)
+{
+
+ if ((*skb = dev_alloc_skb(5)) == NULL) {
+
+ printk(KERN_WARNING "capi_conn_resp: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+ *(skb_put(*skb, 1)) = 0x01; /* ACCEPT_CALL */
+ *(skb_put(*skb, 1)) = 0;
+ *(skb_put(*skb, 1)) = 0;
+
+ return 5;
+}
+
+int capi_conn_active_req(struct pcbit_chan* chan, struct sk_buff **skb)
+{
+ /*
+ * 8 bytes
+ */
+
+ if ((*skb = dev_alloc_skb(8)) == NULL) {
+
+ printk(KERN_WARNING "capi_conn_active_req: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "Call Reference: %04x\n", chan->callref);
+#endif
+
+ *(skb_put(*skb, 1)) = 0; /* BC.Length = 0; */
+ *(skb_put(*skb, 1)) = 0; /* ConnectedPN.Length = 0 */
+ *(skb_put(*skb, 1)) = 0; /* PSA.Length */
+ *(skb_put(*skb, 1)) = 0; /* LLC.Length = 0; */
+ *(skb_put(*skb, 1)) = 0; /* HLC.Length = 0; */
+ *(skb_put(*skb, 1)) = 0; /* UTUS.Length = 0; */
+
+ return 8;
+}
+
+int capi_conn_active_resp(struct pcbit_chan* chan, struct sk_buff **skb)
+{
+ /*
+ * 2 bytes
+ */
+
+ if ((*skb = dev_alloc_skb(2)) == NULL) {
+
+ printk(KERN_WARNING "capi_conn_active_resp: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+
+ return 2;
+}
+
+
+int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb,
+ int outgoing)
+{
+
+ /*
+ * 18 bytes
+ */
+
+ if ((*skb = dev_alloc_skb(18)) == NULL) {
+
+ printk(KERN_WARNING "capi_select_proto_req: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+
+ /* Layer2 protocol */
+
+ switch (chan->proto) {
+ case ISDN_PROTO_L2_X75I:
+ *(skb_put(*skb, 1)) = 0x05; /* LAPB */
+ break;
+ case ISDN_PROTO_L2_HDLC:
+#ifdef DEBUG
+ printk(KERN_DEBUG "HDLC\n"); /* HDLC */
+#endif
+ *(skb_put(*skb, 1)) = 0x02;
+ break;
+ default:
+#ifdef DEBUG
+ printk(KERN_DEBUG "Transparent\n");
+#endif
+ *(skb_put(*skb, 1)) = 0x03;
+ break;
+ }
+
+ *(skb_put(*skb, 1)) = (outgoing ? 0x02 : 0x42); /* Don't ask */
+ *(skb_put(*skb, 1)) = 0x00;
+
+ *((ushort *) skb_put(*skb, 2)) = MRU;
+
+
+ *(skb_put(*skb, 1)) = 0x08; /* Modulo */
+ *(skb_put(*skb, 1)) = 0x07; /* Max Window */
+
+ *(skb_put(*skb, 1)) = 0x01; /* No Layer3 Protocol */
+
+ /*
+ * 2 - layer3 MTU [10]
+ * - Modulo [12]
+ * - Window
+ * - layer1 proto [14]
+ * - bitrate
+ * - sub-channel [16]
+ * - layer1dataformat [17]
+ */
+
+ memset(skb_put(*skb, 8), 0, 8);
+
+ return 18;
+}
+
+
+int capi_activate_transp_req(struct pcbit_chan *chan, struct sk_buff **skb)
+{
+
+ if ((*skb = dev_alloc_skb(7)) == NULL) {
+
+ printk(KERN_WARNING "capi_activate_transp_req: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+
+
+ *(skb_put(*skb, 1)) = chan->layer2link; /* Layer2 id */
+ *(skb_put(*skb, 1)) = 0x00; /* Transmit by default */
+
+ *((ushort *) skb_put(*skb, 2)) = MRU;
+
+ *(skb_put(*skb, 1)) = 0x01; /* Enables reception*/
+
+ return 7;
+}
+
+int capi_tdata_req(struct pcbit_chan* chan, struct sk_buff *skb)
+{
+ ushort data_len;
+
+
+ /*
+ * callref - 2
+ * layer2link - 1
+ * wBlockLength - 2
+ * data - 4
+ * sernum - 1
+ */
+
+ data_len = skb->len;
+
+ skb_push(skb, 10);
+
+ *((u16 *) (skb->data)) = chan->callref;
+ skb->data[2] = chan->layer2link;
+ *((u16 *) (skb->data + 3)) = data_len;
+
+ chan->s_refnum = (chan->s_refnum + 1) % 8;
+ *((u32 *) (skb->data + 5)) = chan->s_refnum;
+
+ skb->data[9] = 0; /* HDLC frame number */
+
+ return 10;
+}
+
+int capi_tdata_resp(struct pcbit_chan *chan, struct sk_buff ** skb)
+
+{
+ if ((*skb = dev_alloc_skb(4)) == NULL) {
+
+ printk(KERN_WARNING "capi_tdata_resp: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = chan->callref;
+
+ *(skb_put(*skb, 1)) = chan->layer2link;
+ *(skb_put(*skb, 1)) = chan->r_refnum;
+
+ return (*skb)->len;
+}
+
+int capi_disc_req(ushort callref, struct sk_buff **skb, u_char cause)
+{
+
+ if ((*skb = dev_alloc_skb(6)) == NULL) {
+
+ printk(KERN_WARNING "capi_disc_req: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2) ) = callref;
+
+ *(skb_put(*skb, 1)) = 2; /* Cause.Length = 2; */
+ *(skb_put(*skb, 1)) = 0x80;
+ *(skb_put(*skb, 1)) = 0x80 | cause;
+
+ /*
+ * Change it: we should send 'Sic transit gloria Mundi' here ;-)
+ */
+
+ *(skb_put(*skb, 1)) = 0; /* UTUS.Length = 0; */
+
+ return 6;
+}
+
+int capi_disc_resp(struct pcbit_chan *chan, struct sk_buff **skb)
+{
+ if ((*skb = dev_alloc_skb(2)) == NULL) {
+
+ printk(KERN_WARNING "capi_disc_resp: alloc_skb failed\n");
+ return -1;
+ }
+
+ (*skb)->free = 1;
+
+ *((ushort*) skb_put(*skb, 2)) = chan->callref;
+
+ return 2;
+}
+
+
+/*
+ * Decoding of CAPI messages
+ *
+ */
+
+int capi_decode_conn_ind(struct pcbit_chan * chan,
+ struct sk_buff *skb,
+ struct callb_data *info)
+{
+ int CIlen, len;
+
+ /* Call Reference [CAPI] */
+ chan->callref = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "Call Reference: %04x\n", chan->callref);
+#endif
+
+ /* Channel Identification */
+
+ /* Expect
+ Len = 1
+	   Octet 3 = 0100 10CC - [ 7 Basic, 4 , 2-1 chan ]
+ */
+
+ CIlen = skb->data[0];
+#ifdef DEBUG
+ if (CIlen == 1) {
+
+ if ( ((skb->data[1]) & 0xFC) == 0x48 )
+ printk(KERN_DEBUG "decode_conn_ind: chan ok\n");
+ printk(KERN_DEBUG "phyChan = %d\n", skb->data[1] & 0x03);
+ }
+ else
+ printk(KERN_DEBUG "conn_ind: CIlen = %d\n", CIlen);
+#endif
+ skb_pull(skb, CIlen + 1);
+
+ /* Calling Party Number */
+	/* An "additional service" as far as Portugal Telecom is concerned */
+
+ len = skb->data[0];
+
+ if (len > 0) {
+ int count = 1;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "CPN: Octect 3 %02x\n", skb->data[1]);
+#endif
+ if ((skb->data[1] & 0x80) == 0)
+ count = 2;
+
+ if (!(info->data.setup.CallingPN = kmalloc(len - count + 1, GFP_ATOMIC)))
+ return -1;
+
+ memcpy(info->data.setup.CallingPN, skb->data + count + 1,
+ len - count);
+ info->data.setup.CallingPN[len - count] = 0;
+
+ }
+ else {
+ info->data.setup.CallingPN = NULL;
+ printk(KERN_DEBUG "NULL CallingPN\n");
+ }
+
+ skb_pull(skb, len + 1);
+
+ /* Calling Party Subaddress */
+ skb_pull(skb, skb->data[0] + 1);
+
+ /* Called Party Number */
+
+ len = skb->data[0];
+
+ if (len > 0) {
+ int count = 1;
+
+ if ((skb->data[1] & 0x80) == 0)
+ count = 2;
+
+ if (!(info->data.setup.CalledPN = kmalloc(len - count + 1, GFP_ATOMIC)))
+ return -1;
+
+ memcpy(info->data.setup.CalledPN, skb->data + count + 1,
+ len - count);
+ info->data.setup.CalledPN[len - count] = 0;
+
+ }
+ else {
+ info->data.setup.CalledPN = NULL;
+ printk(KERN_DEBUG "NULL CalledPN\n");
+ }
+
+ skb_pull(skb, len + 1);
+
+ /* Called Party Subaddress */
+ skb_pull(skb, skb->data[0] + 1);
+
+ /* LLC */
+ skb_pull(skb, skb->data[0] + 1);
+
+ /* HLC */
+ skb_pull(skb, skb->data[0] + 1);
+
+ /* U2U */
+ skb_pull(skb, skb->data[0] + 1);
+
+ return 0;
+}
+
+/*
+ * returns errcode
+ */
+
+int capi_decode_conn_conf(struct pcbit_chan * chan, struct sk_buff *skb,
+ int *complete)
+{
+ int errcode;
+
+ chan->callref = *((ushort *) skb->data); /* Update CallReference */
+ skb_pull(skb, 2);
+
+ errcode = *((ushort *) skb->data); /* read errcode */
+ skb_pull(skb, 2);
+
+ *complete = *(skb->data);
+ skb_pull(skb, 1);
+
+ /* FIX ME */
+	/* This is actually a firmware bug */
+ if (!*complete)
+ {
+ printk(KERN_DEBUG "complete=%02x\n", *complete);
+ *complete = 1;
+ }
+
+
+ /* Optional Bearer Capability */
+ skb_pull(skb, *(skb->data) + 1);
+
+ /* Channel Identification */
+ skb_pull(skb, *(skb->data) + 1);
+
+ /* High Layer Compatibility follows */
+ skb_pull(skb, *(skb->data) + 1);
+
+ return errcode;
+}
+
+int capi_decode_conn_actv_ind(struct pcbit_chan * chan, struct sk_buff *skb)
+{
+ ushort len;
+#ifdef DEBUG
+ char str[32];
+#endif
+
+ /* Yet Another Bearer Capability */
+ skb_pull(skb, *(skb->data) + 1);
+
+
+ /* Connected Party Number */
+ len=*(skb->data);
+
+#ifdef DEBUG
+ if (len > 1 && len < 31) {
+ memcpy(str, skb->data + 2, len - 1);
+ str[len] = 0;
+ printk(KERN_DEBUG "Connected Party Number: %s\n", str);
+ }
+ else
+ printk(KERN_DEBUG "actv_ind CPN len = %d\n", len);
+#endif
+
+ skb_pull(skb, len + 1);
+
+ /* Connected Subaddress */
+ skb_pull(skb, *(skb->data) + 1);
+
+ /* Low Layer Capability */
+ skb_pull(skb, *(skb->data) + 1);
+
+ /* High Layer Capability */
+ skb_pull(skb, *(skb->data) + 1);
+
+ return 0;
+}
+
+int capi_decode_conn_actv_conf(struct pcbit_chan * chan, struct sk_buff *skb)
+{
+ ushort errcode;
+
+ errcode = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+ /* Channel Identification
+ skb_pull(skb, skb->data[0] + 1);
+ */
+ return errcode;
+}
+
+
+int capi_decode_sel_proto_conf(struct pcbit_chan *chan, struct sk_buff *skb)
+{
+ ushort errcode;
+
+ chan->layer2link = *(skb->data);
+ skb_pull(skb, 1);
+
+ errcode = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+ return errcode;
+}
+
+int capi_decode_actv_trans_conf(struct pcbit_chan *chan, struct sk_buff *skb)
+{
+ ushort errcode;
+
+ if (chan->layer2link != *(skb->data) )
+ printk("capi_decode_actv_trans_conf: layer2link doesn't match\n");
+
+ skb_pull(skb, 1);
+
+ errcode = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+ return errcode;
+}
+
+int capi_decode_disc_ind(struct pcbit_chan *chan, struct sk_buff *skb)
+{
+ ushort len;
+#ifdef DEBUG
+ int i;
+#endif
+ /* Cause */
+
+ len = *(skb->data);
+ skb_pull(skb, 1);
+
+#ifdef DEBUG
+
+ for (i=0; i<len; i++)
+ printk(KERN_DEBUG "Cause Octect %d: %02x\n", i+3,
+ *(skb->data + i));
+#endif
+
+ skb_pull(skb, len);
+
+ return 0;
+}
+
+int capi_decode_disc_conf(struct pcbit_chan *chan, struct sk_buff *skb)
+{
+ ushort errcode;
+
+ errcode = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+ return errcode;
+}
+
+#ifdef DEBUG
+int capi_decode_debug_188(u_char *hdr, ushort hdrlen)
+{
+ char str[64];
+ int len;
+
+ len = hdr[0];
+
+ if (len < 64 && len == hdrlen - 1) {
+ memcpy(str, hdr + 1, hdrlen - 1);
+ str[hdrlen - 1] = 0;
+ printk("%s\n", str);
+ }
+ else
+ printk("debug message incorrect\n");
+
+ return 0;
+}
+#endif
+
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * CAPI encode/decode prototypes and defines
+ */
+
+#ifndef CAPI_H
+#define CAPI_H
+
+
+#define REQ_CAUSE 0x01
+#define REQ_DISPLAY 0x04
+#define REQ_USER_TO_USER 0x08
+
+#define AppInfoMask REQ_CAUSE|REQ_DISPLAY|REQ_USER_TO_USER
+
+/* Connection Setup */
+extern int capi_conn_req(const char * calledPN, struct sk_buff **buf);
+extern int capi_decode_conn_conf(struct pcbit_chan * chan, struct sk_buff *skb,
+ int *complete);
+
+extern int capi_decode_conn_ind(struct pcbit_chan * chan, struct sk_buff *skb,
+ struct callb_data *info);
+extern int capi_conn_resp(struct pcbit_chan* chan, struct sk_buff **skb);
+
+extern int capi_conn_active_req(struct pcbit_chan* chan, struct sk_buff **skb);
+extern int capi_decode_conn_actv_conf(struct pcbit_chan * chan,
+ struct sk_buff *skb);
+
+extern int capi_decode_conn_actv_ind(struct pcbit_chan * chan,
+ struct sk_buff *skb);
+extern int capi_conn_active_resp(struct pcbit_chan* chan,
+ struct sk_buff **skb);
+
+/* Data */
+extern int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb,
+ int outgoing);
+extern int capi_decode_sel_proto_conf(struct pcbit_chan *chan,
+ struct sk_buff *skb);
+
+extern int capi_activate_transp_req(struct pcbit_chan *chan,
+ struct sk_buff **skb);
+extern int capi_decode_actv_trans_conf(struct pcbit_chan *chan,
+ struct sk_buff *skb);
+
+extern int capi_tdata_req(struct pcbit_chan* chan, struct sk_buff *skb);
+extern int capi_tdata_resp(struct pcbit_chan *chan, struct sk_buff ** skb);
+
+/* Connection Termination */
+extern int capi_disc_req(ushort callref, struct sk_buff **skb, u_char cause);
+extern int capi_decode_disc_conf(struct pcbit_chan *chan, struct sk_buff *skb);
+
+extern int capi_decode_disc_ind(struct pcbit_chan *chan, struct sk_buff *skb);
+extern int capi_disc_resp(struct pcbit_chan *chan, struct sk_buff **skb);
+
+#ifdef DEBUG
+extern int capi_decode_debug_188(u_char *hdr, ushort hdrlen);
+#endif
+
+extern __inline__
+struct pcbit_chan *
+capi_channel(struct pcbit_dev *dev, struct sk_buff *skb)
+{
+ ushort callref;
+
+ callref = *((ushort*) skb->data);
+ skb_pull(skb, 2);
+
+ if (dev->b1->callref == callref)
+ return dev->b1;
+ else if (dev->b2->callref == callref)
+ return dev->b2;
+
+ return NULL;
+}
+
+#endif
+
+
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * PCBIT-D interface with isdn4linux
+ */
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+
+#include <linux/isdnif.h>
+#include <asm/string.h>
+#include <asm/io.h>
+
+#include "pcbit.h"
+#include "edss1.h"
+#include "layer2.h"
+#include "capi.h"
+
+
+extern ushort last_ref_num;
+
+static int pcbit_ioctl(isdn_ctrl* ctl);
+
+static char* pcbit_devname[MAX_PCBIT_CARDS] = {
+ "pcbit0",
+ "pcbit1",
+ "pcbit2",
+ "pcbit3"
+};
+
+/*
+ * prototypes
+ */
+
+int pcbit_command(isdn_ctrl* ctl);
+int pcbit_stat(u_char* buf, int len, int user);
+int pcbit_xmit(int driver, int chan, struct sk_buff *skb);
+int pcbit_writecmd(const u_char*, int, int);
+
+static int set_protocol_running(struct pcbit_dev * dev);
+
+static void pcbit_clear_msn(struct pcbit_dev *dev);
+static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
+static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
+
+
+extern void pcbit_deliver(void * data);
+
+int pcbit_init_dev(int board, int mem_base, int irq)
+{
+ struct pcbit_dev *dev;
+ isdn_if *dev_if;
+
+ if ((dev=kmalloc(sizeof(struct pcbit_dev), GFP_KERNEL)) == NULL)
+ {
+ printk("pcbit_init: couldn't malloc pcbit_dev struct\n");
+ return -ENOMEM;
+ }
+
+ dev_pcbit[board] = dev;
+ memset(dev, 0, sizeof(struct pcbit_dev));
+
+ if (mem_base >= 0xA0000 && mem_base <= 0xFFFFF )
+ dev->sh_mem = (unsigned char*) mem_base;
+ else
+ {
+ printk("memory address invalid");
+ kfree(dev);
+ dev_pcbit[board] = NULL;
+ return -EACCES;
+ }
+
+ dev->b1 = kmalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
+ if (!dev->b1) {
+ printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
+ kfree(dev);
+ return -ENOMEM;
+ }
+
+ dev->b2 = kmalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
+ if (!dev->b2) {
+ printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
+ kfree(dev->b1);
+ kfree(dev);
+ return -ENOMEM;
+ }
+
+ memset(dev->b1, 0, sizeof(struct pcbit_chan));
+ memset(dev->b2, 0, sizeof(struct pcbit_chan));
+ dev->b2->id = 1;
+
+
+ dev->qdelivery.next = 0;
+ dev->qdelivery.sync = 0;
+ dev->qdelivery.routine = pcbit_deliver;
+ dev->qdelivery.data = dev;
+
+ /*
+ * interrupts
+ */
+
+ if (request_irq(irq, &pcbit_irq_handler, 0, pcbit_devname[board], dev) != 0)
+ {
+ kfree(dev->b1);
+ kfree(dev->b2);
+ kfree(dev);
+ dev_pcbit[board] = NULL;
+ return -EIO;
+ }
+
+ dev->irq = irq;
+
+ /* next frame to be received */
+ dev->rcv_seq = 0;
+ dev->send_seq = 0;
+ dev->unack_seq = 0;
+
+ dev->hl_hdrlen = 10;
+
+ dev_if = kmalloc(sizeof(isdn_if), GFP_KERNEL);
+
+ if (!dev_if) {
+ free_irq(irq, dev);
+ kfree(dev->b1);
+ kfree(dev->b2);
+ kfree(dev);
+ dev_pcbit[board] = NULL;
+ return -EIO;
+ }
+
+ dev->dev_if = dev_if;
+
+ dev_if->channels = 2;
+
+
+ dev_if->features = ISDN_FEATURE_P_EURO | ISDN_FEATURE_L3_TRANS |
+ ISDN_FEATURE_L2_HDLC;
+
+ dev_if->writebuf_skb = pcbit_xmit;
+ dev_if->writebuf = NULL;
+ dev_if->hl_hdrlen = 10;
+
+ dev_if->maxbufsize = MAXBUFSIZE;
+ dev_if->command = pcbit_command;
+
+ dev_if->writecmd = pcbit_writecmd;
+ dev_if->readstat = pcbit_stat;
+
+
+ strcpy(dev_if->id, pcbit_devname[board]);
+
+ if (!register_isdn(dev_if)) {
+ free_irq(irq, dev);
+ kfree(dev->b1);
+ kfree(dev->b2);
+ kfree(dev);
+ dev_pcbit[board] = NULL;
+ return -EIO;
+ }
+
+ dev->id = dev_if->channels;
+
+
+ dev->l2_state = L2_DOWN;
+ dev->free = 511;
+
+ /*
+ * set_protocol_running(dev);
+ */
+
+ return 0;
+}
+
+#ifdef MODULE
+void pcbit_terminate(int board)
+{
+ struct pcbit_dev * dev;
+
+ dev = dev_pcbit[board];
+
+ if (dev) {
+ /* unregister_isdn(dev->dev_if); */
+ free_irq(dev->irq, dev);
+ pcbit_clear_msn(dev);
+ kfree(dev->dev_if);
+ if (dev->b1->fsm_timer.function)
+ del_timer(&dev->b1->fsm_timer);
+ if (dev->b2->fsm_timer.function)
+ del_timer(&dev->b2->fsm_timer);
+ kfree(dev->b1);
+ kfree(dev->b2);
+ kfree(dev);
+ }
+}
+#endif
+
+int pcbit_command(isdn_ctrl* ctl)
+{
+ struct pcbit_dev *dev;
+ struct pcbit_chan *chan;
+ struct callb_data info;
+ char *cp;
+
+ dev = finddev(ctl->driver);
+
+ if (!dev)
+ {
+ printk("pcbit_command: unknown device\n");
+ return -1;
+ }
+
+ chan = (ctl->arg & 0x0F) ? dev->b2 : dev->b1;
+
+
+ switch(ctl->command) {
+ case ISDN_CMD_IOCTL:
+ return pcbit_ioctl(ctl);
+ break;
+ case ISDN_CMD_DIAL:
+ info.type = EV_USR_SETUP_REQ;
+ info.data.setup.CalledPN = (char *) &ctl->num;
+ cp = strchr(info.data.setup.CalledPN, ',');
+ if (cp)
+ *cp = 0;
+ else {
+ printk(KERN_DEBUG "DIAL: error in CalledPN\n");
+ return -1;
+ }
+ pcbit_fsm_event(dev, chan, EV_USR_SETUP_REQ, &info);
+ break;
+ case ISDN_CMD_ACCEPTD:
+ pcbit_fsm_event(dev, chan, EV_USR_SETUP_RESP, NULL);
+ break;
+ case ISDN_CMD_ACCEPTB:
+ printk("ISDN_CMD_ACCEPTB - not realy needed\n");
+ break;
+ case ISDN_CMD_HANGUP:
+ pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
+ break;
+ case ISDN_CMD_SETL2:
+ chan->proto = (ctl->arg >> 8);
+ break;
+ case ISDN_CMD_GETL2:
+ return chan->proto;
+ break;
+ case ISDN_CMD_LOCK:
+ MOD_INC_USE_COUNT;
+ break;
+ case ISDN_CMD_UNLOCK:
+ MOD_DEC_USE_COUNT;
+ break;
+ case ISDN_CMD_CLREAZ:
+ pcbit_clear_msn(dev);
+ break;
+ case ISDN_CMD_SETEAZ:
+ pcbit_set_msn(dev, ctl->num);
+ break;
+ case ISDN_CMD_SETL3:
+ if ((ctl->arg >> 8) != ISDN_PROTO_L3_TRANS)
+ printk(KERN_DEBUG "L3 protocol unknown\n");
+ break;
+ case ISDN_CMD_GETL3:
+ return ISDN_PROTO_L3_TRANS;
+ break;
+ case ISDN_CMD_GETEAZ:
+ case ISDN_CMD_SETSIL:
+ case ISDN_CMD_GETSIL:
+ printk(KERN_DEBUG "pcbit_command: code %d not implemented yet\n", ctl->command);
+ break;
+ default:
+ printk(KERN_DEBUG "pcbit_command: unknown command\n");
+ break;
+ };
+
+ return 0;
+}
+
+/*
+ * Another Hack :-(
+ * on some conditions the board stops sending TDATA_CONFs
+ * let's see if we can work around the problem
+ */
+
+#ifdef BLOCK_TIMER
+static void pcbit_block_timer(unsigned long data)
+{
+ struct pcbit_chan *chan;
+ struct pcbit_dev * dev;
+ isdn_ctrl ictl;
+
+ chan = (struct pcbit_chan *) data;
+
+ dev = chan2dev(chan);
+
+ if (dev == NULL) {
+ printk(KERN_DEBUG "pcbit: chan2dev failed\n");
+ return;
+ }
+
+ del_timer(&chan->block_timer);
+ chan->block_timer.function = NULL;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "pcbit_block_timer\n");
+#endif
+ chan->queued = 0;
+ ictl.driver = dev->id;
+ ictl.command = ISDN_STAT_BSENT;
+ ictl.arg = chan->id;
+ dev->dev_if->statcallb(&ictl);
+}
+#endif
+
+/*
+ * isdn4linux B-channel transmit entry point.
+ * Queues one skb on the given channel.  Returns the number of bytes
+ * accepted, 0 when the channel queue is full (the packet stays on the
+ * device queue and a watchdog is armed), or -1 on error.
+ */
+int pcbit_xmit(int driver, int chnum, struct sk_buff *skb)
+{
+	ushort hdrlen;
+	int refnum, len;
+	struct pcbit_chan * chan;
+	struct pcbit_dev *dev;
+
+	dev = finddev(driver);
+	if (dev == NULL)
+	{
+		printk("finddev returned NULL");
+		return -1;
+	}
+
+	/* chnum selects B1 (0) or B2 (non-zero) */
+	chan = chnum ? dev->b2 : dev->b1;
+
+
+	if (chan->fsm_state != ST_ACTIVE)
+		return -1;
+
+	if (chan->queued >= MAX_QUEUED )
+	{
+#ifdef DEBUG_QUEUE
+		printk(KERN_DEBUG
+		       "pcbit: %d packets allready in queue - write fails\n",
+		       chan->queued);
+#endif
+		/*
+		 * packet stays on the head of the device queue
+		 * since dev_start_xmit will fail
+		 * see net/core/dev.c
+		 */
+#ifdef BLOCK_TIMER
+		/* arm the stall-recovery watchdog if not already running */
+		if (chan->block_timer.function == NULL) {
+			init_timer(&chan->block_timer);
+			chan->block_timer.function =  &pcbit_block_timer;
+			chan->block_timer.data = (long) chan;
+			chan->block_timer.expires = jiffies + 1 * HZ;
+			add_timer(&chan->block_timer);
+		}
+#endif
+		return 0;
+	}
+
+
+	chan->queued++;
+
+	len = skb->len;
+
+	/* build the CAPI TDATA_REQ header (presumably prepended to the
+	 * skb - confirm in capi.c); returns the header length */
+	hdrlen = capi_tdata_req(chan, skb);
+
+	refnum = last_ref_num++ & 0x7fffU;
+	chan->s_refnum = refnum;
+
+	pcbit_l2_write(dev, MSG_TDATA_REQ, refnum, skb, hdrlen);
+
+	return len;
+}
+
+
+/*
+ * Write firmware/command bytes from user or kernel space to the board.
+ *
+ * L2_LWMODE: raw image copied linearly into shared memory.
+ * L2_FWMODE: byte-by-byte handshake through the load zone (the board
+ *            clears the flag byte when it has consumed a data byte).
+ *
+ * Returns bytes written or a negative errno.
+ *
+ * Fixes: the user-space LWMODE path bounced through a 1024-byte stack
+ * buffer but only checked len against BANK4+1, allowing a stack
+ * overflow; negative len was also accepted.  The inner load-zone
+ * pointer no longer shadows the kmalloc'ed buffer pointer.
+ */
+int pcbit_writecmd(const u_char* buf, int len, int user)
+{
+	struct pcbit_dev * dev;
+	int board, i, j;
+	const u_char * loadbuf;
+	u_char * ptr = NULL;
+
+	int errstat;
+
+	/* we should have the driver id as input here too - let's say it's 0 */
+	board = 0;
+
+	dev = dev_pcbit[board];
+
+	if (!dev)
+	{
+		printk("pcbit_writecmd: couldn't find device");
+		return -ENODEV;
+	}
+
+	switch(dev->l2_state) {
+	case L2_LWMODE:
+		/* check (size <= rdp_size); write buf into board */
+		if (len < 0 || len > BANK4 + 1)
+		{
+			printk("pcbit_writecmd: invalid length %d\n", len);
+			return -EFAULT;
+		}
+
+		if (user)
+		{
+			u_char cbuf[1024];
+
+			/* bounce buffer is fixed-size: refuse anything
+			 * larger instead of overflowing the stack */
+			if (len > 1024)
+				return -EINVAL;
+
+			memcpy_fromfs(cbuf, buf, len);
+			for (i=0; i<len; i++)
+				writeb(cbuf[i], dev->sh_mem + i);
+		}
+		else
+			memcpy_toio(dev->sh_mem, buf, len);
+		return len;
+	case L2_FWMODE:
+		/* this is the hard part */
+		/* dumb board */
+		if (len < 0)
+			return -EINVAL;
+
+		if (user) {
+			/* get it into kernel space */
+			if ((ptr = kmalloc(len, GFP_KERNEL))==NULL)
+				return -ENOMEM;
+			memcpy_fromfs(ptr, buf, len);
+			loadbuf = ptr;
+		}
+		else
+			loadbuf = buf;
+
+		errstat = 0;
+
+		for (i=0; i < len; i++)
+		{
+			__volatile__ unsigned char * lp;
+
+			/* busy-wait until the board clears the flag byte */
+			for(j=0; j < LOAD_RETRY; j++)
+			{
+				lp = dev->sh_mem + dev->loadptr;
+				if (*lp == 0)
+					break;
+			}
+
+			if (j == LOAD_RETRY)
+			{
+				errstat = -ETIME;
+				printk("TIMEOUT i=%d\n", i);
+				break;
+			}
+			/* data byte first, then set the flag byte */
+			writeb(loadbuf[i], dev->sh_mem + dev->loadptr + 1);
+			writeb(0x01, dev->sh_mem + dev->loadptr);
+
+			dev->loadptr += 2;
+			if (dev->loadptr > LOAD_ZONE_END)
+				dev->loadptr = LOAD_ZONE_START;
+		}
+
+		if (user)
+			kfree(ptr);
+
+		return errstat ? errstat : len;
+	default:
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ * demultiplexing of messages
+ *
+ */
+
+/*
+ * Layer-3 demultiplexer: dispatch one board message to the proper
+ * channel / FSM event and free the skb (except TDATA_IND, whose skb
+ * is handed to the upper layer).
+ *
+ * Fix: the MSG_CONN_CONF fallback compared b2->s_refnum against
+ * ST_CALL_INIT; it must test b2->fsm_state like the b1 branch does.
+ */
+void pcbit_l3_receive(struct pcbit_dev * dev, ulong msg,
+		      struct sk_buff * skb,
+		      ushort hdr_len, ushort refnum)
+{
+	struct pcbit_chan *chan;
+	struct sk_buff *skb2;
+	unsigned short len;
+	struct callb_data cbdata;
+	int complete, err;
+	isdn_ctrl ictl;
+#ifdef DEBUG
+	struct msg_fmt * fmsg;
+#endif
+
+	switch(msg) {
+
+	case MSG_TDATA_IND:
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+		chan->r_refnum = skb->data[7];
+		skb_pull(skb, 8);
+
+		dev->dev_if->rcvcallb_skb(dev->id, chan->id, skb);
+
+		if (capi_tdata_resp(chan, &skb2) > 0)
+			pcbit_l2_write(dev, MSG_TDATA_RESP, refnum,
+				       skb2, skb2->len);
+		/* skb now belongs to the upper layer - don't free it */
+		return;
+	case MSG_TDATA_CONF:
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+#ifdef DEBUG
+		if ( (*((ushort *) (skb->data + 2) )) != 0) {
+			printk(KERN_DEBUG "TDATA_CONF error\n");
+		}
+#endif
+#ifdef BLOCK_TIMER
+		/* a CONF arrived while saturated: disarm the watchdog */
+		if (chan->queued == MAX_QUEUED) {
+			del_timer(&chan->block_timer);
+			chan->block_timer.function = NULL;
+		}
+
+#endif
+		chan->queued--;
+
+		ictl.driver = dev->id;
+		ictl.command = ISDN_STAT_BSENT;
+		ictl.arg = chan->id;
+		dev->dev_if->statcallb(&ictl);
+		break;
+
+	case MSG_CONN_IND:
+		/*
+		 * channel: 1st not used will do
+		 * if both are used we're in trouble
+		 */
+
+		if (!dev->b1->fsm_state)
+			chan = dev->b1;
+		else if (!dev->b2->fsm_state)
+			chan = dev->b2;
+		else {
+			printk(KERN_INFO
+			       "Incoming connection: no channels available");
+
+			if ((len = capi_disc_req(*(ushort*)(skb->data), &skb2, CAUSE_NOCHAN)) > 0)
+				pcbit_l2_write(dev, MSG_DISC_REQ, refnum, skb2, len);
+			break;
+		}
+
+		cbdata.data.setup.CalledPN = NULL;
+		cbdata.data.setup.CallingPN = NULL;
+
+		capi_decode_conn_ind(chan, skb, &cbdata);
+		cbdata.type = EV_NET_SETUP;
+
+		pcbit_fsm_event(dev, chan, EV_NET_SETUP, NULL);
+
+		/* accept only if the calling number is in the MSN list */
+		if (pcbit_check_msn(dev, cbdata.data.setup.CallingPN))
+			pcbit_fsm_event(dev, chan, EV_USR_PROCED_REQ, &cbdata);
+		else
+			pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
+
+		if (cbdata.data.setup.CalledPN)
+			kfree(cbdata.data.setup.CalledPN);
+		if (cbdata.data.setup.CallingPN)
+			kfree(cbdata.data.setup.CallingPN);
+		break;
+
+	case MSG_CONN_CONF:
+		/*
+		 * We should be able to find the channel by the message
+		 * reference number. The current version of the firmware
+		 * doesn't sent the ref number correctly.
+		 */
+#ifdef DEBUG
+		printk(KERN_DEBUG "refnum=%04x b1=%04x b2=%04x\n", refnum,
+		       dev->b1->s_refnum,
+		       dev->b2->s_refnum);
+#endif
+#if 0
+		if (dev->b1->s_refnum == refnum)
+			chan = dev->b1;
+		else {
+
+			if (dev->b2->s_refnum == refnum)
+				chan = dev->b2;
+			else {
+				chan = NULL;
+				printk(KERN_WARNING "Connection Confirm - refnum doesn't match chan\n");
+				break;
+			}
+		}
+#else
+		/* We just try to find a channel in the right state */
+
+		if (dev->b1->fsm_state == ST_CALL_INIT)
+			chan = dev->b1;
+		else {
+			/* bug fix: was testing s_refnum instead of
+			 * fsm_state on the second channel */
+			if (dev->b2->fsm_state == ST_CALL_INIT)
+				chan = dev->b2;
+			else {
+				chan = NULL;
+				printk(KERN_WARNING "Connection Confirm - no channel in Call Init state\n");
+				break;
+			}
+		}
+#endif
+		if (capi_decode_conn_conf(chan, skb, &complete)) {
+			printk(KERN_DEBUG "conn_conf indicates error\n");
+			pcbit_fsm_event(dev, chan, EV_ERROR, NULL);
+		}
+		else
+			if (complete)
+				pcbit_fsm_event(dev, chan, EV_NET_CALL_PROC, NULL);
+			else
+				pcbit_fsm_event(dev, chan, EV_NET_SETUP_ACK, NULL);
+		break;
+	case MSG_CONN_ACTV_IND:
+
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (capi_decode_conn_actv_ind(chan, skb)) {
+			printk("error in capi_decode_conn_actv_ind\n");
+			/* pcbit_fsm_event(dev, chan, EV_ERROR, NULL); */
+			break;
+		}
+		chan->r_refnum = refnum;
+		pcbit_fsm_event(dev, chan, EV_NET_CONN, NULL);
+		break;
+	case MSG_CONN_ACTV_CONF:
+
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (capi_decode_conn_actv_conf(chan, skb) == 0)
+			pcbit_fsm_event(dev, chan, EV_NET_CONN_ACK, NULL);
+
+		else
+			printk(KERN_DEBUG "decode_conn_actv_conf failed\n");
+		break;
+
+	case MSG_SELP_CONF:
+
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (!(err = capi_decode_sel_proto_conf(chan, skb)))
+			pcbit_fsm_event(dev, chan, EV_NET_SELP_RESP, NULL);
+		else {
+			/* Error */
+			printk("error %d - capi_decode_sel_proto_conf\n", err);
+		}
+		break;
+	case MSG_ACT_TRANSP_CONF:
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (!capi_decode_actv_trans_conf(chan, skb))
+			pcbit_fsm_event(dev, chan, EV_NET_ACTV_RESP, NULL);
+		break;
+
+	case MSG_DISC_IND:
+
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (!capi_decode_disc_ind(chan, skb))
+			pcbit_fsm_event(dev, chan, EV_NET_DISC, NULL);
+		else
+			printk(KERN_WARNING "capi_decode_disc_ind - error\n");
+		break;
+	case MSG_DISC_CONF:
+		if (!(chan = capi_channel(dev, skb))) {
+			printk(KERN_WARNING
+			       "CAPI header: unknown channel id\n");
+			break;
+		}
+
+		if (!capi_decode_disc_ind(chan, skb))
+			pcbit_fsm_event(dev, chan, EV_NET_RELEASE, NULL);
+		else
+			printk(KERN_WARNING "capi_decode_disc_conf - error\n");
+		break;
+	case MSG_INFO_IND:
+#ifdef DEBUG
+		printk(KERN_DEBUG "received Info Indication - discarded\n");
+#endif
+		break;
+#ifdef DEBUG
+	case MSG_DEBUG_188:
+		capi_decode_debug_188(skb->data, skb->len);
+		break;
+
+	default:
+		printk(KERN_DEBUG "pcbit_l3_receive: unknown message %08lx\n",
+		       msg);
+		fmsg = (struct msg_fmt *) &msg;
+		printk(KERN_DEBUG "cmd=%02x sub=%02x\n",
+		       fmsg->cmd, fmsg->scmd);
+		break;
+#endif
+	}
+
+	skb->free = 1;
+
+	kfree_skb(skb, FREE_READ);
+
+}
+
+/*
+ * Single statbuf
+ * should be a statbuf per device
+ */
+
+/* circular status-message buffer: [stat_st, stat_end) is unread data */
+static char statbuf[STATBUF_LEN];
+static int stat_st = 0;
+static int stat_end = 0;
+
+
+/* copy to user space (memcpy_tofs) or kernel space depending on flag */
+#define memcpy_to_COND(flag, d, s, len) \
+(flag ? memcpy_tofs(d, s, len) : memcpy(d, s, len))
+
+
+/*
+ * Read up to len bytes of driver status text out of the circular
+ * statbuf into buf (user or kernel space).  Returns the number of
+ * bytes copied.
+ *
+ * Fix: in the wrap-around case the second chunk was copied to the
+ * start of buf, overwriting the first chunk, instead of being
+ * appended after it.
+ */
+int pcbit_stat(u_char* buf, int len, int user)
+{
+	int stat_count;
+	stat_count = stat_end - stat_st;
+
+	if (stat_count < 0)
+		stat_count = STATBUF_LEN - stat_st + stat_end;
+
+	/* FIXME: should we sleep and wait for more cookies ? */
+	if (len > stat_count)
+		len = stat_count;
+
+	if (stat_st < stat_end)
+	{
+		/* contiguous unread region */
+		memcpy_to_COND(user, buf, statbuf + stat_st, len);
+		stat_st += len;
+	}
+	else
+	{
+		if (len > STATBUF_LEN - stat_st)
+		{
+			/* wraps: tail of statbuf, then its head,
+			 * appended after the first chunk in buf */
+			memcpy_to_COND(user, buf, statbuf + stat_st,
+				       STATBUF_LEN - stat_st);
+			memcpy_to_COND(user, buf + (STATBUF_LEN - stat_st),
+				       statbuf,
+				       len - (STATBUF_LEN - stat_st));
+
+			stat_st = len - (STATBUF_LEN - stat_st);
+		}
+		else
+		{
+			memcpy_to_COND(user, buf, statbuf + stat_st,
+				       len);
+
+			stat_st += len;
+
+			if (stat_st == STATBUF_LEN)
+				stat_st = 0;
+		}
+	}
+
+	if (stat_st == stat_end)
+		stat_st = stat_end = 0;
+
+	return len;
+}
+
+/*
+ * Append str to the circular statbuf and notify the upper layer that
+ * status data is available (ISDN_STAT_STAVAIL).
+ *
+ * Fix: the old loop ran i from stat_end and indexed both str[i] and
+ * statbuf[i], skipping the first stat_end characters of str and
+ * writing past the ring position it was advancing.  Copy str from
+ * its start into the ring at stat_end instead.
+ */
+static void pcbit_logstat(struct pcbit_dev *dev, char *str)
+{
+	int i;
+	int len = strlen(str);
+	isdn_ctrl ictl;
+
+	for (i=0; i<len; i++)
+	{
+		statbuf[stat_end] = str[i];
+		stat_end = (stat_end + 1) % STATBUF_LEN;
+		/* ring full: drop the oldest byte */
+		if (stat_end == stat_st)
+			stat_st = (stat_st + 1) % STATBUF_LEN;
+	}
+
+	ictl.command=ISDN_STAT_STAVAIL;
+	ictl.driver=dev->id;
+	ictl.arg=len;
+	dev->dev_if->statcallb(&ictl);
+}
+
+extern char * isdn_state_table[];
+extern char * strisdnevent(unsigned short);
+
+
+/*
+ * Log an FSM transition (initial state i, event ev, final state f)
+ * for one channel: printed when DEBUG and always fed to the status
+ * ring via pcbit_logstat().
+ */
+void pcbit_state_change(struct pcbit_dev * dev, struct pcbit_chan * chan,
+			unsigned short i, unsigned short ev, unsigned short f)
+{
+	char buf[256];
+
+	sprintf(buf, "change on device: %d channel:%d\n%s -> %s -> %s\n",
+		dev->id, chan->id,
+		isdn_state_table[i], strisdnevent(ev), isdn_state_table[f]
+		);
+
+#ifdef DEBUG
+	printk("%s", buf);
+#endif
+
+	pcbit_logstat(dev, buf);
+}
+
+/*
+ * Timer callback: the board did not signal "running" in time;
+ * wake the sleeper in set_protocol_running() so it can report failure.
+ */
+static void set_running_timeout(unsigned long ptr)
+{
+	struct pcbit_dev * dev;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "set_running_timeout\n");
+#endif
+	dev = (struct pcbit_dev *) ptr;
+
+	wake_up_interruptible(&dev->set_running_wq);
+}
+
+/*
+ * Kick the downloaded firmware and wait (with timeout) for it to
+ * enter the running state; report ISDN_STAT_RUN or ISDN_STAT_STOP to
+ * the upper layer.  Returns 0 on success, -EL2HLT on failure.
+ *
+ * Fix: the failure path stored 0x40 into shared memory with a plain
+ * pointer write; use writeb() like every other sh_mem access.
+ */
+static int set_protocol_running(struct pcbit_dev * dev)
+{
+	isdn_ctrl ctl;
+
+	init_timer(&dev->set_running_timer);
+
+	dev->set_running_timer.function = &set_running_timeout;
+	dev->set_running_timer.data = (ulong) dev;
+	dev->set_running_timer.expires = jiffies + SET_RUN_TIMEOUT;
+
+	/* kick it */
+
+	dev->l2_state = L2_STARTING;
+
+	writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)),
+	       dev->sh_mem + BANK4);
+
+	add_timer(&dev->set_running_timer);
+
+	/* woken either by the interrupt handler (running) or timeout */
+	interruptible_sleep_on(&dev->set_running_wq);
+
+	del_timer(&dev->set_running_timer);
+
+	if (dev->l2_state == L2_RUNNING)
+	{
+		printk(KERN_DEBUG "pcbit: running\n");
+
+		dev->unack_seq = dev->send_seq;
+
+		dev->writeptr = dev->sh_mem;
+		dev->readptr = dev->sh_mem + BANK2;
+
+		/* tell the good news to the upper layer */
+		ctl.driver = dev->id;
+		ctl.command = ISDN_STAT_RUN;
+
+		dev->dev_if->statcallb(&ctl);
+	}
+	else
+	{
+		printk(KERN_DEBUG "pcbit: initialization failed\n");
+		printk(KERN_DEBUG "pcbit: firmware not loaded\n");
+
+		dev->l2_state = L2_DOWN;
+
+#ifdef DEBUG
+		printk(KERN_DEBUG "Bank3 = %02x\n",
+		       readb(dev->sh_mem + BANK3));
+#endif
+		writeb(0x40U, dev->sh_mem + BANK4);
+
+		/* warn the upper layer */
+		ctl.driver = dev->id;
+		ctl.command = ISDN_STAT_STOP;
+
+		dev->dev_if->statcallb(&ctl);
+
+		return -EL2HLT;	/* Level 2 halted */
+	}
+
+	return 0;
+}
+
+/*
+ * Driver-private ioctl dispatcher (firmware download protocol,
+ * shared-memory peek/poke, start/stop).  ctl->arg selects the
+ * operation; ctl->num carries a struct pcbit_ioctl payload.
+ * Returns 0 or a negative errno.
+ *
+ * Fix: "unkown" typo in the default-case message; stray ';' after
+ * the switch removed.
+ */
+static int pcbit_ioctl(isdn_ctrl* ctl)
+{
+	struct pcbit_dev * dev;
+	struct pcbit_ioctl *cmd;
+
+	dev = finddev(ctl->driver);
+
+	if (!dev)
+	{
+		printk(KERN_DEBUG "pcbit_ioctl: unknown device\n");
+		return -ENODEV;
+	}
+
+	cmd = (struct pcbit_ioctl *) ctl->num;
+
+	switch(ctl->arg) {
+	case PCBIT_IOCTL_GETSTAT:
+		cmd->info.l2_status = dev->l2_state;
+		break;
+
+	case PCBIT_IOCTL_STRLOAD:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+
+		/* reset the sequence counters and ring pointers */
+		dev->unack_seq = dev->send_seq = dev->rcv_seq = 0;
+
+		dev->writeptr = dev->sh_mem;
+		dev->readptr = dev->sh_mem + BANK2;
+
+		dev->l2_state = L2_LOADING;
+		break;
+
+	case PCBIT_IOCTL_LWMODE:
+		if (dev->l2_state != L2_LOADING)
+			return -EINVAL;
+
+		dev->l2_state = L2_LWMODE;
+		break;
+
+	case PCBIT_IOCTL_FWMODE:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+		dev->loadptr = LOAD_ZONE_START;
+		dev->l2_state = L2_FWMODE;
+
+		break;
+	case PCBIT_IOCTL_ENDLOAD:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+		dev->l2_state = L2_DOWN;
+		break;
+
+	case PCBIT_IOCTL_SETBYTE:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+
+		/* check addr */
+		if (cmd->info.rdp_byte.addr > BANK4)
+			return -EFAULT;
+
+		writeb(cmd->info.rdp_byte.value, dev->sh_mem + cmd->info.rdp_byte.addr);
+		break;
+	case PCBIT_IOCTL_GETBYTE:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+
+		/* check addr */
+
+		if (cmd->info.rdp_byte.addr > BANK4)
+		{
+			printk("getbyte: invalid addr %04x\n", cmd->info.rdp_byte.addr);
+			return -EFAULT;
+		}
+
+		cmd->info.rdp_byte.value = readb(dev->sh_mem + cmd->info.rdp_byte.addr);
+		break;
+	case PCBIT_IOCTL_RUNNING:
+		if (dev->l2_state == L2_RUNNING)
+			return -EBUSY;
+		return set_protocol_running(dev);
+	case PCBIT_IOCTL_WATCH188:
+		if (dev->l2_state != L2_LOADING)
+			return -EINVAL;
+		pcbit_l2_write(dev, MSG_WATCH188, 0x0001, NULL, 0);
+		break;
+	case PCBIT_IOCTL_PING188:
+		if (dev->l2_state != L2_LOADING)
+			return -EINVAL;
+		pcbit_l2_write(dev, MSG_PING188_REQ, 0x0001, NULL, 0);
+		break;
+	case PCBIT_IOCTL_APION:
+		if (dev->l2_state != L2_LOADING)
+			return -EINVAL;
+		pcbit_l2_write(dev, MSG_API_ON, 0x0001, NULL, 0);
+		break;
+	case PCBIT_IOCTL_STOP:
+		dev->l2_state = L2_DOWN;
+		writeb(0x40, dev->sh_mem + BANK4);
+		dev->rcv_seq = 0;
+		dev->send_seq = 0;
+		dev->unack_seq = 0;
+		break;
+	default:
+		printk("error: unknown ioctl\n");
+		break;
+	}
+	return 0;
+}
+
+/*
+ * MSN list handling
+ *
+ * if null reject all calls
+ * if first entry has null MSN accept all calls
+ */
+
+/*
+ * Release the whole MSN list of a device and leave it empty.
+ */
+static void pcbit_clear_msn(struct pcbit_dev *dev)
+{
+	struct msn_entry *entry, *next;
+
+	entry = dev->msn_list;
+	dev->msn_list = NULL;
+
+	while (entry != NULL)
+	{
+		next = entry->next;
+		kfree(entry);
+		entry = next;
+	}
+}
+
+/*
+ * Parse a comma-separated MSN list and append each entry to
+ * dev->msn_list.  An empty list string installs a NULL MSN entry,
+ * which means "accept all calls" (see pcbit_check_msn).
+ *
+ * Fixes: NULL dereference when the list was empty (tail scan started
+ * at a NULL head); each MSN was allocated len bytes but terminated at
+ * index len (one-byte overflow) while only len-1 chars were copied
+ * (last digit lost); the scan pointer never skipped the ',' so
+ * multi-entry lists looped forever on a zero-length token; the entry
+ * struct leaked when the msn string allocation failed.
+ */
+static void pcbit_set_msn(struct pcbit_dev *dev, char *list)
+{
+	struct msn_entry *ptr, *back;
+	char *cp, *sp;
+	int len;
+
+	if (strlen(list) == 0) {
+		ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);
+		if (!ptr) {
+			printk(KERN_WARNING "kmalloc failed\n");
+			return;
+		}
+
+		ptr->msn = NULL;	/* wildcard: accept every call */
+
+		ptr->next = dev->msn_list;
+		dev->msn_list = ptr;
+
+		return;
+	}
+
+	/* find the current tail, if any */
+	back = dev->msn_list;
+	if (back)
+		for (; back->next; back=back->next);
+
+	sp = list;
+
+	do {
+		cp=strchr(sp, ',');
+		if (cp)
+			len = cp - sp;
+		else
+			len = strlen(sp);
+
+		ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);
+
+		if (!ptr) {
+			printk(KERN_WARNING "kmalloc failed\n");
+			return;
+		}
+		ptr->next = NULL;
+
+		/* len digits plus NUL terminator */
+		ptr->msn = kmalloc(len + 1, GFP_ATOMIC);
+		if (!ptr->msn) {
+			printk(KERN_WARNING "kmalloc failed\n");
+			kfree(ptr);
+			return;
+		}
+
+		memcpy(ptr->msn, sp, len);
+		ptr->msn[len] = 0;
+
+#ifdef DEBUG
+		printk(KERN_DEBUG "msn: %s\n", ptr->msn);
+#endif
+		if (dev->msn_list == NULL)
+			dev->msn_list = ptr;
+		else
+			back->next = ptr;
+		back = ptr;
+		sp += len;
+		if (cp)
+			sp++;	/* skip the ',' */
+	} while(cp);
+}
+
+/*
+ * check if we do signal or reject an incoming call
+ *
+ * Returns 1 to accept (list contains a NULL wildcard entry or an
+ * exact match for msn), 0 to reject (including the empty-list case).
+ */
+static int pcbit_check_msn(struct pcbit_dev *dev, char *msn)
+{
+	struct msn_entry *ptr;
+
+	for (ptr=dev->msn_list; ptr; ptr=ptr->next) {
+
+		/* NULL entry = accept all calls */
+		if (ptr->msn == NULL)
+			return 1;
+
+		if (strcmp(ptr->msn, msn) == 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+
+
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * DSS.1 Finite State Machine
+ * base: ITU-T Rec Q.931
+ */
+
+/*
+ * TODO: complete the FSM
+ * move state/event descriptions to a user space logger
+ */
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <linux/types.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/tqueue.h>
+#include <linux/skbuff.h>
+
+#include <linux/timer.h>
+#include <asm/io.h>
+
+#include <linux/isdnif.h>
+
+#include "pcbit.h"
+#include "edss1.h"
+#include "layer2.h"
+#include "callbacks.h"
+
+
+extern void pcbit_state_change(struct pcbit_dev *, struct pcbit_chan *,
+ unsigned short i, unsigned short ev,
+ unsigned short f);
+
+extern struct pcbit_dev * dev_pcbit[MAX_PCBIT_CARDS];
+
+/*
+ * Human-readable names for the ISDN call states, indexed by the
+ * ST_* numbers in edss1.h (gaps are "NOT DEFINED").  Used only for
+ * logging in pcbit_state_change().
+ */
+char * isdn_state_table[] = {
+	"Closed",
+	"Call initiated",
+	"Overlap sending",
+	"Outgoing call proceeding",
+	"NOT DEFINED",
+	"Call delivered",
+	"Call present",
+	"Call received",
+	"Connect request",
+	"Incoming call proceeding",
+	"Active",
+	"Disconnect request",
+	"Disconnect indication",
+	"NOT DEFINED",
+	"NOT DEFINED",
+	"Suspend request",
+	"NOT DEFINED",
+	"Resume request",
+	"NOT DEFINED",
+	"Release Request",
+	"NOT DEFINED",
+	"NOT DEFINED",
+	"NOT DEFINED",
+	"NOT DEFINED",
+	"NOT DEFINED",
+	"Overlap receiving",
+	"Select protocol on B-Channel",
+	"Activate B-channel protocol"
+};
+
+#ifdef DEBUG_ERRS
+/*
+ * Q.850 cause values with their standard descriptions, used for
+ * debug output only.  Fixed misspelled descriptions ("enuiry",
+ * "auhorized").
+ */
+static
+struct CauseValue {
+	byte nr;
+	char *descr;
+} cvlist[]={
+	{0x01,"Unallocated (unassigned) number"},
+	{0x02,"No route to specified transit network"},
+	{0x03,"No route to destination"},
+	{0x04,"Send special information tone"},
+	{0x05,"Misdialled trunk prefix"},
+	{0x06,"Channel unacceptable"},
+	{0x07,"Channel awarded and being delivered in an established channel"},
+	{0x08,"Preemption"},
+	{0x09,"Preemption - circuit reserved for reuse"},
+	{0x10,"Normal call clearing"},
+	{0x11,"User busy"},
+	{0x12,"No user responding"},
+	{0x13,"No answer from user (user alerted)"},
+	{0x14,"Subscriber absent"},
+	{0x15,"Call rejected"},
+	{0x16,"Number changed"},
+	{0x1a,"non-selected user clearing"},
+	{0x1b,"Destination out of order"},
+	{0x1c,"Invalid number format (address incomplete)"},
+	{0x1d,"Facility rejected"},
+	{0x1e,"Response to Status enquiry"},
+	{0x1f,"Normal, unspecified"},
+	{0x22,"No circuit/channel available"},
+	{0x26,"Network out of order"},
+	{0x27,"Permanent frame mode connection out-of-service"},
+	{0x28,"Permanent frame mode connection operational"},
+	{0x29,"Temporary failure"},
+	{0x2a,"Switching equipment congestion"},
+	{0x2b,"Access information discarded"},
+	{0x2c,"Requested circuit/channel not available"},
+	{0x2e,"Precedence call blocked"},
+	{0x2f,"Resource unavailable, unspecified"},
+	{0x31,"Quality of service unavailable"},
+	{0x32,"Requested facility not subscribed"},
+	{0x35,"Outgoing calls barred within CUG"},
+	{0x37,"Incoming calls barred within CUG"},
+	{0x39,"Bearer capability not authorized"},
+	{0x3a,"Bearer capability not presently available"},
+	{0x3e,"Inconsistency in designated outgoing access information and subscriber class"},
+	{0x3f,"Service or option not available, unspecified"},
+	{0x41,"Bearer capability not implemented"},
+	{0x42,"Channel type not implemented"},
+	{0x43,"Requested facility not implemented"},
+	{0x44,"Only restricted digital information bearer capability is available"},
+	{0x4f,"Service or option not implemented"},
+	{0x51,"Invalid call reference value"},
+	{0x52,"Identified channel does not exist"},
+	{0x53,"A suspended call exists, but this call identity does not"},
+	{0x54,"Call identity in use"},
+	{0x55,"No call suspended"},
+	{0x56,"Call having the requested call identity has been cleared"},
+	{0x57,"User not member of CUG"},
+	{0x58,"Incompatible destination"},
+	{0x5a,"Non-existent CUG"},
+	{0x5b,"Invalid transit network selection"},
+	{0x5f,"Invalid message, unspecified"},
+	{0x60,"Mandatory information element is missing"},
+	{0x61,"Message type non-existent or not implemented"},
+	{0x62,"Message not compatible with call state or message type non-existent or not implemented"},
+	{0x63,"Information element/parameter non-existent or not implemented"},
+	{0x64,"Invalid information element contents"},
+	{0x65,"Message not compatible with call state"},
+	{0x66,"Recovery on timer expiry"},
+	{0x67,"Parameter non-existent or not implemented - passed on"},
+	{0x6e,"Message with unrecognized parameter discarded"},
+	{0x6f,"Protocol error, unspecified"},
+	{0x7f,"Interworking, unspecified"}
+};
+
+#endif
+
+/*
+ * FSM event numbers with debug descriptions, terminated by ev == 0.
+ * Fixed misspellings ("proceding", "acknowlegde", "aknowlegde").
+ */
+static struct isdn_event_desc {
+	unsigned short ev;
+	char * desc;
+} isdn_event_table [] = {
+	{EV_USR_SETUP_REQ,	"CC->L3: Setup Request"},
+	{EV_USR_SETUP_RESP,	"CC->L3: Setup Response"},
+	{EV_USR_PROCED_REQ,	"CC->L3: Proceeding Request"},
+	{EV_USR_RELEASE_REQ,	"CC->L3: Release Request"},
+
+	{EV_NET_SETUP,		"NET->TE: setup "},
+	{EV_NET_CALL_PROC,	"NET->TE: call proceeding"},
+	{EV_NET_SETUP_ACK,	"NET->TE: setup acknowledge (more info needed)"},
+	{EV_NET_CONN,		"NET->TE: connect"},
+	{EV_NET_CONN_ACK,	"NET->TE: connect acknowledge"},
+	{EV_NET_DISC,		"NET->TE: disconnect indication"},
+	{EV_NET_RELEASE,	"NET->TE: release"},
+	{EV_NET_RELEASE_COMP,	"NET->TE: release complete"},
+	{EV_NET_SELP_RESP,	"Board: Select B-channel protocol ack"},
+	{EV_NET_ACTV_RESP,	"Board: Activate B-channel protocol ack"},
+	{EV_TIMER,		"Timeout"},
+	{0, "NULL"}
+};
+
+/*
+ * Map an FSM event number to its description; the table's 0-entry
+ * ("NULL") is returned for unknown events.
+ */
+char * strisdnevent(ushort ev)
+{
+	struct isdn_event_desc * entry;
+
+	for (entry = isdn_event_table; entry->ev; entry++)
+		if (entry->ev == ev)
+			break;
+
+	return entry->desc;
+}
+
+/*
+ * Euro ISDN finite state machine
+ */
+
+/* Guard timeout (seconds) armed on entering each state;
+ * table terminated by init == 0xff. */
+static struct fsm_timer_entry fsm_timers[] = {
+	{ST_CALL_PROC, 10},
+	{ST_DISC_REQ, 2},
+	{ST_ACTIVE_SELP, 5},
+	{ST_ACTIVE_ACTV, 5},
+	{ST_INCM_PROC, 10},
+	{ST_CONN_REQ, 2},
+	{0xff, 0}
+};
+
+/*
+ * Transition table: (initial state, event) -> final state + optional
+ * callback; scanned linearly, terminated by init == 0xff.
+ */
+static struct fsm_entry fsm_table[] = {
+/* Connect Phase */
+	/* Outgoing */
+	{ST_NULL, ST_CALL_INIT, EV_USR_SETUP_REQ, cb_out_1},
+
+	{ST_CALL_INIT, ST_OVER_SEND, EV_NET_SETUP_ACK, cb_notdone},
+	{ST_CALL_INIT, ST_CALL_PROC, EV_NET_CALL_PROC, NULL},
+	{ST_CALL_INIT, ST_NULL, EV_NET_DISC, cb_out_2},
+
+	{ST_CALL_PROC, ST_ACTIVE_SELP, EV_NET_CONN, cb_out_2},
+	{ST_CALL_PROC, ST_NULL, EV_NET_DISC, cb_disc_1},
+	{ST_CALL_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+
+	/* Incoming */
+	{ST_NULL, ST_CALL_PRES, EV_NET_SETUP, NULL},
+
+	{ST_CALL_PRES, ST_INCM_PROC, EV_USR_PROCED_REQ, cb_in_1},
+	{ST_CALL_PRES, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+
+	{ST_INCM_PROC, ST_CONN_REQ, EV_USR_SETUP_RESP, cb_in_2},
+	{ST_INCM_PROC, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+
+	{ST_CONN_REQ, ST_ACTIVE_SELP, EV_NET_CONN_ACK, cb_in_3},
+
+	/* Active */
+	{ST_ACTIVE, ST_NULL, EV_NET_DISC, cb_disc_1},
+	{ST_ACTIVE, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+	{ST_ACTIVE, ST_NULL, EV_NET_RELEASE, cb_disc_3},
+
+	/* Disconnect */
+
+	{ST_DISC_REQ, ST_NULL, EV_NET_DISC, cb_disc_1},
+	{ST_DISC_REQ, ST_NULL, EV_NET_RELEASE, cb_disc_3},
+
+	/* protocol selection */
+	{ST_ACTIVE_SELP, ST_ACTIVE_ACTV, EV_NET_SELP_RESP, cb_selp_1},
+	{ST_ACTIVE_SELP, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+
+	{ST_ACTIVE_ACTV, ST_ACTIVE, EV_NET_ACTV_RESP, cb_open},
+	{ST_ACTIVE_ACTV, ST_DISC_REQ, EV_USR_RELEASE_REQ, cb_disc_2},
+
+	/* Timers */
+	{ST_CALL_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2},
+	{ST_DISC_REQ, ST_NULL, EV_TIMER, cb_disc_3},
+	{ST_ACTIVE_SELP, ST_DISC_REQ, EV_TIMER, cb_disc_2},
+	{ST_ACTIVE_ACTV, ST_DISC_REQ, EV_TIMER, cb_disc_2},
+	{ST_INCM_PROC, ST_DISC_REQ, EV_TIMER, cb_disc_2},
+	{ST_CONN_REQ, ST_CONN_REQ, EV_TIMER, cb_in_2},
+
+	{0xff, 0, 0, NULL}
+};
+
+
+/*
+ * Per-channel guard timer expired: feed EV_TIMER into the FSM.
+ * Fix: "unkown" typo in the warning message.
+ */
+static void pcbit_fsm_timer(unsigned long data)
+{
+	struct pcbit_dev *dev;
+	struct pcbit_chan *chan;
+
+	chan = (struct pcbit_chan *) data;
+
+	/* function == NULL marks the timer as idle */
+	del_timer(&chan->fsm_timer);
+	chan->fsm_timer.function = NULL;
+
+	dev = chan2dev(chan);
+
+	if (dev == NULL) {
+		printk(KERN_WARNING "pcbit: timer for unknown device\n");
+		return;
+	}
+
+	pcbit_fsm_event(dev, chan, EV_TIMER, NULL);
+}
+
+
+/*
+ * Drive one FSM transition for a channel: look up (state, event) in
+ * fsm_table, update the state, (re)arm the per-state guard timer and
+ * finally run the transition callback outside the cli() section.
+ *
+ * Fix: the "no matching transition" path returned while interrupts
+ * were still disabled (cli() without restore_flags()).
+ */
+void pcbit_fsm_event(struct pcbit_dev *dev, struct pcbit_chan *chan,
+		     unsigned short event, struct callb_data *data)
+{
+	struct fsm_entry * action;
+	struct fsm_timer_entry *tentry;
+	unsigned long flags;
+
+	save_flags(flags);
+	cli();
+
+	/* cancel a pending guard timer for the old state */
+	if (chan->fsm_timer.function) {
+		del_timer(&chan->fsm_timer);
+		chan->fsm_timer.function = NULL;
+	}
+
+	for (action = fsm_table; action->init != 0xff; action++)
+		if (action->init == chan->fsm_state && action->event == event)
+			break;
+
+
+	if (action->init == 0xff) {
+		restore_flags(flags);
+		printk(KERN_DEBUG "fsm error: event %x on state %x\n",
+		       event, chan->fsm_state);
+		return;
+	}
+
+	chan->fsm_state = action->final;
+
+	pcbit_state_change(dev, chan, action->init, event, action->final);
+
+	/* arm the guard timer for the new state, if it has one */
+	for (tentry = fsm_timers; tentry->init != 0xff; tentry++)
+		if (tentry->init == chan->fsm_state)
+			break;
+
+	if (tentry->init != 0xff) {
+		init_timer(&chan->fsm_timer);
+		chan->fsm_timer.function = &pcbit_fsm_timer;
+		chan->fsm_timer.data = (ulong) chan;
+		chan->fsm_timer.expires = jiffies + tentry->timeout * HZ;
+		add_timer(&chan->fsm_timer);
+	}
+
+	restore_flags(flags);
+
+	if (action->callb)
+		action->callb(dev, chan, data);
+
+}
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * DSS.1 module definitions
+ */
+
+#ifndef EDSS1_H
+#define EDSS1_H
+
+/* ISDN states */
+
+#define ST_NULL 0
+#define ST_CALL_INIT 1 /* Call initiated */
+#define ST_OVER_SEND 2 /* Overlap sending - Requests More Info 4 call */
+#define ST_CALL_PROC 3 /* Call Procceding */
+#define ST_CALL_DELV 4
+#define ST_CALL_PRES 6 /* Call Present - Received CONN.IND */
+#define ST_CALL_RECV 7 /* Alerting sent */
+#define ST_CONN_REQ 8 /* Answered - wainting 4 CONN.CONF */
+#define ST_INCM_PROC 9
+#define ST_ACTIVE 10
+#define ST_DISC_REQ 11
+#define ST_DISC_IND 12
+#define ST_SUSP_REQ 15
+#define ST_RESM_REQ 17
+#define ST_RELS_REQ 19
+#define ST_OVER_RECV 25
+
+#define ST_ACTIVE_SELP 26 /* Select protocol on B-Channel */
+#define ST_ACTIVE_ACTV 27 /* Activate B-channel protocol */
+
+#define MAX_STATE ST_ACTIVE_ACTV
+
+#define EV_NULL 0
+#define EV_USR_SETUP_REQ 1
+#define EV_USR_SETUP_RESP 2
+#define EV_USR_PROCED_REQ 3
+#define EV_USR_RELEASE_REQ 4
+#define EV_USR_REJECT_REQ 4
+
+#define EV_NET_SETUP 16
+#define EV_NET_CALL_PROC 17
+#define EV_NET_SETUP_ACK 18
+#define EV_NET_CONN 19
+#define EV_NET_CONN_ACK 20
+
+#define EV_NET_SELP_RESP 21
+#define EV_NET_ACTV_RESP 22
+
+#define EV_NET_DISC 23
+#define EV_NET_RELEASE 24
+#define EV_NET_RELEASE_COMP 25
+
+#define EV_TIMER 26
+#define EV_ERROR 32
+
+/*
+ * Cause values
+ * only the ones we use
+ */
+
+#define CAUSE_NORMAL 0x10U
+#define CAUSE_NOCHAN 0x22U
+
+/* Event payload handed to FSM transition callbacks. */
+struct callb_data {
+	unsigned short type;
+	union {
+		struct ConnInfo {
+			char *CalledPN;
+			char *CallingPN;
+		} setup;
+		unsigned short cause;
+	} data;
+};
+
+/* One FSM transition: (init state, event) -> final state + callback. */
+struct fsm_entry {
+	unsigned short init;
+	unsigned short final;
+	unsigned short event;
+	void (*callb)(struct pcbit_dev *, struct pcbit_chan *, struct callb_data*);
+};
+
+/* Guard timer armed on entering state 'init'; tables end at init == 0xff. */
+struct fsm_timer_entry {
+	unsigned short init;
+	unsigned long timeout;	/* in seconds */
+};
+
+
+extern void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *,
+ unsigned short event, struct callb_data *);
+#endif
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * PCBIT-D low-layer interface
+ */
+
+/*
+ * Based on documentation provided by Inesc:
+ * - "Interface com bus do PC para o PCBIT e PCBIT-D", Inesc, Jan 93
+ */
+
+/*
+ * TODO: better handling of errors
+ * re-write/remove debug printks
+ */
+
+#define __NO_VERSION__
+
+
+#ifdef MODULE
+#define INCLUDE_INLINE_FUNCS
+#endif
+
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/tqueue.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+
+#include <linux/isdnif.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+
+#include "pcbit.h"
+#include "layer2.h"
+#include "edss1.h"
+
+#undef DEBUG_FRAG
+
+
+
+/*
+ * task queue struct
+ *
+ * Deferred-delivery queue: the interrupt path queues per-device
+ * pcbit_deliver() work here; run_delivery flushes it from the
+ * immediate bottom half.
+ */
+struct tq_struct *tq_delivery=NULL;
+
+/* bottom-half entry point: run every task queued on tq_delivery */
+static void do_pcbit_bh(task_queue *list)
+{
+	run_task_queue(list);
+}
+
+struct tq_struct run_delivery= {
+	0, 0, (void *)(void *) do_pcbit_bh, &tq_delivery,
+};
+
+
+/*
+ * Layer 3 packet demultiplexer
+ * drv.c
+ */
+
+extern void pcbit_l3_receive(struct pcbit_dev * dev, ulong msg,
+ struct sk_buff * skb,
+ ushort hdr_len, ushort refnum);
+
+/*
+ * Prototypes
+ */
+
+void pcbit_deliver(void * data);
+static void pcbit_transmit(struct pcbit_dev * dev);
+
+static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
+static void pcbit_frame_read(struct pcbit_dev * dev, unsigned char read_seq);
+
+static void pcbit_l2_error(struct pcbit_dev *dev);
+static void pcbit_l2_active_conf(struct pcbit_dev *dev, u_char info);
+static void pcbit_l2_err_recover(unsigned long data);
+
+static void pcbit_firmware_bug(struct pcbit_dev * dev);
+
+/*
+ * Called from interrupt context: queue this device's delivery work
+ * and schedule the immediate bottom half to run it.
+ */
+static void pcbit_sched_delivery(struct pcbit_dev *dev)
+{
+	queue_task_irq_off(&dev->qdelivery, &tq_delivery);
+	queue_task_irq_off(&run_delivery, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+
+/*
+ * Called from layer3
+ *
+ * Queue one layer-2 frame (message header plus optional skb payload)
+ * for transmission; kicks pcbit_transmit() immediately when the write
+ * queue was empty.  Returns 0 on success, -1 on error (the skb is
+ * freed in that case).
+ */
+int pcbit_l2_write(struct pcbit_dev * dev, ulong msg, ushort refnum,
+		   struct sk_buff *skb, unsigned short hdr_len)
+
+{
+	struct frame_buf * frame, * ptr;
+	unsigned long flags;
+
+	if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING) {
+		dev_kfree_skb(skb, FREE_WRITE);
+		return -1;
+	}
+
+	if ( (frame = (struct frame_buf *) kmalloc(sizeof(struct frame_buf),
+						   GFP_ATOMIC)) == NULL ) {
+		printk(KERN_WARNING "pcbit_2_write: kmalloc failed\n");
+		dev_kfree_skb(skb, FREE_WRITE);
+		return -1;
+	}
+
+	frame->msg = msg;
+	frame->refnum = refnum;
+	frame->copied = 0;
+	frame->hdr_len = hdr_len;
+
+	if (skb) {
+		frame->dt_len = skb->len - hdr_len;
+		/* NOTE(review): bumping skb->lock for zero-payload frames
+		 * presumably prevents early reclaim - confirm against
+		 * the 2.0 skbuff locking semantics */
+		if (frame->dt_len == 0)
+			skb->lock++;
+	}
+	else
+		frame->dt_len = 0;
+
+	frame->skb = skb;
+
+	frame->next = NULL;
+
+	save_flags(flags);
+	cli();
+
+	if (dev->write_queue == NULL) {
+		dev->write_queue = frame;
+		restore_flags(flags);
+		pcbit_transmit(dev);
+	}
+	else {
+		/* append at the tail of the singly-linked write queue */
+		for(ptr=dev->write_queue; ptr->next; ptr=ptr->next);
+		ptr->next = frame;
+
+		restore_flags(flags);
+	}
+	return 0;
+}
+
+/*
+ * Book-keeping after writing one fragment of 'len' bytes to the board:
+ * advance the send sequence number (mod 8), remember the fragment size
+ * and publish the rcv/send counters in the BANK4 handshake byte.
+ */
+static __inline__ void pcbit_tx_update(struct pcbit_dev *dev, ushort len)
+{
+	u_char info;
+
+	dev->send_seq = (dev->send_seq + 1) % 8;
+
+	dev->fsize[dev->send_seq] = len;
+	info = 0;
+	info |= dev->rcv_seq << 3;
+	info |= dev->send_seq;
+
+	writeb(info, dev->sh_mem + BANK4);
+
+}
+
+/*
+ * called by interrupt service routine or by write_2
+ *
+ * Move (a fragment of) the frame at the head of the write queue into
+ * the board's shared memory.  A frame's first fragment carries the
+ * full level-2/level-3 headers ("type 0"); continuation fragments
+ * carry only a 2-byte TT word with bit 15 set ("type 1").  Requires
+ * free board space (> 16 bytes) and fewer than 7 unacked fragments.
+ * w_busy serializes against reentry from the ISR.
+ */
+static void pcbit_transmit(struct pcbit_dev * dev)
+{
+	struct frame_buf * frame = NULL;
+	unsigned char unacked;
+	int flen;               /* fragment frame length including all headers */
+	int totlen;             /* non-fragmented frame length */
+	int free;
+	int count, cp_len;
+	unsigned long flags;
+	unsigned short tt;
+
+	if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING)
+		return;
+
+	/* fragments sent but not yet acknowledged, mod 8 */
+	unacked = (dev->send_seq + (8 - dev->unack_seq) ) & 0x07;
+
+	if (dev->free > 16 && dev->write_queue && unacked < 7) {
+
+		save_flags(flags);
+		cli();
+
+
+		/* w_busy: only one transmitter at a time */
+		if (!dev->w_busy)
+			dev->w_busy = 1;
+		else
+		{
+			restore_flags(flags);
+			return;
+		}
+
+		restore_flags(flags);
+
+		frame = dev->write_queue;
+		free = dev->free;
+
+		if (frame->copied == 0) {
+
+			/* Type 0 frame */
+
+			struct msg_fmt * msg;
+
+			if (frame->skb)
+				totlen = FRAME_HDR_LEN + PREHDR_LEN + frame->skb->len;
+			else
+				totlen = FRAME_HDR_LEN + PREHDR_LEN;
+
+			flen = MIN(totlen, free);
+
+			msg = (struct msg_fmt *) &(frame->msg);
+
+			/*
+			 * Board level 2 header
+			 */
+
+			pcbit_writew(dev, flen - FRAME_HDR_LEN);
+
+			pcbit_writeb(dev, msg->cpu);
+
+			pcbit_writeb(dev, msg->proc);
+
+			/* TH */
+			pcbit_writew(dev, frame->hdr_len + PREHDR_LEN);
+
+			/* TD */
+			pcbit_writew(dev, frame->dt_len);
+
+
+			/*
+			 * Board level 3 fixed-header
+			 */
+
+			/* LEN = TH */
+			pcbit_writew(dev, frame->hdr_len + PREHDR_LEN);
+
+			/* XX */
+			pcbit_writew(dev, 0);
+
+			/* C + S */
+			pcbit_writeb(dev, msg->cmd);
+			pcbit_writeb(dev, msg->scmd);
+
+			/* NUM */
+			pcbit_writew(dev, frame->refnum);
+
+			count = FRAME_HDR_LEN + PREHDR_LEN;
+		}
+		else {
+			/* Type 1 frame */
+
+			totlen = 2 + (frame->skb->len - frame->copied);
+
+			flen = MIN(totlen, free);
+
+			/* TT: continuation length with bit 15 set */
+			tt = ((ushort) (flen - 2)) | 0x8000U;	/* Type 1 */
+			pcbit_writew(dev, tt);
+
+			count = 2;
+		}
+
+		if (frame->skb) {
+			cp_len = MIN(frame->skb->len - frame->copied,
+				     flen - count);
+
+			memcpy_topcbit(dev, frame->skb->data + frame->copied,
+				       cp_len);
+			frame->copied += cp_len;
+		}
+
+		/* bookeeping */
+		dev->free -= flen;
+		pcbit_tx_update(dev, flen);
+
+		save_flags(flags);
+		cli();
+
+
+		/* whole frame copied: unlink and free it */
+		if (frame->skb == NULL || frame->copied == frame->skb->len) {
+
+			dev->write_queue = frame->next;
+
+			if (frame->skb != NULL) {
+				/* free frame */
+				dev_kfree_skb(frame->skb, FREE_WRITE);
+			}
+
+			kfree(frame);
+		}
+
+		dev->w_busy = 0;
+		restore_flags(flags);
+	}
+#ifdef DEBUG
+	else
+		printk(KERN_DEBUG "unacked %d free %d write_queue %s\n",
+		       unacked, dev->free, dev->write_queue ? "not empty" :
+		       "empty");
+#endif
+}
+
+
+/*
+ * deliver a queued frame to the upper layer
+ * (scheduled by pcbit_sched_delivery, runs outside the ISR)
+ */
+
+void pcbit_deliver(void * data)
+{
+	struct frame_buf *frame;
+	unsigned long flags;
+	struct msg_fmt msg;
+	struct pcbit_dev *dev = (struct pcbit_dev *) data;
+
+	save_flags(flags);
+	cli();
+
+	/* dequeue the first fully reassembled frame, if any */
+	if (!(frame=dev->read_queue)) {
+		restore_flags(flags);
+		return;
+	}
+
+	dev->read_queue = frame->next;
+	restore_flags(flags);
+
+	/* fixed 6-byte header: byte 2 = command, byte 3 = subcommand,
+	   bytes 4-5 = reference number (see skb_pull(…, 6) below) */
+	msg.cpu = 0;
+	msg.proc = 0;
+	msg.cmd = frame->skb->data[2];
+	msg.scmd = frame->skb->data[3];
+
+	/*
+	 * BUGFIX: add the byte offset *before* casting to ushort *.
+	 * The old expression ((ushort *) frame->skb->data + 4) scaled
+	 * the +4 by sizeof(ushort) and read byte offset 8 instead of 4.
+	 */
+	frame->refnum = *((ushort *) (frame->skb->data + 4));
+	frame->msg = *((ulong*) &msg);
+
+	/* strip the fixed header before handing the skb to layer 3 */
+	skb_pull(frame->skb, 6);
+
+	pcbit_l3_receive(dev, frame->msg, frame->skb, frame->hdr_len,
+			 frame->refnum);
+
+	kfree(frame);
+}
+
+/*
+ * Reads BANK 2 & Reassembles
+ *
+ * Reads one fragment from the board's receive bank.  A type 0
+ * fragment starts a new frame (allocates frame_buf + skb); type 1
+ * fragments append to dev->read_frame.  A completely reassembled
+ * frame is moved to dev->read_queue and delivery is scheduled.
+ */
+
+static void pcbit_receive(struct pcbit_dev * dev)
+{
+	unsigned short tt;
+	u_char cpu, proc;
+	struct frame_buf * frame = NULL;
+	unsigned long flags;
+	u_char type1;
+
+	if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING)
+		return;
+
+	/* TT word: bit 15 = type, bits 14-0 = fragment length */
+	tt = pcbit_readw(dev);
+
+	if ((tt & 0x7fffU) > 511) {
+		printk(KERN_INFO "pcbit: invalid frame length -> TT=%04x\n",
+		       tt);
+		pcbit_l2_error(dev);
+		return;
+	}
+
+	if (!(tt & 0x8000U))
+	{ /* Type 0 */
+		type1 = 0;
+
+		if (dev->read_frame) {
+			printk(KERN_DEBUG "pcbit_receive: Type 0 frame and read_frame != NULL\n");
+#if 0
+			pcbit_l2_error(dev);
+			return;
+#else
+			/* discard previous queued frame */
+			if (dev->read_frame->skb) {
+				dev->read_frame->skb->free = 1;
+				kfree_skb(dev->read_frame->skb, FREE_READ);
+			}
+			kfree(dev->read_frame);
+			dev->read_frame = NULL;
+#endif
+		}
+
+		frame = kmalloc(sizeof(struct frame_buf), GFP_ATOMIC);
+
+		if (frame == NULL) {
+			printk(KERN_WARNING "kmalloc failed\n");
+			return;
+		}
+		memset(frame, 0, sizeof(struct frame_buf));
+
+		cpu = pcbit_readb(dev);
+		proc = pcbit_readb(dev);
+
+
+		if (cpu != 0x06 && cpu != 0x02)
+		{
+			printk (KERN_DEBUG "pcbit: invalid cpu value\n");
+			kfree(frame);
+			pcbit_l2_error(dev);
+			return;
+		}
+
+		/*
+		 * we discard cpu & proc on receiving
+		 * but we read it to update the pointer
+		 */
+
+		frame->hdr_len = pcbit_readw(dev);
+		frame->dt_len = pcbit_readw(dev);
+
+		/*
+		 * 0 sized packet
+		 * I don't know if they are an error or not...
+		 * But they are very frequent
+		 * Not documented
+		 */
+
+		if (frame->hdr_len == 0) {
+			kfree(frame);
+#ifdef DEBUG
+			printk(KERN_DEBUG "0 sized frame\n");
+#endif
+			/* fake a TDATA_CONF so the channel doesn't stall */
+			pcbit_firmware_bug(dev);
+			return;
+		}
+
+		/* sanity check the length values */
+		if (frame->hdr_len > 1024 || frame->dt_len > 2048)
+		{
+#ifdef DEBUG
+			printk(KERN_DEBUG "length problem: ");
+			printk(KERN_DEBUG "TH=%04x TD=%04x\n",
+			       frame->hdr_len,
+			       frame->dt_len);
+#endif
+			pcbit_l2_error(dev);
+			kfree(frame);
+			return;
+		}
+
+		/* minimum frame read */
+
+		frame->skb = dev_alloc_skb(frame->hdr_len + frame->dt_len +
+					   ((frame->hdr_len + 15) & ~15));
+
+		if (!frame->skb) {
+			printk(KERN_DEBUG "pcbit_receive: out of memmory\n");
+			kfree(frame);
+			return;
+		}
+
+		/* 16 byte alignment for IP */
+		if (frame->dt_len)
+			skb_reserve(frame->skb, (frame->hdr_len + 15) & ~15);
+
+	}
+	else {
+		/* Type 1 */
+		type1 = 1;
+		tt &= 0x7fffU;
+
+		if (!(frame = dev->read_frame)) {
+			printk("Type 1 frame and no frame queued\n");
+#if 1
+			/* usually after an error: toss frame */
+			/* NOTE(review): check uses '>' not '>=' -- confirm
+			   against the wrap handling in pcbit_readb() */
+			dev->readptr += tt;
+			if (dev->readptr > dev->sh_mem + BANK2 + BANKLEN)
+				dev->readptr -= BANKLEN;
+#else
+			pcbit_l2_error(dev);
+#endif
+			return;
+
+		}
+	}
+
+	/* append this fragment's bytes to the frame's skb */
+	memcpy_frompcbit(dev, skb_put(frame->skb, tt), tt);
+
+	frame->copied += tt;
+
+	/* frame complete: queue it for delivery to the upper layer */
+	if (frame->copied == frame->hdr_len + frame->dt_len) {
+
+		save_flags(flags);
+		cli();
+
+		if (type1) {
+			dev->read_frame = NULL;
+		}
+
+		if (dev->read_queue) {
+			struct frame_buf *ptr;
+			for(ptr=dev->read_queue;ptr->next;ptr=ptr->next);
+			ptr->next = frame;
+		}
+		else
+			dev->read_queue = frame;
+
+		pcbit_sched_delivery(dev);
+		restore_flags(flags);
+
+	}
+	else {
+		/* partial frame: park it until the next type 1 fragment */
+		save_flags(flags);
+		cli();
+		dev->read_frame = frame;
+		restore_flags(flags);
+	}
+}
+
+/*
+ * The board sends 0 sized frames
+ * They are TDATA_CONFs that get messed up somehow
+ * so we have to send a fake acknowledgment to the upper layer
+ */
+
+static __inline__ void pcbit_fake_conf(struct pcbit_dev *dev, struct pcbit_chan * chan)
+{
+	isdn_ctrl ictl;
+
+	/* only if the channel actually has unacknowledged data */
+	if (chan->queued) {
+		chan->queued--;
+
+		/* report one buffer as sent to isdn4linux */
+		ictl.driver = dev->id;
+		ictl.command = ISDN_STAT_BSENT;
+		ictl.arg = chan->id;
+		dev->dev_if->statcallb(&ictl);
+	}
+}
+
+/*
+ * Work around the firmware's 0-sized-frame bug: fake a TDATA_CONF
+ * on every B channel that is currently active.
+ */
+static void pcbit_firmware_bug(struct pcbit_dev * dev)
+{
+	struct pcbit_chan *chan;
+
+	chan = dev->b1;
+
+	if (chan->fsm_state == ST_ACTIVE) {
+		pcbit_fake_conf(dev, chan);
+	}
+
+	chan = dev->b2;
+
+	if (chan->fsm_state == ST_ACTIVE) {
+		pcbit_fake_conf(dev, chan);
+	}
+
+}
+
+/*
+ * Interrupt handler: reads the board's attention byte (BANK3) and
+ * dispatches to frame reception / ack processing, or to the layer-2
+ * startup/error paths.  Re-entry is blocked via dev->interrupt.
+ */
+void pcbit_irq_handler(int interrupt, void * devptr, struct pt_regs *regs)
+{
+	struct pcbit_dev * dev;
+	u_char info, ack_seq, read_seq;
+	u_char ack_int = 1;
+
+	dev = (struct pcbit_dev *) devptr;
+
+	if (!dev)
+	{
+		printk(KERN_WARNING "pcbit_irq_handler: wrong device\n");
+		return;
+	}
+
+	if (dev->interrupt) {
+		printk(KERN_DEBUG "pcbit: reentering interrupt hander\n");
+		return;
+	}
+
+	dev->interrupt = 1;
+
+	/* board status: bit 7 = running, bit 6 = error,
+	   bits 5-3 = board ack seq, bits 2-0 = board send seq */
+	info = readb(dev->sh_mem + BANK3);
+
+	if (dev->l2_state == L2_STARTING || dev->l2_state == L2_ERROR)
+	{
+		pcbit_l2_active_conf(dev, info);
+		dev->interrupt = 0;
+		return;
+	}
+
+	if (info & 0x40U)    /* E bit set */
+	{
+#ifdef DEBUG
+		printk(KERN_DEBUG "pcbit_irq_handler: E bit on\n");
+#endif
+		pcbit_l2_error(dev);
+		dev->interrupt = 0;
+		return;
+	}
+
+	if (dev->l2_state != L2_RUNNING && dev->l2_state != L2_LOADING)
+	{
+		dev->interrupt = 0;
+		return;
+	}
+
+	ack_seq = (info >> 3) & 0x07U;
+	read_seq = (info & 0x07U);
+
+	dev->interrupt = 0;
+	sti();
+
+	/*
+	 * Bottom Half
+	 * Runs with ints enabled
+	 */
+
+	/* new frames from the board? */
+	if (read_seq != dev->rcv_seq)
+	{
+		pcbit_frame_read(dev, read_seq);
+		ack_int = 0;
+	}
+
+	/* new acknowledgments from the board? */
+	if (ack_seq != dev->unack_seq)
+	{
+		pcbit_recv_ack(dev, ack_seq);
+		ack_int = 0;
+	}
+
+	/* nothing to do: still acknowledge the interrupt via BANK4 */
+	if (ack_int)
+	{
+		info = 0;
+		info |= dev->rcv_seq << 3;
+		info |= dev->send_seq;
+
+		writeb(info, dev->sh_mem + BANK4);
+	}
+}
+
+
+/*
+ * Handle the board's answer to a layer-2 activation attempt.
+ * Bit 7 of info = board running; bits 2-0 = its current sequence
+ * number, which becomes our receive sequence.
+ */
+static void pcbit_l2_active_conf(struct pcbit_dev *dev, u_char info)
+{
+	u_char state;
+
+	state = dev->l2_state;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "layer2_active_confirm\n");
+#endif
+
+
+	if (info & 0x80U ) {
+		dev->rcv_seq = info & 0x07U;
+		dev->l2_state = L2_RUNNING;
+	}
+	else
+		dev->l2_state = L2_DOWN;
+
+	/* wake whoever is sleeping in the startup path */
+	if (state == L2_STARTING)
+		wake_up_interruptible(&dev->set_running_wq);
+
+	/* back from error recovery: restart pending transmissions */
+	if (state == L2_ERROR && dev->l2_state == L2_RUNNING) {
+		pcbit_transmit(dev);
+	}
+
+}
+
+/*
+ * Timer callback for layer-2 error recovery: once both directions
+ * are idle, throw away any half-received frame, rewind (or drop)
+ * the frame being written, reset all sequence numbers/pointers and
+ * ask the board to restart.
+ */
+static void pcbit_l2_err_recover(unsigned long data)
+{
+
+	struct pcbit_dev * dev;
+	struct frame_buf *frame;
+
+	dev = (struct pcbit_dev *) data;
+
+	del_timer(&dev->error_recover_timer);
+	/* still busy: retry after another ERRTIME */
+	if (dev->w_busy || dev->r_busy)
+	{
+		init_timer(&dev->error_recover_timer);
+		dev->error_recover_timer.expires = jiffies + ERRTIME;
+		add_timer(&dev->error_recover_timer);
+		return;
+	}
+
+	/* block both paths while we reset */
+	dev->w_busy = dev->r_busy = 1;
+
+	if (dev->read_frame) {
+		if (dev->read_frame->skb) {
+			dev->read_frame->skb->free = 1;
+			kfree_skb(dev->read_frame->skb, FREE_READ);
+		}
+		kfree(dev->read_frame);
+		dev->read_frame = NULL;
+	}
+
+
+	if (dev->write_queue) {
+		frame = dev->write_queue;
+#ifdef FREE_ON_ERROR
+		/* drop the half-sent frame entirely */
+		dev->write_queue = dev->write_queue->next;
+
+		if (frame->skb) {
+			dev_kfree_skb(frame->skb, FREE_WRITE);
+		}
+
+		kfree(frame);
+#else
+		/* retransmit it from the start */
+		frame->copied = 0;
+#endif
+	}
+
+	/* back to a clean modulo-8 window and a full board buffer */
+	dev->rcv_seq = dev->send_seq = dev->unack_seq = 0;
+	dev->free = 511;
+	dev->l2_state = L2_ERROR;
+
+	/* this is an hack... */
+	pcbit_firmware_bug(dev);
+
+	dev->writeptr = dev->sh_mem;
+	dev->readptr = dev->sh_mem + BANK2;
+
+	/* bit 7 asks the board to (re)activate layer 2 */
+	writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)),
+	       dev->sh_mem + BANK4);
+	dev->w_busy = dev->r_busy = 0;
+
+}
+
+/*
+ * Enter the layer-2 error state and schedule recovery after ERRTIME.
+ * The L2_RUNNING guard makes repeated calls harmless.
+ */
+static void pcbit_l2_error(struct pcbit_dev *dev)
+{
+	if (dev->l2_state == L2_RUNNING) {
+
+		printk(KERN_INFO "pcbit: layer 2 error\n");
+
+#ifdef DEBUG
+		log_state(dev);
+#endif
+
+		dev->l2_state = L2_DOWN;
+
+		init_timer(&dev->error_recover_timer);
+		dev->error_recover_timer.function = &pcbit_l2_err_recover;
+		dev->error_recover_timer.data = (ulong) dev;
+		dev->error_recover_timer.expires = jiffies + ERRTIME;
+		add_timer(&dev->error_recover_timer);
+	}
+}
+
+/*
+ * Description:
+ *	if board acks frames
+ *	   update dev->free
+ *	   call pcbit_transmit to write possible queued frames
+ *
+ * A valid ack must satisfy dev->unack_seq < ack <= dev->send_seq
+ * (modulo 8); both wrap-around orderings are checked below.
+ */
+
+static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack)
+{
+	int i, count;
+	int unacked;
+
+	unacked = (dev->send_seq + (8 - dev->unack_seq) ) & 0x07;
+
+	/* dev->unack_seq < ack <= dev->send_seq; */
+
+	if (unacked)
+	{
+		/*
+		 * BUGFIX: the original validation paired its "else" with
+		 * the wrong (inner) "if" - the classic dangling-else bug -
+		 * and then fell through and processed the bad ack anyway.
+		 * Brace both arms explicitly and bail out on error.
+		 */
+		if (dev->send_seq > dev->unack_seq) {
+			if (ack <= dev->unack_seq || ack > dev->send_seq) {
+				printk("layer 2 ack unacceptable - dev %d",
+				       dev->id);
+				pcbit_l2_error(dev);
+				return;
+			}
+		} else {
+			if (ack > dev->send_seq && ack <= dev->unack_seq) {
+				printk("layer 2 ack unacceptable - dev %d",
+				       dev->id);
+				pcbit_l2_error(dev);
+				return;
+			}
+		}
+
+		/* ack is acceptable: credit back the board buffer space of
+		   every frame that has now been acknowledged */
+
+		i = dev->unack_seq;
+
+		do {
+			dev->unack_seq = i = (i + 1) % 8;
+			dev->free += dev->fsize[i];
+		} while (i != ack);
+
+		/* try to flush the write queue; at most 7 frames fit in
+		   the modulo-8 window */
+		count = 0;
+		while (count < 7 && dev->write_queue)
+		{
+			u8 lsend_seq = dev->send_seq;
+
+			pcbit_transmit(dev);
+
+			/* nothing went out: stop trying */
+			if (dev->send_seq == lsend_seq)
+				break;
+			count++;
+		}
+
+		if (!count) {
+			/* nothing transmitted: still acknowledge the
+			   interrupt by rewriting BANK4 */
+			u_char info;
+
+			info = 0;
+			info |= dev->rcv_seq << 3;
+			info |= dev->send_seq;
+
+			writeb(info, dev->sh_mem + BANK4);
+		}
+	}
+	else
+		printk(KERN_DEBUG "recv_ack: unacked = 0\n");
+}
+
+/*
+ * Read fragments from the board until our receive sequence number
+ * catches up with the board's (read_seq), then acknowledge via BANK4.
+ * r_busy keeps a second caller out while we are reading.
+ */
+static void pcbit_frame_read(struct pcbit_dev * dev, unsigned char read_seq)
+{
+	unsigned long flags;
+	int busy;
+	u_char info;
+
+	save_flags(flags);
+	cli();
+	if (!(busy=dev->r_busy))
+		dev->r_busy = 1;
+	restore_flags(flags);
+
+	if (busy)
+		return;
+
+
+	while (read_seq != dev->rcv_seq) {
+		pcbit_receive(dev);
+		dev->rcv_seq = (dev->rcv_seq + 1) % 8;
+	}
+
+	dev->r_busy = 0;
+
+	/* ack: bits 5-3 = our receive seq, bits 2-0 = our send seq */
+	info = 0;
+	info |= dev->rcv_seq << 3;
+	info |= dev->send_seq;
+
+	writeb(info, dev->sh_mem + BANK4);
+}
+
+
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * PCBIT-D low-layer interface definitions
+ */
+
+#ifndef LAYER2_H
+#define LAYER2_H
+
+#include <bytesex.h>
+
+#define BANK1 0x0000U /* PC -> Board */
+#define BANK2 0x01ffU /* Board -> PC */
+#define BANK3 0x03feU /* Att Board */
+#define BANK4 0x03ffU /* Att PC */
+
+#define BANKLEN 0x01FFU
+
+#define LOAD_ZONE_START 0x03f8U
+#define LOAD_ZONE_END 0x03fdU
+
+#define LOAD_RETRY 18000000
+
+
+
+/* TAM - XX - C - S - NUM */
+#define PREHDR_LEN 8
+/* TT - M - I - TH - TD */
+#define FRAME_HDR_LEN 8
+
+#define MSG_CONN_REQ 0x08000100
+#define MSG_CONN_CONF 0x00000101
+#define MSG_CONN_IND 0x00000102
+#define MSG_CONN_RESP 0x08000103
+
+#define MSG_CONN_ACTV_REQ 0x08000300
+#define MSG_CONN_ACTV_CONF 0x00000301
+#define MSG_CONN_ACTV_IND 0x00000302
+#define MSG_CONN_ACTV_RESP 0x08000303
+
+#define MSG_DISC_REQ 0x08000400
+#define MSG_DISC_CONF 0x00000401
+#define MSG_DISC_IND 0x00000402
+#define MSG_DISC_RESP 0x08000403
+
+#define MSG_TDATA_REQ 0x0908E200
+#define MSG_TDATA_CONF 0x0000E201
+#define MSG_TDATA_IND 0x0000E202
+#define MSG_TDATA_RESP 0x0908E203
+
+#define MSG_SELP_REQ 0x09004000
+#define MSG_SELP_CONF 0x00004001
+
+#define MSG_ACT_TRANSP_REQ 0x0908E000
+#define MSG_ACT_TRANSP_CONF 0x0000E001
+
+#define MSG_STPROT_REQ 0x09004100
+#define MSG_STPROT_CONF 0x00004101
+
+#define MSG_PING188_REQ 0x09030500
+#define MSG_PING188_CONF 0x000005bc
+
+#define MSG_WATCH188 0x09030400
+
+#define MSG_API_ON 0x08020102
+#define MSG_POOL_PCBIT 0x08020400
+#define MSG_POOL_PCBIT_CONF 0x00000401
+
+#define MSG_INFO_IND 0x00002602
+#define MSG_INFO_RESP 0x08002603
+
+#define MSG_DEBUG_188 0x0000ff00
+
+/*
+
+ long 4 3 2 1
+ Intel 1 2 3 4
+*/
+
+/*
+ * Byte view of the 32-bit message code (MSG_* constants above).
+ * Only the little-endian layout is supported; the #error below
+ * deliberately breaks the build on big-endian targets.
+ */
+struct msg_fmt {
+#if __BYTE_ORDER == 1234          /* Little Endian */
+	u_char scmd;              /* subcommand */
+	u_char cmd;               /* command */
+	u_char proc;
+	u_char cpu;
+#else
+#error "Non-Intel CPU"
+	u_char cpu;
+	u_char proc;
+	u_char cmd;
+	u_char scmd;
+#endif
+};
+
+
+#define MAX_QUEUED 7
+
+#define SCHED_READ 0x01
+#define SCHED_WRITE 0x02
+
+#define SET_RUN_TIMEOUT 2*HZ /* 2 seconds */
+
+
+/* one queued layer-2 frame (read or write direction) */
+struct frame_buf {
+	ulong msg;                /* 32-bit message code (struct msg_fmt) */
+	unsigned short refnum;    /* reference number */
+	unsigned short dt_len;    /* data length (TD) */
+	unsigned short hdr_len;   /* header length (TH) */
+	struct sk_buff *skb;      /* payload; NULL for header-only frames */
+	unsigned short copied;    /* bytes transferred so far */
+	struct frame_buf * next;  /* singly linked queue */
+};
+
+#define MIN(a,b) ((a<b)?a:b)
+
+extern int pcbit_l2_write(struct pcbit_dev * dev, ulong msg, ushort refnum,
+ struct sk_buff *skb, unsigned short hdr_len);
+
+extern void pcbit_irq_handler(int interrupt, void *, struct pt_regs *regs);
+
+extern struct pcbit_dev * dev_pcbit[MAX_PCBIT_CARDS];
+
+#ifdef DEBUG
+/* dump the layer-2 pointers and sequence numbers for debugging */
+static __inline__ void log_state(struct pcbit_dev *dev) {
+	printk(KERN_DEBUG "writeptr = %ld\n",
+	       (ulong) (dev->writeptr - dev->sh_mem));
+	printk(KERN_DEBUG "readptr  = %ld\n",
+	       (ulong) (dev->readptr - (dev->sh_mem + BANK2)));
+	printk(KERN_DEBUG "{rcv_seq=%01x, send_seq=%01x, unack_seq=%01x}\n",
+	       dev->rcv_seq, dev->send_seq, dev->unack_seq);
+}
+#endif
+
+/* map a B channel back to its owning device, or NULL if unknown */
+static __inline__ struct pcbit_dev * chan2dev(struct pcbit_chan * chan)
+{
+	struct pcbit_dev * dev;
+	int i;
+
+
+	for (i=0; i<MAX_PCBIT_CARDS; i++)
+		if ((dev=dev_pcbit[i]))
+			if (dev->b1 == chan || dev->b2 == chan)
+				return dev;
+	return NULL;
+
+}
+
+/* find a device by its isdn4linux driver id, or NULL */
+static __inline__ struct pcbit_dev * finddev(int id)
+{
+	struct pcbit_dev * dev;
+	int i;
+
+	for (i=0; i<MAX_PCBIT_CARDS; i++)
+		if ((dev=dev_pcbit[i]))
+			if (dev->id == id)
+				return dev;
+	return NULL;
+}
+
+
+/*
+ * Support routines for reading and writing in the board
+ */
+
+/* write one byte to the transmit bank, wrapping at the end of BANK1 */
+static __inline__ void pcbit_writeb(struct pcbit_dev *dev, unsigned char dt)
+{
+	writeb(dt, dev->writeptr++);
+	if (dev->writeptr == dev->sh_mem + BANKLEN)
+		dev->writeptr = dev->sh_mem;
+}
+
+/*
+ * Write a 16-bit word (little-endian) to the transmit bank,
+ * handling the two wrap cases at the end of BANK1.
+ */
+static __inline__ void pcbit_writew(struct pcbit_dev *dev, unsigned short dt)
+{
+	int dist;
+
+	dist = BANKLEN - (dev->writeptr - dev->sh_mem);
+	switch (dist) {
+	case 2: /* word fits exactly at the end of the bank */
+		writew(dt, dev->writeptr);
+		dev->writeptr = dev->sh_mem;
+		break;
+	case 1: /* word straddles the bank boundary: split the bytes */
+		writeb((u_char) (dt & 0x00ffU), dev->writeptr);
+		dev->writeptr = dev->sh_mem;
+		writeb((u_char) (dt >> 8), dev->writeptr++);
+		break;
+	default: /* no wrap */
+		writew(dt, dev->writeptr);
+		dev->writeptr += 2;
+		break;
+	};
+}
+
+/*
+ * Copy len bytes into the transmit bank, splitting the copy in two
+ * when it wraps past the end of BANK1.
+ */
+static __inline__ void memcpy_topcbit(struct pcbit_dev * dev, u_char * data,
+				      int len)
+{
+	int diff;
+
+	/* bytes that would spill past the end of the bank */
+	diff = len - (BANKLEN - (dev->writeptr - dev->sh_mem) );
+
+	if (diff > 0)
+	{
+		memcpy_toio(dev->writeptr, data, len - diff);
+		memcpy_toio(dev->sh_mem, data + (len - diff), diff);
+		dev->writeptr = dev->sh_mem + diff;
+	}
+	else
+	{
+		memcpy_toio(dev->writeptr, data, len);
+
+		dev->writeptr += len;
+		/* exactly filled the bank: wrap to the start */
+		if (diff == 0)
+			dev->writeptr = dev->sh_mem;
+	}
+}
+
+/* read one byte from the receive bank, wrapping at the end of BANK2 */
+static __inline__ unsigned char pcbit_readb(struct pcbit_dev *dev)
+{
+	unsigned char val;
+
+	val = readb(dev->readptr++);
+	if (dev->readptr == dev->sh_mem + BANK2 + BANKLEN)
+		dev->readptr = dev->sh_mem + BANK2;
+
+	return val;
+}
+
+/*
+ * Read a 16-bit word (little-endian) from the receive bank,
+ * handling the two wrap cases at the end of BANK2.
+ */
+static __inline__ unsigned short pcbit_readw(struct pcbit_dev *dev)
+{
+	int dist;
+	unsigned short val;
+
+	dist = BANKLEN - ( dev->readptr - (dev->sh_mem + BANK2 ) );
+	switch (dist) {
+	case 2: /* word ends exactly at the bank boundary */
+		val = readw(dev->readptr);
+		dev->readptr = dev->sh_mem + BANK2;
+		break;
+	case 1: /* word straddles the boundary: reassemble the bytes */
+		val = readb(dev->readptr);
+		dev->readptr = dev->sh_mem + BANK2;
+		val = (readb(dev->readptr++) << 8) | val;
+		break;
+	default: /* no wrap */
+		val = readw(dev->readptr);
+		dev->readptr += 2;
+		break;
+	};
+	return val;
+}
+
+/*
+ * Copy len bytes out of the receive bank, splitting the copy in two
+ * when it wraps past the end of BANK2.
+ */
+static __inline__ void memcpy_frompcbit(struct pcbit_dev * dev, u_char * data, int len)
+{
+	int diff;
+
+	/* bytes that lie past the end of the bank */
+	diff = len - (BANKLEN - (dev->readptr - (dev->sh_mem + BANK2) ) );
+	if (diff > 0)
+	{
+		memcpy_fromio(data, dev->readptr, len - diff);
+		memcpy_fromio(data + (len - diff), dev->sh_mem + BANK2 , diff);
+		dev->readptr = dev->sh_mem + BANK2 + diff;
+	}
+	else
+	{
+		memcpy_fromio(data, dev->readptr, len);
+		dev->readptr += len;
+		/* exactly drained the bank: wrap to the start */
+		if (diff == 0)
+			dev->readptr = dev->sh_mem + BANK2;
+	}
+}
+
+
+#endif
+
+
+
+
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * PCBIT-D module support
+ */
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/tqueue.h>
+#include <linux/skbuff.h>
+
+#include <linux/isdnif.h>
+#include "pcbit.h"
+
+int mem[MAX_PCBIT_CARDS] = {0, };
+int irq[MAX_PCBIT_CARDS] = {0, };
+
+int num_boards;
+struct pcbit_dev * dev_pcbit[MAX_PCBIT_CARDS] = {0, 0, 0, 0};
+
+int init_module(void);
+void cleanup_module(void);
+
+extern void pcbit_terminate(int board);
+extern int pcbit_init_dev(int board, int mem_base, int irq);
+
+#ifdef MODULE
+#define pcbit_init init_module
+#endif
+
+/*
+ * Driver entry point (init_module when built as a module).
+ * Probes each board described by the mem[]/irq[] parameter arrays;
+ * if none were given, tries the hardcoded default settings.
+ * Returns 0 on success, -EIO if any probe fails.
+ */
+int pcbit_init(void)
+{
+	int board;
+
+	num_boards = 0;
+
+	printk(KERN_INFO
+	       "PCBIT-D device driver v 0.5 - "
+	       "Copyright (C) 1996 Universidade de Lisboa\n");
+
+	if (mem[0] || irq[0])
+	{
+		/*
+		 * BUGFIX: continue while *either* mem or irq was given
+		 * for this slot.  The old condition required both, which
+		 * made the per-board default filling below unreachable.
+		 */
+		for (board=0; board < MAX_PCBIT_CARDS && (mem[board] || irq[board]); board++)
+		{
+			if (!mem[board])
+				mem[board] = 0xD0000;
+			if (!irq[board])
+				irq[board] = 5;
+
+			if (pcbit_init_dev(board, mem[board], irq[board]) == 0)
+				num_boards++;
+
+			else
+			{
+				/* also add the missing newline */
+				printk(KERN_WARNING
+				       "pcbit_init failed for dev %d\n",
+				       board + 1);
+				return -EIO;
+			}
+		}
+	}
+
+	/* Hardcoded default settings detection */
+
+	if (!num_boards)
+	{
+		printk(KERN_INFO
+		       "Trying to detect board using default settings\n");
+		if (pcbit_init_dev(0, 0xD0000, 5) == 0)
+			num_boards++;
+		else
+			return -EIO;
+	}
+
+	/* No symbols to export, hide all symbols */
+	register_symtab(NULL);
+
+	return 0;
+}
+
+#ifdef MODULE
+/*
+ * Module unload: refuse while in use, otherwise shut down
+ * every initialized board.
+ */
+void cleanup_module(void)
+{
+	int board;
+
+	if (MOD_IN_USE) {
+		printk(KERN_WARNING "pcbit: device busy, remove cancelled\n");
+		return;
+	}
+
+	for (board = 0; board < num_boards; board++)
+		pcbit_terminate(board);
+	printk(KERN_INFO
+	       "PCBIT-D module unloaded\n");
+}
+
+#else
+/*
+ * Kernel command-line parser (non-modular build): consumes pairs of
+ * integers "mem,irq[,mem,irq...]" into the mem[]/irq[] arrays.
+ * ints[0] holds the argument count, values start at ints[1].
+ */
+void pcbit_setup(char *str, int *ints)
+{
+	int i, j, argc;
+
+	argc = ints[0];
+	i = 0;
+	j = 1;
+
+	while (argc && (i<MAX_PCBIT_CARDS)) {
+
+		/* first value of the pair: shared memory base */
+		if (argc) {
+			mem[i] = ints[j];
+			j++; argc--;
+		}
+
+		/* second value of the pair: irq (may be absent) */
+		if (argc) {
+			irq[i] = ints[j];
+			j++; argc--;
+		}
+
+		i++;
+	}
+}
+#endif
+
+
+
--- /dev/null
+/*
+ * Copyright (C) 1996 Universidade de Lisboa
+ *
+ * Written by Pedro Roque Marques (roque@di.fc.ul.pt)
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU Public License, incorporated herein by reference.
+ */
+
+/*
+ * PCBIT-D device driver definitions
+ */
+
+#ifndef PCBIT_H
+#define PCBIT_H
+
+#define MAX_PCBIT_CARDS 4
+
+
+#define BLOCK_TIMER
+
+#ifdef __KERNEL__
+
+/* per-B-channel state */
+struct pcbit_chan {
+	unsigned short id;
+	unsigned short callref;           /* Call Reference */
+	unsigned char  proto;             /* layer2protol */
+	unsigned char  queued;            /* unacked data messages */
+	unsigned char  layer2link;        /* used in TData */
+	unsigned char  snum;              /* used in TData */
+	unsigned short s_refnum;          /* send reference number */
+	unsigned short r_refnum;          /* receive reference number */
+	unsigned short fsm_state;         /* call control state (ST_*) */
+	struct timer_list fsm_timer;
+#ifdef BLOCK_TIMER
+	struct timer_list block_timer;
+#endif
+};
+
+/* singly linked list node for the device's ISDN address (MSN) list */
+struct msn_entry {
+	char *msn;
+	struct msn_entry * next;
+};
+
+/* per-board state */
+struct pcbit_dev {
+	/* board */
+
+	volatile unsigned char* sh_mem;   /* RDP address */
+	unsigned int irq;
+	unsigned int id;
+	unsigned int interrupt;           /* set during interrupt
+					     processing */
+
+	/* isdn4linux */
+
+	struct msn_entry * msn_list;      /* ISDN address list */
+
+	isdn_if * dev_if;
+
+	ushort ll_hdrlen;
+	ushort hl_hdrlen;
+
+	/* link layer */
+	unsigned char l2_state;           /* L2_* constants below */
+
+	struct frame_buf *read_queue;     /* reassembled frames, ready */
+	struct frame_buf *read_frame;     /* frame being reassembled */
+	struct frame_buf *write_queue;    /* frames waiting to be sent */
+
+	/* Protocol start */
+	struct wait_queue *set_running_wq;
+	struct timer_list set_running_timer;
+
+	struct timer_list error_recover_timer;
+
+	struct tq_struct qdelivery;
+
+	/* single-threading flags for the write and read paths */
+	u_char w_busy;
+	u_char r_busy;
+
+	/* current positions in the shared-memory banks */
+	volatile unsigned char *readptr;
+	volatile unsigned char *writeptr;
+
+	ushort loadptr;
+
+	unsigned short fsize[8];          /* sent layer2 frames size */
+
+	/* modulo-8 window sequence numbers */
+	unsigned char send_seq;
+	unsigned char rcv_seq;
+	unsigned char unack_seq;
+
+	unsigned short free;              /* free bytes in the board buffer */
+
+	/* channels */
+
+	struct pcbit_chan *b1;
+	struct pcbit_chan *b2;
+};
+
+#define STATS_TIMER (10*HZ)
+#define ERRTIME (0.1*HZ)
+
+/* MRU */
+#define MAXBUFSIZE 1534
+#define MRU MAXBUFSIZE
+
+#define STATBUF_LEN 2048
+/*
+ *
+ */
+
+#endif /* __KERNEL__ */
+
+/* isdn_ctrl only allows a long sized argument */
+
+/* argument block for the PCBIT_IOCTL_* driver controls below */
+struct pcbit_ioctl {
+	union {
+		struct byte_op {
+			ushort addr;      /* offset into board memory */
+			ushort value;
+		} rdp_byte;               /* for SETBYTE / GETBYTE */
+		unsigned long l2_status;  /* for GETSTAT */
+	} info;
+};
+
+
+
+#define PCBIT_IOCTL_GETSTAT 0x01 /* layer2 status */
+#define PCBIT_IOCTL_LWMODE 0x02 /* linear write mode */
+#define PCBIT_IOCTL_STRLOAD 0x03 /* start load mode */
+#define PCBIT_IOCTL_ENDLOAD 0x04 /* end load mode */
+#define PCBIT_IOCTL_SETBYTE 0x05 /* set byte */
+#define PCBIT_IOCTL_GETBYTE 0x06 /* get byte */
+#define PCBIT_IOCTL_RUNNING 0x07 /* set protocol running */
+#define PCBIT_IOCTL_WATCH188 0x08 /* set watch 188 */
+#define PCBIT_IOCTL_PING188 0x09 /* ping 188 */
+#define PCBIT_IOCTL_FWMODE 0x0A /* firmware write mode */
+#define PCBIT_IOCTL_STOP 0x0B /* stop protocol */
+#define PCBIT_IOCTL_APION 0x0C /* issue API_ON */
+
+#ifndef __KERNEL__
+
+#define PCBIT_GETSTAT (PCBIT_IOCTL_GETSTAT + IIOCDRVCTL)
+#define PCBIT_LWMODE (PCBIT_IOCTL_LWMODE + IIOCDRVCTL)
+#define PCBIT_STRLOAD (PCBIT_IOCTL_STRLOAD + IIOCDRVCTL)
+#define PCBIT_ENDLOAD (PCBIT_IOCTL_ENDLOAD + IIOCDRVCTL)
+#define PCBIT_SETBYTE (PCBIT_IOCTL_SETBYTE + IIOCDRVCTL)
+#define PCBIT_GETBYTE (PCBIT_IOCTL_GETBYTE + IIOCDRVCTL)
+#define PCBIT_RUNNING (PCBIT_IOCTL_RUNNING + IIOCDRVCTL)
+#define PCBIT_WATCH188 (PCBIT_IOCTL_WATCH188 + IIOCDRVCTL)
+#define PCBIT_PING188 (PCBIT_IOCTL_PING188 + IIOCDRVCTL)
+#define PCBIT_FWMODE (PCBIT_IOCTL_FWMODE + IIOCDRVCTL)
+#define PCBIT_STOP (PCBIT_IOCTL_STOP + IIOCDRVCTL)
+#define PCBIT_APION (PCBIT_IOCTL_APION + IIOCDRVCTL)
+
+#define MAXSUPERLINE 3000
+
+#endif
+
+#define L2_DOWN 0
+#define L2_LOADING 1
+#define L2_LWMODE 2
+#define L2_FWMODE 3
+#define L2_STARTING 4
+#define L2_RUNNING 5
+#define L2_ERROR 6
+
+#endif
+
+
+
+
+
+
+
+/* $Id: buffers.c,v 1.1 1996/04/13 10:19:28 fritz Exp $
+ *
+ * $Log: buffers.c,v $
+ * Revision 1.1 1996/04/13 10:19:28 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
#include <linux/mm.h>
+/* $Id: callc.c,v 1.2 1996/04/20 16:42:29 fritz Exp fritz $
+ *
+ * $Log: callc.c,v $
+ * Revision 1.2 1996/04/20 16:42:29 fritz
+ * Changed statemachine to allow reject of incoming calls.
+ *
+ * Revision 1.1 1996/04/13 10:20:59 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
ST_PRO_W, /* 13 call clear. (initiator), DISCONNECT req. sent */
ST_ANT_W, /* 14 call clear. (receiver), awaiting DISCONNECT ind. */
ST_DISC_BC_HANGUP, /* d channel gone, wait for b channel deactivation */
+ ST_OUT_W_HANGUP, /* Outgoing waiting for D-Channel hangup received */
ST_D_ERR, /* d channel released while active */
};
"ST_PRO_W",
"ST_ANT_W",
"ST_DISC_BC_HANGUP",
+ "ST_OUT_W_HANGUP",
"ST_D_ERR",
};
ll_hangup(chanp, 0);
}
+
+/* hangup requested while the call is being set up: ask layer 3 to
+   disconnect and wait for the release in ST_OUT_W_HANGUP */
+static void
+r2_1(struct FsmInst *fi, int event, void *arg)
+{
+	struct Channel *chanp = fi->userdata;
+
+	chanp->is.l4.l4l3(&chanp->is, CC_DISCONNECT_REQ, NULL);
+
+	FsmChangeState(fi, ST_OUT_W_HANGUP);
+}
+
+
+/* call released by the network: drop the D-channel link layer and
+   report the hangup to the link level */
+static void
+r2_2(struct FsmInst *fi, int event, void *arg)
+{
+	struct Channel *chanp = fi->userdata;
+
+	FsmChangeState(fi, ST_REL_W);
+	FsmEvent(&chanp->lc_d.lcfi, EV_LC_RELEASE, NULL);
+	ll_hangup(chanp, 0);
+}
+
+
static void
r3(struct FsmInst *fi, int event, void *arg)
{
FsmChangeState(fi, ST_REL_W);
}
+
+/* D-channel link lost during call setup: tear down layer 3, release
+   the link and report the hangup */
+static void
+r3_1(struct FsmInst *fi, int event, void *arg)
+{
+	struct Channel *chanp = fi->userdata;
+
+	chanp->is.l4.l4l3(&chanp->is,CC_DLRL,NULL);
+
+	FsmEvent(&chanp->lc_d.lcfi, EV_LC_RELEASE, NULL);
+	FsmChangeState(fi, ST_REL_W);
+	ll_hangup(chanp, 0);
+}
+
+
static void
r4(struct FsmInst *fi, int event, void *arg)
{
+ struct Channel *chanp=fi->userdata;
+
+ chanp->is.l4.l4l3(&chanp->is,CC_DLRL,NULL);
FsmChangeState(fi, ST_NULL);
}
struct Channel *chanp = fi->userdata;
isdn_ctrl ic;
- chanp->is.l4.l4l3(&chanp->is, CC_ALERTING_REQ, NULL);
-
- FsmChangeState(fi, ST_IN);
-
/*
* Report incoming calls only once to linklevel, use octet 3 of
* channel identification information element. (it's value
* is copied to chanp->para.bchannel in l3s12(), file isdnl3.c)
*/
if (((chanp->chan & 1) + 1) & chanp->para.bchannel) {
+ chanp->is.l4.l4l3(&chanp->is, CC_ALERTING_REQ, NULL);
+ FsmChangeState(fi, ST_IN);
if (chanp->debug & 1)
stat_debug(chanp, "STAT_ICALL");
ic.driver = drid;
sprintf(ic.num, "%s,%d,0,%s", chanp->para.calling, chanp->para.info,
chanp->para.called);
iif.statcallb(&ic);
+ } else {
+ chanp->is.l4.l4l3(&chanp->is,CC_DLRL,NULL);
+ FsmEvent(&chanp->lc_d.lcfi, EV_LC_RELEASE, NULL);
+ FsmChangeState(fi, ST_REL_W);
}
}
FsmChangeState(fi, ST_ANT_W);
}
+
+/* D-channel link lost while a B channel was up: close the data path,
+   tear everything down and report the hangup */
+static void
+r17_1(struct FsmInst *fi, int event, void *arg)
+{
+	struct Channel *chanp = fi->userdata;
+
+	chanp->data_open = 0;
+	release_ds(chanp->chan);
+
+	chanp->is.l4.l4l3(&chanp->is,CC_DLRL,NULL);
+
+	FsmEvent(&chanp->lc_d.lcfi,EV_LC_RELEASE,NULL);
+
+	FsmChangeState(fi, ST_NULL);
+
+	ll_hangup(chanp,!0);
+}
+
static void
r18(struct FsmInst *fi, int event, void *arg)
{
r20(struct FsmInst *fi, int event, void *arg)
{
struct Channel *chanp = fi->userdata;
+
+ chanp->is.l4.l4l3(&chanp->is,CC_DLRL,NULL);
+
+ FsmEvent(&chanp->lc_d.lcfi,EV_LC_RELEASE,NULL);
FsmChangeState(fi, ST_NULL);
ll_hangup(chanp, 0);
}
+
static void
r21(struct FsmInst *fi, int event, void *arg)
{
chanp->is.l4.l4l3(&chanp->is, CC_DISCONNECT_REQ, NULL);
}
+/* D-channel link lost while the B channel was being established */
+static void
+r23_1(struct FsmInst *fi, int event, void *arg)
+{
+	struct Channel *chanp = fi->userdata;
+
+	release_ds(chanp->chan);
+
+	chanp->is.l4.l4l3(&chanp->is, CC_DLRL,NULL);
+
+	FsmEvent(&chanp->lc_d.lcfi, EV_LC_RELEASE,NULL);
+
+	FsmChangeState(fi, ST_NULL);
+
+	ll_hangup(chanp,!0);
+}
+
static void
r24(struct FsmInst *fi, int event, void *arg)
{
static struct FsmNode fnlist[] =
{
- {ST_NULL, EV_DIAL, r1},
- {ST_OUT_W, EV_DLEST, r5},
- {ST_OUT_W, EV_DLRL, r20},
- {ST_OUT, EV_DISCONNECT_IND, r2},
- {ST_CLEAR, EV_RELEASE_CNF, r3},
- {ST_REL_W, EV_DLRL, r4},
- {ST_NULL, EV_SETUP_IND, r6},
- {ST_IN_W, EV_DLEST, r7},
- {ST_IN, EV_RELEASE_IND, r3},
- {ST_IN, EV_ACCEPTD, r8},
- {ST_IN_SETUP, EV_SETUP_CMPL_IND, r9},
- {ST_OUT, EV_SETUP_CNF, r10},
- {ST_OUT_ESTB, EV_BC_EST, r12},
- {ST_OUT_ESTB, EV_BC_REL, r23},
- {ST_IN_DACT, EV_BC_EST, r12},
- {ST_ACTIVE, EV_HANGUP, r15},
- {ST_BC_HANGUP, EV_BC_REL, r16},
- {ST_BC_HANGUP, EV_DISCONNECT_IND, r21},
- {ST_ACTIVE, EV_BC_REL, r17},
- {ST_ACTIVE, EV_DISCONNECT_IND, r21},
- {ST_ACTIVE, EV_DLRL, r24},
- {ST_ACTIVE, EV_CINF, r26},
- {ST_PRO_W, EV_RELEASE_IND, r18},
- {ST_ANT_W, EV_DISCONNECT_IND, r19},
- {ST_DISC_BC_HANGUP, EV_BC_REL, r22},
- {ST_D_ERR, EV_BC_REL, r25},
+ {ST_NULL, EV_DIAL, r1},
+ {ST_OUT_W, EV_DLEST, r5},
+ {ST_OUT_W, EV_DLRL, r20},
+ {ST_OUT_W, EV_RELEASE_CNF, r2_2 },
+ {ST_OUT, EV_DISCONNECT_IND, r2},
+ {ST_OUT, EV_SETUP_CNF, r10},
+ {ST_OUT, EV_HANGUP, r2_1},
+ {ST_OUT, EV_RELEASE_IND, r20},
+ {ST_OUT, EV_DLRL, r2_2},
+ {ST_OUT_W_HANGUP, EV_RELEASE_IND, r2_2},
+ {ST_OUT_W_HANGUP, EV_DLRL, r20},
+ {ST_CLEAR, EV_RELEASE_CNF, r3},
+ {ST_CLEAR, EV_DLRL, r20},
+ {ST_REL_W, EV_DLRL, r4},
+ {ST_NULL, EV_SETUP_IND, r6},
+ {ST_IN_W, EV_DLEST, r7},
+ {ST_IN_W, EV_DLRL, r3_1},
+ {ST_IN, EV_DLRL, r3_1},
+ {ST_IN, EV_HANGUP, r3_1},
+ {ST_IN, EV_RELEASE_IND, r2_2},
+ {ST_IN, EV_RELEASE_CNF, r2_2},
+ {ST_IN, EV_ACCEPTD, r8},
+ {ST_IN_SETUP, EV_HANGUP, r2_1},
+ {ST_IN_SETUP, EV_SETUP_CMPL_IND, r9},
+ {ST_IN_SETUP, EV_RELEASE_IND, r2_2},
+ {ST_IN_SETUP, EV_DISCONNECT_IND, r2},
+ {ST_IN_SETUP, EV_DLRL, r20},
+ {ST_OUT_ESTB, EV_BC_EST, r12},
+ {ST_OUT_ESTB, EV_BC_REL, r23},
+ {ST_OUT_ESTB, EV_DLRL, r23_1},
+ {ST_IN_DACT, EV_BC_EST, r12},
+ {ST_IN_DACT, EV_BC_REL, r17},
+ {ST_IN_DACT, EV_DLRL, r17_1},
+ {ST_ACTIVE, EV_HANGUP, r15},
+ {ST_ACTIVE, EV_BC_REL, r17},
+ {ST_ACTIVE, EV_DISCONNECT_IND, r21},
+ {ST_ACTIVE, EV_DLRL, r24},
+ {ST_ACTIVE, EV_CINF, r26},
+ {ST_ACTIVE, EV_RELEASE_IND, r17},
+ {ST_BC_HANGUP, EV_BC_REL, r16},
+ {ST_BC_HANGUP, EV_DISCONNECT_IND, r21},
+ {ST_PRO_W, EV_RELEASE_IND, r18},
+ {ST_ANT_W, EV_DISCONNECT_IND, r19},
+ {ST_DISC_BC_HANGUP, EV_BC_REL, r22},
+ {ST_D_ERR, EV_BC_REL, r25},
};
#define FNCOUNT (sizeof(fnlist)/sizeof(struct FsmNode))
static struct FsmNode LcFnList[] =
{
- {ST_LC_NULL, EV_LC_ESTABLISH, lc_r1},
- {ST_LC_ACTIVATE_WAIT, EV_LC_PH_ACTIVATE, lc_r6},
- {ST_LC_DELAY, EV_LC_TIMER, lc_r2},
- {ST_LC_ESTABLISH_WAIT, EV_LC_DL_ESTABLISH, lc_r3},
- {ST_LC_CONNECTED, EV_LC_RELEASE, lc_r4},
- {ST_LC_CONNECTED, EV_LC_DL_RELEASE, lc_r5},
- {ST_LC_RELEASE_WAIT, EV_LC_DL_RELEASE, lc_r5},
- {ST_LC_ACTIVATE_WAIT, EV_LC_TIMER, lc_r5},
- {ST_LC_ESTABLISH_WAIT, EV_LC_DL_RELEASE, lc_r5},
+ {ST_LC_NULL, EV_LC_ESTABLISH, lc_r1},
+ {ST_LC_ACTIVATE_WAIT, EV_LC_PH_ACTIVATE, lc_r6},
+ {ST_LC_DELAY, EV_LC_TIMER, lc_r2},
+ {ST_LC_ESTABLISH_WAIT, EV_LC_DL_ESTABLISH, lc_r3},
+ {ST_LC_CONNECTED, EV_LC_RELEASE, lc_r4},
+ {ST_LC_CONNECTED, EV_LC_DL_RELEASE, lc_r5},
+ {ST_LC_RELEASE_WAIT, EV_LC_DL_RELEASE, lc_r5},
+ {ST_LC_ACTIVATE_WAIT, EV_LC_TIMER, lc_r5},
+ {ST_LC_ESTABLISH_WAIT, EV_LC_DL_RELEASE, lc_r5},
};
#define LC_FN_COUNT (sizeof(LcFnList)/sizeof(struct FsmNode))
if (err)
return (0);
- if (count > BUFFER_SIZE(HSCX_SBUF_ORDER, HSCX_SBUF_BPPS)) {
- printk(KERN_WARNING "teles_writebuf: packet too large!\n");
- return (-EINVAL);
- }
ptr = DATAPTR(ibh);
if (chanp->lc_b.l2_establish)
i = st->l2.ihsize;
else
i = 0;
+ if ((count+i) > BUFFER_SIZE(HSCX_SBUF_ORDER, HSCX_SBUF_BPPS)) {
+ printk(KERN_WARNING "teles_writebuf: packet too large!\n");
+ return (-EINVAL);
+ }
+
ptr += i;
if (user)
-/*
+/* $Id: card.c,v 1.1 1996/04/13 10:22:42 fritz Exp $
+ *
* card.c low level stuff for the Teles S0 isdn card
*
* Author Jan den Ouden
*
- *
- *
- * Changelog:
- *
* Beat Doebeli log all D channel traffic
*
+ * $Log: card.c,v $
+ * Revision 1.1 1996/04/13 10:22:42 fritz
+ * Initial revision
+ *
+ *
*/
#define __NO_VERSION__
+/* $Id: config.c,v 1.1 1996/04/13 10:23:11 fritz Exp $
+ *
+ * $Log: config.c,v $
+ * Revision 1.1 1996/04/13 10:23:11 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include <linux/types.h>
#include <linux/stddef.h>
+/* $Id: fsm.c,v 1.1 1996/04/13 10:23:41 fritz Exp $
+ *
+ * $Log: fsm.c,v $
+ * Revision 1.1 1996/04/13 10:23:41 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
+/* $Id: isdnl2.c,v 1.1 1996/04/13 10:24:16 fritz Exp $
+ *
+ * $Log: isdnl2.c,v $
+ * Revision 1.1 1996/04/13 10:24:16 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
+/* $Id: isdnl3.c,v 1.2 1996/04/20 16:45:05 fritz Exp $
+ *
+ * $Log: isdnl3.c,v $
+ * Revision 1.2 1996/04/20 16:45:05 fritz
+ * Changed to report all incoming calls to Linklevel, not just those
+ * with Service 7.
+ * Misc. typos
+ *
+ * Revision 1.1 1996/04/13 10:24:45 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#define P_1TR6
#include "teles.h"
st->l3.l3l4(st, CC_RELEASE_CNF, NULL);
}
+static void
+l3s4_1(struct PStack *st, byte pr, void *arg)
+{
+ struct BufHeader *ibh = arg;
+
+ BufPoolRelease(ibh);
+ newl3state(st, 19);
+ l3_message(st, MT_RELEASE);
+ st->l3.l3l4(st, CC_RELEASE_CNF, NULL);
+}
+
static void
l3s5(struct PStack *st, byte pr,
void *arg)
l3s12(struct PStack *st, byte pr, void *arg)
{
byte *p;
+ int bcfound = 0;
struct BufHeader *ibh = arg;
p = DATAPTR(ibh);
if ((p = findie(p + st->l2.uihsize, ibh->datasize - st->l2.uihsize,
0x18, 0))) {
st->pa->bchannel = p[2] & 0x3;
+ bcfound++ ;
} else
printk(KERN_WARNING "l3s12: Channel ident not found\n");
strcpy(st->pa->calling, "");
BufPoolRelease(ibh);
- if (st->pa->info == 7) {
- newl3state(st, 6);
- st->l3.l3l4(st, CC_SETUP_IND, NULL);
- } else {
- printk(KERN_WARNING "non-digital call: %s -> %s\n",
- st->pa->calling,
- st->pa->called);
- }
+ if (bcfound) {
+ if (st->pa->info != 7) {
+ printk(KERN_WARNING "non-digital call: %s -> %s\n",
+ st->pa->calling,
+ st->pa->called);
+ }
+ newl3state(st, 6);
+ st->l3.l3l4(st, CC_SETUP_IND, NULL);
+ }
}
static void
static struct stateentry downstatelist[] =
{
- {0, CC_SETUP_REQ, l3s5},
- {6, CC_REJECT_REQ, l3s13},
- {6, CC_SETUP_RSP, l3s16},
- {6, CC_ALERTING_REQ, l3s20},
- {7, CC_SETUP_RSP, l3s16},
- {10, CC_DISCONNECT_REQ, l3s18},
- {12, CC_RELEASE_REQ, l3s3},
+ {0,CC_SETUP_REQ,l3s5},
+ {1,CC_DISCONNECT_REQ,l3s18},
+ {1,CC_RELEASE_REQ,l3s3},
+ {1,CC_DLRL,l3s13},
+ {3,CC_DISCONNECT_REQ,l3s18},
+ {3,CC_RELEASE_REQ,l3s3},
+ {3,CC_DLRL,l3s13},
+ {4,CC_RELEASE_REQ,l3s3},
+ {4,CC_DISCONNECT_REQ,l3s18},
+ {4,CC_DLRL,l3s13},
+ {6,CC_RELEASE_REQ,l3s3},
+ {6,CC_DISCONNECT_REQ,l3s18},
+ {6,CC_ALERTING_REQ,l3s20},
+ {6,CC_DLRL,l3s13},
+ {7,CC_RELEASE_REQ,l3s3},
+ {7,CC_SETUP_RSP,l3s16},
+ {7,CC_DLRL,l3s13},
+ {8,CC_RELEASE_REQ,l3s3},
+ {8,CC_DISCONNECT_REQ,l3s18},
+ {8,CC_DLRL,l3s13},
+ {10,CC_DISCONNECT_REQ,l3s18},
+ {10,CC_RELEASE_REQ,l3s3},
+ {10,CC_DLRL,l3s13},
+ {11,CC_RELEASE_REQ,l3s3},
+ {12,CC_RELEASE_REQ,l3s3},
+ {19,CC_DLRL,l3s13},
};
static int downsllen = sizeof(downstatelist) /
static struct stateentry datastatelist[] =
{
- {0, MT_SETUP, l3s12},
- {1, MT_CALL_PROCEEDING, l3s6},
- {1, MT_RELEASE_COMPLETE, l3s7},
- {3, MT_DISCONNECT, l3s7},
- {3, MT_CONNECT, l3s8},
- {3, MT_ALERTING, l3s11},
- {4, MT_CONNECT, l3s8},
- {4, MT_DISCONNECT, l3s7},
- {4, MT_RELEASE, l3s19},
- {7, MT_RELEASE, l3s19},
- {8, MT_CONNECT_ACKNOWLEDGE, l3s17},
- {10, MT_DISCONNECT, l3s7},
- {11, MT_RELEASE, l3s19},
- {19, MT_RELEASE_COMPLETE, l3s4},
+ {0,MT_SETUP,l3s12},
+ {1,MT_CALL_PROCEEDING,l3s6},
+ {1,MT_RELEASE_COMPLETE,l3s4},
+ {1,MT_RELEASE,l3s19},
+ {1,MT_DISCONNECT,l3s7},
+ {3,MT_DISCONNECT,l3s7},
+ {3,MT_CONNECT,l3s8},
+ {3,MT_ALERTING,l3s11},
+ {3,MT_RELEASE,l3s19},
+ {3,MT_RELEASE_COMPLETE,l3s4},
+ {4,MT_CONNECT,l3s8},
+ {4,MT_DISCONNECT,l3s7},
+ {4,MT_RELEASE,l3s19},
+ {4,MT_RELEASE_COMPLETE,l3s4},
+ {6,MT_SETUP,l3s12},
+ {7,MT_RELEASE,l3s19},
+ {7,MT_RELEASE_COMPLETE,l3s4_1},
+ {7,MT_DISCONNECT,l3s7},
+ {8,MT_RELEASE,l3s19},
+ {8,MT_CONNECT_ACKNOWLEDGE,l3s17},
+ {8,MT_DISCONNECT,l3s7},
+ {8,MT_RELEASE_COMPLETE,l3s4_1},
+ {10,MT_DISCONNECT,l3s7},
+ {10,MT_RELEASE,l3s19},
+ {10,MT_RELEASE_COMPLETE,l3s4_1},
+ {11,MT_RELEASE,l3s19},
+ {11,MT_RELEASE_COMPLETE,l3s4},
+ {19,MT_RELEASE_COMPLETE,l3s4},
};
static int datasllen = sizeof(datastatelist) /
-#define DEBUG_1TR6 0
+/* $Id: l3_1TR6.c,v 1.2 1996/04/20 16:47:23 fritz Exp $
+ *
+ * $Log: l3_1TR6.c,v $
+ * Revision 1.2 1996/04/20 16:47:23 fritz
+ * Changed statemachine to allow reject of an incoming call.
+ * Report all incoming calls, not just those with Service = 7.
+ * Misc. typos
+ *
+ * Revision 1.1 1996/04/13 10:25:16 fritz
+ * Initial revision
+ *
+ *
+ */
char *
mt_trans(int pd, int mt)
byte *p;
struct BufHeader *ibh = arg;
-#if DEBUG_1TR6
- printk(KERN_INFO "1tr6: TU_SETUP\n");
-#endif
-
p = DATAPTR(ibh);
p += st->l2.uihsize;
st->pa->callref = getcallref(p);
st->l3.callref = 0x80 + st->pa->callref;
+#if DEBUG_1TR6
+ printk(KERN_INFO "1tr6: TU_SETUP cr=%d\n",st->l3.callref);
+#endif
+
/*
* Channel Identification
*/
BufPoolRelease(ibh);
- if (st->pa->info == 7) {
- newl3state(st, 6);
- st->l3.l3l4(st, CC_SETUP_IND, NULL);
+ /* Signal all services, linklevel takes care of Service-Indicator */
+ if (st->pa->info != 7) {
+ printk(KERN_INFO "non-digital call: %s -> %s\n",
+ st->pa->calling,
+ st->pa->called);
}
+ newl3state(st, 6);
+ st->l3.l3l4(st, CC_SETUP_IND, NULL);
}
static void
l3_1tr6_disconn_req(struct PStack *st, byte pr, void *arg)
{
struct BufHeader *dibh;
- byte *p;
+ byte *p;
+ byte rejflg;
#if DEBUG_1TR6
printk(KERN_INFO "1tr6: send DISCON\n");
#endif
- BufPoolGet(&dibh, st->l1.sbufpool, GFP_ATOMIC, (void *) st, 20);
+ BufPoolGet(&dibh, st->l1.sbufpool, GFP_ATOMIC, (void *) st, 21);
p = DATAPTR(dibh);
p += st->l2.ihsize;
*p++ = st->l3.callref;
*p++ = MT_N1_DISC;
- *p++ = WE0_cause;
- *p++ = 0x0; /* Laenge = 0 normales Ausloesen */
+ if (st->l3.state == 7) {
+ rejflg = 1;
+ *p++ = WE0_cause; /* Anruf abweisen */
+ *p++ = 0x01; /* Laenge = 1 */
+ *p++ = CAUSE_CallRejected;
+ } else {
+ rejflg = 0;
+ *p++ = WE0_cause;
+ *p++ = 0x0; /* Laenge = 0 normales Ausloesen */
+ }
dibh->datasize = p - DATAPTR(dibh);
i_down(st, dibh);
- newl3state(st, 11);
+ if (rejflg)
+ newl3state(st, 0);
+ else
+ newl3state(st, 11);
}
static void
static struct stateentry downstatelist_1tr6t[] =
{
{0, CC_SETUP_REQ, l3_1tr6_setup},
+ {4, CC_DISCONNECT_REQ, l3_1tr6_disconn_req},
{6, CC_REJECT_REQ, l3_1tr6_ignore},
{6, CC_SETUP_RSP, l3_1tr6_conn},
{6, CC_ALERTING_REQ, l3_1tr6_alert},
{7, CC_SETUP_RSP, l3_1tr6_conn},
{7, CC_DISCONNECT_REQ, l3_1tr6_disconn_req},
+ {7, CC_DLRL, l3_1tr6_disconn_req},
{8, CC_DISCONNECT_REQ, l3_1tr6_disconn_req},
{10, CC_DISCONNECT_REQ, l3_1tr6_disconn_req},
{12, CC_RELEASE_REQ, l3_1tr6_rel_req}
+/* $Id: l3_1TR6.h,v 1.1 1996/04/13 10:25:42 fritz Exp $
+ *
+ * $Log: l3_1TR6.h,v $
+ * Revision 1.1 1996/04/13 10:25:42 fritz
+ * Initial revision
+ *
+ *
+ */
#ifndef l3_1TR6
#define l3_1TR6
+/* $Id: llglue.c,v 1.1 1996/04/13 10:26:29 fritz Exp $
+ *
+ * $Log: llglue.c,v $
+ * Revision 1.1 1996/04/13 10:26:29 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
#include <linux/malloc.h>
+/* $Id: mod.c,v 1.1 1996/04/13 10:27:02 fritz Exp $
+ *
+ * $Log: mod.c,v $
+ * Revision 1.1 1996/04/13 10:27:02 fritz
+ * Initial revision
+ *
+ *
+ */
#include "teles.h"
-
-
extern struct IsdnCard cards[];
extern char *teles_id;
-/*
+/* $Id: q931.c,v 1.2 1996/04/20 16:48:19 fritz Exp $
+ *
* q931.c code to decode ITU Q.931 call control messages
*
* Author Jan den Ouden
*
* Beat Doebeli cause texts, display information element
*
+ * $Log: q931.c,v $
+ * Revision 1.2 1996/04/20 16:48:19 fritz
+ * Misc. typos
+ *
+ * Revision 1.1 1996/04/13 10:27:49 fritz
+ * Initial revision
+ *
+ *
*/
+/* $Id: tei.c,v 1.1 1996/04/13 10:28:25 fritz Exp $
+ *
+ * $Log: tei.c,v $
+ * Revision 1.1 1996/04/13 10:28:25 fritz
+ * Initial revision
+ *
+ *
+ */
#define __NO_VERSION__
#include "teles.h"
+/* $Id: teles.h,v 1.1 1996/04/13 10:29:00 fritz Exp $
+ *
+ * $Log: teles.h,v $
+ * Revision 1.1 1996/04/13 10:29:00 fritz
+ * Initial revision
+ *
+ *
+ */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/errno.h>
lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
}
+ current_tag = dev->name[3]-'0';
/* For the first probe, clear all board's tag registers. */
if (current_tag == 0)
outb(0xd0, id_port);
/* Free the interrupt so that some other card can use it. */
outw(0x0f00, ioaddr + WN0_IRQ);
+
+ if( check_region(ioaddr, EL3_IO_EXTENT) )
+ return -ENODEV;
+
found:
dev->base_addr = ioaddr;
dev->irq = irq;
endif
endif
+ifeq ($(CONFIG_SUNLANCE),y)
+L_OBJS += sunlance.o
+endif
+
ifeq ($(CONFIG_AT1700),y)
L_OBJS += at1700.o
else
extern int tc59x_probe(struct device *);
extern int dgrs_probe(struct device *);
extern int smc_init( struct device * );
+extern int sparc_lance_probe(struct device *);
/* Detachable devices ("pocket adaptors") */
extern int atp_init(struct device *);
#endif
#ifdef CONFIG_NI52
&& ni52_probe(dev)
+#endif
+#ifdef CONFIG_SUNLANCE
+ && sparc_lance_probe(dev)
#endif
&& 1 ) {
return 1; /* -ENODEV or -EAGAIN would be more accurate. */
#endif
/* This must be AFTER the various FRADs so it initializes FIRST! */
-
+
#ifdef CONFIG_DLCI
extern int dlci_init(struct device *);
static struct device dlci_dev = { "dlci", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dlci_init, };
#undef NEXT_DEV
#define NEXT_DEV (&strip_bootstrap)
#endif /* STRIP */
-
+
#if defined(CONFIG_PPP)
extern int ppp_init(struct device *);
static struct device ppp_bootstrap = {
#endif
#endif
+#ifdef CONFIG_AP1000
+ extern int apfddi_init(struct device *dev);
+ static struct device fddi_dev = {
+ "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&fddi_dev)
+
+ extern int bif_init(struct device *dev);
+ static struct device bif_dev = {
+ "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&bif_dev)
+
+#endif
+
extern int loopback_init(struct device *dev);
struct device loopback_dev = {
"lo", /* Software Loopback interface */
**********************
+ v2.52 (96/04/20)
+ - Replaced more decimal node ID's with hex, for consistency.
+ - Changed a couple of printk debug levels.
+
v2.51 (96/02/29)
- Inserted a rather important missing "volatile" in autoprobe.
- arc0e and arc0s are now options in drivers/net/Config.in.
TO DO: (semi-prioritized)
+ - Smarter recovery from RECON-during-transmit conditions.
- Make arcnetE_send_packet use arcnet_prepare_tx for loading the
packet into ARCnet memory.
- Probe for multiple devices in one shot (trying to decide whether
*/
static const char *version =
- "arcnet.c: v2.51 96/02/29 Avery Pennarun <apenwarr@foxnet.net>\n";
+ "arcnet.c: v2.52 96/04/20 Avery Pennarun <apenwarr@foxnet.net>\n";
#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x))
#define BUGMSG2(x,msg,args...) BUGLVL(x) printk(msg, ## args)
#define BUGMSG(x,msg,args...) BUGMSG2(x,"%s%6s: " msg, \
- x==D_NORMAL ? "" : x<=D_INIT_REASONS ? KERN_INFO : KERN_DEBUG , \
+ x==D_NORMAL ? KERN_WARNING : \
+ x<=D_INIT_REASONS ? KERN_INFO : KERN_DEBUG , \
dev->name , ## args)
/* Some useful multiprotocol macros. The idea here is that GCC will
if (!numports)
{
- BUGMSG(D_INIT,"Stage 1: failed. No ARCnet cards found.\n");
+ BUGMSG(D_NORMAL,"Stage 1: No ARCnet cards found.\n");
return -ENODEV;
}
if (!numshmems)
{
- BUGMSG(D_INIT,"Stage 3: failed. No ARCnet cards found.\n");
+ BUGMSG(D_NORMAL,"Stage 3: No ARCnet cards found.\n");
return -ENODEV;
}
}
else
{
+ /* just one shmem and port, assume they match */
*(u_char *)(shmems[0]) = TESTvalue;
}
#else
*/
for (shmem = &shmems[0]; shmem-shmems<numshmems; shmem++)
*(u_char *)(*shmem) = TESTvalue;
-
+
+ if (retval) BUGMSG(D_NORMAL,"Stage 5: No ARCnet cards found.\n");
return retval;
}
/* get and check the station ID from offset 1 in shmem */
lp->stationid = first_mirror[1];
if (lp->stationid==0)
- BUGMSG(D_NORMAL,"WARNING! Station address 0 is reserved "
+ BUGMSG(D_NORMAL,"WARNING! Station address 00 is reserved "
"for broadcasts!\n");
else if (lp->stationid==255)
BUGMSG(D_NORMAL,"WARNING! Station address FF may confuse "
if (status&TXFREEflag) /* transmit _DID_ finish */
{
- BUGMSG(D_NORMAL,"tx timeout - missed IRQ? (status=%Xh, ticks=%d, mask=%Xh, dest=%d)\n",
+ BUGMSG(D_NORMAL,"tx timeout - missed IRQ? (status=%Xh, ticks=%d, mask=%Xh, dest=%02Xh)\n",
status,tickssofar,lp->intmask,lp->lasttrans_dest);
lp->stats.tx_errors++;
}
else
{
- BUGMSG(D_EXTRA,"tx timed out (status=%Xh, tickssofar=%d, intmask=%Xh, dest=%d)\n",
+ BUGMSG(D_EXTRA,"tx timed out (status=%Xh, tickssofar=%d, intmask=%Xh, dest=%02Xh)\n",
status,tickssofar,lp->intmask,lp->lasttrans_dest);
lp->stats.tx_errors++;
lp->stats.tx_aborted_errors++;
{
if (lp->lasttrans_dest != 0)
{
- BUGMSG(D_EXTRA,"transmit was not acknowledged! (status=%Xh, dest=%d)\n",
+ BUGMSG(D_EXTRA,"transmit was not acknowledged! (status=%Xh, dest=%02Xh)\n",
status,lp->lasttrans_dest);
lp->stats.tx_errors++;
lp->stats.tx_carrier_errors++;
}
else
{
- BUGMSG(D_DURING,"broadcast was not acknowledged; that's normal (status=%Xh, dest=%d)\n",
+ BUGMSG(D_DURING,"broadcast was not acknowledged; that's normal (status=%Xh, dest=%02Xh)\n",
status,
lp->lasttrans_dest);
}
#ifdef MODULE
/*
- * Variables that can be overriden from command line
+ * Variables that can be overridden from command line
*/
static int debug = -1;
static int dma = -1;
#ifdef MODULE
/*
- * Variables that can be overriden from command line
+ * Variables that can be overridden from command line
*/
static int debug = -1;
static int dma = -1;
/*
* Because there are differences between the SE-4 and the SE-6,
* we assume that the following globals will be set up at init
- * time in main.c to containt the appropriate constants from above
+ * time in main.c to contain the appropriate constants from above
*/
extern ushort Gpp; /* Softcopy of GPP register */
extern ushort EEck; /* Clock bit */
extern int Nports; /* Number of genuine ethernet controllers */
extern int Nchan; /* ... plus one for host interface */
-extern int FirstChan; /* 0 or 1, depedning on whether host is used */
+extern int FirstChan; /* 0 or 1, depending on whether host is used */
extern int NumChan; /* 4 or 5 */
/*
/* cmd halfword values */
#define I596_SCB_ACK 0xF000 /* ACKNOWLEDGMENTS */
#define I596_SCB_ACK_CX 0x8000 /* Ack command completion */
-#define I596_SCB_ACK_FR 0x4000 /* Ack recieved frame */
+#define I596_SCB_ACK_FR 0x4000 /* Ack received frame */
#define I596_SCB_ACK_CNA 0x2000 /* Ack command unit not active */
#define I596_SCB_ACK_RNR 0x1000 /* Ack rcv unit not ready */
#define I596_SCB_ACK_ALL 0xF000 /* Ack everything */
/* status halfword values */
#define I596_SCB_STAT 0xF000 /* STATUS */
#define I596_SCB_CX 0x8000 /* command completion */
-#define I596_SCB_FR 0x4000 /* recieved frame */
+#define I596_SCB_FR 0x4000 /* received frame */
#define I596_SCB_CNA 0x2000 /* command unit not active */
#define I596_SCB_RNR 0x1000 /* rcv unit not ready */
#define PCI_INT_LINE 0x3C
/*
- * Registers accessable directly from PCI and local side.
+ * Registers accessible directly from PCI and local side.
* Offset is from PCI side. Add PLX_LCL_OFFSET for local address.
*/
#define PLX_LCL_OFFSET 0x80 /* Offset of regs from local side */
1.4:
Added support to run with a ledma on the Sun4m
+1.5:
+ Added multiple card detection.
+
+ 4/17/96: Burst sizes and tpe selection on sun4m by Christian Dost
+ (ecd@pool.informatik.rwth-aachen.de)
*/
#undef DEBUG_DRIVER
static char *version =
- "sunlance.c:v1.4 17/Feb/96 Miguel de Icaza (miguel@nuclecu.unam.mx)\n";
+ "sunlance.c:v1.6 19/Apr/96 Miguel de Icaza (miguel@nuclecu.unam.mx)\n";
static char *lancestr = "LANCE";
+static char *lancedma = "LANCE DMA";
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-/* Define: 2^2 Tx buffers and 2^4 Rx buffers */
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
#ifndef LANCE_LOG_TX_BUFFERS
-#define LANCE_LOG_TX_BUFFERS 2
+#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif
#define LE_T1_OWN 0x80 /* Lance owns the packet */
#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
#define LE_T1_EONE 0x08 /* Error: one retry needed */
#define LE_T1_EDEF 0x04 /* Error: deferred */
#define LE_T1_SOP 0x02 /* Start of packet */
#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
#define LE_T3_BUF 0x8000 /* Buffer error */
#define LE_T3_UFL 0x4000 /* Error underflow */
#define TX_BUFF_SIZE PKT_BUF_SZ
struct lance_rx_desc {
- unsigned short rmd0; /* low address of packet */
- unsigned char rmd1_bits; /* descriptor bits */
- unsigned char rmd1_hadr; /* high address of packet */
- short length; /* This length is 2s complement (negative)! Buffer length */
- unsigned short mblength; /* This is the actual number of bytes received */
+ unsigned short rmd0; /* low address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ unsigned char rmd1_hadr; /* high address of packet */
+ short length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ unsigned short mblength; /* This is the actual number of bytes received */
};
struct lance_tx_desc {
- unsigned short tmd0; /* low address of packet */
- unsigned char tmd1_bits; /* descriptor bits */
- unsigned char tmd1_hadr; /* high address of packet */
- short length; /* Length is 2s complement (negative)! */
- unsigned short misc;
+ unsigned short tmd0; /* low address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ unsigned char tmd1_hadr; /* high address of packet */
+ short length; /* Length is 2s complement (negative)! */
+ unsigned short misc;
};
/* The LANCE initialization block, described in databook. */
/* On the Sparc, this block should be on a DMA region */
struct lance_init_block {
- unsigned short mode; /* Pre-set mode (reg. 15) */
- unsigned char phys_addr[6]; /* Physical ethernet address */
- unsigned filter[2]; /* Multicast filter. */
-
- /* Receive and transmit ring base, along with extra bits. */
- unsigned short rx_ptr; /* receive descriptor addr */
- unsigned short rx_len; /* receive len and high addr */
- unsigned short tx_ptr; /* transmit descriptor addr */
- unsigned short tx_len; /* transmit len and high addr */
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
- /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */
- struct lance_rx_desc brx_ring[RX_RING_SIZE];
- struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
- char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
};
struct lance_private {
- char *name;
- volatile struct lance_regs *ll;
- volatile struct lance_init_block *init_block;
+ char *name;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block;
- int rx_new, tx_new;
- int rx_old, tx_old;
+ int rx_new, tx_new;
+ int rx_old, tx_old;
- struct enet_statistics stats;
- struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
+ struct enet_statistics stats;
+ struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
+
+ int tpe; /* cable-selection is TPE */
+ int burst_sizes; /* ledma SBus burst sizes */
};
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
/* On the sparc, the lance control ports are memory mapped */
struct lance_regs {
- unsigned short rdp; /* register data port */
- unsigned short rap; /* register address port */
+ unsigned short rdp; /* register data port */
+ unsigned short rap; /* register address port */
};
int sparc_lance_debug = 2;
/* Load the CSR registers */
static void load_csrs (struct lance_private *lp)
{
- volatile struct lance_regs *ll = lp->ll;
- volatile struct lance_init_block *ib = lp->init_block;
- int leptr;
-
- leptr = LANCE_ADDR (ib);
- ll->rap = LE_CSR1;
- ll->rdp = (leptr & 0xFFFF);
- ll->rap = LE_CSR2;
- ll->rdp = leptr >> 16;
- ll->rap = LE_CSR3;
- ll->rdp = LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON;
-
- /* Point back to csr0 */
- ll->rap = LE_CSR0;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR (ib);
+ ll->rap = LE_CSR1;
+ ll->rdp = (leptr & 0xFFFF);
+ ll->rap = LE_CSR2;
+ ll->rdp = leptr >> 16;
+ ll->rap = LE_CSR3;
+ ll->rdp = LE_C3_BSWP | LE_C3_ACON | LE_C3_BCON;
+
+ /* Point back to csr0 */
+ ll->rap = LE_CSR0;
}
#define ZERO 0
/* Setup the Lance Rx and Tx rings */
/* Sets dev->tbusy */
-static void
-lance_init_ring (struct device *dev)
+static void lance_init_ring (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *) dev->priv;
- volatile struct lance_init_block *ib = lp->init_block;
- int leptr;
- int i;
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int leptr;
+ int i;
- /* Lock out other processes while setting up hardware */
- dev->tbusy = 1;
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- ib->mode = 0;
-
- /* Copy the ethernet address to the lance init block
- * Note that on the sparc you need to swap the ethernet address.
- */
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
-
- if (ZERO)
- printk ("TX rings:\n");
+ /* Lock out other processes while setting up hardware */
+ dev->tbusy = 1;
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ if (ZERO)
+ printk ("TX rings:\n");
- /* Setup the Tx ring entries */
- for (i = 0; i <= TX_RING_SIZE; i++){
- leptr = LANCE_ADDR(&ib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (i < 3)
- if (ZERO) printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the Rx ring entries */
- if (ZERO) printk ("RX rings:\n");
- for (i = 0; i < RX_RING_SIZE; i++){
- leptr = LANCE_ADDR(&ib->rx_buf[i][0]);
-
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (i < 3)
- if (ZERO) printk ("%d: 0x%8.8x\n", i, leptr);
- }
-
- /* Setup the initialization block */
+ /* Setup the Tx ring entries */
+ for (i = 0; i <= TX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(&ib->tx_buf[i][0]);
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ if (i < 3)
+ if (ZERO) printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk ("RX rings:\n");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(&ib->rx_buf[i][0]);
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ if (i < 3 && ZERO)
+ printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
- /* Setup rx descriptor pointer */
- leptr = LANCE_ADDR(&ib->brx_ring);
- ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
- ib->rx_ptr = leptr;
- if (ZERO) printk ("RX ptr: %8.8x\n", leptr);
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&ib->brx_ring);
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (ZERO)
+ printk ("RX ptr: %8.8x\n", leptr);
- /* Setup tx descriptor pointer */
- leptr = LANCE_ADDR(&ib->btx_ring);
- ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
- ib->tx_ptr = leptr;
- if (ZERO) printk ("TX ptr: %8.8x\n", leptr);
-
- /* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&ib->btx_ring);
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (ZERO)
+ printk ("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
}
-static int
-init_restart_lance (struct lance_private *lp)
+static int init_restart_lance (struct lance_private *lp)
{
- volatile struct lance_regs *ll = lp->ll;
- int i;
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
- ll->rap = LE_CSR0;
- ll->rdp = LE_C0_INIT;
+ if (lp->ledma) {
+ struct sparc_dma_registers *dregs = lp->ledma->regs;
+ unsigned long creg;
- /* Wait for the lance to complete initialization */
- for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
- ;
- if ((i == 100) || (ll->rdp & LE_C0_ERR)){
- printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
- if (lp->ledma)
- printk ("dcsr=%8.8x\n", (unsigned int) lp->ledma->regs->cond_reg);
- return -1;
- }
+ while (dregs->cond_reg & DMA_FIFO_ISDRAIN) /* E-Cache draining */
+ barrier();
+
+ creg = dregs->cond_reg;
+ if (lp->burst_sizes & DMA_BURST32)
+ creg |= DMA_E_BURST8;
+ else
+ creg &= ~DMA_E_BURST8;
- /* Clear IDON by writing a "1", enable interrupts and start lance */
- ll->rdp = LE_C0_IDON;
- ll->rdp = LE_C0_INEA | LE_C0_STRT;
+ creg |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
- /* On the 4m, enable dma interrupts */
- if (lp->ledma)
- lp->ledma->regs->cond_reg |= DMA_INT_ENAB;
+ if (lp->tpe)
+ creg |= DMA_EN_ENETAUI;
+ else
+ creg &= ~DMA_EN_ENETAUI;
+ udelay(20);
+ dregs->cond_reg = creg;
+ udelay(200);
+ }
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_INIT;
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+ if (lp->ledma)
+ printk ("dcsr=%8.8x\n",
+ (unsigned int) lp->ledma->regs->cond_reg);
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ ll->rdp = LE_C0_IDON;
+ ll->rdp = LE_C0_INEA | LE_C0_STRT;
+
+ if (lp->ledma)
+ lp->ledma->regs->cond_reg |= DMA_INT_ENAB;
- return 0;
+ return 0;
}
-static int
-lance_rx (struct device *dev)
+static int lance_rx (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *) dev->priv;
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_regs *ll = lp->ll;
- volatile struct lance_rx_desc *rd;
- unsigned char bits;
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
#ifdef TEST_HITS
- printk ("[");
- for (i = 0; i < RX_RING_SIZE; i++){
- if (i == lp->rx_new)
- printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
- else
- printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
- }
- printk ("]");
+ printk ("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk ("]");
#endif
- ll->rdp = LE_C0_RINT|LE_C0_INEA;
- for (rd = &ib->brx_ring [lp->rx_new];
- !((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]){
- int pkt_len;
- struct sk_buff *skb;
- char *buf;
-
- /* We got an incomplete frame? */
- if ((bits & LE_R1_POK) != LE_R1_POK){
- lp->stats.rx_over_errors++;
- lp->stats.rx_errors++;
- continue;
- } else if (bits & LE_R1_ERR){
- /* Count only the end frame as a tx error, not the beginning */
- if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) lp->stats.rx_errors++;
- } else {
- pkt_len = rd->mblength;
- skb = dev_alloc_skb (pkt_len+2);
- if (skb == NULL){
- printk ("%s: Memory squeeze, deferring packet.\n", dev->name);
- lp->stats.rx_dropped++;
+ ll->rdp = LE_C0_RINT|LE_C0_INEA;
+ for (rd = &ib->brx_ring [lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [lp->rx_new]) {
+ int pkt_len;
+ struct sk_buff *skb;
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ lp->stats.rx_over_errors++;
+ lp->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a tx error, not the beginning */
+ if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) lp->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) lp->stats.rx_errors++;
+ } else {
+ pkt_len = rd->mblength;
+ skb = dev_alloc_skb (pkt_len+2);
+ if (skb == NULL) {
+ printk ("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+ return 0;
+ }
+
+ skb->dev = dev;
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put (skb, pkt_len); /* make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
+ pkt_len, 0);
+ skb->protocol = eth_type_trans (skb,dev);
+ netif_rx (skb);
+ lp->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
- return 0;
- }
-
- skb->dev = dev;
- skb_reserve (skb, 2); /* 16 byte align */
- buf = skb_put (skb, pkt_len); /* make room */
- memcpy (buf, (char *) &(ib->rx_buf [lp->rx_new][0]), pkt_len);
- skb->protocol = eth_type_trans (skb,dev);
- netif_rx (skb);
- lp->stats.rx_packets++;
}
-
- /* Return the packet to the pool */
- rd->rmd1_bits = LE_R1_OWN;
- lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
- }
- return 0;
+ return 0;
}
-static int
-lance_tx (struct device *dev)
+static int lance_tx (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *) dev->priv;
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_regs *ll = lp->ll;
- volatile struct lance_tx_desc *td;
- int i, j;
- int status;
-
- /* csr0 is 2f3 */
- ll->rdp = LE_C0_TINT | LE_C0_INEA;
- /* csr0 is 73 */
- j = lp->tx_old;
- for (i = 0; i < TX_RING_SIZE; i++){
- td = &ib->btx_ring [j];
-
- if (td->tmd1_bits & LE_T1_ERR){
- status = td->misc;
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+ /* csr0 is 2f3 */
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ /* csr0 is 73 */
+ j = lp->tx_old;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ td = &ib->btx_ring [j];
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
- if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
- if (status & LE_T3_CLOS) lp->stats.tx_carrier_errors++;
- if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
-
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)){
- lp->stats.tx_fifo_errors++;
-
- printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name);
- /* Stop the lance */
- ll->rdp = LE_CSR0;
- ll->rap = LE_C0_STOP;
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- return 0;
- }
- } else
- lp->stats.tx_packets++;
+ lp->stats.tx_errors++;
+ if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++;
+ if (status & LE_T3_CLOS) lp->stats.tx_carrier_errors++;
+ if (status & LE_T3_LCOL) lp->stats.tx_window_errors++;
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ lp->stats.tx_fifo_errors++;
+
+ printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ lp->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ lp->stats.collisions += 2;
+
+ /* What to set here? */
+ if (td->tmd1_bits & LE_T1_EDEF)
+ /* EMPTY */ ;
+
+ lp->stats.tx_packets++;
+ }
- j = (j + 1) & TX_RING_MOD_MASK;
- }
- lp->tx_old = (lp->tx_old+1) & TX_RING_MOD_MASK;
+ j = (j + 1) & TX_RING_MOD_MASK;
+ }
+ lp->tx_old = (lp->tx_old+1) & TX_RING_MOD_MASK;
- ll->rdp = LE_C0_TINT | LE_C0_INEA;
- return 0;
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ return 0;
}
-static void
-lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+static void lance_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
- struct device *dev = (struct device *) (irq2dev_map [irq]);
- struct lance_private *lp;
- volatile struct lance_regs *ll;
- int csr0;
+ struct device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ int csr0;
- lp = (struct lance_private *) dev->priv;
- ll = lp->ll;
-
- if (lp->ledma)
- if (lp->ledma->regs->cond_reg & DMA_HNDL_ERROR){
- printk ("%s: should reset my ledma (dmacsr=%8.8x, csr=%4.4x\n", dev->name,
- (unsigned int) lp->ledma->regs->cond_reg, ll->rdp);
- printk ("send mail to miguel@nuclecu.unam.mx\n");
+#ifdef OLD_STYLE_IRQ
+ dev = (struct device *) (irq2dev_map [irq]);
+#else
+ dev = (struct device *) dev_id;
+#endif
+
+ lp = (struct lance_private *) dev->priv;
+ ll = lp->ll;
+
+ if (lp->ledma) {
+ if (lp->ledma->regs->cond_reg & DMA_HNDL_ERROR) {
+ printk ("%s: should reset my ledma (dmacsr=%8.8x, csr=%4.4x\n",
+ dev->name, (unsigned int) lp->ledma->regs->cond_reg,
+ ll->rdp);
+ printk ("send mail to miguel@nuclecu.unam.mx\n");
+ }
}
- if (dev->interrupt)
- printk ("%s: again", dev->name);
+ if (dev->interrupt)
+ printk ("%s: again", dev->name);
- dev->interrupt = 1;
+ dev->interrupt = 1;
- csr0 = ll->rdp;
+ csr0 = ll->rdp;
- /* Acknowledge all the interrupt sources ASAP */
- ll->rdp = csr0 & 0x004f;
+ /* Acknowledge all the interrupt sources ASAP */
+ ll->rdp = csr0 & 0x004f;
- if ((csr0 & LE_C0_ERR)){
- /* Clear the error condition */
- ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
- }
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
+ }
- if (csr0 & LE_C0_RINT){
- lance_rx (dev);
- }
+ if (csr0 & LE_C0_RINT)
+ lance_rx (dev);
- if (csr0 & LE_C0_TINT){
- lance_tx (dev);
- }
+ if (csr0 & LE_C0_TINT)
+ lance_tx (dev);
- if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy){
- dev->tbusy = 0;
- mark_bh (NET_BH);
- }
- ll->rap = 0;
- ll->rdp = 0x7940;
+ if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy) {
+ dev->tbusy = 0;
+ mark_bh (NET_BH);
+ }
+ ll->rap = LE_CSR0;
+ ll->rdp = 0x7940;
- dev->interrupt = 0;
+ dev->interrupt = 0;
}
struct device *last_dev = 0;
-static int
-lance_open (struct device *dev)
+static int lance_open (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *)dev->priv;
- volatile struct lance_regs *ll = lp->ll;
- int status = 0;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ volatile struct lance_regs *ll = lp->ll;
+ int status = 0;
- last_dev = dev;
+ last_dev = dev;
- if (request_irq (dev->irq, &lance_interrupt, 0, lancestr, NULL)){
- printk ("Lance: Can't get irq %d\n", dev->irq);
- return -EAGAIN;
- }
- /* Stop the Lance */
- ll->rap = LE_CSR0;
- ll->rdp = LE_C0_STOP;
+ if (request_irq (dev->irq, &lance_interrupt, 0, lancestr, (void *) dev)) {
+ printk ("Lance: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
- irq2dev_map [dev->irq] = dev;
+ /* Stop the Lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+#ifdef OLD_STYLE_IRQ
+ irq2dev_map [dev->irq] = dev;
+#endif
- /* On the 4m, setup the ledma to provide the upper bits for buffers */
- if (lp->ledma)
- lp->ledma->regs->dma_test = ((unsigned int) lp->init_block) & 0xff000000;
+ /* On the 4m, setup the ledma to provide the upper bits for buffers */
+ if (lp->ledma)
+ lp->ledma->regs->dma_test = ((unsigned int) lp->init_block) & 0xff000000;
- lance_init_ring (dev);
- load_csrs (lp);
+ lance_init_ring (dev);
+ load_csrs (lp);
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
- status = init_restart_lance (lp);
- if (lp->ledma)
- lp->ledma->regs->cond_reg |= DMA_INT_ENAB;
+ status = init_restart_lance (lp);
#if 0
- /* To emulate SunOS, we add a route to the local network */
- rt_add (RTF_UP,
- dev->pa_addr & ip_get_mask (dev->pa_addr),
- ip_get_mask (dev->pa_addr),
- 0, dev, dev->mtu, 0, 0);
+ /* To emulate SunOS, we add a route to the local network */
+ rt_add (RTF_UP,
+ dev->pa_addr & ip_get_mask (dev->pa_addr),
+ ip_get_mask (dev->pa_addr),
+ 0, dev, dev->mtu, 0, 0);
#endif
- return status;
+ return status;
}
-static int
-lance_close (struct device *dev)
+static int lance_close (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *) dev->priv;
- volatile struct lance_regs *ll = lp->ll;
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ volatile struct lance_regs *ll = lp->ll;
- dev->start = 0;
- dev->tbusy = 1;
+ dev->start = 0;
+ dev->tbusy = 1;
- /* Stop the card */
- ll->rap = LE_CSR0;
- ll->rdp = LE_C0_STOP;
+ /* Stop the card */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
- free_irq (dev->irq, NULL);
- irq2dev_map [dev->irq] = NULL;
+ free_irq (dev->irq, NULL);
+#ifdef OLD_STYLE_IRQ
+ irq2dev_map [dev->irq] = NULL;
+#endif
- return 0;
+ return 0;
}
-inline static int
-lance_reset (struct device *dev)
+static inline int lance_reset (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *)dev->priv;
- volatile struct lance_regs *ll = lp->ll;
- int status;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
- /* Stop the lance */
- ll->rdp = LE_CSR0;
- ll->rap = LE_C0_STOP;
-
- /* On the 4m, reset the dma too */
- if (lp->ledma){
- printk ("resetting ledma\n");
- lp->ledma->regs->cond_reg |= DMA_RST_ENET;
- udelay (200);
- lp->ledma->regs->cond_reg &= ~DMA_RST_ENET;
- lp->ledma->regs->cond_reg |= DMA_INT_ENAB;
- }
- lance_init_ring (dev);
- load_csrs (lp);
- dev->trans_start = jiffies;
- dev->interrupt = 0;
- dev->start = 1;
- dev->tbusy = 0;
- status = init_restart_lance (lp);
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ /* On the 4m, reset the dma too */
+ if (lp->ledma) {
+ printk ("resetting ledma\n");
+ lp->ledma->regs->cond_reg |= DMA_RST_ENET;
+ udelay (200);
+ lp->ledma->regs->cond_reg &= ~DMA_RST_ENET;
+ lp->ledma->regs->dma_test = ((unsigned int) lp->init_block) & 0xff000000;
+ }
+ lance_init_ring (dev);
+ load_csrs (lp);
+ dev->trans_start = jiffies;
+ dev->interrupt = 0;
+ dev->start = 1;
+ dev->tbusy = 0;
+ status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
- printk ("Lance restart=%d\n", status);
+ printk ("Lance restart=%d\n", status);
#endif
- return status;
+ return status;
}
-static int
-lance_start_xmit (struct sk_buff *skb, struct device *dev)
+static int lance_start_xmit (struct sk_buff *skb, struct device *dev)
{
- struct lance_private *lp = (struct lance_private *)dev->priv;
- volatile struct lance_regs *ll = lp->ll;
- volatile struct lance_init_block *ib = lp->init_block;
- int entry, skblen, len;
- int status = 0;
- static int outs;
-
- /* Transmitter timeout, serious problems */
- if (dev->tbusy){
- int tickssofar = jiffies - dev->trans_start;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile unsigned long flush;
+ int entry, skblen, len;
+ int status = 0;
+ static int outs;
+
+ /* Transmitter timeout, serious problems */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 100)
- status = -1;
- else {
- printk ("%s: transmit timed out, status %04x, resetting\n",
- dev->name, ll->rdp);
- lance_reset (dev);
- }
- return status;
- }
-
- if (skb == NULL){
- dev_tint (dev);
- printk ("skb is NULL\n");
- return 0;
- }
+ if (tickssofar < 100) {
+ status = -1;
+ } else {
+ printk ("%s: transmit timed out, status %04x, resetting\n",
+ dev->name, ll->rdp);
+ lance_reset (dev);
+ }
+ return status;
+ }
- if (skb->len <= 0){
- printk ("skb len is %ld\n", skb->len);
- return 0;
- }
- /* Block a timer-based transmit from overlapping. */
+ if (skb == NULL) {
+ dev_tint (dev);
+ printk ("skb is NULL\n");
+ return 0;
+ }
+
+ if (skb->len <= 0) {
+ printk ("skb len is %ld\n", skb->len);
+ return 0;
+ }
+ /* Block a timer-based transmit from overlapping. */
#ifdef OLD_METHOD
- dev->tbusy = 1;
+ dev->tbusy = 1;
#else
- if (set_bit (0, (void *) &dev->tbusy) != 0){
- printk ("Transmitter access conflict.\n");
- return -1;
- }
+ if (set_bit (0, (void *) &dev->tbusy) != 0) {
+ printk ("Transmitter access conflict.\n");
+ return -1;
+ }
#endif
- skblen = skb->len;
+ skblen = skb->len;
- if (!TX_BUFFS_AVAIL){
- return -1;
- }
+ if (!TX_BUFFS_AVAIL)
+ return -1;
#ifdef DEBUG_DRIVER
- /* dump the packet */
- {
- int i;
+ /* dump the packet */
+ {
+ int i;
- for (i = 0; i < 64; i++){
- if ((i % 16) == 0) printk ("\n");
- printk ("%2.2x ", skb->data [i]);
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk ("\n");
+ printk ("%2.2x ", skb->data [i]);
+ }
}
- }
#endif
- len = (skblen < ETH_ZLEN) ? ETH_ZLEN : skblen;
- entry = lp->tx_new & TX_RING_MOD_MASK;
- ib->btx_ring [entry].length = (-len) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
- memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
-
- /* Clear the slack of the packet, do I need this? */
- if (len != skblen){
- memset ((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
- }
+ memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
+ /* Clear the slack of the packet, do I need this? */
+ if (len != skblen)
+ memset ((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
- /* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_SOP|LE_T1_EOP|LE_T1_OWN);
- lp->tx_new = (lp->tx_new+1) & TX_RING_MOD_MASK;
-
- outs++;
- /* Kick the lance: transmit now */
- ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev->trans_start = jiffies;
- dev_kfree_skb (skb, FREE_WRITE);
+ /* Now, give the packet to the lance */
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & TX_RING_MOD_MASK;
+
+ outs++;
+ /* Kick the lance: transmit now */
+ ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb, FREE_WRITE);
- if (TX_BUFFS_AVAIL)
- dev->tbusy = 0;
+ if (TX_BUFFS_AVAIL)
+ dev->tbusy = 0;
+
+ /* Read back CSR to invalidate the E-Cache.
+ * This is needed, because DMA_DSBL_WR_INV is set. */
+ if (lp->ledma)
+ flush = ll->rdp;
- return status;
+ return status;
}
-static struct enet_statistics *
-lance_get_stats (struct device *dev)
+static struct enet_statistics *lance_get_stats (struct device *dev)
{
- struct lance_private *lp = (struct lance_private *) dev->priv;
+ struct lance_private *lp = (struct lance_private *) dev->priv;
- return &lp->stats;
+ return &lp->stats;
}
-static void
-lance_set_multicast (struct device *dev, int num_addrs, void *addrs)
+static void lance_set_multicast (struct device *dev)
{
#ifdef NOT_YET
- struct lance_private *lp = (struct lance_private *) dev->priv;
- volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_regs *ll = lp->ll;
-
- ll->rdp = LE_CSR0;
- ll->rap = LE_C0_STOP;
- lance_init_ring (dev);
- if (num_addrs >= 0){
- printk ("Ignoring set_multicast\n");
- } else {
+ struct lance_private *lp = (struct lance_private *) dev->priv;
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring (dev);
ib->mode |= LE_MO_PROM;
- }
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
- dev->tbusy = 0;
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ dev->tbusy = 0;
#endif
}
+int sparc_lance_init (struct device *dev, struct linux_sbus_device *sdev,
+ struct Linux_SBus_DMA *ledma,
+ struct linux_sbus_device *lebuffer)
+{
+ static unsigned version_printed = 0;
+ int i;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+
+ if (dev == NULL) {
+ dev = init_etherdev (0, sizeof (struct lance_private));
+ } else {
+ dev->priv = kmalloc (sizeof (struct lance_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ }
+ if (sparc_lance_debug && version_printed++ == 0)
+ printk (version);
+
+ printk ("%s: LANCE ", dev->name);
+ /* Fill the dev fields */
+ dev->base_addr = (long) sdev;
+
+ /* Copy the IDPROM ethernet address to the device structure, later we
+ * will copy the address in the device structure to the lance initialization
+ * block
+ */
+ for (i = 0; i < 6; i++)
+ printk ("%2.2x%c",
+ dev->dev_addr [i] = idprom->id_eaddr [i], i == 5 ? ' ': ':');
+ printk("\n");
+
+ /* Get the IO region */
+ prom_apply_sbus_ranges (&sdev->reg_addrs [0], sdev->num_registers);
+ ll = sparc_alloc_io (sdev->reg_addrs [0].phys_addr, 0,
+ sizeof (struct lance_regs), lancestr,
+ sdev->reg_addrs[0].which_io, 0x0);
+
+ /* Make certain the data structures used by the LANCE are aligned. */
+ dev->priv = (void *)(((int)dev->priv + 7) & ~7);
+ lp = (struct lance_private *) dev->priv;
+ memset ((char *)dev->priv, 0, sizeof (struct lance_private));
+
+ if (lebuffer){
+ prom_apply_sbus_ranges (&sdev->reg_addrs [0], sdev->num_registers);
+ lp->init_block = (void *)
+ sparc_alloc_io (lebuffer->reg_addrs [0].phys_addr, 0,
+ sizeof (struct lance_init_block), "lebuffer",
+ lebuffer->reg_addrs [0].which_io, 0);
+ } else {
+ lp->init_block = (void *)
+ sparc_dvma_malloc (sizeof (struct lance_init_block),
+ lancedma);
+ }
+
+ lp->ll = ll;
+ lp->name = lancestr;
+ lp->ledma = ledma;
+
+ lp->burst_sizes = 0;
+ if (lp->ledma) {
+ char cable_prop[4];
+ unsigned int sbmask;
+
+ /* Find burst-size property for ledma */
+ lp->burst_sizes = prom_getintdefault(ledma->SBus_dev->prom_node,
+ "burst-sizes", 0);
+
+ /* ledma may be capable of fast bursts, but sbus may not. */
+ sbmask = prom_getintdefault(ledma->SBus_dev->my_bus->prom_node,
+ "burst-sizes", DMA_BURSTBITS);
+ lp->burst_sizes &= sbmask;
+
+ /* Get the cable-selection property */
+ prom_getstring(ledma->SBus_dev->prom_node, "cable-selection",
+ cable_prop, sizeof(cable_prop));
+ if (!strcmp(cable_prop, "aui"))
+ lp->tpe = 0;
+ else
+ lp->tpe = 1;
+
+ /* Reset ledma */
+ lp->ledma->regs->cond_reg |= DMA_RST_ENET;
+ udelay (200);
+ lp->ledma->regs->cond_reg &= ~DMA_RST_ENET;
+ }
+
+ /* This should never happen. */
+ if ((int)(lp->init_block->brx_ring) & 0x07) {
+ printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
+ return ENODEV;
+ }
+
+ dev->open = &lance_open;
+ dev->stop = &lance_close;
+ dev->hard_start_xmit = &lance_start_xmit;
+ dev->get_stats = &lance_get_stats;
+ dev->set_multicast_list = &lance_set_multicast;
+
+ dev->irq = (unsigned char) sdev->irqs [0].pri;
+ dev->dma = 0;
+ ether_setup (dev);
+
+ return 0;
+}
+
/* On 4m, find the associated dma for the lance chip */
static struct Linux_SBus_DMA *
find_ledma (struct linux_sbus_device *dev)
{
- struct Linux_SBus_DMA *p;
+ struct Linux_SBus_DMA *p;
- for (p = dma_chain; p; p = p->next)
- if (p->SBus_dev == dev)
- return p;
- return 0;
+ for (p = dma_chain; p; p = p->next)
+ if (p->SBus_dev == dev)
+ return p;
+ return 0;
}
-/* FIXME: the probe code should be able to detect */
+/* Find all the lance cards on the system and initialize them */
int sparc_lance_probe (struct device *dev)
{
- static unsigned version_printed = 0;
- int i;
- int found = 0;
- struct linux_sbus *bus;
- struct linux_sbus_device *sdev = 0;
- struct Linux_SBus_DMA *ledma = 0;
- struct lance_private *lp;
- volatile struct lance_regs *ll;
-
-#ifdef DEBUG_DRIVER
- printk ("Lance probe...0x%p\n", SBus_chain);
-#endif
- for_each_sbus (bus){
- for_each_sbusdev (sdev, bus){
- if (strcmp (sdev->prom_name, "le") == 0){
- found = 1;
- break;
- }
- if (strcmp (sdev->prom_name, "ledma") == 0){
- ledma = find_ledma (sdev);
- found = 1;
- sdev = sdev->child;
- break;
- }
- }
- }
- if (!found)
- return ENODEV;
-
- if (dev == NULL){
- printk ("LANCE buffer @0x0. You don't really want this\n");
- dev = init_etherdev (0, sizeof (struct lance_private));
- } else {
- dev->priv = kmalloc (sizeof (struct lance_private), GFP_KERNEL);
- if (dev->priv == NULL)
- return -ENOMEM;
- }
- if (sparc_lance_debug && version_printed++ == 0)
- printk (version);
-
- printk ("%s: LANCE ", dev->name);
- /* Fill the dev fields */
- dev->base_addr = (long) sdev;
-
- /* Copy the IDPROM ethernet address to the device structure, later we
- * will copy the address in the device structure to the lance initialization
- * block
- */
- for (i = 0; i < 6; i++){
- printk ("%2.2x%c", dev->dev_addr [i] = idprom->id_eaddr [i], i == 5 ? ' ': ':');
- }
- /* Get the IO region */
- prom_apply_sbus_ranges (&sdev->reg_addrs [0], sdev->num_registers);
- ll = sparc_alloc_io (sdev->reg_addrs [0].phys_addr, 0,
- sizeof (struct lance_regs), lancestr,
- sdev->reg_addrs[0].which_io, 0x0);
-
- /* Make certain the data structures used by the LANCE are aligned. */
- dev->priv = (void *)(((int)dev->priv + 7) & ~7);
- lp = (struct lance_private *) dev->priv;
- memset ((char *)dev->priv, 0, sizeof (struct lance_private));
+ struct linux_sbus *bus;
+ struct linux_sbus_device *sdev = 0;
+ struct Linux_SBus_DMA *ledma = 0;
+ int cards = 0, v;
- lp->init_block = (void *)
- sparc_dvma_malloc (sizeof (struct lance_init_block), lancestr);
-
- lp->ll = ll;
- lp->name = lancestr;
- lp->ledma = ledma;
-
- /* This should never happen. */
- if ((int)(lp->init_block->brx_ring) & 0x07) {
- printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
- return ENODEV;
- }
-
- dev->open = &lance_open;
- dev->stop = &lance_close;
- dev->hard_start_xmit = &lance_start_xmit;
- dev->get_stats = &lance_get_stats;
- dev->set_multicast_list = &lance_set_multicast;
-
- dev->irq = (unsigned char) sdev->irqs [0].pri;
- dev->dma = 0;
- ether_setup (dev);
-
- return 0;
+ for_each_sbus (bus) {
+ for_each_sbusdev (sdev, bus) {
+ if (cards) dev = NULL;
+ if (strcmp (sdev->prom_name, "le") == 0) {
+ cards++;
+ if ((v = sparc_lance_init(dev, sdev, ledma,0)))
+ return v;
+ }
+ if (strcmp (sdev->prom_name, "ledma") == 0) {
+ cards++;
+ ledma = find_ledma (sdev);
+ sdev = sdev->child;
+ if ((v = sparc_lance_init(dev, sdev, ledma,0)))
+ return v;
+ break;
+ }
+ if (strcmp (sdev->prom_name, "lebuffer") == 0){
+ struct linux_sbus_device *le = sdev->child;
+ cards++;
+ if ((v = sparc_lance_init(dev, le, ledma,sdev)))
+ return v;
+ break;
+ }
+ } /* for each sbusdev */
+ } /* for each sbus */
+ if (!cards)
+ return ENODEV;
+ return 0;
}
+
/*
* Local variables:
- * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I../../..//net/tcp -c lance.c"
* version-control: t
* kept-new-versions: 5
* End:
--- /dev/null
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+L_OBJS := sbus.o dvma.o
+L_TARGET := sbus.a
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/* dvma.c: Routines that are used to access DMA on the Sparc SBus.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+
+#include <asm/oplib.h>
+#include <asm/contregs.h>
+#include <asm/sysen.h>
+#include <asm/delay.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/sbus.h>
+#include <asm/vac-ops.h>
+#include <asm/vaddrs.h>
+
+struct Linux_SBus_DMA *dma_chain;
+
+/* Print out the current values in the DMA control registers */
+void
+dump_dma_regs(struct sparc_dma_registers *dregs)
+{
+ printk("DMA CONTROL<%08lx> ADDR<%08lx> CNT<%08lx> TEST<%08lx>\n",
+ dregs->cond_reg,
+ (unsigned long) dregs->st_addr,
+ (unsigned long) dregs->cnt,
+ (unsigned long) dregs->dma_test);
+ return;
+}
+
+
+/* Probe this SBus DMA module(s) */
+unsigned long
+dvma_init(struct linux_sbus *sbus, unsigned long memory_start)
+{
+ struct linux_sbus_device *this_dev;
+ struct Linux_SBus_DMA *dma;
+ struct Linux_SBus_DMA *dchain;
+ static int num_dma=0;
+
+ for_each_sbusdev(this_dev, sbus) {
+ if(strcmp(this_dev->prom_name, "dma") &&
+ strcmp(this_dev->prom_name, "ledma") &&
+ strcmp(this_dev->prom_name, "espdma"))
+ continue;
+
+ /* Found one... */
+ dma = (struct Linux_SBus_DMA *) memory_start;
+ memory_start += sizeof(struct Linux_SBus_DMA);
+
+ dma->SBus_dev = this_dev;
+
+ /* Put at end of dma chain */
+ dchain = dma_chain;
+ if(dchain) {
+ while(dchain->next) dchain=dchain->next;
+ dchain->next=dma;
+ } else {
+ /* We're the first in line */
+ dma_chain=dma;
+ }
+ dma->next = 0;
+
+ printk("dma%d: ", num_dma);
+ num_dma++;
+
+ /* The constant PAGE_SIZE that is passed to sparc_alloc_io makes the
+ * routine only alloc 1 page, that was what the original code did
+ */
+ prom_apply_sbus_ranges(dma->SBus_dev->reg_addrs, 0x1);
+ dma->regs = (struct sparc_dma_registers *)
+ sparc_alloc_io (dma->SBus_dev->reg_addrs[0].phys_addr, 0,
+ PAGE_SIZE, "dma",
+ dma->SBus_dev->reg_addrs[0].which_io, 0x0);
+
+ dma->node = dma->SBus_dev->prom_node;
+ dma->running=0; /* No tranfers going on as of yet */
+ dma->allocated=0; /* No one has allocated us yet */
+ switch((dma->regs->cond_reg)&DMA_DEVICE_ID) {
+ case DMA_VERS0:
+ dma->revision=dvmarev0;
+ printk("Revision 0 ");
+ break;
+ case DMA_ESCV1:
+ dma->revision=dvmaesc1;
+ printk("ESC Revision 1 ");
+ break;
+ case DMA_VERS1:
+ dma->revision=dvmarev1;
+ printk("Revision 1 ");
+ break;
+ case DMA_VERS2:
+ dma->revision=dvmarev2;
+ printk("Revision 2 ");
+ break;
+ case DMA_VERSPLUS:
+ dma->revision=dvmarevplus;
+ printk("Revision 1 PLUS ");
+ break;
+ default:
+ printk("unknown dma version");
+ dma->allocated = 1;
+ break;
+ }
+ printk("\n");
+#if 0 /* Clutters up the screen */
+ dump_dma_regs(dma->regs);
+#endif
+ }; /* while(this_dev) */
+
+ return memory_start;
+}
+
--- /dev/null
+/* sbus.c: SBus support routines.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+#include <asm/sbus.h>
+#include <asm/dma.h>
+#include <asm/oplib.h>
+
+/* This file has been written to be more dynamic and a bit cleaner,
+ * but it still needs some spring cleaning.
+ */
+
+struct linux_sbus *SBus_chain;
+
+static char lbuf[128];
+
+/* Perhaps when I figure out more about the iommu we'll put a
+ * device registration routine here that probe_sbus() calls to
+ * setup the iommu for each Sbus.
+ */
+
+/* We call this for each SBus device, and fill the structure based
+ * upon the prom device tree. We return the start of memory after
+ * the things we have allocated.
+ */
+
+/* #define DEBUG_FILL */
+void
+fill_sbus_device(int nd, struct linux_sbus_device *sbus_dev)
+{
+ int grrr, len;
+ unsigned long dev_base_addr, base;
+
+ sbus_dev->prom_node = nd;
+ prom_getstring(nd, "name", lbuf, sizeof(lbuf));
+ strcpy(sbus_dev->prom_name, lbuf);
+
+ dev_base_addr = prom_getint(nd, "address");
+ if(dev_base_addr != -1)
+ sbus_dev->sbus_addr = dev_base_addr;
+
+ len = prom_getproperty(nd, "reg", (void *) sbus_dev->reg_addrs,
+ sizeof(sbus_dev->reg_addrs));
+ if(len%sizeof(struct linux_prom_registers)) {
+ prom_printf("WHOOPS: proplen for %s was %d, need multiple of %d\n",
+ sbus_dev->prom_name, len,
+ (int) sizeof(struct linux_prom_registers));
+ panic("fill_sbus_device");
+ }
+ sbus_dev->num_registers = (len/sizeof(struct linux_prom_registers));
+
+ base = (unsigned long) sbus_dev->reg_addrs[0].phys_addr;
+ if(base>=SUN_SBUS_BVADDR || sparc_cpu_model == sun4m) {
+ /* Ahh, we can determine the slot and offset */
+ sbus_dev->slot = sbus_dev_slot(base);
+ sbus_dev->offset = sbus_dev_offset(base);
+ } else { /* Grrr, gotta do calculations to fix things up */
+ sbus_dev->slot = sbus_dev->reg_addrs[0].which_io;
+ sbus_dev->offset = base;
+ sbus_dev->reg_addrs[0].phys_addr =
+ (char *) sbus_devaddr(sbus_dev->slot, base);
+ for(grrr=1; grrr<sbus_dev->num_registers; grrr++) {
+ base = (unsigned long) sbus_dev->reg_addrs[grrr].phys_addr;
+ sbus_dev->reg_addrs[grrr].phys_addr = (char *)
+ sbus_devaddr(sbus_dev->slot, base);
+ }
+ /* That surely sucked */
+ }
+ sbus_dev->sbus_addr = (unsigned long) sbus_dev->reg_addrs[0].phys_addr;
+
+ if(len>(sizeof(struct linux_prom_registers)*PROMREG_MAX)) {
+ prom_printf("WHOOPS: I got too many register addresses for %s len=%d\n",
+ sbus_dev->prom_name, len);
+ panic("sbus device register overflow");
+ }
+
+ len = prom_getproperty(nd, "address", (void *) sbus_dev->sbus_vaddrs,
+ sizeof(sbus_dev->sbus_vaddrs));
+ if(len == -1) len=0;
+ if(len&3) {
+ prom_printf("Grrr, I didn't get a multiple of 4 proplen "
+ "for device %s got %d\n", sbus_dev->prom_name, len);
+ len=0;
+ }
+ sbus_dev->num_vaddrs = (len/4);
+
+ len = prom_getproperty(nd, "intr", (void *)sbus_dev->irqs,
+ sizeof(sbus_dev->irqs));
+ if (len == -1) len=0;
+ if (len&7) {
+ prom_printf("Grrr, I didn't get a multiple of 8 proplen for "
+ "device %s got %d\n", sbus_dev->prom_name, len);
+ len=0;
+ }
+ sbus_dev->num_irqs=(len/8);
+#if OLD_STYLE_IRQ
+ /* Grrr, V3 prom tries to be efficient */
+ for(len=0; len<sbus_dev->num_irqs; len++) {
+ sbus_dev->irqs[len].pri &= 0xf;
+ }
+#endif
+ if(sbus_dev->num_irqs == 0) sbus_dev->irqs[0].pri=0;
+
+#ifdef DEBUG_FILL
+ prom_printf("Found %s at SBUS slot %x offset %08lx irq-level %d\n",
+ sbus_dev->prom_name, sbus_dev->slot, sbus_dev->offset,
+ sbus_dev->irqs[0].pri);
+ prom_printf("Base address %08lx\n", sbus_dev->sbus_addr);
+ prom_printf("REGISTERS: Probed %d register(s)\n", sbus_dev->num_registers);
+ for(len=0; len<sbus_dev->num_registers; len++)
+ prom_printf("Regs<%d> at address<%08lx> IO-space<%d> size<%d "
+ "bytes, %d words>\n", (int) len,
+ (unsigned long) sbus_dev->reg_addrs[len].phys_addr,
+ sbus_dev->reg_addrs[len].which_io,
+ sbus_dev->reg_addrs[len].reg_size,
+ (sbus_dev->reg_addrs[len].reg_size/4));
+#endif
+
+ return;
+}
+
+/* This routine gets called from whoever needs the sbus first, to scan
+ * the SBus device tree. Currently it just prints out the devices
+ * found on the bus and builds trees of SBUS structs and attached
+ * devices.
+ */
+
+extern void sun_console_init(void);
+extern unsigned long iommu_init(int iommu_node, unsigned long memstart,
+ unsigned long memend, struct linux_sbus *sbus);
+
+/* sbus_init() - scan the OpenPROM device tree for SBus controllers and
+ * build the SBus_chain of linux_sbus / linux_sbus_device structures.
+ *
+ * memory_start/memory_end delimit boot-time free memory; this routine
+ * bump-allocates its structures directly from memory_start and returns
+ * the new (advanced) memory_start value to the caller.
+ */
+unsigned long
+sbus_init(unsigned long memory_start, unsigned long memory_end)
+{
+	register int nd, this_sbus, sbus_devs, topnd, iommund;
+	unsigned int sbus_clock;
+	struct linux_sbus *sbus;
+	struct linux_sbus_device *this_dev;
+	int num_sbus = 0;                /* How many did we find? */
+
+	/* 8-byte align the allocation cursor before carving structs out of it. */
+	memory_start = ((memory_start + 7) & (~7));
+
+	topnd = prom_getchild(prom_root_node);
+
+	/* Finding the first sbus is a special case...
+	 * It is either a direct child of the root node, or (on machines
+	 * with an IOMMU) a child of the "iommu" node.
+	 */
+	iommund = 0;
+	if((nd = prom_searchsiblings(topnd, "sbus")) == 0) {
+		if((iommund = prom_searchsiblings(topnd, "iommu")) == 0 ||
+		   (nd = prom_getchild(iommund)) == 0 ||
+		   (nd = prom_searchsiblings(nd, "sbus")) == 0) {
+			/* No reason to run further - the data access trap will occur. */
+			panic("sbus not found");
+		}
+	}
+
+	/* Ok, we've found the first one, allocate first SBus struct
+	 * and place in chain.
+	 */
+	sbus = SBus_chain = (struct linux_sbus *) memory_start;
+	memory_start += sizeof(struct linux_sbus);
+	sbus->next = 0;
+	this_sbus=nd;
+
+	/* Have IOMMU will travel. XXX grrr - this should be per sbus... */
+	if(iommund)
+		memory_start = iommu_init(iommund, memory_start, memory_end, sbus);
+
+	/* Loop until we find no more SBUS's */
+	while(this_sbus) {
+		printk("sbus%d: ", num_sbus);
+		/* Default to 25MHz if the PROM doesn't report a clock. */
+		sbus_clock = prom_getint(this_sbus, "clock-frequency");
+		if(sbus_clock==-1) sbus_clock = (25*1000*1000);
+		printk("Clock %d.%d MHz\n", (int) ((sbus_clock/1000)/1000),
+		       (int) (((sbus_clock/1000)%1000 != 0) ? 
+			      (((sbus_clock/1000)%1000) + 1000) : 0));
+
+		prom_getstring(this_sbus, "name", lbuf, sizeof(lbuf));
+		sbus->prom_node = this_sbus;
+		strcpy(sbus->prom_name, lbuf);
+		sbus->clock_freq = sbus_clock;
+
+		sbus_devs = prom_getchild(this_sbus);
+
+		/* First child device hangs directly off the bus struct. */
+		sbus->devices = (struct linux_sbus_device *) memory_start;
+		memory_start += sizeof(struct linux_sbus_device);
+
+		this_dev = sbus->devices;
+		this_dev->next = 0;
+
+		fill_sbus_device(sbus_devs, this_dev);
+		this_dev->my_bus = sbus;
+
+		/* Should we traverse for children?  The esp/le devices live
+		 * underneath their DMA gate-array nodes (espdma/ledma).
+		 */
+		if(strcmp(this_dev->prom_name, "espdma")==0 ||
+		   strcmp(this_dev->prom_name, "ledma")==0) {
+			/* Allocate device node */
+			this_dev->child = (struct linux_sbus_device *) memory_start;
+			memory_start += sizeof(struct linux_sbus_device);
+			/* Fill it */
+			fill_sbus_device(prom_getchild(sbus_devs), this_dev->child);
+			this_dev->child->my_bus = sbus;
+		} else {
+			this_dev->child = 0;
+		}
+
+		/* Walk the remaining siblings on this bus. */
+		while((sbus_devs = prom_getsibling(sbus_devs)) != 0) {
+			/* Allocate device node */
+			this_dev->next = (struct linux_sbus_device *) memory_start;
+			memory_start += sizeof(struct linux_sbus_device);
+			this_dev=this_dev->next;
+			this_dev->next=0;
+
+			/* Fill it */
+			fill_sbus_device(sbus_devs, this_dev);
+			this_dev->my_bus = sbus;
+
+			/* Is there a child node hanging off of us? */
+			if(strcmp(this_dev->prom_name, "espdma")==0 ||
+			   strcmp(this_dev->prom_name, "ledma")==0) {
+				/* Get new device struct */
+				this_dev->child =
+					(struct linux_sbus_device *) memory_start;
+				memory_start += sizeof(struct linux_sbus_device);
+
+				/* Fill it */
+				fill_sbus_device(prom_getchild(sbus_devs),
+						 this_dev->child);
+				this_dev->child->my_bus = sbus;
+			} else {
+				this_dev->child = 0;
+			}
+		}
+
+		/* Set up DVMA (DMA-capable virtual memory) for this bus. */
+		memory_start = dvma_init(sbus, memory_start);
+
+		num_sbus++;
+		this_sbus = prom_getsibling(this_sbus);
+		if(!this_sbus) break;
+		this_sbus = prom_searchsiblings(this_sbus, "sbus");
+		if(this_sbus) {
+			/* Another bus: chain a new linux_sbus and go around. */
+			sbus->next = (struct linux_sbus *) memory_start;
+			memory_start += sizeof(struct linux_sbus);
+			sbus = sbus->next;
+			sbus->next = 0;
+		} else {
+			break;
+		}
+	} /* while(this_sbus) */
+	sun_console_init(); /* whee... */
+	return memory_start;
+}
scsi_conf = inb(SCSICONF + p->base);
/*
- * Scale the Data FIFO Threshhold and the Bus Release Time; they are
+ * Scale the Data FIFO Threshold and the Bus Release Time; they are
* stored in formats compatible for writing to sequencer registers.
*/
dfthresh = p->bus_speed >> 6;
/*
* Set the QCNT (queue count) mask to deal with broken aic7850s that
- * sporatically get garbage in the upper bits of their QCNT registers.
+ * sporadically get garbage in the upper bits of their QCNT registers.
*/
outb(config->qcntmask, QCNTMASK + base);
--- /dev/null
+/* esp.c: EnhancedScsiProcessor Sun SCSI driver code.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/blk.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "esp.h"
+
+#include <asm/sbus.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/ptrace.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/vaddrs.h>
+#include <asm/io.h>
+
+#define DEBUG_ESP
+/* #define DEBUG_ESP_SG */
+
+#if defined(DEBUG_ESP)
+#define ESPLOG(foo) printk foo
+#else
+#define ESPLOG(foo)
+#endif /* (DEBUG_ESP) */
+
+#define INTERNAL_ESP_ERROR \
+ (panic ("Internal ESP driver error in file %s, line %d\n", \
+ __FILE__, __LINE__))
+
+#define INTERNAL_ESP_ERROR_NOPANIC \
+ (printk ("Internal ESP driver error in file %s, line %d\n", \
+ __FILE__, __LINE__))
+
+/* This enum will be expanded when we have sync code written. */
+enum {
+ not_issued = 0x01, /* Still in the issue_SC queue. */
+ in_selection = 0x02, /* ESP is arbitrating, awaiting IRQ */
+ in_datain = 0x04, /* Data is transferring over the bus */
+ in_dataout = 0x08, /* Data is transferring over the bus */
+ in_status = 0x10, /* Awaiting status/msg bytes from target */
+ in_finale = 0x11, /* Sent Msg ack, awaiting disconnect */
+};
+
+struct proc_dir_entry proc_scsi_esp = {
+ PROC_SCSI_ESP, 3, "esp",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+struct Sparc_ESP *espchain;
+
+static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs);
+static void esp_done(struct Sparc_ESP *esp, int error);
+
+/* Debugging routines */
+/* Table mapping ESP chip command opcodes to human-readable mnemonics,
+ * used only by the debug printer esp_print_cmd() below.
+ */
+struct esp_cmdstrings {
+	unchar cmdchar;   /* raw ESP command opcode (DMA bit stripped) */
+	char *text;       /* mnemonic printed for that opcode */
+} esp_cmd_strings[] = {
+	/* Miscellaneous */
+	{ ESP_CMD_NULL, "ESP_NOP", },
+	{ ESP_CMD_FLUSH, "FIFO_FLUSH", },
+	{ ESP_CMD_RC, "RSTESP", },
+	{ ESP_CMD_RS, "RSTSCSI", },
+	/* Disconnected State Group */
+	{ ESP_CMD_RSEL, "RESLCTSEQ", },
+	{ ESP_CMD_SEL, "SLCTNATN", },
+	{ ESP_CMD_SELA, "SLCTATN", },
+	{ ESP_CMD_SELAS, "SLCTATNSTOP", },
+	{ ESP_CMD_ESEL, "ENSLCTRESEL", },
+	{ ESP_CMD_DSEL, "DISSELRESEL", },
+	{ ESP_CMD_SA3, "SLCTATN3", },
+	{ ESP_CMD_RSEL3, "RESLCTSEQ", },
+	/* Target State Group */
+	{ ESP_CMD_SMSG, "SNDMSG", },
+	{ ESP_CMD_SSTAT, "SNDSTATUS", },
+	{ ESP_CMD_SDATA, "SNDDATA", },
+	{ ESP_CMD_DSEQ, "DISCSEQ", },
+	{ ESP_CMD_TSEQ, "TERMSEQ", },
+	{ ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
+	{ ESP_CMD_DCNCT, "DISC", },
+	{ ESP_CMD_RMSG, "RCVMSG", },
+	{ ESP_CMD_RCMD, "RCVCMD", },
+	{ ESP_CMD_RDATA, "RCVDATA", },
+	{ ESP_CMD_RCSEQ, "RCVCMDSEQ", },
+	/* Initiator State Group */
+	{ ESP_CMD_TI, "TRANSINFO", },
+	{ ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
+	{ ESP_CMD_MOK, "MSGACCEPTED", },
+	{ ESP_CMD_TPAD, "TPAD", },
+	{ ESP_CMD_SATN, "SATN", },
+	{ ESP_CMD_RATN, "RATN", },
+};
+/* Number of entries in the mnemonic table above. */
+#define NUM_ESP_COMMANDS  ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
+
+/* Print textual representation of an ESP command.  The DMA flag bit is
+ * masked off before the table lookup and reported separately as "+DMA".
+ */
+static inline void esp_print_cmd(unchar espcmd)
+{
+	unchar dma_bit = espcmd & ESP_CMD_DMA;
+	int i;
+
+	/* Strip the DMA modifier so the bare opcode matches the table. */
+	espcmd &= ~dma_bit;
+	for(i=0; i<NUM_ESP_COMMANDS; i++)
+		if(esp_cmd_strings[i].cmdchar == espcmd)
+			break;
+	if(i==NUM_ESP_COMMANDS)
+		printk("ESP_Unknown");
+	else
+		printk("%s%s", esp_cmd_strings[i].text,
+		       ((dma_bit) ? "+DMA" : ""));
+}
+
+/* Print the status register's value: the decoded SCSI bus phase
+ * followed by any asserted status flag bits.
+ */
+static inline void esp_print_statreg(unchar statreg)
+{
+	unchar phase;
+
+	printk("STATUS<");
+	/* Low bits of the status register encode the current bus phase. */
+	phase = statreg & ESP_STAT_PMASK;
+	printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
+		       (phase == ESP_DIP ? "DATA-IN" :
+			(phase == ESP_CMDP ? "COMMAND" :
+			 (phase == ESP_STATP ? "STATUS" :
+			  (phase == ESP_MOP ? "MSG-OUT" :
+			   (phase == ESP_MIP ? "MSG_IN" :
+			    "unknown")))))));
+	if(statreg & ESP_STAT_TDONE)
+		printk("TRANS_DONE,");
+	if(statreg & ESP_STAT_TCNT)
+		printk("TCOUNT_ZERO,");
+	if(statreg & ESP_STAT_PERR)
+		printk("P_ERROR,");
+	if(statreg & ESP_STAT_SPAM)
+		printk("SPAM,");
+	if(statreg & ESP_STAT_INTR)
+		printk("IRQ,");
+	printk(">");
+}
+
+/* Print the interrupt register's value: one tag per asserted
+ * interrupt-cause bit.
+ */
+static inline void esp_print_ireg(unchar intreg)
+{
+	printk("INTREG< ");
+	if(intreg & ESP_INTR_S)
+		printk("SLCT_NATN ");
+	if(intreg & ESP_INTR_SATN)
+		printk("SLCT_ATN ");
+	if(intreg & ESP_INTR_RSEL)
+		printk("RSLCT ");
+	if(intreg & ESP_INTR_FDONE)
+		printk("FDONE ");
+	if(intreg & ESP_INTR_BSERV)
+		printk("BSERV ");
+	if(intreg & ESP_INTR_DC)
+		printk("DISCNCT ");
+	if(intreg & ESP_INTR_IC)
+		printk("ILL_CMD ");
+	if(intreg & ESP_INTR_SR)
+		printk("SCSI_BUS_RESET ");
+	printk(">");
+}
+
+/* Print the sequence step register's contents: decodes how far the chip
+ * got through its last selection/command sequence.
+ */
+static inline void esp_print_seqreg(unchar stepreg)
+{
+	stepreg &= ESP_STEP_VBITS; /* only the low step bits are valid */
+	printk("STEP<%s>",
+	       (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
+		(stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
+		 (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
+		  (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
+		   (stepreg == ESP_STEP_FINI ? "CMD_SENT_OK" :
+		    "UNKNOWN"))))));
+}
+
+/* Manipulation of the ESP command queues.  Thanks to the aha152x driver
+ * and its author, Juergen E. Fischer, for the methods used here.
+ * Note that these are per-ESP queues, not global queues like
+ * the aha152x driver uses.
+ *
+ * The queues are singly-linked lists threaded through each command's
+ * host_scribble pointer; all list operations run with interrupts off.
+ */
+
+/* Append new_SC to the tail of the list headed at *SC. */
+static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+{
+	Scsi_Cmnd *end;
+	unsigned long flags;
+
+	save_flags(flags); cli();  /* protect list against the IRQ handler */
+	new_SC->host_scribble = (unsigned char *) NULL;
+	if(!*SC)
+		*SC = new_SC;
+	else {
+		/* Walk to the current tail, then link the new command on. */
+		for(end=*SC;end->host_scribble;end=(Scsi_Cmnd *)end->host_scribble)
+			;
+		end->host_scribble = (unsigned char *) new_SC;
+	}
+	restore_flags(flags);
+}
+
+/* Pop and return the head of the list at *SC, or NULL if it is empty.
+ * Interrupts are disabled around the unlink.
+ */
+static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
+{
+	Scsi_Cmnd *ptr;
+	unsigned long flags;
+
+	save_flags(flags); cli();
+	ptr = *SC;
+	if(ptr)
+		*SC = (Scsi_Cmnd *) (*SC)->host_scribble;
+	restore_flags(flags);
+	return ptr;
+}
+
+/* Unlink and return the first command on *SC matching the given
+ * target/lun pair, or NULL if no such command is queued.
+ */
+static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
+{
+	Scsi_Cmnd *ptr, *prev;
+	unsigned long flags;
+
+	save_flags(flags); cli();
+	/* Scan for the matching command, remembering its predecessor. */
+	for(ptr = *SC, prev = NULL;
+	    ptr && ((ptr->target != target) || (ptr->lun != lun));
+	    prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
+		;
+	if(ptr) {
+		if(prev)
+			prev->host_scribble=ptr->host_scribble;
+		else
+			*SC=(Scsi_Cmnd *)ptr->host_scribble;
+	}
+	restore_flags(flags);
+	return ptr;
+}
+
+/* Busy-wait for 'amount' jiffies (boot-time only; no scheduling here).
+ * NOTE(review): the "jiffies < the_time" comparison misbehaves if
+ * jiffies wraps during the wait -- acceptable at boot, verify if reused.
+ */
+static inline void do_pause(unsigned amount)
+{
+	unsigned long the_time = jiffies + amount;
+
+	while(jiffies < the_time)
+		barrier(); /* Not really needed, but... */
+}
+
+/* This places the ESP into a known state at boot time: resets the DVMA
+ * gate array, resets the ESP chip itself, reloads its configuration
+ * registers, and finally resets the SCSI bus with the chip's
+ * reset-interrupt suppressed.
+ */
+static inline void esp_bootup_reset(struct Sparc_ESP *esp, struct Sparc_ESP_regs *eregs)
+{
+	struct sparc_dma_registers *dregs = esp->dregs;
+	volatile unchar trash;   /* sink for reads done purely for side effect */
+
+	/* Punt the DVMA into a known state. */
+	dregs->cond_reg |= DMA_RST_SCSI;
+	do_pause(100);
+	dregs->cond_reg &= ~(DMA_RST_SCSI);
+	/* NOTE(review): dangling-else hazard below -- the two "else if"
+	 * clauses bind to the INNER if statements (the erev tests), not to
+	 * the outer dma->revision tests.  As written, the dvmarev3 and
+	 * dvmaesc1 branches are only reachable through the inner elses.
+	 * Confirm this matches the intended per-revision clock setup.
+	 */
+	if(esp->dma->revision == dvmarev2)
+		if(esp->erev != esp100)
+			dregs->cond_reg |= DMA_3CLKS;
+	else if(esp->dma->revision == dvmarev3)
+		if(esp->erev == fas236 || esp->erev == fas100a) {
+			dregs->cond_reg &= ~(DMA_3CLKS);
+			dregs->cond_reg |= DMA_2CLKS;
+		}
+	else if(esp->dma->revision == dvmaesc1)
+		dregs->cond_reg |= DMA_ADD_ENABLE;
+	DMA_INTSON(dregs);
+
+	/* Now reset the ESP chip */
+	eregs->esp_cmd = ESP_CMD_RC;
+	eregs->esp_cmd = (ESP_CMD_NULL | ESP_CMD_DMA);
+	eregs->esp_cmd = (ESP_CMD_NULL | ESP_CMD_DMA); /* borken hardware... */
+
+	/* Reload the configuration registers */
+	eregs->esp_cfg1  = esp->config1;
+	eregs->esp_cfact = esp->cfact;
+	eregs->esp_stp   = 0;
+	eregs->esp_soff  = 0;
+	eregs->esp_timeo = esp->sync_defp;
+	/* cfg2/cfg3 only exist on the newer chip revisions. */
+	if(esp->erev == esp100a || esp->erev == esp236)
+		eregs->esp_cfg2 = esp->config2;
+	if(esp->erev == esp236)
+		eregs->esp_cfg3 = esp->config3[0];
+	/* Eat any bitrot in the chip: reading the interrupt register
+	 * clears latched status.
+	 */
+	trash = eregs->esp_intrpt;
+
+	/* Reset the SCSI bus, but tell ESP not to generate an irq */
+	eregs->esp_cfg1 |= ESP_CONFIG1_SRRDISAB;
+	eregs->esp_cmd = ESP_CMD_RS;
+	do_pause(200);
+	eregs->esp_cfg1 = esp->config1;
+
+	/* Eat any bitrot in the chip and we are done... */
+	trash = eregs->esp_intrpt;
+}
+
+/* Detecting ESP chips on the machine.  This is the simple and easy
+ * version.
+ *
+ * Walks every device on every SBus looking for "esp"/"SUNW,esp" nodes
+ * (possibly hiding under an "espdma" parent), registers a Scsi_Host for
+ * each, pairs it with its DVMA gate array, maps its registers, probes
+ * the chip revision, and resets it.  Returns the number of hosts found.
+ */
+int esp_detect(Scsi_Host_Template *tpnt)
+{
+	struct Sparc_ESP *esp, *elink;
+	struct Scsi_Host *esp_host;
+	struct linux_sbus *sbus;
+	struct linux_sbus_device *esp_dev, *sbdev_iter;
+	struct Sparc_ESP_regs *eregs;
+	struct sparc_dma_registers *dregs;
+	struct Linux_SBus_DMA *dma, *dlink;
+	unsigned int fmhz;
+	unchar ccf, bsizes, bsizes_more;
+	int nesps = 0;
+	int esp_node;
+
+	espchain = 0;
+	if(!SBus_chain)
+		panic("No SBUS in esp_detect()");
+	for_each_sbus(sbus) {
+		for_each_sbusdev(sbdev_iter, sbus) {
+			/* Is it an esp sbus device? */
+			esp_dev = sbdev_iter;
+			if(strcmp(esp_dev->prom_name, "esp") &&
+			   strcmp(esp_dev->prom_name, "SUNW,esp")) {
+				/* Not a bare esp -- maybe an esp hiding
+				 * beneath an espdma node.
+				 */
+				if(!esp_dev->child ||
+				   strcmp(esp_dev->prom_name, "espdma"))
+					continue; /* nope... */
+				esp_dev = esp_dev->child;
+				if(strcmp(esp_dev->prom_name, "esp") &&
+				   strcmp(esp_dev->prom_name, "SUNW,esp"))
+					continue; /* how can this happen? */
+			}
+			esp_host = scsi_register(tpnt, sizeof(struct Sparc_ESP));
+			if(!esp_host)
+				panic("Cannot register ESP SCSI host");
+			esp = (struct Sparc_ESP *) esp_host->hostdata;
+			if(!esp)
+				panic("No esp in hostdata");
+			esp->ehost = esp_host;
+			esp->edev = esp_dev;
+			/* Put into the chain of esp chips detected */
+			if(espchain) {
+				elink = espchain;
+				while(elink->next) elink = elink->next;
+				elink->next = esp;
+			} else {
+				espchain = esp;
+			}
+			esp->next = 0;
+
+			/* Get misc. prom information */
+/* A DVMA gate array belongs to this esp when it sits on the same bus
+ * and slot and its node is named "dma" or "espdma".
+ */
+#define ESP_IS_MY_DVMA(esp, dma) \
+	((esp->edev->my_bus == dma->SBus_dev->my_bus) && \
+         (esp->edev->slot == dma->SBus_dev->slot) && \
+	 (!strcmp(dma->SBus_dev->prom_name, "dma") || \
+	  !strcmp(dma->SBus_dev->prom_name, "espdma")))
+
+			esp_node = esp_dev->prom_node;
+			prom_getstring(esp_node, "name", esp->prom_name,
+				       sizeof(esp->prom_name));
+			esp->prom_node = esp_node;
+			for_each_dvma(dlink) {
+				if(ESP_IS_MY_DVMA(esp, dlink) && !dlink->allocated)
+					break;
+			}
+#undef ESP_IS_MY_DVMA
+			/* If we don't know how to handle the dvma, do not use this device */
+			if(!dlink){
+				printk ("Cannot find dvma for ESP SCSI\n");
+				scsi_unregister (esp_host);
+				continue;
+			}
+			if (dlink->allocated){
+				printk ("esp: can't use my espdma\n");
+				scsi_unregister (esp_host);
+				continue;
+			}
+			dlink->allocated = 1;
+			dma = dlink;
+			esp->dma = dma;
+			esp->dregs = dregs = dma->regs;
+
+			/* Map in the ESP registers from I/O space */
+			prom_apply_sbus_ranges(esp->edev->reg_addrs, 1);
+			esp->eregs = eregs = (struct Sparc_ESP_regs *)
+				sparc_alloc_io(esp->edev->reg_addrs[0].phys_addr, 0,
+					       PAGE_SIZE, "ESP Registers",
+					       esp->edev->reg_addrs[0].which_io, 0x0);
+			if(!eregs)
+				panic("ESP registers unmappable");
+			/* 16-byte DVMA block used to ship command bytes. */
+			esp->esp_command =
+				sparc_dvma_malloc(16, "ESP DVMA Cmd Block");
+			if(!esp->esp_command)
+				panic("ESP DVMA transport area unmappable");
+
+			/* Set up the irq's etc. */
+			esp->ehost->base = (unsigned char *) esp->eregs;
+			esp->ehost->io_port = (unsigned int) esp->eregs;
+			esp->ehost->n_io_port = (unsigned char)
+				esp->edev->reg_addrs[0].reg_size;
+			/* XXX The following may be different on sun4ms XXX */
+			esp->ehost->irq = esp->irq = esp->edev->irqs[0].pri;
+
+			/* Allocate the irq only if necessary: if another esp
+			 * already registered this irq level, share it.
+			 */
+			for_each_esp(elink) {
+				if((elink != esp) && (esp->irq == elink->irq)) {
+					goto esp_irq_acquired; /* BASIC rulez */
+				}
+			}
+			/* XXX We have shared interrupts per level now, maybe
+			 * XXX use them, maybe not...
+			 */
+			if(request_irq(esp->ehost->irq, esp_intr, SA_INTERRUPT,
+				       "Sparc ESP SCSI", NULL))
+				panic("Cannot acquire ESP irq line");
+esp_irq_acquired:
+			printk("esp%d: IRQ %d ", nesps, esp->ehost->irq);
+			/* Figure out our scsi ID on the bus: try the node,
+			 * then the node's alternate property name, then the
+			 * parent bus, finally defaulting to 7.
+			 */
+			esp->scsi_id = prom_getintdefault(esp->prom_node,
+							  "initiator-id", -1);
+			if(esp->scsi_id == -1)
+				esp->scsi_id = prom_getintdefault(esp->prom_node,
+								  "scsi-initiator-id", -1);
+			if(esp->scsi_id == -1)
+				esp->scsi_id =
+					prom_getintdefault(esp->edev->my_bus->prom_node,
+							   "scsi-initiator-id", 7);
+			esp->ehost->this_id = esp->scsi_id;
+			esp->scsi_id_mask = (1 << esp->scsi_id);
+			/* Check for differential bus */
+			esp->diff = prom_getintdefault(esp->prom_node, "differential", -1);
+			esp->diff = (esp->diff == -1) ? 0 : 1;
+			/* Check out the clock properties of the chip.
+			 * fmhz is in KHz here; ccf is the clock conversion
+			 * factor written to the chip.
+			 * NOTE(review): if neither node reports a frequency,
+			 * fmhz stays -1 (unsigned: huge) -- confirm the
+			 * ccf>8 clamp below is the intended fallback.
+			 */
+			fmhz = prom_getintdefault(esp->prom_node, "clock-frequency", -1);
+			if(fmhz==-1)
+				fmhz = prom_getintdefault(esp->edev->my_bus->prom_node,
+							  "clock-frequency", -1);
+			if(fmhz <= (5000))
+				ccf = 0;
+			else
+				ccf = (((5000 - 1) + (fmhz))/(5000));
+			if(!ccf || ccf > 8) {
+				ccf = ESP_CCF_F4;
+				fmhz = (5000 * 4);
+			}
+			if(ccf==(ESP_CCF_F7+1))
+				esp->cfact = ESP_CCF_F0;
+			else if(ccf == ESP_CCF_NEVER)
+				esp->cfact = ESP_CCF_F2;
+			else
+				esp->cfact = ccf;
+			esp->cfreq = fmhz;
+			esp->ccycle = ((1000000000) / ((fmhz)/1000));
+			esp->ctick = ((7682 * esp->cfact * esp->ccycle)/1000);
+			esp->sync_defp = ((7682 + esp->ctick - 1) / esp->ctick);
+
+			/* XXX HACK HACK HACK XXX */
+			if (esp->sync_defp < 153)
+				esp->sync_defp = 153;
+
+			printk("SCSI ID %d  Clock %d MHz Period %2x ", esp->scsi_id,
+			       (fmhz / 1000), esp->sync_defp);
+
+			/* Find the burst sizes this dma supports. */
+			bsizes = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff);
+			bsizes_more = prom_getintdefault(esp->edev->my_bus->prom_node,
+							 "burst-sizes", 0xff);
+			if(bsizes_more != 0xff) bsizes &= bsizes_more;
+			if(bsizes == 0xff || (bsizes & DMA_BURST16)==0 ||
+			   (bsizes & DMA_BURST32)==0)
+				bsizes = (DMA_BURST32 - 1);
+			esp->bursts = bsizes;
+
+			/* Probe the revision of this esp by writing test
+			 * patterns to cfg2/cfg3 and seeing what sticks.
+			 */
+			esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
+			esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
+			esp->config3[0] = ESP_CONFIG3_TENB;
+			eregs->esp_cfg2 = esp->config2;
+			if((eregs->esp_cfg2 & ~(ESP_CONFIG2_MAGIC)) !=
+			   (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
+				printk("NCR53C90(esp100) detected\n");
+				esp->erev = esp100;
+			} else {
+				eregs->esp_cfg2 = esp->config2 = 0;
+				eregs->esp_cfg3 = 0;
+				eregs->esp_cfg3 = esp->config3[0] = 5;
+				if(eregs->esp_cfg3 != 5) {
+					printk("NCR53C90A(esp100a) detected\n");
+					esp->erev = esp100a;
+				} else {
+					int target;
+
+					for(target=0; target<8; target++)
+						esp->config3[target] = 0;
+					eregs->esp_cfg3 = 0;
+					if(esp->cfact > ESP_CCF_F5) {
+						printk("NCR53C9XF(espfast) detected\n");
+						esp->erev = fast;
+						esp->config2 |= ESP_CONFIG2_FENAB;
+						eregs->esp_cfg2 = esp->config2;
+					} else {
+						printk("NCR53C9x(esp236) detected\n");
+						esp->erev = esp236;
+						eregs->esp_cfg2 = esp->config2 = 0;
+					}
+				}
+			}				
+
+			/* Initialize the command queues */
+			esp->current_SC = 0;
+			esp->disconnected_SC = 0;
+			esp->issue_SC = 0;
+
+			/* Reset the thing before we try anything... */
+			esp_bootup_reset(esp, eregs);
+
+			nesps++;
+#ifdef THREADED_ESP_DRIVER
+			kernel_thread(esp_kernel_thread, esp, 0);
+#endif
+		} /* for each sbusdev */
+	} /* for each sbus */
+	return nesps;
+}
+
+/*
+ * The info function will return whatever useful
+ * information the developer sees fit.  If not provided, then
+ * the name field will be used instead.
+ *
+ * Maps the probed chip revision to a static descriptive string;
+ * panics on an impossible revision value.
+ */
+const char *esp_info(struct Scsi_Host *host)
+{
+	struct Sparc_ESP *esp;
+
+	esp = (struct Sparc_ESP *) host->hostdata;
+	switch(esp->erev) {
+	case esp100:
+		return "Sparc ESP100 (NCR53C90)";
+	case esp100a:
+		return "Sparc ESP100A (NCR53C90A)";
+	case esp236:
+		return "Sparc ESP236";
+	case fast:
+		return "Sparc ESP-FAST (236 or 100A)";
+	case fas236:
+		return "Sparc ESP236-FAST";
+	case fas100a:
+		return "Sparc ESP100A-FAST";
+	default:
+		panic("Bogon ESP revision");
+	};
+}
+
+/* Execute a SCSI command when the bus is free.  All callers
+ * turn off all interrupts, so we don't need to explicitly do
+ * it here.
+ *
+ * Dequeues the head of issue_SC into current_SC, programs the chip for
+ * an asynchronous selection-without-ATN, loads the CDB into the FIFO,
+ * and kicks off selection.  Completion arrives via esp_intr().
+ */
+static inline void esp_exec_cmd(struct Sparc_ESP *esp)
+{
+	struct sparc_dma_registers *dregs;
+	struct Sparc_ESP_regs *eregs;
+	Scsi_Cmnd *SCptr;
+	int i;
+
+	eregs = esp->eregs;
+	dregs = esp->dregs;
+
+	/* Grab first member of the issue queue. */
+	SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
+	if(!SCptr)
+		goto bad;
+	SCptr->SCp.phase = in_selection;
+
+	/* NCR docs say:
+	 *	1) Load select/reselect Bus ID register with target ID
+	 *	2) Load select/reselect Timeout Reg with desired value
+	 *	3) Load Synchronous offset register with zero (for
+	 *         asynchronous transfers).
+	 *	4) Load Synchronous Transfer Period register (if
+	 *         synchronous)
+	 *	5) Load FIFO with 6, 10, or 12 byte SCSI command
+	 *	6) Issue SELECTION_WITHOUT_ATTENTION command
+	 *
+	 * They also mention that a DMA NOP command must be issued
+	 * to the SCSI chip under many circumstances, plus it's
+	 * also a good idea to flush out the fifo just in case.
+	 */
+
+	/* Load zeros into COUNTER via 2 DMA NOP chip commands
+	 * due to flaky implementations of the 53C9x which don't
+	 * get the idea the first time around.
+	 */
+	dregs->cond_reg = (DMA_INT_ENAB | DMA_FIFO_INV);
+
+	eregs->esp_tclow = 0;
+	eregs->esp_tcmed = 0;
+	eregs->esp_cmd   = (ESP_CMD_NULL | ESP_CMD_DMA);
+
+	/* Flush the fifo of excess garbage. */
+	eregs->esp_cmd = ESP_CMD_FLUSH;
+
+	/* Load bus-id and timeout values. */
+	eregs->esp_busid = (SCptr->target & 7);
+	eregs->esp_timeo = esp->sync_defp;
+
+	eregs->esp_soff = 0;  /* This means async transfer... */
+	eregs->esp_stp  = 0;
+
+	/* Load FIFO with the actual SCSI command. */
+	for(i=0; i < SCptr->cmd_len; i++)
+		eregs->esp_fdata = SCptr->cmnd[i];
+
+	/* Make sure the dvma forwards the ESP interrupt. */
+	dregs->cond_reg = DMA_INT_ENAB;
+
+	/* Tell ESP to SELECT without asserting ATN. */
+	eregs->esp_cmd = ESP_CMD_SEL;
+	return;
+
+bad:
+	/* Only reached if a caller invoked us with an empty issue queue. */
+	panic("esp: daaarrrkk starrr crashesss....");
+}
+
+/* Queue a SCSI command delivered from the mid-level Linux SCSI code.
+ *
+ * Maps the command's data buffer (single buffer or scatter-gather list)
+ * for DVMA, appends the command to this host's issue queue, and starts
+ * it immediately if the bus is idle.  Always returns 0.
+ */
+int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+	struct Sparc_ESP *esp;
+	unsigned long flags;
+
+	save_flags(flags); cli();
+
+	/* Set up func ptr and initial driver cmd-phase. */
+	SCpnt->scsi_done = done;
+	SCpnt->SCp.phase = not_issued;
+
+	esp = (struct Sparc_ESP *) SCpnt->host->hostdata;
+
+	/* We use the scratch area. */
+	if(!SCpnt->use_sg) {
+		/* Single-buffer case: map one DVMA region and stash the
+		 * dvma pointer in SCp.ptr.
+		 */
+		SCpnt->SCp.this_residual    = SCpnt->request_bufflen;
+		SCpnt->SCp.buffer           =
+			(struct scatterlist *) SCpnt->request_buffer;
+		SCpnt->SCp.buffers_residual = 0;
+		SCpnt->SCp.Status           = CHECK_CONDITION;
+		SCpnt->SCp.Message          = 0;
+		SCpnt->SCp.have_data_in     = 0;
+		SCpnt->SCp.sent_command     = 0;
+		SCpnt->SCp.ptr = mmu_get_scsi_one((char *)SCpnt->SCp.buffer,
+						  SCpnt->SCp.this_residual,
+						  esp->edev->my_bus);
+	} else {
+#ifdef DEBUG_ESP_SG
+		printk("esp: sglist at %p with %d buffers\n",
+		       SCpnt->buffer, SCpnt->use_sg);
+#endif
+		/* Scatter-gather case: map the whole list; SCp tracks the
+		 * current segment and how many remain.
+		 */
+		SCpnt->SCp.buffer           = (struct scatterlist *) SCpnt->buffer;
+		SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+		SCpnt->SCp.this_residual    = SCpnt->SCp.buffer->length;
+		mmu_get_scsi_sgl((struct mmu_sglist *) SCpnt->SCp.buffer,
+				 SCpnt->SCp.buffers_residual,
+				 esp->edev->my_bus);
+		SCpnt->SCp.ptr              = (char *) SCpnt->SCp.buffer->alt_address;
+	}
+
+	/* Place into our queue. */
+	append_SC(&esp->issue_SC, SCpnt);
+
+	/* Run it now if we can */
+	if(!esp->current_SC)
+		esp_exec_cmd(esp);
+
+	restore_flags(flags);
+	return 0;
+}
+
+/* Only queuing supported in this ESP driver; synchronous command
+ * execution is refused unconditionally.
+ */
+int esp_command(Scsi_Cmnd *SCpnt)
+{
+	ESPLOG(("esp: esp_command() called...\n"));
+	return -1;
+}
+
+/* Abort a command.  Those that are on the bus force a SCSI bus
+ * reset.  (Stub: abort handling is not implemented yet; always
+ * reports SCSI_ABORT_ERROR to the mid-layer.)
+ */
+int esp_abort(Scsi_Cmnd *SCpnt)
+{
+	ESPLOG(("esp_abort: Not implemented yet\n"));
+	return SCSI_ABORT_ERROR;
+}
+
+/* Reset ESP chip, reset hanging bus, then kill active and
+ * disconnected commands for targets without soft reset.
+ * (Stub: reset handling is not implemented yet; always reports
+ * SCSI_RESET_ERROR to the mid-layer.)
+ */
+int esp_reset(Scsi_Cmnd *SCptr, unsigned int how)
+{
+	ESPLOG(("esp_reset: Not implemented yet\n"));
+	return SCSI_RESET_ERROR;
+}
+
+/* Internal ESP done function: complete the host's current command with
+ * 'error' as the mid-layer result code.  Unmaps the command's DVMA
+ * buffers, invokes scsi_done, and starts the next queued command if
+ * any.  Panics if called with no command in flight.
+ */
+static inline void esp_done(struct Sparc_ESP *esp, int error)
+{
+	unsigned long flags;
+	Scsi_Cmnd *done_SC;
+
+	if(esp->current_SC) {
+		/* Critical section... */
+		save_flags(flags); cli();
+		done_SC = esp->current_SC;
+		esp->current_SC = NULL;
+		/* Free dvma entry: mirror of the mapping done in esp_queue. */
+		if(!done_SC->use_sg) {
+			mmu_release_scsi_one(done_SC->SCp.ptr,
+					     done_SC->SCp.this_residual,
+					     esp->edev->my_bus);
+		} else {
+			struct scatterlist *scl = (struct scatterlist *)done_SC->buffer;
+#ifdef DEBUG_ESP_SG
+			printk("esp: unmapping sg ");
+#endif
+			mmu_release_scsi_sgl((struct mmu_sglist *) scl,
+					     done_SC->use_sg - 1,
+					     esp->edev->my_bus);
+#ifdef DEBUG_ESP_SG
+			printk("done.\n");
+#endif
+		}
+		done_SC->result = error;
+		if(done_SC->scsi_done)
+			done_SC->scsi_done(done_SC);
+		else
+			panic("esp: esp->current_SC->scsi_done() == NULL");
+
+		/* Bus is free, issue any commands in the queue. */
+		if(esp->issue_SC)
+			esp_exec_cmd(esp);
+
+		restore_flags(flags);
+		/* End of critical section... */
+	} else
+		panic("esp: done() called with NULL esp->current_SC");
+}
+
+#ifdef THREADED_ESP_DRIVER	/* planning stage... */
+
+/* With multiple lots of commands being processed I frequently
+ * see a situation where we see galloping esp herds.  esp_done()
+ * wakes the entire world up and each interrupt causes a reschedule.
+ * This kernel thread fixes some of these unwanted effects during
+ * IO intensive activity.... I hope...
+ *
+ * NOTE(review): this block is design-sketch code behind a disabled
+ * #ifdef -- as written it would not compile ('error' is undefined,
+ * esp_done() takes two arguments, remove_first_SC() wants a
+ * Scsi_Cmnd**, and sleep() is not a kernel primitive).  Keep disabled.
+ */
+
+static void esp_kernel_thread(void *opaque)
+{
+	struct Sparc_ESP *esp = opaque;
+
+	for(;;) {
+		unsigned long flags;
+
+		while(esp->eatme_SC) {
+			struct Scsi_Cmnd *SCpnt;
+
+			SCpnt = remove_first_SC(esp->eatme_SC);
+			esp_done(esp, error, SCpnt);
+		}
+		sleep();
+	}
+}
+#endif
+
+/* Read the interrupt status registers on this ESP board into the
+ * esp's software shadow copies (seqreg, sreg, ireg).
+ */
+static inline void esp_updatesoft(struct Sparc_ESP *esp, struct Sparc_ESP_regs *eregs)
+{
+	/* Update our software copies of the three ESP status
+	 * registers for this ESP.  Be careful, reading the
+	 * ESP interrupt register clears the status and sequence
+	 * step registers (unlatches them, you get the idea).
+	 * So read the interrupt register last.
+	 */
+
+	esp->seqreg = eregs->esp_sstep;
+	esp->sreg   = eregs->esp_status;
+
+	/* Supposedly, the ESP100A and above assert the highest
+	 * bit in the status register if an interrupt is pending.
+	 * I've never seen this work properly, so let's clear it
+	 * manually while we are here.  If I see any esp chips
+	 * for which this bit is reliable I will conditionalize
+	 * this.  However, I don't see what this extra bit can
+	 * buy me with all the tests I'll have to place all over
+	 * the code to actually use it when I 'can'.  Plus the
+	 * 'pending interrupt' condition can more than reliably
+	 * be obtained from the DVMA control register.
+	 *
+	 * "Broken hardware" -Linus
+	 */
+	esp->sreg &= (~ESP_STAT_INTR);
+	esp->ireg = eregs->esp_intrpt;   /* Must be last or we lose */
+}
+
+/* #define ESP_IRQ_TRACE */
+
+#ifdef ESP_IRQ_TRACE
+#define ETRACE(foo) printk foo
+#else
+#define ETRACE(foo)
+#endif
+
+static char last_fflags, last_status, last_msg;
+
+/* Main interrupt handler for an esp adapter. */
+static inline void esp_handle(struct Sparc_ESP *esp)
+{
+ struct sparc_dma_registers *dregs;
+ struct Sparc_ESP_regs *eregs;
+ Scsi_Cmnd *SCptr;
+
+ eregs = esp->eregs;
+ dregs = esp->dregs;
+ SCptr = esp->current_SC;
+
+ DMA_IRQ_ENTRY(esp->dma, dregs);
+ esp_updatesoft(esp, eregs);
+
+ ETRACE(("ESPIRQ: <%2x,%2x,%2x> --> ", esp->ireg, esp->sreg, esp->seqreg));
+
+ /* Check for errors. */
+ if(!SCptr)
+ panic("esp_handle: current_SC == penguin within interrupt!");
+
+ /* At this point in time, this esp driver should not see
+ * scsibus resets, parity errors, or gross errors unless
+ * something truly terrible happens which we are not ready
+ * to properly recover from yet.
+ */
+ if((esp->ireg & (ESP_INTR_SR | ESP_INTR_IC)) ||
+ (esp->sreg & (ESP_STAT_PERR | ESP_STAT_SPAM))) {
+ printk("esp: really bad error detected\n");
+ printk("esp: intr<%2x> stat<%2x> seq<%2x>",
+ esp->ireg, esp->sreg, esp->seqreg);
+ printk("esp: SCptr->SCp.phase = %d\n", SCptr->SCp.phase);
+ panic("esp: cannot continue\n");
+ }
+ if(dregs->cond_reg & DMA_HNDL_ERROR) {
+ printk("esp: DMA shows an error cond_reg<%08lx> addr<%p>\n",
+ dregs->cond_reg, dregs->st_addr);
+ printk("esp: intr<%2x> stat<%2x> seq<%2x>",
+ esp->ireg, esp->sreg, esp->seqreg);
+ printk("esp: SCptr->SCp.phase = %d\n", SCptr->SCp.phase);
+ panic("esp: cannot continue\n");
+ }
+ if(esp->sreg & ESP_STAT_PERR) {
+ printk("esp: SCSI bus parity error\n");
+ printk("esp: intr<%2x> stat<%2x> seq<%2x>",
+ esp->ireg, esp->sreg, esp->seqreg);
+ printk("esp: SCptr->SCp.phase = %d\n", SCptr->SCp.phase);
+ panic("esp: cannot continue\n");
+ }
+
+ /* Service interrupt. */
+ switch(SCptr->SCp.phase) {
+ case not_issued:
+ panic("Unexpected ESP interrupt, current_SC not issued.");
+ break;
+ case in_selection:
+ if(esp->ireg & ESP_INTR_RSEL) {
+ /* XXX Some day XXX */
+ panic("ESP penguin reselected in async mode.");
+ } else if(esp->ireg & ESP_INTR_DC) {
+ /* Either we are scanning the bus and no-one
+ * lives at this target or it didn't respond.
+ */
+ ETRACE(("DISCONNECT\n"));
+#ifdef THREADED_ESP_DRIVER
+ append_SC(esp->eatme_SC, esp->current_SC);
+ esp->current_SC = 0;
+ wake_up(esp_kernel_thread);
+#else
+ esp_done(esp, (DID_NO_CONNECT << 16));
+#endif
+ goto esp_handle_done;
+ } else if((esp->ireg & (ESP_INTR_FDONE | ESP_INTR_BSERV)) ==
+ (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
+ /* Selection successful, check the sequence step. */
+ /* XXX I know, I know... add error recovery. XXX */
+ switch(esp->seqreg & ESP_STEP_VBITS) {
+ case ESP_STEP_NCMD:
+ panic("esp: penguin didn't enter cmd phase.");
+ break;
+ case ESP_STEP_PPC:
+ panic("esp: penguin prematurely changed from cmd phase.");
+ break;
+ case ESP_STEP_FINI:
+ /* At the completion of every command
+ * or message-out phase, we _must_
+ * unlatch the fifo-flags register
+ * with an ESP nop command.
+ */
+ eregs->esp_cmd = ESP_CMD_NULL;
+
+ /* Selection/Command sequence completed. We
+ * (at least for this driver) will be in
+ * either one of the data phases or status
+ * phase, check the status register to find
+ * out.
+ */
+ switch(esp->sreg & ESP_STAT_PMASK) {
+ default:
+ printk("esp: Not datain/dataout/status.\n");
+ panic("esp: penguin phase transition after selection.");
+ break;
+ case ESP_DOP:
+ /* Data out phase. */
+ dregs->cond_reg |= DMA_FIFO_INV;
+ while(dregs->cond_reg & DMA_FIFO_ISDRAIN)
+ barrier();
+ SCptr->SCp.phase = in_dataout;
+#ifdef DEBUG_ESP_SG
+ if(SCptr->use_sg)
+ printk("esp: sg-start <%p,%d>",
+ SCptr->SCp.ptr,
+ SCptr->SCp.this_residual);
+#endif
+ eregs->esp_tclow = SCptr->SCp.this_residual;
+ eregs->esp_tcmed = (SCptr->SCp.this_residual>>8);
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_NULL);
+
+ /* This is either the one buffer dvma ptr,
+ * or the first one in the scatter gather
+ * list. Check out esp_queue to see how
+ * this is set up.
+ */
+ dregs->st_addr = SCptr->SCp.ptr;
+ dregs->cond_reg &= ~(DMA_ST_WRITE);
+ dregs->cond_reg |= (DMA_ENABLE | DMA_INT_ENAB);
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_TI);
+ ETRACE(("DATA_OUT\n"));
+ goto esp_handle_done;
+ case ESP_DIP:
+ /* Data in phase. */
+ dregs->cond_reg |= DMA_FIFO_INV;
+ while(dregs->cond_reg & DMA_FIFO_ISDRAIN)
+ barrier();
+ SCptr->SCp.phase = in_datain;
+#ifdef DEBUG_ESP_SG
+ if(SCptr->use_sg)
+ printk("esp: sg-start <%p,%d>",
+ SCptr->SCp.ptr,
+ SCptr->SCp.this_residual);
+#endif
+ eregs->esp_tclow = SCptr->SCp.this_residual;
+ eregs->esp_tcmed = (SCptr->SCp.this_residual>>8);
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_NULL);
+
+ /* This is either the one buffer dvma ptr,
+ * or the first one in the scatter gather
+ * list. Check out esp_queue to see how
+ * this is set up.
+ */
+ dregs->st_addr = SCptr->SCp.ptr;
+ dregs->cond_reg |= (DMA_ENABLE | DMA_ST_WRITE | DMA_INT_ENAB);
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_TI);
+ ETRACE(("DATA_IN\n"));
+ goto esp_handle_done;
+ case ESP_STATP:
+ /* Status phase. */
+ SCptr->SCp.phase = in_status;
+ eregs->esp_cmd = ESP_CMD_ICCSEQ;
+ ETRACE(("STATUS\n"));
+ goto esp_handle_done; /* Wait for message. */
+ };
+ };
+ } else if(esp->ireg & ESP_INTR_FDONE) {
+ /* I'd like to investigate why this happens... */
+ ESPLOG(("esp: This is weird, halfway through "));
+ ESPLOG(("selection, trying to continue anyways.\n"));
+ goto esp_handle_done;
+ } else {
+ panic("esp: Did not get bus service during selection.");
+ goto esp_handle_done;
+ }
+ panic("esp: Mr. Potatoe Head is on the loose!");
+
+ case in_datain:
+ /* Drain the fifo for writes to memory. */
+ switch(esp->dma->revision) {
+ case dvmarev0:
+ case dvmarev1:
+ case dvmarevplus:
+ case dvmarev2:
+ case dvmarev3:
+ /* Force a drain. */
+ dregs->cond_reg |= DMA_FIFO_STDRAIN;
+
+ /* fall through */
+ case dvmaesc1:
+ /* Wait for the fifo to drain completely. */
+ while(dregs->cond_reg & DMA_FIFO_ISDRAIN)
+ barrier();
+ break;
+ };
+
+ case in_dataout:
+ dregs->cond_reg &= ~DMA_ENABLE;
+
+ /* We may be pipelining an sg-list. */
+ if(SCptr->use_sg) {
+ if(SCptr->SCp.buffers_residual) {
+ /* If we do not see a BUS SERVICE interrupt
+ * at this point, or we see that we have left
+ * the current data phase, then we lose.
+ */
+ if(!(esp->ireg & ESP_INTR_BSERV) ||
+ ((esp->sreg & ESP_STAT_PMASK) > 1))
+ panic("esp: Aiee penguin on the SCSI-bus.");
+
+ ++SCptr->SCp.buffer;
+ --SCptr->SCp.buffers_residual;
+ SCptr->SCp.this_residual = SCptr->SCp.buffer->length;
+ SCptr->SCp.ptr = SCptr->SCp.buffer->alt_address;
+
+#ifdef DEBUG_ESP_SG
+ printk("<%p,%d> ", SCptr->SCp.ptr,
+ SCptr->SCp.this_residual);
+#endif
+
+ /* Latch in new esp counters... */
+ eregs->esp_tclow = SCptr->SCp.this_residual;
+ eregs->esp_tcmed = (SCptr->SCp.this_residual>>8);
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_NULL);
+
+ /* Reload DVMA gate array with new vaddr and enab. */
+ dregs->st_addr = SCptr->SCp.ptr;
+ dregs->cond_reg |= DMA_ENABLE;
+
+ /* Tell the esp to start transferring. */
+ eregs->esp_cmd = (ESP_CMD_DMA | ESP_CMD_TI);
+ goto esp_handle_done;
+ }
+#ifdef DEBUG_ESP_SG
+ printk("done.\n");
+#endif
+ }
+ /* Take a look at what happened. */
+ if(esp->ireg & ESP_INTR_DC) {
+ panic("esp: target disconnects during data transfer.");
+ goto esp_handle_done;
+ } else if(esp->ireg & ESP_INTR_BSERV) {
+ if((esp->sreg & ESP_STAT_PMASK) != ESP_STATP) {
+ panic("esp: Not status phase after data phase.");
+ goto esp_handle_done;
+ }
+ SCptr->SCp.phase = in_status;
+ eregs->esp_cmd = ESP_CMD_ICCSEQ;
+ ETRACE(("STATUS\n"));
+ goto esp_handle_done; /* Wait for message. */
+ } else {
+ printk("esp: did not get bus service after data transfer.");
+ printk("esp_status: intr<%2x> stat<%2x> seq<%2x>\n",
+ esp->ireg, esp->sreg, esp->seqreg);
+ panic("esp: penguin data transfer.");
+ goto esp_handle_done;
+ }
+ case in_status:
+ if(esp->ireg & ESP_INTR_DC) {
+ panic("esp: penguin disconnects in status phase.");
+ goto esp_handle_done;
+ } else if (esp->ireg & ESP_INTR_FDONE) {
+ /* Status and Message now sit in the fifo for us. */
+ last_fflags = eregs->esp_fflags;
+ SCptr->SCp.phase = in_finale;
+ last_status = SCptr->SCp.Status = eregs->esp_fdata;
+ last_msg = SCptr->SCp.Message = eregs->esp_fdata;
+ eregs->esp_cmd = ESP_CMD_MOK;
+ ETRACE(("FINALE\n"));
+ goto esp_handle_done;
+ } else {
+ panic("esp: penguin status phase.");
+ }
+ case in_finale:
+ if(esp->ireg & ESP_INTR_BSERV) {
+ panic("esp: penguin doesn't disconnect after status msg-ack.");
+ goto esp_handle_done;
+ } else if(esp->ireg & ESP_INTR_DC) {
+ /* Nexus is complete. */
+#ifdef THREADED_ESP_DRIVER
+ append_SC(esp->eatme_SC, esp->current_SC);
+ esp->current_SC = 0;
+ wake_up(esp_kernel_thread);
+#else
+ esp_done(esp, ((SCptr->SCp.Status & 0xff) |
+ ((SCptr->SCp.Message & 0xff) << 8) |
+ (DID_OK << 16)));
+#endif
+ ETRACE(("NEXUS_COMPLETE\n"));
+ goto esp_handle_done;
+ } else {
+ printk("esp: wacky state while in in_finale phase.\n");
+ printk("esp_status: intr<%2x> stat<%2x> seq<%2x>\n",
+ esp->ireg, esp->sreg, esp->seqreg);
+ panic("esp: penguin esp state.");
+ goto esp_handle_done;
+ }
+ default:
+ panic("esp: detected penguin phase.");
+ goto esp_handle_done;
+ }
+ panic("esp: Heading to the promised land.");
+
+esp_handle_done:
+ DMA_IRQ_EXIT(esp->dma, dregs);
+ return;
+}
+
+static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
+{
+ struct Sparc_ESP *esp;
+
+ /* Handle all ESP interrupts showing */
+ for_each_esp(esp) {
+ if(DMA_IRQ_P(esp->dregs)) {
+ esp_handle(esp);
+ }
+ }
+}
--- /dev/null
+/* esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI
+ * Processor) driver under Linux.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SPARC_ESP_H
+#define _SPARC_ESP_H
+
+/* For dvma controller register definitions. */
+#include <asm/dma.h>
+
+/* The ESP SCSI controllers have their register sets in three
+ * "classes":
+ *
+ * 1) Registers which are both read and write.
+ * 2) Registers which are read only.
+ * 3) Registers which are write only.
+ *
+ * Yet, they all live within the same IO space.
+ */
+
+/* All the ESP registers are one byte each and are accessed longwords
+ * apart with a big-endian ordering to the bytes.
+ */
+
+struct Sparc_ESP_regs {
+ /* Access Description Offset */
+ volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */
+ unchar tlpad1[3];
+ volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */
+ unchar fdpad[3];
+ volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */
+ unchar cbpad[3];
+ volatile unchar esp_cmd; /* rw SCSI command bits 0x0c */
+ unchar stpad[3];
+ volatile unchar esp_status; /* ro ESP status register 0x10 */
+#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
+ unchar irqpd[3];
+ volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */
+#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
+ unchar sspad[3];
+ volatile unchar esp_sstep; /* ro Sequence step register 0x18 */
+#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
+ unchar ffpad[3];
+ volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */
+#define esp_soff esp_fflags /* wo Sync offset 0x1c */
+ unchar cf1pd[3];
+ volatile unchar esp_cfg1; /* rw First configuration register 0x20 */
+ unchar cfpad[3];
+ volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */
+ unchar ctpad[3];
+ volatile unchar esp_ctest; /* wo Chip test register 0x28 */
+ unchar cf2pd[3];
+ volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */
+ unchar cf3pd[3];
+
+ /* The following is only found on the 53C9X series SCSI chips */
+ volatile unchar esp_cfg3; /* rw Third configuration register 0x30 */
+ unchar thpd[7];
+
+ /* The following is found on all chips except the NCR53C90 (ESP100) */
+ volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */
+#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
+ unchar fgpad[3];
+ volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */
+};
+
+/* Various revisions of the ESP board. */
+enum esp_rev {
+ esp100 = 0x00, /* NCR53C90 */
+ esp100a = 0x01, /* NCR53C90A */
+ esp236 = 0x02,
+ fas236 = 0x03,
+ fas100a = 0x04,
+ fast = 0x05,
+ espunknown = 0x06
+};
+
+/* We get one of these for each ESP probed. */
+struct Sparc_ESP {
+ struct Sparc_ESP *next; /* Next ESP on probed or NULL */
+ struct Sparc_ESP_regs *eregs; /* All esp registers */
+ struct Linux_SBus_DMA *dma; /* Who I do transfers with. */
+ struct sparc_dma_registers *dregs; /* And his registers. */
+ struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
+
+ struct linux_sbus_device *edev; /* Pointer to SBus entry */
+ char prom_name[64]; /* Name of ESP device from prom */
+ int prom_node; /* Prom node where ESP found */
+	int esp_id;                           /* Same as ehost->host_id    */
+
+ /* ESP Configuration Registers */
+ unsigned char config1; /* Copy of the 1st config register */
+ unsigned char config2; /* Copy of the 2nd config register */
+ unsigned char config3[8]; /* Copy of the 3rd config register */
+
+ /* The current command we are sending to the ESP chip. This esp_command
+ * ptr needs to be mapped in DVMA area so we can send commands and read
+ * from the ESP fifo without burning precious CPU cycles. Programmed I/O
+ * sucks when we have the DVMA to do it for us.
+ */
+ volatile unchar *esp_command; /* Location of command */
+ int esp_clen; /* Length of this command */
+
+ /* To hold onto the dvma buffer ptr. */
+ char *dvma_hold;
+
+ /* The following are used to determine the cause of an IRQ. Upon every
+ * IRQ entry we synchronize these with the hardware registers.
+ */
+ unchar ireg; /* Copy of ESP interrupt register */
+ unchar sreg; /* Same for ESP status register */
+ unchar seqreg; /* The ESP sequence register */
+
+ /* Clock periods, frequencies, synchronization, etc. */
+ unsigned int cfreq; /* Clock frequency in HZ */
+ unsigned int cfact; /* Clock conversion factor */
+ unsigned int ccycle; /* One ESP clock cycle */
+ unsigned int ctick; /* One ESP clock time */
+ unsigned int sync_defp; /* Default negotiation period */
+
+ /* Misc. info about this ESP */
+ enum esp_rev erev; /* ESP revision */
+ int irq; /* SBus IRQ for this ESP */
+ int scsi_id; /* Who am I as initiator? */
+ int scsi_id_mask; /* Bitmask of 'me'. */
+ int diff; /* Differential SCSI? */
+ int bursts; /* Burst sizes our DVMA supports */
+
+ /* Our command queues, only one cmd lives in the current_SC queue. */
+ Scsi_Cmnd *issue_SC; /* Commands to be issued */
+ Scsi_Cmnd *current_SC; /* Who is currently working the bus */
+ Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */
+
+#ifdef THREADED_ESP_DRIVER
+ Scsi_Cmnd *eatme_SC; /* Cmds waiting for esp thread to process. */
+#endif
+
+ /* Abortion status */
+ int aborting, abortion_complete, abort_result;
+};
+
+/* Bitfield meanings for the above registers. */
+
+/* ESP config reg 1, read-write, found on all ESP chips */
+#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
+#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
+#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
+#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
+#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
+#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
+
+/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */
+#define ESP_CONFIG2_DMAPARITY 0x01 /* Parity DMA err (200,236) */
+#define ESP_CONFIG2_REGPARITY 0x02 /* Parity reg err (200,236) */
+#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
+#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features */
+#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
+#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */
+#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */
+#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
+#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
+
+/* ESP config register 3 read-write, found only esp236+fas236+fas100a chips */
+#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a) */
+#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */
+#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */
+#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */
+#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */
+#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */
+#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */
+#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */
+#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */
+#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */
+#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */
+#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */
+#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */
+
+/* ESP command register read-write */
+/* Group 1 commands: These may be sent at any point in time to the ESP
+ * chip. None of them can generate interrupts 'cept
+ * the "SCSI bus reset" command if you have not disabled
+ * SCSI reset interrupts in the config1 ESP register.
+ */
+#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
+#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
+#define ESP_CMD_RC 0x02 /* Chip reset */
+#define ESP_CMD_RS 0x03 /* SCSI bus reset */
+
+/* Group 2 commands: ESP must be an initiator and connected to a target
+ * for these commands to work.
+ */
+#define ESP_CMD_TI 0x10 /* Transfer Information */
+#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
+#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
+#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
+#define ESP_CMD_SATN 0x1a /* Set ATN */
+#define ESP_CMD_RATN 0x1b /* De-assert ATN */
+
+/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
+ * to a target as the initiator for these commands to work.
+ */
+#define ESP_CMD_SMSG 0x20 /* Send message */
+#define ESP_CMD_SSTAT 0x21 /* Send status */
+#define ESP_CMD_SDATA 0x22 /* Send data */
+#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
+#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
+#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
+#define ESP_CMD_DCNCT 0x27 /* Disconnect */
+#define ESP_CMD_RMSG 0x28 /* Receive Message */
+#define ESP_CMD_RCMD 0x29 /* Receive Command */
+#define ESP_CMD_RDATA 0x2a /* Receive Data */
+#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
+
+/* Group 4 commands: The ESP must be in the disconnected state and must
+ * not be connected to any targets as initiator for
+ * these commands to work.
+ */
+#define ESP_CMD_RSEL 0x40 /* Reselect */
+#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
+#define ESP_CMD_SELA 0x42 /* Select w/ATN */
+#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
+#define ESP_CMD_ESEL 0x44 /* Enable selection */
+#define ESP_CMD_DSEL 0x45 /* Disable selections */
+#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
+#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
+
+/* This bit enables the ESP's DMA on the SBus */
+#define ESP_CMD_DMA 0x80 /* Do DMA? */
+
+
+/* ESP status register read-only */
+#define ESP_STAT_PIO 0x01 /* IO phase bit */
+#define ESP_STAT_PCD 0x02 /* CD phase bit */
+#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
+#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
+#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
+#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
+#define ESP_STAT_PERR 0x20 /* Parity error */
+#define ESP_STAT_SPAM 0x40 /* Real bad error */
+/* This indicates the 'interrupt pending' condition on esp236, it is a reserved
+ * bit on other revs of the ESP.
+ */
+#define ESP_STAT_INTR 0x80 /* Interrupt */
+
+/* The status register can be masked with ESP_STAT_PMASK and compared
+ * with the following values to determine the current phase the ESP
+ * (at least thinks it) is in. For our purposes we also add our own
+ * software 'done' bit for our phase management engine.
+ */
+#define ESP_DOP (0) /* Data Out */
+#define ESP_DIP (ESP_STAT_PIO) /* Data In */
+#define ESP_CMDP (ESP_STAT_PCD) /* Command */
+#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
+#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
+#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
+
+/* ESP interrupt register read-only */
+#define ESP_INTR_S 0x01 /* Select w/o ATN */
+#define ESP_INTR_SATN 0x02 /* Select w/ATN */
+#define ESP_INTR_RSEL 0x04 /* Reselected */
+#define ESP_INTR_FDONE 0x08 /* Function done */
+#define ESP_INTR_BSERV 0x10 /* Bus service */
+#define ESP_INTR_DC 0x20 /* Disconnect */
+#define ESP_INTR_IC 0x40 /* Illegal command given */
+#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
+
+/* Interrupt status macros */
+#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
+#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
+#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
+#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
+#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
+ (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
+#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
+
+/* ESP sequence step register read-only */
+#define ESP_STEP_VBITS 0x07 /* Valid bits */
+#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
+#define ESP_STEP_SID 0x01 /* One msg byte sent */
+#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
+#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
+ * bytes to be lost
+ */
+#define ESP_STEP_FINI 0x04 /* Command was sent ok */
+
+/* ESP chip-test register read-write */
+#define ESP_TEST_TARG 0x01 /* Target test mode */
+#define ESP_TEST_INI 0x02 /* Initiator test mode */
+#define ESP_TEST_TS 0x04 /* Tristate test mode */
+
+/* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_F100A 0x00 /* ESP FAS100A */
+#define ESP_UID_F236 0x02 /* ESP FAS236 */
+#define ESP_UID_REV 0x07 /* ESP revision */
+#define ESP_UID_FAM 0xf8 /* ESP family */
+
+/* ESP fifo flags register read-only */
+/* Note that the following implies a 16 byte FIFO on the ESP. */
+#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
+#define ESP_FF_SSTEP 0xe0 /* Sequence step */
+
+/* ESP clock conversion factor register write-only */
+#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
+#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
+#define ESP_CCF_F2 0x02 /* 10MHz */
+#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
+#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
+#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
+#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
+#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
+
+extern int esp_detect(struct SHT *);
+extern const char *esp_info(struct Scsi_Host *);
+extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int esp_command(Scsi_Cmnd *);
+extern int esp_abort(Scsi_Cmnd *);
+extern int esp_reset(Scsi_Cmnd *, unsigned int);
+
+extern struct proc_dir_entry proc_scsi_esp;
+
+#define SCSI_SPARC_ESP { \
+/* struct SHT *next */ NULL, \
+/* long *usage_count */ NULL, \
+/* struct proc_dir_entry *proc_dir */ &proc_scsi_esp, \
+/* int (*proc_info)(char *, char **, off_t, int, int, int) */ NULL, \
+/* const char *name */ "Sun ESP 100/100a/200", \
+/* int detect(struct SHT *) */ esp_detect, \
+/* int release(struct Scsi_Host *) */ NULL, \
+/* const char *info(struct Scsi_Host *) */ esp_info, \
+/* int command(Scsi_Cmnd *) */ esp_command, \
+/* int queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ esp_queue, \
+/* int abort(Scsi_Cmnd *) */ esp_abort, \
+/* int reset(Scsi_Cmnd *, int) */ esp_reset, \
+/* int slave_attach(int, int) */ NULL, \
+/* int bios_param(Disk *, kdev_t, int[]) */ NULL, \
+/* int can_queue */ 10, \
+/* int this_id */ 7, \
+/* short unsigned int sg_tablesize */ SG_ALL, \
+/* short cmd_per_lun */ 1, \
+/* unsigned char present */ 0, \
+/* unsigned unchecked_isa_dma:1 */ 0, \
+/* unsigned use_clustering:1 */ DISABLE_CLUSTERING, }
+
+/* For our interrupt engine. */
+#define for_each_esp(esp) \
+ for((esp) = espchain; (esp); (esp) = (esp)->next)
+
+#endif /* !(_SPARC_ESP_H) */
#include "ppa.h"
#endif
+#ifdef CONFIG_SCSI_SUNESP
+#include "esp.h"
+#endif
+
#ifdef CONFIG_SCSI_DEBUG
#include "scsi_debug.h"
#endif
/*
-static const char RCSid[] = "$Header: /usr/src/linux/kernel/blk_drv/scsi/RCS/hosts.c,v 1.3 1993/09/24 12:21:00 drew Exp drew $";
+static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/hosts.c,v 1.10 1996/04/16 08:09:36 davem Exp $";
*/
/*
#ifdef CONFIG_SCSI_PPA
PPA,
#endif
+#ifdef CONFIG_SCSI_SUNESP
+ SCSI_SPARC_ESP,
+#endif
#ifdef CONFIG_SCSI_DEBUG
SCSI_DEBUG,
#endif
void fat_put_inode(struct inode *inode)
{
- struct inode *depend;
+ struct inode *depend, *linked;
struct super_block *sb;
+ depend = MSDOS_I(inode)->i_depend;
+ linked = MSDOS_I(inode)->i_linked;
+ sb = inode->i_sb;
if (inode->i_nlink) {
+ if (depend) {
+ iput(depend);
+ }
+ if (linked) {
+ iput(linked);
+ MSDOS_I(inode)->i_linked = NULL;
+ }
if (MSDOS_I(inode)->i_busy) fat_cache_inval_inode(inode);
return;
}
inode->i_size = 0;
fat_truncate(inode);
- depend = MSDOS_I(inode)->i_depend;
- sb = inode->i_sb;
clear_inode(inode);
if (depend) {
if (MSDOS_I(depend)->i_old != inode) {
MSDOS_I(depend)->i_old = NULL;
iput(depend);
}
+ if (linked) {
+ if (MSDOS_I(linked)->i_oldlink != inode) {
+ printk("Invalid link (0x%p): expected 0x%p, got 0x%p\n",
+ linked, inode, MSDOS_I(linked)->i_oldlink);
+ fat_fs_panic(sb,"...");
+ return;
+ }
+ MSDOS_I(linked)->i_oldlink = NULL;
+ iput(linked);
+ }
}
/* printk("read inode %d\n",inode->i_ino); */
MSDOS_I(inode)->i_busy = 0;
MSDOS_I(inode)->i_depend = MSDOS_I(inode)->i_old = NULL;
+ MSDOS_I(inode)->i_linked = MSDOS_I(inode)->i_oldlink = NULL;
MSDOS_I(inode)->i_binary = 1;
inode->i_uid = MSDOS_SB(inode->i_sb)->options.fs_uid;
inode->i_gid = MSDOS_SB(inode->i_sb)->options.fs_gid;
struct super_block *sb = inode->i_sb;
struct buffer_head *bh;
struct msdos_dir_entry *raw_entry;
+ struct inode *linked;
+
+ linked = MSDOS_I(inode)->i_linked;
+ if (linked) {
+ if (MSDOS_I(linked)->i_oldlink != inode) {
+ printk("Invalid link (0x%p): expected 0x%p, got 0x%p\n",
+ linked, inode, MSDOS_I(linked)->i_oldlink);
+ fat_fs_panic(sb,"...");
+ return;
+ }
+ linked->i_version = ++event;
+ linked->i_mode = inode->i_mode;
+ linked->i_uid = inode->i_uid;
+ linked->i_gid = inode->i_gid;
+ linked->i_size = inode->i_size;
+ linked->i_atime = inode->i_atime;
+ linked->i_mtime = inode->i_mtime;
+ linked->i_ctime = inode->i_ctime;
+ linked->i_blocks = inode->i_blocks;
+ linked->i_atime = inode->i_atime;
+ MSDOS_I(linked)->i_attrs = MSDOS_I(inode)->i_attrs;
+ linked->i_dirt = 1;
+ }
inode->i_dirt = 0;
if (inode->i_ino == MSDOS_ROOT_INO || !inode->i_nlink) return;
}
msdos_read_inode(free_inode);
MSDOS_I(old_inode)->i_busy = 1;
+ MSDOS_I(old_inode)->i_linked = free_inode;
+ MSDOS_I(free_inode)->i_oldlink = old_inode;
fat_cache_inval_inode(old_inode);
old_inode->i_dirt = 1;
old_de->name[0] = DELETED_FLAG;
mark_buffer_dirty(old_bh, 1);
mark_buffer_dirty(free_bh, 1);
- if (!exists) iput(free_inode);
- else {
+ if (exists) {
MSDOS_I(new_inode)->i_depend = free_inode;
MSDOS_I(free_inode)->i_old = new_inode;
- /* free_inode is put when putting new_inode */
+ /* Two references now exist to free_inode so increase count */
+ free_inode->i_count++;
+ /* free_inode is put after putting new_inode and old_inode */
iput(new_inode);
dcache_add(new_dir, new_name, new_len, new_ino);
brelse(new_bh);
* filesystem and type 'ls xyzzy' to turn on debugging.
*/
-#if 1
+#if 0
#define NFS_PROC_DEBUG
#endif
vfat_read_inode(new_inode);
MSDOS_I(old_inode)->i_busy = 1;
+ MSDOS_I(old_inode)->i_linked = new_inode;
+ MSDOS_I(new_inode)->i_oldlink = old_inode;
fat_cache_inval_inode(old_inode);
PRINTK(("vfat_rename 15: old_slots=%d\n",old_slots));
old_inode->i_dirt = 1;
mark_buffer_dirty(new_bh, 1);
dcache_add(new_dir, new_name, new_len, new_ino);
- iput(new_inode);
/* XXX: There is some code in the original MSDOS rename that
* is not duplicated here and it might cause a problem in
-/* $Id: asi.h,v 1.13 1996/03/01 07:20:51 davem Exp $ */
+/* $Id: asi.h,v 1.15 1996/04/17 22:45:52 davem Exp $ */
#ifndef _SPARC_ASI_H
#define _SPARC_ASI_H
#define ASI_M_IC_FLCLEAR 0x36
#define ASI_M_DC_FLCLEAR 0x37
-#define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */
-
-/* Sparc V9 TI UltraSparc ASI's (V8 ploos ploos) */
-
-/* ASIs 0x0-0x7f are Supervisor Only. 0x80-0xff are for anyone. */
-
-/* You will notice that there are a lot of places where if a normal
- * ASI is available on the V9, there is also a little-endian version.
- */
-
-#define ASI_V9_RESV0 0x00 /* Don't touch... */
-#define ASI_V9_RESV1 0x01 /* Not here */
-#define ASI_V9_RESV2 0x02 /* Or here */
-#define ASI_V9_RESV3 0x03 /* nor here. */
-#define ASI_V9_NUCLEUS 0x04 /* Impl-dep extra virtual access context */
-#define ASI_V9_NUCLEUSL 0x0C /* Nucleus context, little-endian */
-#define ASI_V9_USER_PRIM 0x10 /* User primary address space */
-#define ASI_V9_USER_SEC 0x11 /* User secondary address space */
-
-#define ASI_V9_MMUPASS 0x14 /* OBMEM (external cache, no data cache) */
-#define ASI_V9_IOPASS 0x15 /* Like MMUPASS, for I/O areas (uncached) */
-#define ASI_V9_USER_PRIML 0x18 /* User primary addr space, lil-endian. */
-#define ASI_V9_USER_SECL 0x19 /* User secondary addr space, lil-endian. */
-#define ASI_V9_MMUPASSL 0x1C /* OBMEM little-endian */
-#define ASI_V9_IOPASSL 0x1D /* Like IOPASS but little-endian */
-#define ASI_V9_ATOMICQ 0x24 /* Atomic 128-bit load address space */
-#define ASI_V9_ATOMICQL 0x2C /* Atomic 128-bit load little-endian */
-#define ASI_V9_LSTORECTL 0x45 /* ld/st control unit */
-#define ASI_V9_DCACHE_ENT 0x46 /* Data cache entries */
-#define ASI_V9_DCACHE_TAG 0x47 /* Data cache tags */
-#define ASI_V9_IRQDISPS 0x48 /* IRQ dispatch status registers */
-#define ASI_V9_IRQRECVS 0x49 /* IRQ receive status registers */
-#define ASI_V9_MMUREGS 0x4A /* Spitfire MMU control register */
-#define ASI_V9_ESTATE 0x4B /* Error state enable register */
-#define ASI_V9_ASYNC_FSR 0x4C /* Asynchronous Fault Status reg */
-#define ASI_V9_ASYNC_FAR 0x4D /* Asynchronous Fault Address reg */
-
-#define ASI_V9_ECACHE_DIAG 0x4E /* External Cache diagnostics */
-
-#define ASI_V9_TXTMMU 0x50 /* MMU for program text */
-#define ASI_V9_TXTMMU_D1 0x51 /* XXX */
-#define ASI_V9_TXTMMU_D2 0x52 /* XXX */
-#define ASI_V9_TXTMMU_TDI 0x54 /* Text MMU TLB data in */
-#define ASI_V9_TXTMMU_TDA 0x55 /* Text MMU TLB data access */
-#define ASI_V9_TXTMMU_TTR 0x56 /* Text MMU TLB tag read */
-#define ASI_V9_TXTMMU_TDM 0x57 /* Text MMU TLB de-map */
-
-#define ASI_V9_DATAMMU 0x58 /* MMU for program data */
-#define ASI_V9_DATAMMU_D1 0x59 /* XXX */
-#define ASI_V9_DATAMMU_D2 0x5A /* XXX */
-#define ASI_V9_DATAMMU_DD 0x5B /* XXX */
-#define ASI_V9_DATAMMU_TDI 0x5C /* Data MMU TLB data in */
-#define ASI_V9_DATAMMU_TDA 0x5D /* Data MMU TLB data access */
-#define ASI_V9_DATAMMU_TTR 0x5E /* Data MMU TLB tag read */
-#define ASI_V9_DATAMMU_TDM 0x5F /* Data MMU TLB de-map */
-
-#define ASI_V9_ICACHE_D 0x66 /* Instruction cache data */
-#define ASI_V9_ICACHE_T 0x67 /* Instruction cache tags */
-#define ASI_V9_ICACHE_DEC 0x6E /* Instruction cache decode */
-#define ASI_V9_ICACHE_NXT 0x6F /* Instruction cache next ent */
-
-#define ASI_V9_HUH1 0x70 /* XXX */
-#define ASI_V9_HUH2 0x71 /* XXX */
-
-#define ASI_V9_ECACHE_ACC 0x76 /* External cache registers */
-
-#define ASI_V9_INTR_DISP 0x77 /* Interrupt dispatch registers */
-#define ASI_V9_HUH1L 0x78 /* XXX */
-#define ASI_V9_HUH2L 0x79 /* XXX */
-#define ASI_V9_INTR_RECV 0x7f /* Interrupt Receive registers */
-
-#define ASI_V9_PRIMARY 0x80 /* Primary address space */
-#define ASI_V9_SECONDARY 0x81 /* Secondary address space */
-#define ASI_V9_PRIMARY_NF 0x82 /* Primary address space -- No Fault */
-#define ASI_V9_SECONDARY_NF 0x83 /* Secondary address space -- No Fault */
-
-#define ASI_V9_PRIMARYL 0x80 /* Primary address space, little-endian */
-#define ASI_V9_SECONDARYL 0x81 /* Secondary address space, little-endian */
-#define ASI_V9_PRIMARY_NFL 0x82 /* Primary address space, No Fault, l-endian */
-#define ASI_V9_SECONDARY_NFL 0x83 /* Secondary address space, No Fault, l-endian */
-
-#define ASI_V9_XXX1 0xC0 /* XXX */
-#define ASI_V9_XXX2 0xC1 /* XXX */
-#define ASI_V9_XXX3 0xC2 /* XXX */
-#define ASI_V9_XXX4 0xC3 /* XXX */
-#define ASI_V9_XXX5 0xC4 /* XXX */
-#define ASI_V9_XXX6 0xC5 /* XXX */
-#define ASI_V9_XXX7 0xC8 /* XXX */
-#define ASI_V9_XXX8 0xC9 /* XXX */
-#define ASI_V9_XXX9 0xCA /* XXX */
-#define ASI_V9_XXX10 0xCB /* XXX */
-#define ASI_V9_XXX11 0xCC /* XXX */
-#define ASI_V9_XXX12 0xCD /* XXX */
-
-#define ASI_V9_XXX13 0xD0 /* XXX */
-#define ASI_V9_XXX14 0xD1 /* XXX */
-#define ASI_V9_XXX15 0xD2 /* XXX */
-#define ASI_V9_XXX16 0xD3 /* XXX */
-#define ASI_V9_XXX17 0xD8 /* XXX */
-#define ASI_V9_XXX18 0xD9 /* XXX */
-#define ASI_V9_XXX19 0xDA /* XXX */
-#define ASI_V9_XXX20 0xDB /* XXX */
-
-#define ASI_V9_XXX21 0xE0 /* XXX */
-#define ASI_V9_XXX22 0xE1 /* XXX */
-#define ASI_V9_XXX23 0xF0 /* XXX */
-#define ASI_V9_XXX24 0xF1 /* XXX */
-#define ASI_V9_XXX25 0xF8 /* XXX */
-#define ASI_V9_XXX26 0xF9 /* XXX */
-
-#ifndef __ASSEMBLY__
-
-/* Better to do these inline with gcc __asm__ statements. */
-
-/* The following allow you to access physical memory directly without
- * translation by the SRMMU. The only other way to do this is to
- * turn off the SRMMU completely, and well... thats not good.
- *
- * TODO: For non-MBus SRMMU units we have to perform the following
- * using this sequence.
- * 1) Turn off traps
- * 2) Turn on AC bit in SRMMU control register
- * 3) Do our direct physical memory access
- * 4) Restore old SRMMU control register value
- * 5) Restore old %psr value
- */
-
-extern __inline__ unsigned int
-ldb_sun4m_bypass(unsigned int addr)
-{
- unsigned int retval;
-
- __asm__("lduba [%2] %1, %0\n\t" :
- "=r" (retval) :
- "i" (ASI_M_BYPASS), "r" (addr));
-
- return retval;
-}
-
-extern __inline__ unsigned int
-ldw_sun4m_bypass(unsigned int addr)
-{
- unsigned int retval;
-
- __asm__("lda [%2] %1, %0\n\t" :
- "=r" (retval) :
- "i" (ASI_M_BYPASS), "r" (addr));
-
- return retval;
-}
-
-extern __inline__ void
-stb_sun4m_bypass(unsigned char value, unsigned int addr)
-{
- __asm__("stba %0, [%2] %1\n\t" : :
- "r" (value), "i" (ASI_M_BYPASS), "r" (addr) :
- "memory");
-}
-
-extern __inline__ void
-stw_sun4m_bypass(unsigned int value, unsigned int addr)
-{
- __asm__("sta %0, [%2] %1\n\t" : :
- "r" (value), "i" (ASI_M_BYPASS), "r" (addr) :
- "memory");
-}
-
-#endif /* !(__ASSEMBLY__) */
+#define ASI_M_DCDR            0x39   /* Data Cache Diagnostics Register rw, ss */
+#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
#endif /* _SPARC_ASI_H */
--- /dev/null
+/* asmmacro.h: Assembler macros.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ */
+
+#ifndef _SPARC_ASMMACRO_H
+#define _SPARC_ASMMACRO_H
+
+/* #define SMP_DEBUG */
+
+#define GET_PROCESSOR_ID(reg) \
+ rd %tbr, %reg; \
+ srl %reg, 12, %reg; \
+ and %reg, 3, %reg;
+
+#define GET_PROCESSOR_MID(reg, tmp) \
+ GET_PROCESSOR_ID(reg) \
+ set C_LABEL(mid_xlate), %tmp; \
+ ldub [%tmp + %reg], %reg;
+
+#define GET_PROCESSOR_OFFSET(reg) \
+ rd %tbr, %reg; \
+ srl %reg, 10, %reg; \
+ and %reg, 0xc, %reg;
+
+#define PROCESSOR_OFFSET_TO_ID(reg) \
+ srl %reg, 2, %reg;
+
+#define PROCESSOR_ID_TO_OFFSET(reg) \
+ sll %reg, 2, %reg;
+
+/* All trap entry points _must_ begin with this macro or else you
+ * lose. It makes sure the kernel has a proper window so that
+ * c-code can be called.
+ */
+#ifndef SMP_DEBUG
+#define SAVE_ALL \
+ sethi %hi(trap_setup), %l4; \
+ jmpl %l4 + %lo(trap_setup), %l6; \
+ nop;
+#else
+#define SAVE_ALL \
+ GET_PROCESSOR_ID(l4); \
+ set C_LABEL(trap_log), %l5; \
+ sll %l4, 11, %l6; \
+ add %l5, %l6, %l5; \
+ set C_LABEL(trap_log_ent), %l6; \
+ sll %l4, 2, %l4; \
+ add %l6, %l4, %l6; \
+ ld [%l6], %l6; \
+ sll %l6, 3, %l6; \
+ st %l1, [%l5 + %l6]; \
+ add %l5, 4, %l5; \
+ st %l0, [%l5 + %l6]; \
+ set C_LABEL(trap_log_ent), %l5; \
+ add %l5, %l4, %l5; \
+ srl %l6, 3, %l6; \
+ add %l6, 1, %l6; \
+ and %l6, 255, %l6; \
+ st %l6, [%l5]; \
+ sethi %hi(trap_setup), %l4; \
+ jmpl %l4 + %lo(trap_setup), %l6; \
+ nop;
+#endif
+
+/* All traps low-level code here must end with this macro.
+ * For SMP configurations the ret_trap_entry routine will
+ * have the appropriate code to actually release the kernel
+ * entry lock.
+ */
+#define RESTORE_ALL \
+ b ret_trap_entry; \
+ nop;
+
+#ifndef __SMP__
+
+#define ENTER_SYSCALL
+#define LEAVE_SYSCALL
+#define ENTER_IRQ
+#define LEAVE_IRQ
+
+#else
+
+#define INCREMENT_COUNTER(symbol, tmp1, tmp2) \
+ set C_LABEL(symbol), %tmp1; \
+ ld [%tmp1], %tmp2; \
+ add %tmp2, 1, %tmp2; \
+ st %tmp2, [%tmp1];
+
+#define DECREMENT_COUNTER(symbol, tmp1, tmp2) \
+ set C_LABEL(symbol), %tmp1; \
+ ld [%tmp1], %tmp2; \
+ sub %tmp2, 1, %tmp2; \
+ st %tmp2, [%tmp1];
+
+ /* This is so complicated I suggest you don't look at it. */
+#define ENTER_MASK(mask) \
+ GET_PROCESSOR_OFFSET(l4) \
+ set C_LABEL(smp_spinning), %l6; \
+ add %l6, %l4, %l6; \
+ mov 1, %l5; \
+ st %l5, [%l6]; \
+ set C_LABEL(smp_proc_in_lock), %l5; \
+ ld [%l5 + %l4], %l6; \
+ or %l6, mask, %l6; \
+ st %l6, [%l5 + %l4]; \
+1: \
+ set C_LABEL(kernel_flag), %l5; \
+ ldstub [%l5], %l6; \
+ cmp %l6, 0; \
+ be 3f; \
+ nop; \
+ set C_LABEL(active_kernel_processor), %l5; \
+ GET_PROCESSOR_ID(l4) \
+ ldub [%l5], %l6; \
+ cmp %l6, %l4; \
+ be 4f; \
+ nop; \
+2: \
+ GET_PROCESSOR_MID(l4, l5) \
+ set C_LABEL(sun4m_interrupts), %l5; \
+ ld [%l5], %l5; \
+ sll %l4, 12, %l4; \
+ add %l5, %l4, %l5; \
+ ld [%l5], %l4; \
+ sethi %hi(0x80000000), %l6; \
+ andcc %l6, %l4, %g0; \
+ be 5f; \
+ nop; \
+ st %l6, [%l5 + 4]; \
+ nop; nop; nop; \
+ ld [%l5], %g0; \
+ nop; nop; nop; \
+ or %l0, PSR_PIL, %l4; \
+ wr %l4, 0x0, %psr; \
+ nop; nop; nop; \
+ wr %l4, PSR_ET, %psr; \
+ nop; nop; nop; \
+ call C_LABEL(smp_message_irq); \
+ nop; \
+ wr %l0, 0x0, %psr; \
+ nop; nop; nop; \
+5: \
+ set C_LABEL(kernel_flag), %l5; \
+ ldub [%l5], %l6; \
+ cmp %l6, 0; \
+ bne 2b; \
+ nop; \
+ b 1b; \
+ nop; \
+3: \
+ GET_PROCESSOR_ID(l4) \
+ set C_LABEL(active_kernel_processor), %l5; \
+ stb %l4, [%l5]; \
+ GET_PROCESSOR_MID(l4, l5) \
+ set C_LABEL(irq_rcvreg), %l5; \
+ ld [%l5], %l5; \
+ st %l4, [%l5]; \
+4: \
+ GET_PROCESSOR_OFFSET(l4) \
+ set C_LABEL(smp_spinning), %l6; \
+ st %g0, [%l6 + %l4];
+
+#define ENTER_SYSCALL \
+ ENTER_MASK(SMP_FROM_SYSCALL) \
+ INCREMENT_COUNTER(kernel_counter, l6, l5) \
+ INCREMENT_COUNTER(syscall_count, l6, l5)
+
+#define ENTER_IRQ \
+ ENTER_MASK(SMP_FROM_INT) \
+ INCREMENT_COUNTER(kernel_counter, l6, l5)
+
+#define LEAVE_MASK(mask) \
+ GET_PROCESSOR_OFFSET(l4) \
+ set C_LABEL(smp_proc_in_lock), %l5; \
+ ld [%l5 + %l4], %l6; \
+ andn %l6, mask, %l6; \
+ st %l6, [%l5 + %l4];
+
+#define LEAVE_SYSCALL \
+ LEAVE_MASK(SMP_FROM_SYSCALL) \
+ DECREMENT_COUNTER(syscall_count, l6, l5) \
+ set C_LABEL(kernel_counter), %l6; \
+ ld [%l6], %l5; \
+ subcc %l5, 1, %l5; \
+ st %l5, [%l6]; \
+ bne 1f; \
+ nop; \
+ set C_LABEL(active_kernel_processor), %l6; \
+ mov NO_PROC_ID, %l5; \
+ stb %l5, [%l6]; \
+ set C_LABEL(kernel_flag), %l6; \
+ stb %g0, [%l6]; \
+1:
+
+#define LEAVE_IRQ \
+ LEAVE_MASK(SMP_FROM_INT) \
+ INCREMENT_COUNTER(syscall_count, l6, l5)
+
+
+#define RESTORE_ALL_FASTIRQ \
+ b ret_irq_entry; \
+ nop;
+
+#endif /* !(__SMP__) */
+
+#endif /* !(_SPARC_ASMMACRO_H) */
--- /dev/null
+/* atomic.h: These really suck for now.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef __ARCH_SPARC_ATOMIC__
+#define __ARCH_SPARC_ATOMIC__
+
+#ifdef __SMP__
+#include <asm/smp.h>
+#include <asm/smp_lock.h>
+#endif
+
+typedef int atomic_t;
+
+static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ *v += i;
+ restore_flags(flags);
+}
+
+static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ *v -= i;
+ restore_flags(flags);
+}
+
+static __inline__ int atomic_sub_and_test(atomic_t i, atomic_t *v)
+{
+ unsigned long flags, result;
+
+ save_flags(flags); cli();
+ *v -= i;
+ result = (*v == 0);
+ restore_flags(flags);
+ return result;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ atomic_add(1, v);
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ atomic_sub(1, v);
+}
+
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ return atomic_sub_and_test(1, v);
+}
+
+#endif /* !(__ARCH_SPARC_ATOMIC__) */
--- /dev/null
+/* atops.h: Atomic SPARC operations.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+#ifndef _SPARC_ATOPS_H
+#define _SPARC_ATOPS_H
+
+#ifdef __SMP__
+
+extern __inline volatile unsigned char ldstub(volatile unsigned char *lock)
+{
+ volatile unsigned char retval;
+
+ __asm__ __volatile__("ldstub [%1], %0\n\t" :
+ "=&r" (retval) :
+ "r" (lock));
+ return retval;
+}
+
+#endif
+
+#endif
/* $Id: auxio.h,v 1.10 1996/01/03 03:52:58 davem Exp $
- * auxio.h: Definitions and code for the Auxiliary I/O register.
+ * auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
-/* $Id: bitops.h,v 1.18 1996/01/03 03:53:00 davem Exp $
+/* $Id: bitops.h,v 1.23 1996/04/20 07:54:35 davem Exp $
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995, David S. Miller (davem@caip.rutgers.edu).
#include <asm/system.h>
#endif
+#ifdef __SMP__
+
+#define SMPVOL volatile
+
+#else
+
+#define SMPVOL
+
+#endif
+
/* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-extern __inline__ unsigned long set_bit(unsigned long nr, void *addr)
+extern __inline__ unsigned long set_bit(unsigned long nr, SMPVOL void *addr)
{
int mask, flags;
unsigned long *ADDR = (unsigned long *) addr;
return oldbit != 0;
}
-extern __inline__ unsigned long clear_bit(unsigned long nr, void *addr)
+extern __inline__ unsigned long clear_bit(unsigned long nr, SMPVOL void *addr)
{
int mask, flags;
unsigned long *ADDR = (unsigned long *) addr;
return oldbit != 0;
}
-extern __inline__ unsigned long change_bit(unsigned long nr, void *addr)
+extern __inline__ unsigned long change_bit(unsigned long nr, SMPVOL void *addr)
{
int mask, flags;
unsigned long *ADDR = (unsigned long *) addr;
}
/* The following routine need not be atomic. */
-extern __inline__ unsigned long test_bit(int nr, const void *addr)
+extern __inline__ unsigned long test_bit(int nr, const SMPVOL void *addr)
{
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
/* The easy/cheese version for now. */
offset &= 31UL;
if (offset) {
tmp = *(p++);
- tmp |= ~0UL << (32-offset);
+ tmp |= ~0UL >> (32-offset);
if (size < 32)
goto found_first;
if (~tmp)
/* $Id: bsderrno.h,v 1.2 1995/11/25 02:31:17 davem Exp $
- * bsderrno.h: Error numbers for NetBSD binary compatibility
+ * bsderrno.h: Error numbers for NetBSD binary compatibility
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
* alternate address space. The IDC bit must be off in the ICCR on
* HyperSparcs for these accesses to work. The code below does not do
* any checking, the caller must do so. These routines are for
- * diagnostics only, but could end up being useful. Use with care.
+ * diagnostics only, but could end up being useful. Use with care.
* Also, you are asking for trouble if you execute these in one of the
* three instructions following a %asr/%psr access or modification.
*/
-/* $Id: checksum.h,v 1.10 1995/11/25 02:31:23 davem Exp $ */
+/* $Id: checksum.h,v 1.13 1996/04/18 03:30:19 davem Exp $ */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
+ * Copyright(C) 1996 David S. Miller
+ *
+ * derived from:
+ * Alpha checksum c-code
+ * ix86 inline assembly
+ */
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
*/
+extern inline unsigned short csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ __asm__ __volatile__("
+ addcc %0, %1, %0
+ addxcc %0, %4, %0
+ addxcc %0, %5, %0
+ addx %0, %%g0, %0
+
+ ! We need the carry from the addition of 16-bit
+ ! significant addition, so we zap out the low bits
+ ! in one half, zap out the high bits in another,
+ ! shift them both up to the top 16-bits of a word
+ ! and do the carry producing addition, finally
+ ! shift the result back down to the low 16-bits.
+
+ ! Actually, we can further optimize away two shifts
+ ! because we know the low bits of the original
+ ! value will be added to zero-only bits so cannot
+ ! affect the addition result nor the final carry
+ ! bit.
+
+ sll %0, 16, %1
+ addcc %0, %1, %0 ! add and set carry, neat eh?
+ srl %0, 16, %0 ! shift back down the result
+ addx %0, %%g0, %0 ! get remaining carry bit
+ xnor %%g0, %0, %0 ! negate, sparc is cool
+ "
+ : "=&r" (sum), "=&r" (saddr)
+ : "0" (daddr), "1" (saddr), "r" (len+proto), "r" (sum));
+ return ((unsigned short) sum);
+}
-/* 32 bits version of the checksum routines written for the Alpha by Linus */
extern inline unsigned short from32to16(unsigned long x)
{
- /* add up 16-bit and 17-bit words for 17+c bits */
- x = (x & 0xffff) + (x >> 16);
- /* add up 16-bit and 2-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
+ __asm__ __volatile__("
+ addcc %0, %1, %0
+ srl %0, 16, %0
+ addx %%g0, %0, %0
+ "
+ : "=r" (x)
+ : "r" (x << 16), "0" (x));
return x;
}
-extern inline unsigned long
-do_csum(unsigned char * buff, int len)
+extern inline unsigned long do_csum(unsigned char * buff, int len)
{
int odd, count;
unsigned long result = 0;
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
- result = *buff << 8;
+ result = *buff;
len--;
buff++;
}
unsigned long w = *(unsigned long *) buff;
count--;
buff += 4;
- len -= 4;
result += carry;
result += w;
carry = (w > result);
}
}
if (len & 1)
- result += (*buff) << 8;
+ result += (*buff << 8);
result = from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
return result;
}
-extern inline unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+/* ihl is always 5 or greater, almost always is 5, iph is always word
+ * aligned but can fail to be dword aligned very often.
+ */
+extern inline unsigned short ip_fast_csum(const unsigned char *iph, unsigned int ihl)
{
- return ~do_csum(iph,ihl*4);
+ unsigned int sum;
+
+ __asm__ __volatile__("
+ ld [%1], %0
+ sub %2, 4, %2
+ ld [%1 + 0x4], %%g1
+ ld [%1 + 0x8], %%g2
+ addcc %%g1, %0, %0
+ addxcc %%g2, %0, %0
+ ld [%1 + 0xc], %%g1
+ ld [%1 + 0x10], %%g2
+ addxcc %%g1, %0, %0
+ addxcc %0, %%g0, %0
+1:
+ addcc %%g2, %0, %0
+ add %1, 0x4, %1
+ addxcc %0, %%g0, %0
+ subcc %2, 0x1, %2
+ bne,a 1b
+ ld [%1 + 0x10], %%g2
+
+ sll %0, 16, %2
+ addcc %0, %2, %2
+ srl %2, 16, %0
+ addx %0, %%g0, %2
+ xnor %%g0, %2, %0
+2:
+ "
+ : "=&r" (sum), "=&r" (iph), "=&r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "g1", "g2");
+ return sum;
}
/*
*/
extern inline unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum)
{
- unsigned long result = do_csum(buff, len);
+ __asm__ __volatile__("
+ mov 0, %%g5 ! g5 = result
+ cmp %1, 0
+ bgu,a 1f
+ andcc %0, 1, %%g7 ! g7 = odd
- /* add in old sum, and carry.. */
- result += sum;
- /* 32+c bits -> 32 bits */
- result = (result & 0xffff) + (result >> 16);
- return result;
+ b,a 9f
+
+1:
+ be,a 1f
+ srl %1, 1, %%g6 ! g6 = count = (len >> 1)
+
+ sub %1, 1, %1 ! if(odd) { result = *buff;
+ ldub [%0], %%g5 ! len--;
+ add %0, 1, %0 ! buff++ }
+
+ srl %1, 1, %%g6
+1:
+ cmp %%g6, 0 ! if (count) {
+ be,a 8f
+ andcc %1, 1, %%g0
+
+ andcc %0, 2, %%g0 ! if (2 & buff) {
+ be,a 1f
+ srl %%g6, 1, %%g6
+
+ sub %1, 2, %1 ! result += *(unsigned short *) buff;
+ lduh [%0], %%g1 ! count--;
+ sub %%g6, 1, %%g6 ! len -= 2;
+ add %%g1, %%g5, %%g5! buff += 2;
+ add %0, 2, %0 ! }
+
+ srl %%g6, 1, %%g6
+1:
+ cmp %%g6, 0 ! if (count) {
+ be,a 2f
+ andcc %1, 2, %%g0
+
+ ld [%0], %%g1 ! csum aligned 32bit words
+1:
+ add %0, 4, %0
+ addcc %%g1, %%g5, %%g5
+ addx %%g5, %%g0, %%g5
+ subcc %%g6, 1, %%g6
+ bne,a 1b
+ ld [%0], %%g1
+
+ sethi %%hi(0xffff), %%g3
+ srl %%g5, 16, %%g2
+ or %%g3, %%lo(0xffff), %%g3
+ and %%g5, %%g3, %%g5
+ add %%g2, %%g5, %%g5! }
+
+ andcc %1, 2, %%g0
+2:
+ be,a 8f ! if (len & 2) {
+ andcc %1, 1, %%g0
+
+ lduh [%0], %%g1 ! result += *(unsigned short *) buff;
+ add %%g5, %%g1, %%g5! buff += 2;
+ add %0, 2, %0 ! }
+
+
+ andcc %1, 1, %%g0
+8:
+ be,a 1f ! if (len & 1) {
+ sll %%g5, 16, %%g1
+
+ ldub [%0], %%g1
+ sll %%g1, 8, %%g1 ! result += (*buff << 8);
+ add %%g5, %%g1, %%g5! }
+
+ sll %%g5, 16, %%g1
+1:
+ addcc %%g1, %%g5, %%g5! result = from32to16(result);
+ srl %%g5, 16, %%g1
+ addx %%g0, %%g1, %%g5
+
+ orcc %%g7, %%g0, %%g0! if(odd) {
+ be 9f
+ srl %%g5, 8, %%g1
+
+ and %%g5, 0xff, %%g2! result = ((result >> 8) & 0xff) |
+ and %%g1, 0xff, %%g1! ((result & 0xff) << 8);
+ sll %%g2, 8, %%g2
+ or %%g2, %%g1, %%g5! }
+9:
+ addcc %2, %%g5, %2 ! add result and sum with carry
+ addx %%g0, %2, %2
+ " :
+ "=&r" (buff), "=&r" (len), "=&r" (sum) :
+ "0" (buff), "1" (len), "2" (sum) :
+ "g1", "g2", "g3", "g5", "g6", "g7");
+
+ return sum;
}
/*
/*
* Fold a partial checksum without adding pseudo headers
*/
-
-static inline unsigned short csum_fold(unsigned int sum)
-{
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return ~sum;
-}
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-extern inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum)
+extern inline unsigned int csum_fold(unsigned int sum)
{
- return ~from32to16 (((saddr >> 16) + (saddr & 0xffff) + (daddr >> 16)
- + (daddr & 0xffff) + (sum >> 16) +
- (sum & 0xffff) + proto + len));
+ __asm__ __volatile__("
+ addcc %0, %1, %0
+ srl %0, 16, %0
+ addx %%g0, %0, %0
+ xnor %%g0, %0, %0
+ "
+ : "=r" (sum)
+ : "r" (sum << 16), "0" (sum));
+ return sum;
}
#endif /* !(__SPARC_CHECKSUM_H) */
-/* $Id: cypress.h,v 1.2 1995/11/25 02:31:29 davem Exp $
+/* $Id: cypress.h,v 1.4 1996/03/12 17:48:12 davem Exp $
* cypress.h: Cypress module specific definitions and defines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* BM: Boot Mode -- 0 = not in boot mode, 1 = in boot mode
* C: Cacheable -- Indicates whether accesses are cacheable while
* the MMU is off. 0=no 1=yes
- * MR: MemoryReflection -- Indicates whether the bus attached to the
+ * MR: MemoryReflection -- Indicates whether the bus attached to the
* MBus supports memory reflection. 0=no 1=yes (605 only)
* CM: CacheMode -- Indicates whether the cache is operating in write
* through or copy-back mode. 0=write-through 1=copy-back
"i" (ASI_M_FLUSH_CTX));
}
+/* XXX Displacement flushes for buggy chips and initial testing
+ * XXX go here.
+ */
+
#endif /* !(_SPARC_CYPRESS_H) */
-/* $Id: dma.h,v 1.13 1996/02/17 17:32:33 miguel Exp $
+/* $Id: dma.h,v 1.15 1996/03/23 02:40:00 davem Exp $
* include/asm-sparc/dma.h
*
* Copyright 1995 (C) David S. Miller (davem@caip.rutgers.edu)
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
-#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
+#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
+#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
+#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
-#define DMA_BRST_SZ 0x000c0000 /* SBUS transfer r/w burst size */
+#define DMA_E_BURST8 0x00040000 /* ENET: SBUS r/w burst size */
+#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
+#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
* 31-18 17 16 15-8 7-4 3 2 1 0
*
* C2E: A C2 graphics error occurred. 0=no 1=yes (SS10 only)
- * MULT: Multiple errors occurred ;-O 0=no 1=prom_panic(yes)
+ * MULT: Multiple errors occurred ;-O 0=no 1=prom_panic(yes)
* SYNDROME: Controller is mentally unstable.
* DWORD:
* UNC: Uncorrectable error. 0=no 1=yes
int emu_types[FB_ATTR_NEMUTYPES]; /* supported emulations */
};
#define FBIOSATTR _IOW('F', 5, struct fbgattr) /* Unsupported: */
-#define FBIOGATTR _IOR('F', 6, struct fbgattr) /* supported */
+#define FBIOGATTR _IOR('F', 6, struct fbgattr) /* supported */
#define FBIOSVIDEO _IOW('F', 7, int)
#define FBIOGVIDEO _IOR('F', 8, int)
#define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fd_request_irq()
#define fd_free_irq() /* nothing... */
-#define fd_eject(x) sun_fd_eject()
#define FLOPPY_MOTOR_MASK 0x10
#define N_FDC 1
#define N_DRIVE 8
-/* No 64k boundary crossing problems on the Sparc. */
+/* No 64k boundary crossing problems on the Sparc. */
#define CROSS_64KB(a,s) (0)
/* Routines unique to each controller type on a Sun. */
* drive attached to a Sun controller
* and it will be at drive zero.
*/
-#if 0
- if(value & 0xf0)
-#else
- if(value & 0x10)
-#endif
- set_auxio(AUXIO_FLPY_DSEL, 0);
- else
- set_auxio(0, AUXIO_FLPY_DSEL);
+ {
+ unsigned bits = 0;
+ if (value & 0x10) bits |= AUXIO_FLPY_DSEL;
+ if ((value & 0x80) == 0) bits |= AUXIO_FLPY_EJCT;
+ set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
+ }
break;
case 5: /* FD_DATA */
sun_fdc->data_82072 = value;
pdma_areasize = pdma_size;
}
-static int sun_fd_eject(void)
-{
- if(sparc_cpu_model == sun4c) {
- set_auxio(AUXIO_FLPY_DSEL, AUXIO_FLPY_EJCT);
- udelay(1000);
- set_auxio(AUXIO_FLPY_EJCT, AUXIO_FLPY_DSEL);
- } else {
- set_dor(fdc, ~0, 0x90);
- udelay(500);
- set_dor(fdc, ~0x80, 0);
- udelay(500);
- }
- return 0;
-}
-
/* Our low-level entry point in arch/sparc/kernel/entry.S */
extern void floppy_hardint(int irq, void *unused, struct pt_regs *regs);
goto no_sun_fdc;
}
- /* We need the version as early as possible to set up the
- * function pointers correctly. Assume 82077 for probing
- * purposes.
- */
- sun_fdops.fd_inb = sun_82077_fd_inb;
- sun_fdops.fd_outb = sun_82077_fd_outb;
- fdc_status = &sun_fdc->status_82077;
-
- /* This controller detection technique is from the netbsd
- * Sun floppy driver, originally Chris Torek of BSDI came
- * up with this. It seems to work pretty well.
- */
- if(sun_fdc->dor_82077 == 0x80) {
- sun_fdc->dor_82077 = 2;
- if(sun_fdc->dor_82077 == 0x80) {
- /* Ok, it's really an 82072. */
- sun_fdops.fd_inb = sun_82072_fd_inb;
- sun_fdops.fd_outb = sun_82072_fd_outb;
- fdc_status = &sun_fdc->status_82072;
- }
+ if(sparc_cpu_model == sun4c) {
+ sun_fdops.fd_inb = sun_82072_fd_inb;
+ sun_fdops.fd_outb = sun_82072_fd_outb;
+ fdc_status = &sun_fdc->status_82072;
+ /* printk("AUXIO @0x%p\n", auxio_register); */ /* P3 */
+ } else {
+ sun_fdops.fd_inb = sun_82077_fd_inb;
+ sun_fdops.fd_outb = sun_82077_fd_outb;
+ fdc_status = &sun_fdc->status_82077;
+ /* printk("DOR @0x%p\n", &sun_fdc->dor_82077); */ /* P3 */
}
- /* P3: The only reliable way which I found for ejection
- * of boot floppy. AUXIO_FLPY_EJCT is not enough alone.
- */
- set_auxio(AUXIO_FLPY_EJCT, 0); /* Bring EJECT line to normal. */
- udelay(1000);
- sun_fd_eject(0); /* Send Eject Pulse. */
-
/* Success... */
return (int) sun_fdc;
-/* $Id: head.h,v 1.23 1996/02/15 09:12:55 davem Exp $ */
+/* $Id: head.h,v 1.26 1996/03/25 20:21:08 davem Exp $ */
#ifndef __SPARC_HEAD_H
#define __SPARC_HEAD_H
#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
#define INTS_ENAB 0x01 /* entry.S uses this. */
-#define NCPUS 4 /* Architectural limit of sun4m. */
+#define NCPUS 4 /* Architectural limit of sun4m. */
#define SUN4_PROM_VECTOR 0xFFE81000 /* To safely die on a SUN4 */
#define SUN4_PRINTF 0x84 /* Offset into SUN4_PROM_VECTOR */
/* Data/text faults. Defaults to sun4c version at boot time. */
#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
+#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b C_LABEL(srmmu_fault); mov 1, %l7;
+#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b C_LABEL(srmmu_fault); mov 0, %l7;
/* This is for traps we should NEVER get. */
#define BAD_TRAP(num) \
#define SOLARIS_SYSCALL_TRAP \
sethi %hi(C_LABEL(sys_call_table)), %l7; \
or %l7, %lo(C_LABEL(sys_call_table)), %l7; \
- b linux_sparc_syscall; \
+ b solaris_syscall; \
rd %psr, %l0;
/* Software trap for Sparc-netbsd system calls. */
#define NETBSD_SYSCALL_TRAP \
sethi %hi(C_LABEL(sys_call_table)), %l7; \
or %l7, %lo(C_LABEL(sys_call_table)), %l7; \
- b linux_sparc_syscall; \
+ b bsd_syscall; \
rd %psr, %l0;
/* The Get Condition Codes software trap for userland. */
#define GETCC_TRAP \
- b getcc_trap_handler; mov %psr, %l0; nop; nop
+ b getcc_trap_handler; mov %psr, %l0; nop; nop;
/* The Set Condition Codes software trap for userland. */
#define SETCC_TRAP \
- b setcc_trap_handler; mov %psr, %l0; nop; nop
+ b setcc_trap_handler; mov %psr, %l0; nop; nop;
/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
* gets handled with another macro.
/* NMI's (Non Maskable Interrupts) are special, you can't keep them
* from coming in, and basically if you get one, the shows over. ;(
- * On the sun4c they are usually asynchronous memory errors, on the
+ * On the sun4c they are usually asynchronous memory errors, on the
* the sun4m they could be either due to mem errors or a software
* initiated interrupt from the prom/kern on an SMP box saying "I
* command you to do CPU tricks, read your mailbox for more info."
--- /dev/null
+#ifndef _ASM_SPARC_IOCTLS_H
+#define _ASM_SPARC_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+/* Big T */
+#define TCGETA _IOR('T', 1, struct termio)
+#define TCSETA _IOW('T', 2, struct termio)
+#define TCSETAW _IOW('T', 3, struct termio)
+#define TCSETAF _IOW('T', 4, struct termio)
+#define TCSBRK _IO('T', 5)
+#define TCXONC _IO('T', 6)
+#define TCFLSH _IO('T', 7)
+#define TCGETS _IOR('T', 8, struct termios)
+#define TCSETS _IOW('T', 9, struct termios)
+#define TCSETSW _IOW('T', 10, struct termios)
+#define TCSETSF _IOW('T', 11, struct termios)
+
+/* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it
+ * someday. This is completely bogus, I know...
+ */
+#define TCGETSTAT _IO('T', 200) /* Rutgers specific */
+#define TCSETSTAT _IO('T', 201) /* Rutgers specific */
+
+/* Little t */
+#define TIOCGETD _IOR('t', 0, int)
+#define TIOCSETD _IOW('t', 1, int)
+#define TIOCHPCL _IO('t', 2) /* SunOS Specific */
+#define TIOCMODG _IOR('t', 3, int) /* SunOS Specific */
+#define TIOCMODS _IOW('t', 4, int) /* SunOS Specific */
+#define TIOCGETP _IOR('t', 8, struct sgttyb) /* SunOS Specific */
+#define TIOCSETP _IOW('t', 9, struct sgttyb) /* SunOS Specific */
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* SunOS Specific */
+#define TIOCEXCL _IO('t', 13)
+#define TIOCNXCL _IO('t', 14)
+#define TIOCFLUSH _IOW('t', 16, int) /* SunOS Specific */
+#define TIOCSETC _IOW('t', 17, struct tchars) /* SunOS Specific */
+#define TIOCGETC _IOR('t', 18, struct tchars) /* SunOS Specific */
+#define TIOCTCNTL _IOW('t', 32, int) /* SunOS Specific */
+#define TIOCSIGNAL _IOW('t', 33, int) /* SunOS Specific */
+#define TIOCSETX _IOW('t', 34, int) /* SunOS Specific */
+#define TIOCGETX _IOR('t', 35, int) /* SunOS Specific */
+#define TIOCCONS _IO('t', 36)
+#define TIOCSSIZE _IOW('t', 37, struct sunos_ttysize) /* SunOS Specific */
+#define TIOCGSIZE _IOR('t', 38, struct sunos_ttysize) /* SunOS Specific */
+#define TIOCGSOFTCAR _IOR('t', 100, int)
+#define TIOCSSOFTCAR _IOW('t', 101, int)
+#define TIOCUCNTL _IOW('t', 102, int) /* SunOS Specific */
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCREMOTE _IOW('t', 105, int) /* SunOS Specific */
+#define TIOCMGET _IOR('t', 106, int)
+#define TIOCMBIC _IOW('t', 107, int)
+#define TIOCMBIS _IOW('t', 108, int)
+#define TIOCMSET _IOW('t', 109, int)
+#define TIOCSTART _IO('t', 110) /* SunOS Specific */
+#define TIOCSTOP _IO('t', 111) /* SunOS Specific */
+#define TIOCPKT _IOW('t', 112, int)
+#define TIOCNOTTY _IO('t', 113)
+#define TIOCSTI _IOW('t', 114, char)
+#define TIOCOUTQ _IOR('t', 115, int)
+#define TIOCGLTC _IOR('t', 116, struct ltchars) /* SunOS Specific */
+#define TIOCSLTC _IOW('t', 117, struct ltchars) /* SunOS Specific */
+/* 118 is the non-posix setpgrp tty ioctl */
+/* 119 is the non-posix getpgrp tty ioctl */
+#define TIOCCDTR _IO('t', 120) /* SunOS Specific */
+#define TIOCSDTR _IO('t', 121) /* SunOS Specific */
+#define TIOCCBRK _IO('t', 122) /* SunOS Specific */
+#define TIOCSBRK _IO('t', 123) /* SunOS Specific */
+#define TIOCLGET _IOW('t', 124, int) /* SunOS Specific */
+#define TIOCLSET _IOW('t', 125, int) /* SunOS Specific */
+#define TIOCLBIC _IOW('t', 126, int) /* SunOS Specific */
+#define TIOCLBIS _IOW('t', 127, int) /* SunOS Specific */
+#define TIOCISPACE _IOR('t', 128, int) /* SunOS Specific */
+#define TIOCISIZE _IOR('t', 129, int) /* SunOS Specific */
+#define TIOCSPGRP _IOW('t', 130, int)
+#define TIOCGPGRP _IOR('t', 131, int)
+#define TIOCSCTTY _IO('t', 132)
+
+/* Little f */
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+
+/* Linux specific, no SunOS equivalent. */
+#define TIOCLINUX 0x541C
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TCSBRKP 0x5425
+#define TIOCTTYGSTRUCT 0x5426
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#endif /* !(_ASM_SPARC_IOCTLS_H) */
/* The format of an iopte in the page tables */
#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
+#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
#define IOPTE_WRITE 0x00000004 /* Writeable */
#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
#define IOPTE_WAZ 0x00000001 /* Write as zeros */
struct iommu_struct {
struct iommu_regs *regs;
iopte_t *page_table;
+ iopte_t *lowest; /* to speed up searches... */
+ unsigned long plow;
/* For convenience */
unsigned long start; /* First managed virtual address */
unsigned long end; /* Last managed virtual address */
-/* $Id: irq.h,v 1.8 1995/11/25 02:31:54 davem Exp $
+/* $Id: irq.h,v 1.12 1996/04/03 02:17:34 davem Exp $
* irq.h: IRQ registers on the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define NR_IRQS 15
-extern void disable_irq(unsigned int);
-extern void enable_irq(unsigned int);
+/* Dave Redman (djhr@tadpole.co.uk)
+ * changed these to function pointers.. it saves cycles and will allow
+ * the irq dependencies to be split into different files at a later date
+ * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
+ */
+extern void (*disable_irq)(unsigned int);
+extern void (*enable_irq)(unsigned int);
+extern void (*clear_clock_irq)( void );
+extern void (*clear_profile_irq)( void );
+extern void (*load_profile_irq)( unsigned int timeout );
+extern void (*init_timers)(void (*lvl10_irq)(int, void *, struct pt_regs *));
+extern void claim_ticker14(void (*irq_handler)(int, void *, struct pt_regs *),
+ int irq,
+ unsigned int timeout);
+
+#ifdef __SMP__
+extern void (*set_cpu_int)(int, int);
+extern void (*clear_cpu_int)(int, int);
+extern void (*set_irq_udt)(int);
+#endif
extern int request_fast_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname);
*/
/* These registers are used for sending/receiving irqs from/to
- * different cpu's.
+ * different cpu's.
*/
struct sun4m_intreg_percpu {
unsigned int tbt; /* Interrupts still pending for this cpu. */
unsigned char space[PAGE_SIZE - 12];
};
+/*
+ * djhr
+ * Actually the clear and set fields in this struct are misleading..
+ * according to the SLAVIO manual (and the same applies for the SEC)
+ * the clear field clears bits in the mask which will ENABLE that IRQ
+ * the set field sets bits in the mask to DISABLE the IRQ.
+ *
+ * Also the undirected_xx address in the SLAVIO is defined as
+ * RESERVED and write only..
+ *
+ * DAVEM_NOTE: The SLAVIO only specifies behavior on uniprocessor
+ * sun4m machines, for MP the layout makes more sense.
+ */
struct sun4m_intregs {
struct sun4m_intreg_percpu cpu_intregs[NCPUS];
unsigned int tbt; /* IRQ's that are still pending. */
extern struct sun4m_intregs *sun4m_interrupts;
-/* Bit field defines for the interrupt registers on various
+/*
+ * Bit field defines for the interrupt registers on various
* Sparc machines.
*/
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
-/* The sun4m interrupt registers. MUST RESEARCH THESE SOME MORE XXX */
-#define SUN4M_INT_ENABLE 0x80000000
-#define SUN4M_INT_E14 0x00000080
-#define SUN4M_INT_E10 0x00080000
-
-/* XXX add cross-cpu ipi functions XXX */
+/* Dave Redman (djhr@tadpole.co.uk)
+ * The sun4m interrupt registers.
+ */
+#define SUN4M_INT_ENABLE 0x80000000
+#define SUN4M_INT_E14 0x00000080
+#define SUN4M_INT_E10 0x00080000
+
+#define SUN4M_HARD_INT(x) (0x000000001 << (x))
+#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
+
+#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
+#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
+#define SUN4M_INT_M2S_WRITE 0x20000000 /* write buffer error */
+#define SUN4M_INT_ECC 0x10000000 /* ecc memory error */
+#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
+#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
+#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
+#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
+#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
+#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
+#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
+#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
+#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
+#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
+
+#define SUN4M_INT_SBUS(x) (1 << (x+7))
#endif
-/* $Id: mbus.h,v 1.5 1995/11/25 02:32:00 davem Exp $
+/* $Id: mbus.h,v 1.6 1996/04/16 09:34:31 zaitcev Exp $
* mbus.h: Various defines for MBUS modules.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
extern unsigned int hwbug_bitmask;
/* First the module type values. To find out which you have, just load
- * the mmu control register from ASI_M_MMUREG alternate address space and
+ * the mmu control register from ASI_M_MMUREG alternate address space and
* shift the value right 28 bits.
*/
/* IMPL field means the company which produced the chip. */
/* The CPU ID is encoded in the trap base register, 20 bits to the left of
* bit zero, with 2 bits being significant.
*/
-#define TBR_ID_SHIFT 0x20
+#define TBR_ID_SHIFT 20
extern inline int get_cpuid(void)
{
#define SUN4C_SYNC_SIZE 0x0002 /* bad access size? whuz this? */
#define SUN4C_SYNC_PARITY 0x0008 /* bad ram chips caused a parity error */
#define SUN4C_SYNC_SBUS 0x0010 /* the SBUS had some problems... */
-#define SUN4C_SYNC_NOMEM 0x0020 /* translation to non-existent ram */
+#define SUN4C_SYNC_NOMEM 0x0020 /* translation to non-existant ram */
#define SUN4C_SYNC_PROT 0x0040 /* access violated pte protections */
#define SUN4C_SYNC_NPRESENT 0x0080 /* pte said that page was not present */
#define SUN4C_SYNC_BADWRITE 0x8000 /* while writing something went bogus */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
#define MAP_LOCKED 0x100 /* lock the mapping */
-#define _MAP_NEW 0x80000000 /* Binary compatibility is fun... */
+#define _MAP_NEW 0x80000000 /* Binary compatability is fun... */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
/* Control register values. */
#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
#define MSTK_CREG_READ 0x40 /* Stop the clock, I want to fetch values. */
-#define MSTK_CREG_SIGN 0x20 /* Grrr... what's this??? */
+#define MSTK_CREG_SIGN 0x20 /* Grrr... whats this??? */
#define MSTK_YR_ZERO 1968 /* If year reg has zero, it is 1968 */
#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YR_ZERO)
-/* $Id: mp.h,v 1.2 1995/11/25 02:32:06 davem Exp $
+/* $Id: mp.h,v 1.3 1996/03/25 20:21:09 davem Exp $
* mp.h: Multiprocessing definitions for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/page.h>
#include <asm/vaddrs.h>
-extern int linux_smp_still_initting;
-
struct sparc_percpu {
- struct tt_entry trap_table[NUM_SPARC_TRAPS]; /* One page */
- unsigned int kernel_stack[PAGE_SIZE/4]; /* One page */
+ struct tt_entry *trap_table;
+ char *kernel_stack[PAGE_SIZE<<1];
int cpuid; /* Who am I? */
int cpu_is_alive; /* Linux has fired it up. */
int cpu_is_idling; /* Is sitting in the idle loop. */
*/
#define MAILBOX_BPT_SPIN 0xfd
-/* Oh geese, some other nitwit got a damn watchdog reset. The party's
+/* Oh geese, some other nitwit got a damn watchdog reset. The party's
* over so go call prom_stopcpu().
*/
#define MAILBOX_WDOG_STOP 0xfe
--- /dev/null
+/* $Id: msi.h,v 1.1 1996/04/20 10:14:32 davem Exp $
+ * msi.h: Defines specific to the MBus - Sbus - Interface.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@pool.informatik.rwth-aachen.de)
+ */
+
+#ifndef _SPARC_MSI_H
+#define _SPARC_MSI_H
+
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
+
+
+extern inline void msi_set_sync(void)
+{
+ __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+ "andn %%g3, %2, %%g3\n\t"
+ "sta %%g3, [%0] %1\n\t" : :
+ "r" (MSI_MBUS_ARBEN),
+ "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
+#endif /* !(_SPARC_MSI_H) */
-/* $Id: mxcc.h,v 1.2 1995/11/25 02:32:11 davem Exp $
+/* $Id: mxcc.h,v 1.3 1996/04/20 10:15:44 davem Exp $
* mxcc.h: Definitions of the Viking MXCC registers
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define MXCC_EREG 0x1C00E00 /* Error code register */
#define MXCC_PREG 0x1C00F04 /* Address port register */
+/* Some MXCC constants. */
+#define MXCC_STREAM_SIZE 0x20 /* Size in bytes of one stream r/w */
+
/* The MXCC Control Register:
*
* ----------------------------------------------------------------------
* 31 30 29 28 27 26 25 24-15 14-7 6 5-3 2-0
*
* ME: Multiple Errors have occurred
- * CE: Cache consistency Error
+ * CE: Cache consistency Error
* PEW: Parity Error during a Write operation
* PEE: Parity Error involving the External cache
* ASE: ASynchronous Error
* MID: The moduleID of the cpu your read this from.
*/
+extern inline void mxcc_set_stream_src(unsigned long *paddr)
+{
+ unsigned long data0 = paddr[0];
+ unsigned long data1 = paddr[1];
+
+ __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
+ "or %%g0, %1, %%g3\n\t"
+ "stda %%g2, [%2] %3\n\t" : :
+ "r" (data0), "r" (data1),
+ "r" (MXCC_SRCSTREAM),
+ "i" (ASI_M_MXCC) : "g2", "g3");
+}
+
+extern inline void mxcc_set_stream_dst(unsigned long *paddr)
+{
+ unsigned long data0 = paddr[0];
+ unsigned long data1 = paddr[1];
+
+ __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
+ "or %%g0, %1, %%g3\n\t"
+ "stda %%g2, [%2] %3\n\t" : :
+ "r" (data0), "r" (data1),
+ "r" (MXCC_DESSTREAM),
+ "i" (ASI_M_MXCC) : "g2", "g3");
+}
+
#endif /* !(_SPARC_MXCC_H) */
-/* $Id: oplib.h,v 1.6 1996/01/01 02:47:19 davem Exp $
+/* $Id: oplib.h,v 1.7 1996/04/04 16:31:25 tridge Exp $
* oplib.h: Describes the interface and available routines in the
* Linux Prom library.
*
/* Enumeration to describe the prom major version we have detected. */
enum prom_major_version {
- PROM_V0, /* Original sun4c V0 prom */
+ PROM_V0, /* Origional sun4c V0 prom */
PROM_V2, /* sun4c and early sun4m V2 prom */
PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
+ PROM_AP1000, /* actually no prom at all */
};
extern enum prom_major_version prom_vers;
* entries. One for the total amount of physical ram on the machine, one
* for the amount of physical ram available, and one describing the virtual
* areas which are allocated by the prom. So, in a sense the physical
- * available is a calculation of the total physical minus the physical mapped
+ * available is a calculation of the total physical minus the physical mapped
* by the prom with virtual mappings.
*
* These lists are returned pre-sorted, this should make your life easier
-/* $Id: page.h,v 1.26 1996/01/03 03:53:07 davem Exp $
+/* $Id: page.h,v 1.27 1996/04/18 01:33:42 davem Exp $
* page.h: Various defines and such for MMU operations on the Sparc for
* the Linux kernel.
*
extern struct cache_palias *sparc_aliases;
-#define STRICT_MM_TYPECHECKS
+/* passing structs on the Sparc slow us down tremendously... */
+
+/* #define STRICT_MM_TYPECHECKS */
#ifdef STRICT_MM_TYPECHECKS
/*
/* $Id: pconf.h,v 1.2 1995/11/25 02:32:20 davem Exp $
* pconf.h: pathconf() and fpathconf() defines for SunOS
- * system call compatibility.
+ * system call compatibility.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#define _PCONF_LINK 1 /* Max number of links to an object */
#define _PCONF_CANON 2 /* TTY input buffer line size */
-#define _PCONF_INPUT 3 /* Biggest packet a tty can imbibe at once */
+#define _PCONF_INPUT 3 /* Biggest packet a tty can inbibe at once */
#define _PCONF_NAME 4 /* Filename length max */
#define _PCONF_PATH 5 /* Max size of a pathname */
#define _PCONF_PIPE 6 /* Buffer size for a pipe */
-/* $Id: pgtable.h,v 1.35 1996/02/21 17:57:30 miguel Exp $ */
+/* $Id: pgtable.h,v 1.45 1996/04/18 03:29:21 davem Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H
#include <asm/sbus.h>
extern void load_mmu(void);
+extern int io_remap_page_range(unsigned long from, unsigned long to,
+ unsigned long size, pgprot_t prot, int space);
extern void (*quick_kernel_fault)(unsigned long);
extern void (*mmu_exit_hook)(void);
extern void (*mmu_flush_hook)(void);
+/* translate between physical and virtual addresses */
+extern unsigned long (*mmu_v2p)(unsigned long);
+extern unsigned long (*mmu_p2v)(unsigned long);
+
/* Routines for data transfer buffers. */
extern char *(*mmu_lockarea)(char *, unsigned long);
extern void (*mmu_unlockarea)(char *, unsigned long);
/* Routines for getting a dvma scsi buffer. */
-extern char *(*mmu_get_scsi_buffer)(char *, unsigned long, struct linux_sbus *sbus);
-extern void (*mmu_release_scsi_buffer)(char *, unsigned long, struct linux_sbus *sbus);
+struct mmu_sglist {
+ /* ick, I know... */
+ char *addr;
+ char *alt_addr;
+ unsigned int len;
+};
+extern char *(*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+extern void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
+extern void (*mmu_release_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+extern void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
extern unsigned int pmd_shift;
extern unsigned int pmd_size;
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE (&empty_zero_page)
+#define ZERO_PAGE ((unsigned long)(&(empty_zero_page)))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
extern int (*pte_none)(pte_t);
extern int (*pte_present)(pte_t);
-extern int (*pte_inuse)(pte_t *);
extern void (*pte_clear)(pte_t *);
-extern void (*pte_reuse)(pte_t *);
extern int (*pmd_none)(pmd_t);
extern int (*pmd_bad)(pmd_t);
extern int (*pmd_present)(pmd_t);
-extern int (*pmd_inuse)(pmd_t *);
extern void (*pmd_clear)(pmd_t *);
-extern void (*pmd_reuse)(pmd_t *);
extern int (*pgd_none)(pgd_t);
extern int (*pgd_bad)(pgd_t);
extern int (*pgd_present)(pgd_t);
-extern int (*pgd_inuse)(pgd_t *);
extern void (*pgd_clear)(pgd_t *);
-extern void (*pgd_reuse)(pgd_t *);
/*
* The following only work if pte_present() is true.
* and a page entry and page directory to the page they refer to.
*/
extern pte_t (*mk_pte)(unsigned long, pgprot_t);
-extern pte_t (*mk_pte_io)(unsigned long, pgprot_t);
+extern pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
extern void (*pgd_set)(pgd_t *, pmd_t *);
extern pgd_t * (*pgd_alloc)(void);
-/* Fine grained invalidation. */
-extern void (*invalidate_all)(void);
-extern void (*invalidate_mm)(struct mm_struct *);
-extern void (*invalidate_range)(struct mm_struct *, unsigned long start, unsigned long end);
-extern void (*invalidate_page)(struct vm_area_struct *, unsigned long address);
+/* Fine grained cache/tlb flushing. */
+
+#ifdef __SMP__
+extern void (*local_flush_cache_all)(void);
+extern void (*local_flush_cache_mm)(struct mm_struct *);
+extern void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+extern void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+extern void (*local_flush_tlb_all)(void);
+extern void (*local_flush_tlb_mm)(struct mm_struct *);
+extern void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+extern void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+
+extern void (*local_flush_page_to_ram)(unsigned long address);
+
+extern void smp_flush_cache_all(void);
+extern void smp_flush_cache_mm(struct mm_struct *mm);
+extern void smp_flush_cache_range(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+
+extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+extern void smp_flush_tlb_range(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
+extern void smp_flush_page_to_ram(unsigned long page);
+#endif
+
+extern void (*flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *);
+extern void (*flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+extern void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+extern void (*flush_tlb_all)(void);
+extern void (*flush_tlb_mm)(struct mm_struct *);
+extern void (*flush_tlb_range)(struct mm_struct *, unsigned long start, unsigned long end);
+extern void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+
+extern void (*flush_page_to_ram)(unsigned long page);
/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
+
+#if 0 /* XXX try this soon XXX */
+extern void (*set_pte)(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pteptr, pte_t pteval);
+#else
extern void (*set_pte)(pte_t *pteptr, pte_t pteval);
+#endif
extern char *(*mmu_info)(void);
extern int invalid_segment;
#define SWP_TYPE(entry) (((entry)>>2) & 0x7f)
-#define SWP_OFFSET(entry) ((entry) >> 9)
+#define SWP_OFFSET(entry) (((entry) >> 9) & 0x7ffff)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))
struct ctx_list {
struct ctx_list *next;
struct ctx_list *prev;
- unsigned char ctx_number;
+ unsigned int ctx_number;
struct mm_struct *ctx_mm;
};
-/* $Id: pgtsrmmu.h,v 1.13 1996/03/01 07:20:54 davem Exp $
+/* $Id: pgtsrmmu.h,v 1.16 1996/04/04 16:31:32 tridge Exp $
* pgtsrmmu.h: SRMMU page table defines and code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H
+#include <linux/config.h>
#include <asm/page.h>
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_TABLE_SIZE 0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE 0x400 /* 256 entries, 4 bytes a piece */
-#define SRMMU_VMALLOC_START (0xfe100000)
+#define SRMMU_VMALLOC_START (0xfe200000)
/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK 0x3
extern inline void srmmu_set_ctable_ptr(unsigned long paddr)
{
paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
+#if CONFIG_AP1000
+ /* weird memory system on the AP1000 */
+ paddr |= (0x8<<28);
+#endif
__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
"r" (paddr), "r" (SRMMU_CTXTBL_PTR),
"i" (ASI_M_MMUREGS) :
return retval;
}
-/* This is guaranteed on all SRMMU's. */
+/* This is guaranteed on all SRMMU's. */
extern inline void srmmu_flush_whole_tlb(void)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
{
unsigned long retval;
+ vaddr &= PAGE_MASK;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (retval) :
"r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
-/* $Id: pgtsun4c.h,v 1.22 1996/01/24 02:33:45 davem Exp $
+/* $Id: pgtsun4c.h,v 1.24 1996/03/26 06:51:56 miguel Exp $
* pgtsun4c.h: Sun4c specific pgtable.h defines and code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* translations at KERNBASE + 128MB for 1MB, then we begin the VMALLOC
* area, makes sense. This works out to the value below.
*/
-#define SUN4C_VMALLOC_START (0xfe100000)
+#define SUN4C_VMALLOC_START (0xfe200000)
/*
* Sparc SUN4C pte fields.
_SUN4C_PAGE_PRIV | _SUN4C_PAGE_DIRTY | \
_SUN4C_PAGE_REF | _SUN4C_PAGE_NOCACHE)
-extern char *sun4c_lockarea(char *vaddr, unsigned long size);
-extern void sun4c_unlockarea(char *vaddr, unsigned long size);
-
extern __inline__ unsigned long sun4c_get_synchronous_error(void)
{
unsigned long sync_err;
--- /dev/null
+#ifndef __ARCH_SPARC_POSIX_TYPES_H
+#define __ARCH_SPARC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+/* When cross-compilation is no longer an issue, fix this. */
+#ifdef __svr4__
+typedef unsigned int __kernel_size_t; /* solaris sucks */
+#else
+typedef long unsigned int __kernel_size_t; /* sunos is much better */
+#endif /* !(__svr4__) */
+
+typedef int __kernel_ssize_t;
+typedef long int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned short __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_umode_t;
+typedef short __kernel_nlink_t;
+typedef long __kernel_daddr_t;
+typedef long __kernel_off_t;
+typedef char * __kernel_caddr_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant cases (4 or 8 longs,
+ * for 256 and 512-bit fd_sets respectively)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned int *tmp = p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_INTS)) {
+ switch (__FDSET_INTS) {
+ case 8:
+ tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
+ tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
+ return;
+ case 4:
+ tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_INTS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* !(__ARCH_SPARC_POSIX_TYPES_H) */
-/* $Id: processor.h,v 1.40 1996/02/03 10:06:01 davem Exp $
+/* $Id: processor.h,v 1.43 1996/03/23 02:40:05 davem Exp $
* include/asm-sparc/processor.h
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
#define MCA_bus__is_a_macro /* for versions in ksyms.c */
/*
- * Write Protection works right in supervisor mode on the Sparc...
- * And then there came the Swift module, which isn't so swift...
+ * The sparc has no problems with write protection
*/
-extern char wp_works_ok;
+#define wp_works_ok 1
+#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
/* Whee, this is STACK_TOP and the lowest kernel address too... */
#define TASK_SIZE (KERNBASE)
regs->pc = ((pc & (~3)) - 4);
regs->npc = regs->pc + 4;
regs->psr = saved_psr;
- regs->u_regs[UREG_G1] = sp; /* Base of arg/env stack area */
- regs->u_regs[UREG_G2] = regs->u_regs[UREG_G7] = regs->npc;
regs->u_regs[UREG_FP] = (sp - REGWIN_SZ);
}
+#ifdef __KERNEL__
extern unsigned long (*alloc_kernel_stack)(struct task_struct *tsk);
extern void (*free_kernel_stack)(unsigned long stack);
extern struct task_struct *(*alloc_task_struct)(void);
extern void (*free_task_struct)(struct task_struct *tsk);
+#endif
#endif /* __ASM_SPARC_PROCESSOR_H */
-/* $Id: ross.h,v 1.4 1996/01/03 03:53:20 davem Exp $
+/* $Id: ross.h,v 1.9 1996/04/08 08:34:21 davem Exp $
* ross.h: Ross module specific definitions and defines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define HYPERSPARC_NFAULT 0x00000002
#define HYPERSPARC_MENABLE 0x00000001
-/* Flushes which clear out only the on-chip Ross HyperSparc ICACHE. */
-extern inline void hyper_flush_i_page(unsigned int addr)
-{
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
- "r" (addr), "i" (ASI_M_IFLUSH_PAGE) :
- "memory");
- return;
-}
-
-extern inline void hyper_flush_i_seg(unsigned int addr)
-{
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
- "r" (addr), "i" (ASI_M_IFLUSH_SEG) :
- "memory");
- return;
-}
-
-extern inline void hyper_flush_i_region(unsigned int addr)
-{
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
- "r" (addr), "i" (ASI_M_IFLUSH_REGION) :
- "memory");
- return;
-}
-
-extern inline void hyper_flush_i_ctx(unsigned int addr)
-{
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
- "r" (addr), "i" (ASI_M_IFLUSH_CTX) :
- "memory");
- return;
-}
-
-extern inline void hyper_flush_i_user(unsigned int addr)
-{
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
- "r" (addr), "i" (ASI_M_IFLUSH_USER) :
- "memory");
- return;
-}
-
-/* Finally, flush the entire ICACHE. */
-extern inline void hyper_flush_whole_icache(void)
-{
- __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
- "i" (ASI_M_FLUSH_IWHOLE));
- return;
-}
-
/* The ICCR instruction cache register on the HyperSparc.
*
* -----------------------------------------------
- * | | FTD | IDC |
+ * | | FTD | ICE |
* -----------------------------------------------
* 31 1 0
*
*
* All other bits are read as zeros, and writes to them have no
* effect.
+ *
+ * Wheee, not many assemblers understand the %iccr register nor
+ * the generic asr r/w instructions.
+ *
+ * 1000 0011 0100 0111 1100 0000 0000 0000 ! rd %iccr, %g1
+ *
+ * 0x 8 3 4 7 c 0 0 0 ! 0x8347c000
+ *
+ * 1011 1111 1000 0000 0110 0000 0000 0000 ! wr %g1, 0x0, %iccr
+ *
+ * 0x b f 8 0 6 0 0 0 ! 0xbf806000
+ *
*/
+#define HYPERSPARC_ICCR_FTD 0x00000002
+#define HYPERSPARC_ICCR_ICE 0x00000001
+
extern inline unsigned int get_ross_icr(void)
{
unsigned int icreg;
- __asm__ __volatile__(".word 0xbf402000\n\t" : /* rd %iccr, %g1 */
+ __asm__ __volatile__(".word 0x8347c000\n\t" /* rd %iccr, %g1 */
+ "mov %%g1, %0\n\t" :
"=r" (icreg) : :
"g1", "memory");
extern inline void put_ross_icr(unsigned int icreg)
{
__asm__ __volatile__("or %%g0, %0, %%g1\n\t"
- ".word 0xbf802000\n\t" /* wr %g1, 0x0, %iccr */
+ ".word 0xbf806000\n\t" /* wr %g1, 0x0, %iccr */
"nop\n\t"
"nop\n\t"
"nop\n\t" : :
/* HyperSparc specific cache flushing. */
+/* This is for the on-chip instruction cache. */
+extern inline void hyper_flush_whole_icache(void)
+{
+ __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
+ "i" (ASI_M_FLUSH_IWHOLE));
+ return;
+}
+
extern int hyper_cache_size;
+extern int hyper_line_size;
-extern inline void hyper_flush_all_combined(void)
+extern inline void hyper_clear_all_tags(void)
{
unsigned long addr;
- for(addr = 0; addr < hyper_cache_size; addr += 32)
- __asm__ __volatile__("sta %%g0, [%0] 0xe\n\t" : :
- "r" (addr));
+ for(addr = 0; addr < hyper_cache_size; addr += hyper_line_size)
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (addr), "i" (ASI_M_DATAC_TAG));
}
+extern inline void hyper_flush_unconditional_combined(void)
+{
+ unsigned long addr;
+ for(addr = 0; addr < hyper_cache_size; addr += hyper_line_size)
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (addr), "i" (ASI_M_FLUSH_CTX));
+}
+
+extern inline void hyper_flush_cache_user(void)
+{
+ unsigned long addr;
+
+ for(addr = 0; addr < hyper_cache_size; addr += hyper_line_size)
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (addr), "i" (ASI_M_FLUSH_USER));
+}
+
+extern inline void hyper_flush_cache_page(unsigned long page)
+{
+ unsigned long end;
+
+ page &= PAGE_MASK;
+ end = page + PAGE_SIZE;
+ while(page < end) {
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (page), "i" (ASI_M_FLUSH_PAGE));
+ page += hyper_line_size;
+ }
+}
#endif /* !(_SPARC_ROSS_H) */
-/* $Id: segment.h,v 1.9 1996/01/14 00:05:33 davem Exp $ */
+/* $Id: segment.h,v 1.10 1996/03/08 01:19:38 miguel Exp $ */
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
+#ifdef __KERNEL__
#include <asm/vac-ops.h>
+#endif
#ifndef __ASSEMBLY__
-/* $Id: signal.h,v 1.17 1996/03/01 07:21:02 davem Exp $ */
+/* $Id: signal.h,v 1.20 1996/03/24 20:21:27 davem Exp $ */
#ifndef _ASMSPARC_SIGNAL_H
#define _ASMSPARC_SIGNAL_H
* irq handling routines.
*
* SA_INTERRUPT is also used by the irq handling routines.
+ *
+ * DJHR
+ * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
+ * interrupt handler's irq structure should be statically allocated
+ * by the request_irq routine.
+ * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
+ * of interrupt usage and that sucks. Also without a flag like this
+ * it may be possible for the free_irq routine to attempt to free
+ * statically allocated data.. which is NOT GOOD.
+ *
*/
#define SA_PROBE SA_ONESHOT
#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_STATIC_ALLOC 0x80
#endif
/* Type of a signal handler. */
+#ifdef __KERNEL__
typedef void (*__sighandler_t)(int, int, struct sigcontext_struct *, char *);
+#else
+typedef void (*__sighandler_t)(int);
+#endif
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H
-#include <asm/bitops.h>
-#include <asm/ptrace.h>
+#ifdef __SMP__
-/* Per processor Sparc parameters. */
+#ifndef __ASSEMBLY__
+
+/* PROM provided per-processor information we need
+ * to start them all up.
+ */
+
+struct prom_cpuinfo {
+ int prom_node;
+ int mid;
+};
+
+extern struct prom_cpuinfo linux_cpus[NCPUS];
+
+/* Per processor Sparc parameters we need. */
struct cpuinfo_sparc {
- unsigned char impl;
- unsigned char vers;
- unsigned long udelay_val;
+ unsigned long udelay_val; /* thats it */
};
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
-typedef klock_t volatile unsigned char;
-extern klock_t kernel_lock;
+typedef volatile unsigned char klock_t;
+extern klock_t kernel_flag;
#define KLOCK_HELD 0xff
#define KLOCK_CLEAR 0x00
-struct sparc_ipi_invalidate {
- struct mm_struct *mm;
- unsigned long addr; /* page for inv_pg, start for inv_rnge */
- unsigned long end; /* Used for inv_rnge only. */
-};
-
-struct sparc_ipimsg {
- union {
- /* Add more here as we need them. */
- struct sparc_ipi_invalidate invmsg;
- };
-};
-
-extern void smp_scan_prom_for_cpus(unsigned long, unsigned long);
-extern unsigned long smp_alloc_memory(unsigned long mem_base);
-extern unsigned long *kernel_stacks[NR_CPUS];
+/*
+ * Private routines/data
+ */
+
+extern int smp_found_cpus;
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
-extern volatile unsigned long smp_invalidate_needed;
-extern unsigned long kernel_counter;
+extern volatile unsigned long smp_invalidate_needed[NR_CPUS];
+extern volatile unsigned long kernel_counter;
extern volatile unsigned char active_kernel_processor;
-extern void smp_message_irq(int cpl, struct pt_regs *regs);
-extern void smp_reschedule_irq(int cpl, struct pt_regs *regs);
-extern void smp_invalidate_rcv(void);
-extern volatils unsigned long syscall_count;
+extern void smp_message_irq(void);
+extern unsigned long ipi_count;
+extern volatile unsigned long kernel_counter;
+extern volatile unsigned long syscall_count;
+
+extern void print_lock_state(void);
-extern void (*smp_invalidate_all)(void);
-extern void (*smp_invalidate_mm)(struct mm_struct *);
-extern void (*smp_invalidate_range)(struct mm_struct *, unsigned long, unsigned long);
-extern void (*smp_invalidate_page)(struct vm_area_struct *, unsigned long);
+typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+/*
+ * General functions that each host system must provide.
+ */
extern void smp_callin(void);
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);
-
-extern _inline_ int smp_processor_id(void)
+extern void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5);
+extern void smp_capture(void);
+extern void smp_release(void);
+
+extern inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
+extern inline void xc1(smpfunc_t func, unsigned long arg1)
+{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
+extern inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
+{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
+extern inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3)
+{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
+extern inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4)
+{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
+extern inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
+
+extern volatile int cpu_number_map[NR_CPUS];
+extern volatile int cpu_logical_map[NR_CPUS];
+
+extern __inline int smp_processor_id(void)
{
int cpuid;
__asm__ __volatile__("rd %%tbr, %0\n\t"
- "srl %0, 24, %0\n\t"
+ "srl %0, 12, %0\n\t"
"and %0, 3, %0\n\t" :
- "=&r" (cpuid) :
- "0" (cpuid));
+ "=&r" (cpuid));
return cpuid;
}
-/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
+
+extern volatile unsigned long smp_proc_in_lock[NR_CPUS]; /* for computing process time */
+extern volatile int smp_process_available;
+
+extern inline int smp_swap(volatile int *addr, int value)
+{
+ __asm__ __volatile__("swap [%2], %0\n\t" :
+ "=&r" (value) :
+ "0" (value), "r" (addr));
+ return value;
+}
+
+extern inline volatile void inc_smp_counter(volatile int *ctr)
+{
+ int tmp;
+
+ while((tmp = smp_swap(ctr, -1)) == -1)
+ ;
+ smp_swap(ctr, (tmp + 1));
+}
+
+extern inline volatile void dec_smp_counter(volatile int *ctr)
+{
+ int tmp;
+
+ while((tmp = smp_swap(ctr, -1)) == -1)
+ ;
+ smp_swap(ctr, (tmp - 1));
+}
+
+extern inline volatile int read_smp_counter(volatile int *ctr)
+{
+ int value;
+
+ while((value = *ctr) == -1)
+ ;
+ return value;
+}
+
+#endif /* !(__ASSEMBLY__) */
+
+/* Sparc specific messages. */
+#define MSG_CAPTURE 0x0004 /* Park a processor. */
+#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
+
+/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
* contains something other than one of these then the ipi is from
* Linux's active_kernel_processor. This facility exists so that
* the boot monitor can capture all the other cpus when one catches
#define NO_PROC_ID 0xFF
-#define PROC_CHANGE_PENALTY 0x23
+#define PROC_CHANGE_PENALTY 20
+
+#define SMP_FROM_INT 1
+#define SMP_FROM_SYSCALL 2
+
+#endif /* !(__SMP__) */
#endif /* !(_SPARC_SMP_H) */
#ifndef __SPARC_SMPLOCK_H
#define __SPARC_SMPLOCK_H
-#ifdef __SMP__
+#include <asm/smp.h>
+#include <asm/bitops.h>
+#include <asm/atops.h>
+#include <asm/pgtable.h>
-extern _inline_ unsigned char ldstub(klock_t *lock)
-{
- klock_t retval;
+#ifdef __SMP__
- __asm__ __volatile__("ldstub [%1], %0\n\t" :
- "=r" (retval) :
- "r" (lock));
- return retval;
-}
+/*
+ * Locking the kernel
+ */
/* Knock knock... */
-extern _inline_ void lock_kernel(void)
+extern __inline void lock_kernel(void)
{
unsigned long flags;
int proc = smp_processor_id();
save_flags(flags); cli(); /* need this on sparc? */
- while(ldstub(&kernel_lock)) {
+ while(ldstub(&kernel_flag)) {
if(proc == active_kernel_processor)
break;
- if(test_bit(proc, (unsigned long *)&smp_invalidate_needed))
- if(clear_bit(proc, (unsigned long *)&smp_invalidate_needed))
- local_invalidate();
+ do {
+#ifdef __SMP_PROF__
+ smp_spins[smp_processor_id()]++;
+#endif
+ barrier();
+ } while(kernel_flag); /* Don't lock the bus more than we have to. */
}
active_kernel_processor = proc;
kernel_counter++;
}
/* I want out... */
-extern _inline_ void unlock_kernel(void)
+extern __inline void unlock_kernel(void)
{
unsigned long flags;
save_flags(flags); cli(); /* need this on sparc? */
if(kernel_counter == 0)
panic("Bogus kernel counter.\n");
+
if(!--kernel_counter) {
active_kernel_processor = NO_PROC_ID;
- kernel_lock = KLOCK_CLEAR;
+ kernel_flag = KLOCK_CLEAR;
}
- restore_flag(flags);
+ restore_flags(flags);
}
#endif /* !(__SPARC_SMPLOCK_H) */
-/* $Id: socket.h,v 1.5 1995/11/26 01:32:36 davem Exp $ */
+/* $Id: socket.h,v 1.6 1996/04/04 12:51:26 davem Exp $ */
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp */
+#include <asm/sockios.h>
/* For setsockoptions(2) */
#define SOL_SOCKET 0xffff
--- /dev/null
+#ifndef _ASM_SPARC_SOCKIOS_H
+#define _ASM_SPARC_SOCKIOS_H
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif /* !(_ASM_SPARC_SOCKIOS_H) */
+
-/* $Id: solerrno.h,v 1.3 1995/11/25 02:32:51 davem Exp $
- * solerrno.h: Solaris error return codes for compatibility.
+/* $Id: solerrno.h,v 1.4 1996/03/23 02:40:09 davem Exp $
+ * solerrno.h: Solaris error return codes for compatibility.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#define SOL_EPIPE 32 /* Call a plumber */
#define SOL_EDOM 33 /* Argument was out of fct domain */
#define SOL_ERANGE 34 /* Could not represent math result */
-#define SOL_ENOMSG 35 /* Message of req type doesn't exist */
+#define SOL_ENOMSG 35 /* Message of req type doesn't exist */
#define SOL_EIDRM 36 /* Identifier has been removed */
#define SOL_ECHRNG 37 /* Req channel number out of range */
#define SOL_EL2NSYNC 38 /* Could not sync at run level 2 */
#define SOL_ETOOMANYREFS 144 /* Reference limit exceeded */
#define SOL_ETIMEDOUT 145 /* Timed out connection */
#define SOL_ECONNREFUSED 146 /* Connection refused by remote host*/
-#define SOL_EHOSTDOWN 147 /* Remote host in up in flames */
+#define SOL_EHOSTDOWN 147 /* Remote host is up in flames */
#define SOL_EHOSTUNREACH 148 /* Make a left at Easton Ave..... */
#define SOL_EWOULDBLOCK EAGAIN /* Just an alias */
#define SOL_EALREADY 149 /* Operation is already occurring */
-/* $Id: string.h,v 1.17 1995/12/10 06:25:48 davem Exp $
+/* $Id: string.h,v 1.19 1996/03/23 02:40:10 davem Exp $
* string.h: External definitions for optimized assembly string
* routines for the Linux Kernel.
*
#define __HAVE_ARCH_BCOPY
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_STRLEN
#endif /* !(__SPARC_STRING_H__) */
/* swift.h: Specific definitions for the _broken_ Swift SRMMU
- * MMU.
+ * MMU module.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
"r" (addr), "i" (ASI_M_DATAC_TAG));
}
+extern inline void swift_flush_dcache(void)
+{
+ unsigned long addr;
+
+ for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16)
+ swift_inv_data_tag(addr);
+}
+
+extern inline void swift_flush_icache(void)
+{
+ unsigned long addr;
+
+ for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16)
+ swift_inv_insn_tag(addr);
+}
+
+extern inline void swift_idflash_clear(void)
+{
+ unsigned long addr;
+
+ for(addr = 0; addr < (PAGE_SIZE << 2); addr += 16) {
+ swift_inv_insn_tag(addr);
+ swift_inv_data_tag(addr);
+ }
+}
+
+/* Swift is so broken, it isn't even safe to use the following. */
extern inline void swift_flush_page(unsigned long page)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-/* $Id: system.h,v 1.24 1996/02/11 00:42:39 davem Exp $ */
+/* $Id: system.h,v 1.29 1996/04/03 02:17:52 davem Exp $ */
#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H
#include <linux/kernel.h>
#include <asm/segment.h>
+
+#ifdef __KERNEL__
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/psr.h>
+#endif
#define EMPTY_PGT (&empty_bad_page)
#define EMPTY_PGE (&empty_bad_page_table)
extern void flush_user_windows(void);
extern void synchronize_user_stack(void);
extern void sparc_switch_to(void *new_task);
-#define switch_to(p) do { \
+#ifndef __SMP__
+#define switch_to(prev, next) do { \
+ flush_user_windows(); \
+ switch_to_context(next); \
+ prev->tss.current_ds = active_ds; \
+ active_ds = next->tss.current_ds; \
+ if(last_task_used_math != next) \
+ next->tss.kregs->psr &= ~PSR_EF; \
+ sparc_switch_to(next); \
+ } while(0)
+#else
+
+extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ void *fpqueue, unsigned long *fpqdepth);
+
+#define switch_to(prev, next) do { \
+ cli(); \
+ if(prev->flags & PF_USEDFPU) { \
+ fpsave(&prev->tss.float_regs[0], &prev->tss.fsr, \
+ &prev->tss.fpqueue[0], &prev->tss.fpqdepth); \
+ prev->flags &= ~PF_USEDFPU; \
+ prev->tss.kregs->psr &= ~PSR_EF; \
+ } \
+ prev->lock_depth = syscall_count; \
+ kernel_counter += (next->lock_depth - prev->lock_depth); \
+ syscall_count = next->lock_depth; \
flush_user_windows(); \
- switch_to_context(p); \
- current->tss.current_ds = active_ds; \
- active_ds = p->tss.current_ds; \
- sparc_switch_to(p); \
+ switch_to_context(next); \
+ prev->tss.current_ds = active_ds; \
+ active_ds = next->tss.current_ds; \
+ sparc_switch_to(next); \
+ sti(); \
} while(0)
+#endif
/* Changing the IRQ level on the Sparc. */
extern inline void setipl(int __new_ipl)
--- /dev/null
+#ifndef _SPARC_TERMBITS_H
+#define _SPARC_TERMBITS_H
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned long tcflag_t;
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#define NCCS 17
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VEOL 5
+#define VEOL2 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VDSUSP 11 /* SunOS POSIX nicety I do believe... */
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VMIN VEOF
+#define VTIME VEOL
+
+/* c_iflag bits */
+#define IGNBRK 0x00000001
+#define BRKINT 0x00000002
+#define IGNPAR 0x00000004
+#define PARMRK 0x00000008
+#define INPCK 0x00000010
+#define ISTRIP 0x00000020
+#define INLCR 0x00000040
+#define IGNCR 0x00000080
+#define ICRNL 0x00000100
+#define IUCLC 0x00000200
+#define IXON 0x00000400
+#define IXANY 0x00000800
+#define IXOFF 0x00001000
+#define IMAXBEL 0x00002000
+
+/* c_oflag bits */
+#define OPOST 0x00000001
+#define OLCUC 0x00000002
+#define ONLCR 0x00000004
+#define OCRNL 0x00000008
+#define ONOCR 0x00000010
+#define ONLRET 0x00000020
+#define OFILL 0x00000040
+#define OFDEL 0x00000080
+#define NLDLY 0x00000100
+#define NL0 0x00000000
+#define NL1 0x00000100
+#define CRDLY 0x00000600
+#define CR0 0x00000000
+#define CR1 0x00000200
+#define CR2 0x00000400
+#define CR3 0x00000600
+#define TABDLY 0x00001800
+#define TAB0 0x00000000
+#define TAB1 0x00000800
+#define TAB2 0x00001000
+#define TAB3 0x00001800
+#define XTABS 0x00001800
+#define BSDLY 0x00002000
+#define BS0 0x00000000
+#define BS1 0x00002000
+#define VTDLY 0x00004000
+#define VT0 0x00000000
+#define VT1 0x00004000
+#define FFDLY 0x00008000
+#define FF0 0x00000000
+#define FF1 0x00008000
+#define PAGEOUT 0x00010000 /* SUNOS specific */
+#define WRAP 0x00020000 /* SUNOS specific */
+
+/* c_cflag bit meaning */
+#define CBAUD 0x0000000f
+#define B0 0x00000000 /* hang up */
+#define B50 0x00000001
+#define B75 0x00000002
+#define B110 0x00000003
+#define B134 0x00000004
+#define B150 0x00000005
+#define B200 0x00000006
+#define B300 0x00000007
+#define B600 0x00000008
+#define B1200 0x00000009
+#define B1800 0x0000000a
+#define B2400 0x0000000b
+#define B4800 0x0000000c
+#define B9600 0x0000000d
+#define B19200 0x0000000e
+#define B38400 0x0000000f
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0x00000030
+#define CS5 0x00000000
+#define CS6 0x00000010
+#define CS7 0x00000020
+#define CS8 0x00000030
+#define CSTOPB 0x00000040
+#define CREAD 0x00000080
+#define PARENB 0x00000100
+#define PARODD 0x00000200
+#define HUPCL 0x00000400
+#define CLOCAL 0x00000800
+/* We'll never see these speeds with the Zilogs' but for completeness... */
+#define CBAUDEX 0x00010000
+#define B57600 0x00010001
+#define B115200 0x00010002
+#define B230400 0x00010003
+#define CIBAUD 0x000f0000 /* input baud rate (not used) */
+#define CRTSCTS 0x80000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0x00000001
+#define ICANON 0x00000002
+#define XCASE 0x00000004
+#define ECHO 0x00000008
+#define ECHOE 0x00000010
+#define ECHOK 0x00000020
+#define ECHONL 0x00000040
+#define NOFLSH 0x00000080
+#define TOSTOP 0x00000100
+#define ECHOCTL 0x00000200
+#define ECHOPRT 0x00000400
+#define ECHOKE 0x00000800
+#define DEFECHO 0x00001000 /* SUNOS thing, what is it? */
+#define FLUSHO 0x00002000
+#define PENDIN 0x00004000
+#define IEXTEN 0x00008000
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* !(_SPARC_TERMBITS_H) */
-/* $Id: termios.h,v 1.11 1996/02/10 04:31:03 davem Exp $ */
+/* $Id: termios.h,v 1.13 1996/04/04 12:51:30 davem Exp $ */
#ifndef _SPARC_TERMIOS_H
#define _SPARC_TERMIOS_H
-#include <linux/types.h>
-
-#include <asm/ioctl.h>
+#include <asm/ioctls.h>
+#include <asm/termbits.h>
struct sgttyb {
char sg_ispeed;
int st_columns; /* Columns on the terminal */
};
-/* Big T */
-#define TCGETA _IOR('T', 1, struct termio)
-#define TCSETA _IOW('T', 2, struct termio)
-#define TCSETAW _IOW('T', 3, struct termio)
-#define TCSETAF _IOW('T', 4, struct termio)
-#define TCSBRK _IO('T', 5)
-#define TCXONC _IO('T', 6)
-#define TCFLSH _IO('T', 7)
-#define TCGETS _IOR('T', 8, struct termios)
-#define TCSETS _IOW('T', 9, struct termios)
-#define TCSETSW _IOW('T', 10, struct termios)
-#define TCSETSF _IOW('T', 11, struct termios)
-
-/* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it
- * someday. This is completely bogus, I know...
- */
-#define TCGETSTAT _IO('T', 200) /* Rutgers specific */
-#define TCSETSTAT _IO('T', 201) /* Rutgers specific */
-
-/* Little t */
-#define TIOCGETD _IOR('t', 0, int)
-#define TIOCSETD _IOW('t', 1, int)
-#define TIOCHPCL _IO('t', 2) /* SunOS Specific */
-#define TIOCMODG _IOR('t', 3, int) /* SunOS Specific */
-#define TIOCMODS _IOW('t', 4, int) /* SunOS Specific */
-#define TIOCGETP _IOR('t', 8, struct sgttyb) /* SunOS Specific */
-#define TIOCSETP _IOW('t', 9, struct sgttyb) /* SunOS Specific */
-#define TIOCSETN _IOW('t', 10, struct sgttyb) /* SunOS Specific */
-#define TIOCEXCL _IO('t', 13)
-#define TIOCNXCL _IO('t', 14)
-#define TIOCFLUSH _IOW('t', 16, int) /* SunOS Specific */
-#define TIOCSETC _IOW('t', 17, struct tchars) /* SunOS Specific */
-#define TIOCGETC _IOR('t', 18, struct tchars) /* SunOS Specific */
-#define TIOCTCNTL _IOW('t', 32, int) /* SunOS Specific */
-#define TIOCSIGNAL _IOW('t', 33, int) /* SunOS Specific */
-#define TIOCSETX _IOW('t', 34, int) /* SunOS Specific */
-#define TIOCGETX _IOR('t', 35, int) /* SunOS Specific */
-#define TIOCCONS _IO('t', 36)
-#define TIOCSSIZE _IOW('t', 37, struct sunos_ttysize) /* SunOS Specific */
-#define TIOCGSIZE _IOR('t', 38, struct sunos_ttysize) /* SunOS Specific */
-#define TIOCGSOFTCAR _IOR('t', 100, int)
-#define TIOCSSOFTCAR _IOW('t', 101, int)
-#define TIOCUCNTL _IOW('t', 102, int) /* SunOS Specific */
-#define TIOCSWINSZ _IOW('t', 103, struct winsize)
-#define TIOCGWINSZ _IOR('t', 104, struct winsize)
-#define TIOCREMOTE _IOW('t', 105, int) /* SunOS Specific */
-#define TIOCMGET _IOR('t', 106, int)
-#define TIOCMBIC _IOW('t', 107, int)
-#define TIOCMBIS _IOW('t', 108, int)
-#define TIOCMSET _IOW('t', 109, int)
-#define TIOCSTART _IO('t', 110) /* SunOS Specific */
-#define TIOCSTOP _IO('t', 111) /* SunOS Specific */
-#define TIOCPKT _IOW('t', 112, int)
-#define TIOCNOTTY _IO('t', 113)
-#define TIOCSTI _IOW('t', 114, char)
-#define TIOCOUTQ _IOR('t', 115, int)
-#define TIOCGLTC _IOR('t', 116, struct ltchars) /* SunOS Specific */
-#define TIOCSLTC _IOW('t', 117, struct ltchars) /* SunOS Specific */
-/* 118 is the non-posix setpgrp tty ioctl */
-/* 119 is the non-posix getpgrp tty ioctl */
-#define TIOCCDTR _IO('t', 120) /* SunOS Specific */
-#define TIOCSDTR _IO('t', 121) /* SunOS Specific */
-#define TIOCCBRK _IO('t', 122) /* SunOS Specific */
-#define TIOCSBRK _IO('t', 123) /* SunOS Specific */
-#define TIOCLGET _IOW('t', 124, int) /* SunOS Specific */
-#define TIOCLSET _IOW('t', 125, int) /* SunOS Specific */
-#define TIOCLBIC _IOW('t', 126, int) /* SunOS Specific */
-#define TIOCLBIS _IOW('t', 127, int) /* SunOS Specific */
-#define TIOCISPACE _IOR('t', 128, int) /* SunOS Specific */
-#define TIOCISIZE _IOR('t', 129, int) /* SunOS Specific */
-#define TIOCSPGRP _IOW('t', 130, int)
-#define TIOCGPGRP _IOR('t', 131, int)
-#define TIOCSCTTY _IO('t', 132)
-
-/* Little f */
-#define FIOCLEX _IO('f', 1)
-#define FIONCLEX _IO('f', 2)
-#define FIOASYNC _IOW('f', 125, int)
-#define FIONBIO _IOW('f', 126, int)
-#define FIONREAD _IOR('f', 127, int)
-#define TIOCINQ FIONREAD
-
-/* Linux specific, no SunOS equivalent. */
-#define TIOCLINUX 0x541C
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TCSBRKP 0x5425
-#define TIOCTTYGSTRUCT 0x5426
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
unsigned short ws_ypixel;
};
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-#define NCCS 17
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VEOL 5
-#define VEOL2 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VDSUSP 11 /* SunOS POSIX nicety I do believe... */
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VMIN VEOF
-#define VTIME VEOL
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
reprint=^R discard=^U werase=^W lnext=^V
*/
#define INIT_C_CC "\003\034\177\025\001\000\000\000\021\023\032\031\022\025\027\026"
-#endif
-
-/* c_iflag bits */
-#define IGNBRK 0x00000001
-#define BRKINT 0x00000002
-#define IGNPAR 0x00000004
-#define PARMRK 0x00000008
-#define INPCK 0x00000010
-#define ISTRIP 0x00000020
-#define INLCR 0x00000040
-#define IGNCR 0x00000080
-#define ICRNL 0x00000100
-#define IUCLC 0x00000200
-#define IXON 0x00000400
-#define IXANY 0x00000800
-#define IXOFF 0x00001000
-#define IMAXBEL 0x00002000
-
-/* c_oflag bits */
-#define OPOST 0x00000001
-#define OLCUC 0x00000002
-#define ONLCR 0x00000004
-#define OCRNL 0x00000008
-#define ONOCR 0x00000010
-#define ONLRET 0x00000020
-#define OFILL 0x00000040
-#define OFDEL 0x00000080
-#define NLDLY 0x00000100
-#define NL0 0x00000000
-#define NL1 0x00000100
-#define CRDLY 0x00000600
-#define CR0 0x00000000
-#define CR1 0x00000200
-#define CR2 0x00000400
-#define CR3 0x00000600
-#define TABDLY 0x00001800
-#define TAB0 0x00000000
-#define TAB1 0x00000800
-#define TAB2 0x00001000
-#define TAB3 0x00001800
-#define XTABS 0x00001800
-#define BSDLY 0x00002000
-#define BS0 0x00000000
-#define BS1 0x00002000
-#define VTDLY 0x00004000
-#define VT0 0x00000000
-#define VT1 0x00004000
-#define FFDLY 0x00008000
-#define FF0 0x00000000
-#define FF1 0x00008000
-#define PAGEOUT 0x00010000 /* SUNOS specific */
-#define WRAP 0x00020000 /* SUNOS specific */
-
-/* c_cflag bit meaning */
-#define CBAUD 0x0000000f
-#define B0 0x00000000 /* hang up */
-#define B50 0x00000001
-#define B75 0x00000002
-#define B110 0x00000003
-#define B134 0x00000004
-#define B150 0x00000005
-#define B200 0x00000006
-#define B300 0x00000007
-#define B600 0x00000008
-#define B1200 0x00000009
-#define B1800 0x0000000a
-#define B2400 0x0000000b
-#define B4800 0x0000000c
-#define B9600 0x0000000d
-#define B19200 0x0000000e
-#define B38400 0x0000000f
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0x00000030
-#define CS5 0x00000000
-#define CS6 0x00000010
-#define CS7 0x00000020
-#define CS8 0x00000030
-#define CSTOPB 0x00000040
-#define CREAD 0x00000080
-#define PARENB 0x00000100
-#define PARODD 0x00000200
-#define HUPCL 0x00000400
-#define CLOCAL 0x00000800
-/* We'll never see these speeds with the Zilogs' but for completeness... */
-#define CBAUDEX 0x00010000
-#define B57600 0x00010001
-#define B115200 0x00010002
-#define B230400 0x00010003
-#define CIBAUD 0x000f0000 /* input baud rate (not used) */
-#define CRTSCTS 0x80000000 /* flow control */
-
-/* c_lflag bits */
-#define ISIG 0x00000001
-#define ICANON 0x00000002
-#define XCASE 0x00000004
-#define ECHO 0x00000008
-#define ECHOE 0x00000010
-#define ECHOK 0x00000020
-#define ECHONL 0x00000040
-#define NOFLSH 0x00000080
-#define TOSTOP 0x00000100
-#define ECHOCTL 0x00000200
-#define ECHOPRT 0x00000400
-#define ECHOKE 0x00000800
-#define DEFECHO 0x00001000 /* SUNOS thing, what is it? */
-#define FLUSHO 0x00002000
-#define PENDIN 0x00004000
-#define IEXTEN 0x00008000
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-/* line disciplines */
-#define N_TTY 0
-#define N_SLIP 1
-#define N_MOUSE 2
-#define N_PPP 3
-
-#ifdef __KERNEL__
/*
* Translate a "termio" structure into a "termios". Ugh.
-/* $Id: timer.h,v 1.11 1996/01/03 03:53:23 davem Exp $
+/* $Id: timer.h,v 1.12 1996/03/24 20:21:29 davem Exp $
* timer.h: Definitions for the timer chips on the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define SUN4C_TIMER_PHYSADDR 0xf3000000
-volatile struct sun4c_timer_info *sun4c_timers;
-
/* A sun4m has two blocks of registers which are probably of the same
* structure. LSI Logic's L64851 is told to _decrement_ from the limit
* value. Aurora behaves similarly but its limit value is compacted in
/* First, hardware traps. */
#define SP_TRAP_TFLT 0x1 /* Text fault */
#define SP_TRAP_II 0x2 /* Illegal Instruction */
-#define SP_TRAP_PI 0x3 /* Privileged Instruction */
+#define SP_TRAP_PI 0x3 /* Privileged Instruction */
#define SP_TRAP_FPD 0x4 /* Floating Point Disabled */
#define SP_TRAP_WOVF 0x5 /* Window Overflow */
#define SP_TRAP_WUNF 0x6 /* Window Underflow */
-/* $Id: tsunami.h,v 1.3 1996/01/10 21:00:12 davem Exp $
+/* $Id: tsunami.h,v 1.4 1996/04/04 12:51:32 davem Exp $
* tsunami.h: Module specific definitions for Tsunami V8 Sparcs
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#define TSUNAMI_NF 0x00000002
#define TSUNAMI_ME 0x00000001
-extern inline void tsunami_invalidate_icache(void)
+extern inline void tsunami_flush_icache(void)
{
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
"i" (ASI_M_IC_FLCLEAR) : "memory");
}
-extern inline void tsunami_invalidate_dcache(void)
+extern inline void tsunami_flush_dcache(void)
{
__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
"i" (ASI_M_DC_FLCLEAR) : "memory");
-/* $Id: types.h,v 1.8 1995/11/25 02:33:08 davem Exp $ */
+/* $Id: types.h,v 1.9 1996/04/04 12:51:34 davem Exp $ */
#ifndef _SPARC_TYPES_H
#define _SPARC_TYPES_H
/*
* _xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space <-- Linus sez this
+ * header files exported to user space.
*/
-#ifndef _SIZE_T
-#define _SIZE_T
-#ifdef __svr4__
-typedef unsigned int size_t; /* solaris sucks */
-#else
-typedef long unsigned int size_t; /* sunos is much better */
-#endif /* !(__svr4__) */
-#endif
-
-#ifndef _SSIZE_T
-#define _SSIZE_T
-typedef int ssize_t;
-#endif
-
-#ifndef _PTRDIFF_T
-#define _PTRDIFF_T
-typedef long int ptrdiff_t;
-#endif
-
-#ifndef _TIME_T
-#define _TIME_T
-typedef long time_t;
-#endif
-
-#ifndef _CLOCK_T
-#define _CLOCK_T
-typedef long clock_t;
-#endif
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid a name clashes.
+ */
-typedef int pid_t;
-typedef unsigned short uid_t;
-typedef unsigned short gid_t;
-typedef unsigned short dev_t;
-typedef unsigned long ino_t;
-typedef unsigned short mode_t;
typedef unsigned short umode_t;
-typedef short nlink_t;
-typedef long daddr_t;
-typedef long off_t;
typedef signed char __s8;
typedef unsigned char __u8;
#endif /* __KERNEL__ */
-#undef __FD_SET
-static __inline__ void __FD_SET(unsigned long fd, fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
-}
-
-#undef __FD_CLR
-static __inline__ void __FD_CLR(unsigned long fd, fd_set *fdsetp)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
-}
-
-#undef __FD_ISSET
-static __inline__ int __FD_ISSET(unsigned long fd, fd_set *p)
-{
- unsigned long _tmp = fd / __NFDBITS;
- unsigned long _rem = fd % __NFDBITS;
- return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant cases (4 or 8 longs,
- * for 256 and 512-bit fd_sets respectively)
- */
-#undef __FD_ZERO
-static __inline__ void __FD_ZERO(fd_set *p)
-{
- unsigned int *tmp = p->fds_bits;
- int i;
-
- if (__builtin_constant_p(__FDSET_INTS)) {
- switch (__FDSET_INTS) {
- case 8:
- tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
- tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
- return;
- case 4:
- tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
- return;
- }
- }
- i = __FDSET_INTS;
- while (i) {
- i--;
- *tmp = 0;
- tmp++;
- }
-}
-
#endif /* defined(_SPARC_TYPES_H) */
-/* $Id: unistd.h,v 1.16 1995/12/29 23:14:26 miguel Exp $ */
+/* $Id: unistd.h,v 1.20 1996/04/20 07:54:39 davem Exp $ */
#ifndef _SPARC_UNISTD_H
#define _SPARC_UNISTD_H
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
- * SunOS compatibility based upon preliminary work which is:
+ * SunOS compatibility based upon preliminary work which is:
*
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
#define __NR_munlock 238
#define __NR_mlockall 239
#define __NR_munlockall 240
+#define __NR_sched_setparam 241
+#define __NR_sched_getparam 242
+#define __NR_sched_setscheduler 243
+#define __NR_sched_getscheduler 244
+#define __NR_sched_yield 245
+#define __NR_sched_get_priority_max 246
+#define __NR_sched_get_priority_min 247
+#define __NR_sched_rr_get_interval 248
+#define __NR_nanosleep 249
+#define __NR_mremap 250
+#define __NR__sysctl 251
+#define __NR_getsid 252
+#define __NR_fdatasync 253
#define _syscall0(type,name) \
type name(void) \
"1:\n\t" \
: "=r" (__res)\
: "0" (__NR_##name) \
- : "g1"); \
+ : "g1", "o0"); \
if (__res >= 0) \
return (type) __res; \
errno = -__res; \
errno = -__res; \
return -1; \
}
-
+#
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
-long __res; \
-__asm__ volatile ("or %%g0, %0, %%g1\n\t" \
- "or %%g0, %1, %%o0\n\t" \
+ long __res; \
+\
+__asm__ volatile ("or %%g0, %1, %%o0\n\t" \
"or %%g0, %2, %%o1\n\t" \
"or %%g0, %3, %%o2\n\t" \
"or %%g0, %4, %%o3\n\t" \
"or %%g0, %5, %%o4\n\t" \
+ "or %%g0, %6, %%g1\n\t" \
"t 0x10\n\t" \
+ "bcc 1f\n\t" \
"or %%g0, %%o0, %0\n\t" \
- : "=r" (__res), "=r" ((long)(arg1)), "=r" ((long)(arg2)), \
- "=r" ((long)(arg3)), "=r" ((long)(arg4)), "=r" ((long)(arg5)) \
- : "0" (__NR_##name),"1" ((long)(arg1)),"2" ((long)(arg2)), \
- "3" ((long)(arg3)),"4" ((long)(arg4)),"5" ((long)(arg5)) \
+ "sub %%g0, %%o0, %0\n\t" \
+ "1:\n\t" \
+ : "=r" (__res) \
+ : "0" ((long)(arg1)),"1" ((long)(arg2)), \
+ "2" ((long)(arg3)),"3" ((long)(arg4)),"4" ((long)(arg5)), \
+ "i" (__NR_##name) \
: "g1", "o0", "o1", "o2", "o3", "o4"); \
if (__res>=0) \
return (type) __res; \
* some others too.
*/
#define __NR__exit __NR_exit
-/* static inline _syscall0(int,idle) */
+static inline _syscall0(int,idle)
static inline _syscall0(int,fork)
static inline _syscall2(int,clone,unsigned long,flags,char *,ksp)
static inline _syscall0(int,pause)
-/* static inline _syscall0(int,setup) */
+static inline _syscall0(int,setup)
static inline _syscall0(int,sync)
static inline _syscall0(pid_t,setsid)
static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
static inline _syscall1(int,_exit,int,exitcode)
static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
-extern void sys_idle(void);
-static inline void idle(void)
-{
- sys_idle();
-}
-
-extern int sys_setup(void);
-static inline int setup(void)
-{
- return sys_setup();
-}
-
-extern int sys_waitpid(int, int *, int);
static inline pid_t wait(int * wait_stat)
{
- long retval;
- retval = waitpid(-1,wait_stat,0);
- return retval;
+ return waitpid(-1,wait_stat,0);
}
/*
-/* $Id: vac-ops.h,v 1.9 1995/12/17 09:02:00 davem Exp $ */
+/* $Id: vac-ops.h,v 1.10 1996/04/04 12:51:36 davem Exp $ */
#ifndef _SPARC_VAC_OPS_H
#define _SPARC_VAC_OPS_H
/* The indexing of cache lines creates a problem. Because the line
* field of a virtual address extends past the page offset within
* the virtual address it is possible to have what are called
- * 'bad aliases' which will create inconsistencies. So we must make
+ * 'bad aliases' which will create inconsistencies. So we must make
* sure that within a context that if a physical page is mapped
* more than once, that 'extra' line bits are the same. If this is
* not the case, and thus is a 'bad alias' we must turn off the
sun4c_vacinfo.on = 0;
}
-extern void sun4c_flush_context(void);
-
#endif /* !(_SPARC_VAC_OPS_H) */
-/* $Id: vaddrs.h,v 1.17 1996/01/10 21:00:16 davem Exp $ */
+/* $Id: vaddrs.h,v 1.19 1996/03/26 06:51:58 miguel Exp $ */
#ifndef _SPARC_VADDRS_H
#define _SPARC_VADDRS_H
* a pointer and then the value in the assembly code
*/
#define IOBASE_VADDR 0xfe000000 /* Base for mapping pages */
-#define IOBASE_LEN 0x00100000 /* Length of the IO area */
-#define IOBASE_END 0xfe100000
+#define IOBASE_LEN 0x00200000 /* Length of the IO area */
+#define IOBASE_END 0xfe200000
#define DVMA_VADDR 0xfff00000 /* Base area of the DVMA on suns */
#define DVMA_LEN 0x00040000 /* Size of the DVMA address space */
#define DVMA_END 0xfff40000
-/* IOMMU Mapping area, must be on a 16MB boundary! Note this
+/* IOMMU Mapping area, must be on a 16MB boundary! Note this
* doesn't count the DVMA areas, the prom lives between the
* iommu mapping area (for scsi transfer buffers) and the
* dvma upper range (for lance packet ring buffers).
#define SUN4C_LOCK_END 0xffc00000
/* On sun4m machines we need per-cpu virtual areas */
-#define PERCPU_VADDR 0xff000000 /* Base for per-cpu virtual mappings */
+#define PERCPU_VADDR 0xffc00000 /* Base for per-cpu virtual mappings */
#define PERCPU_ENTSIZE 0x00100000
#define PERCPU_LEN ((PERCPU_ENTSIZE*NCPUS))
/* per-cpu offsets */
#define PERCPU_TBR_OFFSET 0x00000 /* %tbr, mainly used for identification. */
#define PERCPU_KSTACK_OFFSET 0x01000 /* Beginning of kernel stack for this cpu */
-#define PERCPU_MBOX_OFFSET 0x02000 /* Prom SMP Mailbox */
-#define PERCPU_CPUID_OFFSET 0x03000 /* Per-cpu ID number. */
-#define PERCPU_ISALIVE_OFFSET 0x03004 /* Has CPU been initted yet? */
-#define PERCPU_ISIDLING_OFFSET 0x03008 /* Is CPU in idle loop spinning? */
+#define PERCPU_MBOX_OFFSET 0x03000 /* Prom SMP Mailbox */
+#define PERCPU_CPUID_OFFSET 0x04000 /* Per-cpu ID number. */
+#define PERCPU_ISALIVE_OFFSET 0x04004 /* Has CPU been initted yet? */
+#define PERCPU_ISIDLING_OFFSET 0x04008 /* Is CPU in idle loop spinning? */
#endif /* !(_SPARC_VADDRS_H) */
-/* $Id: viking.h,v 1.6 1996/03/01 07:21:05 davem Exp $
- * viking.h: Defines specific to the TI Viking MBUS module.
+/* $Id: viking.h,v 1.12 1996/04/20 10:15:46 davem Exp $
+ * viking.h: Defines specific to the GNU/Viking MBUS module.
* This is SRMMU stuff.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
#include <asm/mxcc.h>
-/* Bits in the SRMMU control register for TI Viking modules.
- *
- * -------------------------------------------------------------
- * |implvers| RSV |DP|RSV|TC|AC|SP|BM|PC|MBM|SB|IC|DC|RSV|NF|ME|
- * -------------------------------------------------------------
- * 31 24 23-20 19 18 17 16 15 14 13 12 11 10 9 8-2 1 0
- *
- * DP: Data Prefetcher Enable -- 0 = DP is off, 1 = DP is on
- * TC: Tablewalk Cacheable -- 0 = Twalks are not cacheable
- * 1 = Twalks are cacheable
- * AC: Alternate Cacheable -- 0 = Direct physical accesses not cacheable
- * 1 = Direct physical accesses are cacheable
+/* Bits in the SRMMU control register for GNU/Viking modules.
+ *
+ * -----------------------------------------------------------
+ * |impl-vers| RSV |TC|AC|SP|BM|PC|MBM|SB|IC|DC|PSO|RSV|NF|ME|
+ * -----------------------------------------------------------
+ * 31 24 23-17 16 15 14 13 12 11 10 9 8 7 6-2 1 0
+ *
+ * TC: Tablewalk Cacheable -- 0 = Twalks are not cacheable in E-cache
+ * 1 = Twalks are cacheable in E-cache
+ *
+ * GNU/Viking will only cache tablewalks in the E-cache (mxcc) if present
+ * and never caches them internally (or so the docs state). Therefore
+ * for machines lacking an E-cache (ie. in MBUS mode) this bit must
+ * remain cleared.
+ *
+ * AC: Alternate Cacheable -- 0 = Passthru physical accesses not cacheable
+ * 1 = Passthru physical accesses cacheable
+ *
+ * This indicates whether accesses are cacheable when no cacheable bit
+ * is present in the pte when the processor is in boot-mode or the
+ * access does not need pte's for translation (ie. pass-thru ASI's).
+ * "Cacheable" is only referring to E-cache (if present) and not the
+ * on chip split I/D caches of the GNU/Viking.
+ *
* SP: SnooP Enable -- 0 = bus snooping off, 1 = bus snooping on
+ *
+ * This enables snooping on the GNU/Viking bus. This must be on
+ * for the hardware cache consistency mechanisms of the GNU/Viking
+ * to work at all. On non-mxcc GNU/Viking modules the split I/D
+ * caches will snoop regardless of whether they are enabled, this
+ * takes care of the case where the I or D or both caches are turned
+ * off yet still contain valid data. Note also that this bit does
+ * not affect GNU/Viking store-buffer snoops, those happen if the
+ * store-buffer is enabled no matter what.
+ *
* BM: Boot Mode -- 0 = not in boot mode, 1 = in boot mode
+ *
+ * This indicates whether the GNU/Viking is in boot-mode or not,
+ * if it is then all instruction fetch physical addresses are
+ * computed as 0xff0000000 + low 28 bits of requested address.
+ * GNU/Viking boot-mode does not affect data accesses. Also,
+ * in boot mode instruction accesses bypass the split on chip I/D
+ * caches, they may be cached by the GNU/MXCC if present and enabled.
+ *
* MBM: MBus Mode -- 0 = not in MBus mode, 1 = in MBus mode
+ *
+ * This indicates the GNU/Viking configuration present. If in
+ * MBUS mode, the GNU/Viking lacks a GNU/MXCC E-cache. If it is
+ * not then the GNU/Viking is on a module VBUS connected directly
+ * to a GNU/MXCC cache controller. The GNU/MXCC can be thus connected
+ * to either a GNU/MBUS (sun4m) or the packet-switched GNU/XBus (sun4d).
+ *
* SB: StoreBuffer enable -- 0 = store buffer off, 1 = store buffer on
+ *
+ * The GNU/Viking store buffer allows the chip to continue execution
+ * after a store even if the data cannot be placed in one of the
+ * caches during that cycle. If disabled, all store operations
+ * occur synchronously.
+ *
* IC: Instruction Cache -- 0 = off, 1 = on
* DC: Data Cache -- 0 = off, 1 = 0n
+ *
+ * These bits enable the on-cpu GNU/Viking split I/D caches. Note,
+ * as mentioned above, these caches will snoop the bus in GNU/MBUS
+ * configurations even when disabled to avoid data corruption.
+ *
* NF: No Fault -- 0 = faults generate traps, 1 = faults don't trap
* ME: MMU enable -- 0 = mmu not translating, 1 = mmu translating
*
*/
+#define VIKING_MMUENABLE 0x00000001
+#define VIKING_NOFAULT 0x00000002
+#define VIKING_PSO 0x00000080
#define VIKING_DCENABLE 0x00000100 /* Enable data cache */
#define VIKING_ICENABLE 0x00000200 /* Enable instruction cache */
#define VIKING_SBENABLE 0x00000400 /* Enable store buffer */
#define VIKING_MMODE 0x00000800 /* MBUS mode */
#define VIKING_PCENABLE 0x00001000 /* Enable parity checking */
-
-/* Boot mode, 0 at boot-time, 1 after prom initializes the MMU. */
#define VIKING_BMODE 0x00002000
#define VIKING_SPENABLE 0x00004000 /* Enable bus cache snooping */
-
-/* The deal with this AC bit is that if you are going to modify the
- * contents of physical ram using the MMU bypass, you had better set
- * this bit or things will get unsynchronized. This is only applicable
- * if an E-cache (ie. a PAC) is around and the Viking is not in MBUS mode.
- */
#define VIKING_ACENABLE 0x00008000 /* Enable alternate caching */
#define VIKING_TCENABLE 0x00010000 /* Enable table-walks to be cached */
-#define VIKING_DPENABLE 0x00040000 /* Enable the data prefetcher */
+
+/*
+ * GNU/Viking Breakpoint Action Register fields.
+ */
+#define VIKING_ACTION_MIX 0x00001000 /* Enable multiple instructions */
+
+/*
+ * GNU/Viking Cache Tags.
+ */
+#define VIKING_PTAG_VALID 0x01000000 /* Cache block is valid */
+#define VIKING_PTAG_DIRTY 0x00010000 /* Block has been modified */
+#define VIKING_PTAG_SHARED 0x00000100 /* Shared with some other cache */
extern inline void viking_flush_icache(void)
{
"i" (ASI_M_DC_FLCLEAR));
}
-/* MXCC stuff... */
-extern inline void viking_enable_mxcc(void)
+extern inline void viking_unlock_icache(void)
+{
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (0x80000000), "i" (ASI_M_IC_FLCLEAR));
+}
+
+extern inline void viking_unlock_dcache(void)
+{
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (0x80000000), "i" (ASI_M_DC_FLCLEAR));
+}
+
+extern inline void viking_set_bpreg(unsigned long regval)
{
+ __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
+ "r" (regval),
+ "i" (ASI_M_ACTION));
}
-extern inline void viking_mxcc_scrape(void)
+extern inline unsigned long viking_get_bpreg(void)
{
- /* David, what did you learn in school today? */
+ unsigned long regval;
+
+ __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
+ "=r" (regval) :
+ "i" (ASI_M_ACTION));
+ return regval;
+}
+extern inline void viking_get_dcache_ptag(int set, int block,
+ unsigned long *data)
+{
+ unsigned long ptag = ((set & 0x7f) << 5) | ((block & 0x3) << 26) |
+ 0x80000000;
+ unsigned long info, page;
+ __asm__ __volatile__ ("ldda [%2] %3, %%g2\n\t"
+ "or %%g0, %%g2, %0\n\t"
+ "or %%g0, %%g3, %1\n\t" :
+ "=r" (info), "=r" (page) :
+ "r" (ptag), "i" (ASI_M_DATAC_TAG) :
+ "g2", "g3");
+ data[0] = info;
+ data[1] = page;
}
#endif /* !(_SPARC_VIKING_H) */
/* Set input device byte stream format (any of VUID_{NATIVE,FIRM_EVENT}) */
#define VUIDSFORMAT _IOW('v', 1, int)
/* Retrieve input device byte stream format */
-#define VUIDGFORMAT _IOR(v, 2, int)
+#define VUIDGFORMAT _IOR('v', 2, int)
/* Possible tag values */
/* mouse buttons: */
-/* $Id: winmacro.h,v 1.13 1995/12/29 21:48:04 davem Exp $
+/* $Id: winmacro.h,v 1.16 1996/03/27 02:43:18 davem Exp $
* winmacro.h: Window loading-unloading macros.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
add %scratch, 1, %scratch; \
st %scratch, [%cur_reg + THREAD_W_SAVED];
-/* For now on a uniprocessor this is ok. */
#ifdef __SMP__
-#error SMP not yet
#define LOAD_CURRENT(dest_reg, idreg) \
rd %tbr, %idreg; \
- srl %idreg, 24, %idreg; \
+ srl %idreg, 10, %idreg; \
+ and %idreg, 0xc, %idreg; \
sethi %hi(C_LABEL(current_set)), %dest_reg; \
or %dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
- add %dest_reg, %idreg, %dest_reg;
+ add %dest_reg, %idreg, %dest_reg; \
+ ld [%dest_reg], %dest_reg;
#else
#define LOAD_CURRENT(dest_reg, idreg) \
sethi %hi(C_LABEL(current_set)), %dest_reg; \
#ifdef __i386__
#define SEGMENT_SIZE 1024
#else
+#ifndef SEGMENT_SIZE
#define SEGMENT_SIZE PAGE_SIZE
#endif
#endif
+#endif
#define _N_SEGMENT_ROUND(x) (((x) + SEGMENT_SIZE - 1) & ~(SEGMENT_SIZE - 1))
/* Kludge to use the same number for both char and block major numbers */
#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
-#ifndef MD_PERSONALITY
-
#define DEVICE_NAME "Multiple devices driver"
#define DEVICE_REQUEST do_md_request
#define DEVICE_NR(device) (MINOR(device))
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
-#endif
-
#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
#define DEVICE_NAME "scsitape"
#endif /* MAJOR_NR == whatever */
-#if ((MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) && !defined(MD_DRIVER))
+#if ((MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER))
#ifndef CURRENT
#define CURRENT (blk_dev[MAJOR_NR].current_request)
#endif /* DEVICE_TIMEOUT */
-#ifndef MD_PERSONALITY
static void (DEVICE_REQUEST)(void);
-#endif
#ifdef DEVICE_INTR
#define CLEAR_INTR SET_INTR(NULL)
/* end_request() - SCSI devices have their own version */
/* - IDE drivers have their own copy too */
-#if ! SCSI_MAJOR(MAJOR_NR) || (defined(MD_DRIVER) && !defined(MD_PERSONALITY))
+#if ! SCSI_MAJOR(MAJOR_NR)
#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
#ifdef IDE_DRIVER
void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
struct request *req = hwgroup->rq;
-#elif defined(MD_DRIVER)
-static void end_request (int uptodate, struct request * req) {
#else
static void end_request(int uptodate) {
struct request *req = CURRENT;
#ifdef IDE_DRIVER
blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
hwgroup->rq = NULL;
-#elif !defined(MD_DRIVER)
+#else
DEVICE_OFF(req->rq_dev);
CURRENT = req->next;
#endif /* IDE_DRIVER */
#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
#endif /* ! SCSI_MAJOR(MAJOR_NR) */
-#ifdef MD_PERSONALITY
-extern inline void end_redirect (struct request *req)
-{
- struct buffer_head * bh;
-
- req->errors = 0;
-
- if ((bh = req->bh) != NULL)
- {
- req->bh = bh->b_reqnext;
- bh->b_reqnext = NULL;
-
- if ((bh = req->bh) != NULL)
- {
- req->sector += req->current_nr_sectors;
- req->current_nr_sectors = bh->b_size >> 9;
-
- if (req->nr_sectors < req->current_nr_sectors)
- {
- req->nr_sectors = req->current_nr_sectors;
- printk("end_redirect : buffer-list destroyed\n");
- }
-
- req->buffer = bh->b_data;
- return;
- }
- }
-}
-#endif /* MD_PERSONALITY */
-
#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
#endif /* _BLK_H */
extern struct wait_queue * wait_for_request;
extern void resetup_one_dev(struct gendisk *dev, int drive);
-/* md needs those functions to requeue requests */
-extern void add_request(struct blk_dev_struct * dev, struct request * req);
-extern struct request *get_md_request (int max_req, kdev_t dev);
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
extern int * blk_size[MAX_BLKDEV];
#define FD_LOCK 0x94 /* Fifo config lock */
#define FD_RSEEK_OUT 0x8f /* seek out (i.e. to lower tracks) */
#define FD_RSEEK_IN 0xcf /* seek in (i.e. to higher tracks) */
+
+/* the following commands are new in the 82078. They are not used in the
+ * floppy driver, except the first three. These commands may be useful for apps
+ * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at
+ * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */
+
#define FD_PARTID 0x18 /* part id ("extended" version cmd) */
#define FD_SAVE 0x2e /* save fdc regs for later restore */
+#define FD_DRIVESPEC 0x8e /* drive specification: Access to the
+ * 2 Mbps data transfer rate for tape
+ * drives */
+
+#define FD_RESTORE 0x4e /* later restore */
+#define FD_POWERDOWN 0x27 /* configure FDC's powersave features */
+#define FD_FORMAT_N_WRITE 0xef /* format and write in one go. */
+#define FD_OPTION 0x33 /* ISO format (which is a clean way to
+ * pack more sectors on a track) */
/* DMA commands */
#define DMA_READ 0x46
#define FDC_82072A 0x50 /* 82072A (on Sparcs) */
#define FDC_82077_ORIG 0x51 /* Original version of 82077AA, sans LOCK */
#define FDC_82077 0x52 /* 82077AA-1 */
-#define FDC_82077_UNKN 0x53 /* Unknown 82077 variant */
+#define FDC_82078_UNKN 0x5f /* Unknown 82078 variant */
#define FDC_82078 0x60 /* 44pin 82078 or 64pin 82078SL */
#define FDC_82078_1 0x61 /* 82078-1 (2Mbps fdc) */
#define FDC_S82078B 0x62 /* S82078B (first seen on Adaptec AVA-2825 VLB
/* First cache line: */
unsigned long b_blocknr; /* block number */
kdev_t b_dev; /* device (B_FREE = free) */
- kdev_t b_rdev; /* Real device */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
struct buffer_head * b_next; /* Hash queue list */
struct buffer_head * b_this_page; /* circular list of buffers in one page */
-/* $Id: isdn.h,v 1.2 1996/02/11 02:10:02 fritz Exp fritz $
+/* $Id: isdn.h,v 1.3 1996/04/20 16:54:58 fritz Exp $
*
* Main header for the Linux ISDN subsystem (linklevel).
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdn.h,v $
+ * Revision 1.3 1996/04/20 16:54:58 fritz
+ * Increased maximum number of channels.
+ * Added some flags for isdn_net to handle callback more reliable.
+ * Fixed delay-definitions to be more accurate.
+ * Misc. typos
+ *
* Revision 1.2 1996/02/11 02:10:02 fritz
* Changed IOCTL-names
* Added rx_netdev, st_netdev, first_skb, org_hcb, and org_hcu to
* the correspondent code in isdn.c
*/
-#define ISDN_MAX_DRIVERS 16
-#define ISDN_MAX_CHANNELS 16
+#define ISDN_MAX_DRIVERS 32
+#define ISDN_MAX_CHANNELS 64
#define ISDN_MINOR_B 0
#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1)
#define ISDN_MINOR_CTRL ISDN_MAX_CHANNELS
#define ISDN_MINOR_CTRLMAX (2*ISDN_MAX_CHANNELS-1)
#define ISDN_MINOR_PPP (2*ISDN_MAX_CHANNELS)
#define ISDN_MINOR_PPPMAX (3*ISDN_MAX_CHANNELS-1)
-#define ISDN_MINOR_STATUS 128
+#define ISDN_MINOR_STATUS 255
/* New ioctl-codes */
#define IIOCNETAIF _IO('I',1)
#define IIOCSETMAP _IO('I',18)
#define IIOCNETASL _IO('I',19)
#define IIOCNETDIL _IO('I',20)
+#define IIOCGETCPS _IO('I',21)
#define IIOCNETALN _IO('I',32)
#define IIOCNETDLN _IO('I',33)
#define ISDN_NET_ENCAP_IPTYP 2
#define ISDN_NET_ENCAP_CISCOHDLC 3
#define ISDN_NET_ENCAP_SYNCPPP 4
+#define ISDN_NET_ENCAP_UIHDLC 5
/* Facility which currently uses an ISDN-channel */
#define ISDN_USAGE_NONE 0
char slave[10]; /* Name of Slave for Bundling */
char eaz[256]; /* EAZ/MSN */
char drvid[25]; /* DriverId for Bindings */
- int secure; /* Flag: Secure */
- int callback; /* Flag: Callback */
int onhtime; /* Hangup-Timeout */
int charge; /* Charge-Units */
- int chargehup; /* Flag: Charge-Hangup */
int l2_proto; /* Layer-2 protocol */
int l3_proto; /* Layer-3 protocol */
int p_encap; /* Encapsulation */
- int ihup; /* Flag: Hangup-Timeout on incoming line */
int exclusive; /* Channel, if bound exclusive */
+ int dialmax; /* Dial Retry-Counter */
int slavedelay; /* Delay until slave starts up */
+ int cbdelay; /* Delay before Callback */
+ int chargehup; /* Flag: Charge-Hangup */
+ int ihup; /* Flag: Hangup-Timeout on incoming line */
+ int secure; /* Flag: Secure */
+ int callback; /* Flag: Callback */
+ int cbhup; /* Flag: Reject Call before Callback */
} isdn_net_ioctl_cfg;
#ifdef __KERNEL__
/* Timer-delays and scheduling-flags */
#define ISDN_TIMER_RES 3 /* Main Timer-Resolution */
-#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 (0.2 sec.) */
-#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 (1 sec.) */
+#define ISDN_TIMER_02SEC (HZ/(ISDN_TIMER_RES+1)/5) /* Slow-Timer1 .2 sec */
+#define ISDN_TIMER_1SEC (HZ/(ISDN_TIMER_RES+1)) /* Slow-Timer2 1 sec */
#define ISDN_TIMER_MODEMREAD 1
#define ISDN_TIMER_MODEMPLUS 2
#define ISDN_TIMER_MODEMRING 4
ISDN_TIMER_NETDIAL)
/* Timeout-Values for isdn_net_dial() */
-#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*ISDN_TIMER_RES))
-#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*ISDN_TIMER_RES))
+#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
+#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
/* GLOBAL_FLAGS */
#define ISDN_GLOBAL_STOPPED 1
/* Feature- and status-flags for a net-interface */
#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */
#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */
-#define ISDN_NET_CALLBACK 0x04 /* callback incoming phonenumber */
+#define ISDN_NET_CALLBACK 0x04 /* activate callback */
+#define ISDN_NET_CBHUP 0x08 /* hangup before callback */
+#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */
+#if 0
+/* Unused??? */
#define ISDN_NET_CLONE 0x08 /* clone a tmp interface when called */
#define ISDN_NET_TMP 0x10 /* tmp interface until getting an IP */
#define ISDN_NET_DYNAMIC 0x20 /* this link is dynamically allocated */
+#endif
#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */
/* Phone-list-element */
int pre_channel; /* Preselected isdn-channel */
int exclusive; /* If non-zero idx to reserved chan.*/
int flags; /* Connection-flags */
- int dialstate; /* State for dialing */
int dialretry; /* Counter for Dialout-retries */
int dialmax; /* Max. Number of Dial-retries */
- char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */
+ int cbdelay; /* Delay before Callback starts */
int dtimer; /* Timeout-counter for dialing */
+ char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */
+ u_char cbhup; /* Flag: Reject Call before Callback*/
+ u_char dialstate; /* State for dialing */
u_char p_encap; /* Packet encapsulation */
/* 0 = Ethernet over ISDN */
/* 1 = RAW-IP */
modem mdm; /* tty-driver-data */
isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */
isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */
+ ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */
+ ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */
} isdn_dev;
extern isdn_dev *dev;
-/* $Id: isdnif.h,v 1.1 1996/01/09 05:50:51 fritz Exp fritz $
+/* $Id: isdnif.h,v 1.2 1996/04/20 17:02:40 fritz Exp $
*
* Linux ISDN subsystem
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* $Log: isdnif.h,v $
+ * Revision 1.2 1996/04/20 17:02:40 fritz
+ * Changes to support skbuffs for Lowlevel-Drivers.
+ * Misc. typos
+ *
* Revision 1.1 1996/01/09 05:50:51 fritz
* Initial revision
*
#define MATSUSHITA_CDROM4_MAJOR 28
#define STL_SIOMEMMAJOR 28
#define AZTECH_CDROM_MAJOR 29
+#define GRAPHDEV_MAJOR 29 /* SparcLinux /dev/fb */
#define CM206_CDROM_MAJOR 32
#define IDE2_MAJOR 33
#define IDE3_MAJOR 34
#define NETLINK_MAJOR 36
#define IDETAPE_MAJOR 37
-
+#define APBLOCK_MAJOR 60 /* AP1000 Block device */
+#define DDV_MAJOR 61 /* AP1000 DDV block device */
/*
* Tests for SCSI devices.
*/
#include <linux/mm.h>
#include <linux/ioctl.h>
-#define MD_VERSION "0.34"
+#define MD_VERSION "0.35"
/* ioctls */
#define REGISTER_DEV _IO (MD_MAJOR, 1)
#ifdef __KERNEL__
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
-#undef MD_COUNT_SIZE /* Define this to have stats about
- chunk size in /proc/mdstat */
#define MAX_REAL 8 /* Max number of physical dev per md dev */
#define MAX_MD_DEV 4 /* Max number of md dev */
struct md_personality
{
char *name;
- int (*map)(int minor, struct md_dev *md_dev, struct request *req);
+ int (*map)(struct md_dev *md_dev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size);
int (*run)(int minor, struct md_dev *md_dev);
int (*stop)(int minor, struct md_dev *md_dev);
int (*status)(char *page, int minor, struct md_dev *md_dev);
int busy;
int nb_dev;
void *private;
-#ifdef MD_COUNT_SIZE
- unsigned int smallest_count;
- unsigned int biggest_count;
- unsigned int equal_count;
-#endif
};
extern struct real_dev devices[MAX_MD_DEV][MAX_REAL];
extern struct md_dev md_dev[MAX_MD_DEV];
extern int md_size[MAX_MD_DEV];
-extern void make_md_request(struct request *pending, int n);
extern char *partition_name (kdev_t dev);
-#if defined(CONFIG_MD_SUPPORT_RAID1) || defined(CONFIG_MD_SUPPORT_RAID5)
-extern int md_valid_device (int minor, kdev_t dev, int mode);
-extern int md_can_reemit (int minor);
-#endif
-
extern int register_md_personality (int p_num, struct md_personality *p);
extern int unregister_md_personality (int p_num);
#define PSMOUSE_MINOR 1
#define MS_BUSMOUSE_MINOR 2
#define ATIXL_BUSMOUSE_MINOR 3
+#define SUN_MOUSE_MINOR 6
#define MISC_DYNAMIC_MINOR 255
extern int misc_init(void);
current inode */
struct inode *i_old; /* pointer to the old inode this inode
depends on */
+ struct inode *i_linked; /* pointer to inode linked to the current one,
+ happens when an open file is moved */
+ struct inode *i_oldlink;/* pointer to open inode that references
+ the same file */
int i_binary; /* file contains non-text data */
};
PROC_SCSI_SSC,
PROC_SCSI_NCR53C406A,
PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
PROC_SCSI_SCSI_DEBUG,
PROC_SCSI_NOT_PRESENT,
PROC_SCSI_FILE, /* I'm assuming here that we */
#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
+
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
* a c_cc[] character, but indicates that a particular special character
#ifdef CONFIG_DIGI
extern void pcxx_setup(char *str, int *ints);
#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+extern void pcbit_setup(char *str, int *ints);
+#endif
+
#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
extern void ipc_init(void);
#ifdef CONFIG_ISDN_DRV_TELES
{ "teles=", teles_setup },
#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+ { "pcbit=", pcbit_setup },
+#endif
#ifdef CONFIG_DIGI
{ "digi=", pcxx_setup },
#endif
GET_USE_COUNT(mp) &= ~MOD_AUTOCLEAN;
(*mp->cleanup)();
mp->state = MOD_DELETED;
- free_modules();
}
}
+ free_modules();
}
return 0;
}
p = &page->next;
offset = start - offset;
/* partial truncate, clear end of page */
- if (offset < PAGE_SIZE)
+ if (offset < PAGE_SIZE) {
memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
+ flush_page_to_ram(page_address(page));
+ }
}
}
*
* Synchronous read-ahead benefits:
* --------------------------------
- * Using reasonnable IO xfer length from peripheral devices increase system
+ * Using reasonable IO xfer length from peripheral devices increase system
* performances.
- * Reasonnable means, in this context, not too large but not too small.
+ * Reasonable means, in this context, not too large but not too small.
* The actual maximum value is MAX_READAHEAD + PAGE_SIZE = 32k
*
* Asynchronous read-ahead benefits:
* ------------------------------
* In order to maximize overlapping, we must start some asynchronous read
* request from the device, as soon as possible.
- * We must be very carefull about:
+ * We must be very careful about:
* - The number of effective pending IO read requests.
- * ONE seems to be the only reasonnable value.
+ * ONE seems to be the only reasonable value.
* - The total memory pool usage for the file access stream.
* We try to have a limit of MAX_READWINDOW = 48K.
*/
ppos = pos & PAGE_MASK;
rapos = filp->f_rapos & PAGE_MASK;
max_ahead = 0;
+
/*
- * If the current page is locked, try some synchronous read-ahead in order
+ * If the current page is locked, and if the current position is outside the
+ * previous read IO request, try some synchronous read-ahead in order
* to avoid too small IO requests.
*/
if (PageLocked(page)) {
- rapos = ppos;
- if (rapos < inode->i_size)
- max_ahead = filp->f_ramax;
- filp->f_rawin = 0;
- filp->f_ralen = PAGE_SIZE;
+ if (!rapos || ppos >= rapos || ppos + filp->f_ralen < rapos) {
+ rapos = ppos;
+ if (rapos < inode->i_size)
+ max_ahead = filp->f_ramax;
+ filp->f_rawin = 0;
+ filp->f_ralen = PAGE_SIZE;
+ }
}
/*
* The current page is not locked
page = fill_page(inode, offset);
if (page && no_share) {
unsigned long new_page = __get_free_page(GFP_KERNEL);
- if (new_page)
+ if (new_page) {
memcpy((void *) new_page, (void *) page, PAGE_SIZE);
+ flush_page_to_ram(new_page);
+ }
free_page(page);
return new_page;
}
+ flush_page_to_ram(page);
return page;
}
return 0;
if (!pte_dirty(pte))
return 0;
+ flush_page_to_ram(pte_page(pte));
flush_cache_page(vma, address);
set_pte(ptep, pte_mkclean(pte));
flush_tlb_page(vma, address);
pte_t oldpage = *pte;
pte_clear(pte);
if (offset >= high_memory || PageReserved(mem_map+MAP_NR(offset)))
- set_pte(pte, mk_pte(offset, prot));
+ set_pte(pte, mk_pte(offset, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
offset += PAGE_SIZE;
offset -= from;
dir = pgd_offset(current->mm, from);
- flush_cache_range(current->mm, beg, from);
+ flush_cache_range(current->mm, beg, end);
while (from < end) {
pmd_t *pmd = pmd_alloc(dir, from);
error = -ENOMEM;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- flush_tlb_range(current->mm, beg, from);
+ flush_tlb_range(current->mm, beg, end);
return error;
}
free_page(pte_page(pte));
return;
}
-/* no need for invalidate */
+/* no need for flush_tlb */
set_pte(page_table, pte);
}
free_page(page);
return 0;
}
+ flush_page_to_ram(page);
set_pte(pte, pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY))));
-/* no need for flush_tlb */
+/* no need for invalidate */
return page;
}
oom(tsk);
pte = BAD_PAGE;
}
+ flush_page_to_ram(page);
}
put_page(page_table, pte);
}
pte = *page_table;
if (!pte_present(pte))
return;
+ flush_cache_page(vma, address);
address &= ~PAGE_MASK;
address += pte_page(pte);
if (address >= high_memory)
return;
memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK));
+ flush_page_to_ram(pte_page(pte));
}
/*
if (!vma->vm_ops || !vma->vm_ops->swapin) {
swap_in(tsk, vma, page_table, pte_val(entry), write_access);
+ flush_page_to_ram(pte_page(*page_table));
return;
}
page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
page = pte_wrprotect(page);
++vma->vm_mm->rss;
++tsk->maj_flt;
+ flush_page_to_ram(pte_page(page));
set_pte(page_table, page);
return;
}
* so we can make it writable and dirty to avoid having to
* handle that later.
*/
+ flush_page_to_ram(page);
entry = mk_pte(page, vma->vm_page_prot);
if (write_access) {
entry = pte_mkwrite(pte_mkdirty(entry));
return;
}
set_pte(pte, pte_mkyoung(*pte));
+ flush_tlb_page(vma, address);
if (!write_access)
return;
if (pte_write(*pte)) {
set_pte(pte, pte_mkdirty(*pte));
+ flush_tlb_page(vma, address);
return;
}
do_wp_page(current, vma, address, write_access);
* Mike Kilburn : htons() missing in ip_build_xmit.
* Bradford Johnson: Fix faulty handling of some frames when
* no route is found.
+ * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
+ * (in case if packet not accepted by
+ * output firewall rules)
*/
#include <asm/segment.h>
iph = skb->ip_hdr;
iph->tot_len = htons(skb->len-(((unsigned char *)iph)-skb->data));
-#ifdef CONFIG_FIREWALL
- if(call_out_firewall(PF_INET, skb->dev, iph) < FW_ACCEPT)
- /* just don't send this packet */
- return;
-#endif
-
/*
* No reassigning numbers to fragments...
*/
skb->free = free;
+#ifdef CONFIG_FIREWALL
+ if(call_out_firewall(PF_INET, skb->dev, iph) < FW_ACCEPT) {
+ /* just don't send this packet */
+ /* and free socket buffers ;) <aldem@barnet.kharkov.ua> */
+ if (free)
+ skb->sk = sk; /* I am not sure *this* is really needed, */
+ kfree_skb(skb, FREE_WRITE); /* but *this* must be here */
+ return;
+ }
+#endif
+
/*
* Do we need to fragment. Again this is inefficient.
* We need to somehow lock the original buffer and use