Various ATA, Work(s) In Progress (EXPERIMENTAL)
CONFIG_IDEDMA_PCI_WIP
If you enable this you will be capable of using and testing
- highly developmentail projects.
+ highly developmental projects.
It is SAFEST to say N to this question.
-# $Id: Makefile,v 1.1 1999/09/18 16:55:51 gniibe Exp gniibe $
+# $Id: Makefile,v 1.2 1999/12/23 12:13:53 gniibe Exp gniibe $
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
#
# Select the object file format to substitute into the linker script.
#
-tool-prefix = sh-elf
+tool-prefix = sh-linux-gnu-
ifdef CONFIG_LITTLE_ENDIAN
CFLAGS += -ml
AFLAGS += -ml
# LINKFLAGS += -EL
LDFLAGS := -EL
-
-LD =$(CROSS_COMPILE)ld $(LDFLAGS)
-
endif
-ifdef CONFIG_CROSSCOMPILE
+# ifdef CONFIG_CROSSCOMPILE
CROSS_COMPILE = $(tool-prefix)
-endif
+# endif
+
+LD =$(CROSS_COMPILE)ld $(LDFLAGS)
+OBJCOPY=$(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
MODFLAGS +=
# none has been choosen above.
#
LINKSCRIPT = arch/sh/vmlinux.lds
-LINKFLAGS += -T $(word 1,$(LINKSCRIPT)) -e __stext
+LINKFLAGS += -T $(word 1,$(LINKSCRIPT)) -e _stext
ifdef LOADADDR
LINKFLAGS += -Ttext $(word 1,$(LOADADDR))
#
-# arch/mips/boot/Makefile
+# arch/sh/boot/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
#
mainmenu_name "Linux/SuperH Kernel Configuration"
+define_bool CONFIG_SUPERH y
+
+define_bool CONFIG_UID16 y
+
mainmenu_option next_comment
comment 'Code maturity level options'
bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
fi
bool 'Little Endian' CONFIG_LITTLE_ENDIAN
hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
-bool 'Use SH CPU internal real time clock' CONFIG_SH_CPU_RTC
endmenu
mainmenu_option next_comment
mainmenu_option next_comment
comment 'General setup'
+
bool 'Networking support' CONFIG_NET
+
+bool 'Directly Connected Compact Flash support' CONFIG_CF_ENABLER
+
+bool 'PCI support' CONFIG_PCI
+if [ "$CONFIG_PCI" = "y" ]; then
+ choice ' PCI access mode' \
+ "BIOS CONFIG_PCI_GOBIOS \
+ Direct CONFIG_PCI_GODIRECT \
+ Any CONFIG_PCI_GOANY" Any
+ if [ "$CONFIG_PCI_GOBIOS" = "y" -o "$CONFIG_PCI_GOANY" = "y" ]; then
+ define_bool CONFIG_PCI_BIOS y
+ fi
+ if [ "$CONFIG_PCI_GODIRECT" = "y" -o "$CONFIG_PCI_GOANY" = "y" ]; then
+ define_bool CONFIG_PCI_DIRECT y
+ fi
+fi
+
+source drivers/pci/Config.in
+
+bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
+
+if [ "$CONFIG_HOTPLUG" = "y" ] ; then
+ source drivers/pcmcia/Config.in
+fi
+
bool 'System V IPC' CONFIG_SYSVIPC
bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
bool 'Sysctl support' CONFIG_SYSCTL
-
if [ "$CONFIG_PROC_FS" = "y" ]; then
choice 'Kernel core (/proc/kcore) format' \
"ELF CONFIG_KCORE_ELF \
A.OUT CONFIG_KCORE_AOUT" ELF
fi
-
tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
-endmenu
-mainmenu_option next_comment
-comment 'Character devices'
-define_bool CONFIG_SERIAL n
-define_bool CONFIG_SERIAL_CONSOLE y
-bool 'SuperH SCI support' CONFIG_SH_SCI_SERIAL
-bool 'SuperH SCIF support' CONFIG_SH_SCIF_SERIAL
+source drivers/parport/Config.in
+
endmenu
-mainmenu_option next_comment
-comment 'Floppy, IDE, and other block devices'
+source drivers/block/Config.in
-tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
-if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
- bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
fi
-tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
-tristate 'Network block device support' CONFIG_BLK_DEV_NBD
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source drivers/scsi/Config.in
+fi
endmenu
+source drivers/ieee1394/Config.in
+
if [ "$CONFIG_NET" = "y" ]; then
- source net/Config.in
mainmenu_option next_comment
- comment 'Network device drivers'
+ comment 'Network device support'
+
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
source drivers/net/Config.in
+ if [ "$CONFIG_ATM" = "y" ]; then
+ source drivers/atm/Config.in
+ fi
+ fi
endmenu
fi
mainmenu_option next_comment
+comment 'Character devices'
+
+bool 'Virtual terminal' CONFIG_VT
+if [ "$CONFIG_VT" = "y" ]; then
+ bool ' Support for console on virtual terminal' CONFIG_VT_CONSOLE
+fi
+
+tristate 'Serial support' CONFIG_SERIAL
+if [ "$CONFIG_SERIAL" = "y" -o "$CONFIG_SERIAL" = "m" ]; then
+ choice 'Serial interface type' \
+ "SCI CONFIG_SH_SCI_SERIAL \
+ SCIF CONFIG_SH_SCIF_SERIAL"
+fi
+if [ "$CONFIG_SERIAL" = "y" ]; then
+ bool ' Support for console on serial port' CONFIG_SERIAL_CONSOLE
+fi
comment 'Unix 98 PTY support'
bool 'Unix98 PTY support' CONFIG_UNIX98_PTYS
if [ "$CONFIG_UNIX98_PTYS" = "y" ]; then
int 'Maximum number of Unix98 PTYs in use (0-2048)' CONFIG_UNIX98_PTY_COUNT 256
fi
+if [ "$CONFIG_PARPORT" != "n" ]; then
+ dep_tristate 'Parallel printer support' CONFIG_PRINTER $CONFIG_PARPORT
+ if [ "$CONFIG_PRINTER" != "n" ]; then
+ bool ' Support for console on line printer' CONFIG_LP_CONSOLE
+ fi
+ dep_tristate 'Support for user-space parallel port device drivers' CONFIG_PPDEV $CONFIG_PARPORT
+fi
endmenu
+if [ "$CONFIG_HOTPLUG" = "y" -a "$CONFIG_PCMCIA" != "n" ]; then
+ source drivers/char/pcmcia/Config.in
+fi
+
+source drivers/misc/Config.in
+
source fs/Config.in
+if [ "$CONFIG_VT" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Console drivers'
+ bool 'VGA text console' CONFIG_VGA_CONSOLE
+ bool 'Video mode selection support' CONFIG_VIDEO_SELECT
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ tristate 'MDA text console (dual-headed) (EXPERIMENTAL)' CONFIG_MDA_CONSOLE
+ source drivers/video/Config.in
+ fi
+ endmenu
+fi
+
+
mainmenu_option next_comment
-comment 'Watchdog'
+comment 'Sound'
-tristate 'Software watchdog' CONFIG_SOFT_WATCHDOG
+tristate 'Sound card support' CONFIG_SOUND
+if [ "$CONFIG_SOUND" != "n" ]; then
+ source drivers/sound/Config.in
+fi
endmenu
mainmenu_option next_comment
#
# Automatically generated make config: don't edit
#
+CONFIG_SUPERH=y
+CONFIG_UID16=y
#
# Code maturity level options
# CONFIG_CPU_SH4 is not set
CONFIG_LITTLE_ENDIAN=y
CONFIG_MEMORY_START=0c000000
-CONFIG_SH_CPU_RTC=y
#
# Loadable module support
# General setup
#
# CONFIG_NET is not set
+CONFIG_CF_ENABLER=y
+# CONFIG_PCI is not set
+# CONFIG_HOTPLUG is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_SYSCTL is not set
# CONFIG_KCORE_AOUT is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
+# CONFIG_PARPORT is not set
#
-# Character devices
+# Block devices
#
-# CONFIG_SERIAL is not set
-CONFIG_SERIAL_CONSOLE=y
-CONFIG_SH_SCI_SERIAL=y
-# CONFIG_SH_SCIF_SERIAL is not set
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
#
-# Floppy, IDE, and other block devices
+# IDE chipset support/bugfixes
#
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_IDE_CHIPSETS is not set
+
+#
+# Additional Block Devices
+#
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_MD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_BLK_DEV_LOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_DEV_IDE_MODES is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI support
+#
+# CONFIG_SCSI is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_SERIAL=y
+CONFIG_SH_SCI_SERIAL=y
+# CONFIG_SH_SCIF_SERIAL is not set
+CONFIG_SERIAL_CONSOLE=y
#
# Unix 98 PTY support
#
# CONFIG_UNIX98_PTYS is not set
+#
+# Misc devices
+#
+
#
# Filesystems
#
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
# CONFIG_FAT_FS is not set
+# CONFIG_CRAMFS is not set
# CONFIG_ISO9660_FS is not set
# CONFIG_JOLIET is not set
-# CONFIG_UDF_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_NTFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_EXT2_FS=y
# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
# CONFIG_UFS_FS is not set
#
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
#
-# Watchdog
+# Sound
#
-# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_SOUND is not set
#
# Kernel hacking
O_TARGET := kernel.o
O_OBJS := process.o signal.o entry.o traps.o irq.o irq_onchip.o \
- ptrace.o setup.o time.o sys_sh.o semaphore.o
+ ptrace.o setup.o time.o sys_sh.o semaphore.o pci-sh.o \
+ irq_imask.o
OX_OBJS := sh_ksyms.o
MX_OBJS :=
+ifdef CONFIG_CF_ENABLER
+O_OBJS += cf-enabler.o
+endif
+
+ifdef CONFIG_CPU_SH4
+O_OBJS += fpu.o
+endif
+
all: kernel.o head.o init_task.o
entry.o: entry.S
--- /dev/null
+/* $Id: cf-enabler.c,v 1.2 1999/12/20 10:14:40 gniibe Exp $
+ *
+ * linux/drivers/block/cf-enabler.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Enable the CF configuration.
+ */
+
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define CF_CIS_BASE 0xb8000000
+/*
+ * 0xB8000000 : Attribute
+ * 0xB8001000 : Common Memory
+ * 0xBA000000 : I/O
+ */
+
+int __init cf_init(void)
+{
+ outw(0x0042, CF_CIS_BASE+0x0200);
+ make_imask_irq(14);
+ disable_irq(14);
+ return 0;
+}
+
+__initcall (cf_init);
-/* $Id: entry.S,v 1.19 1999/10/31 13:19:35 gniibe Exp gniibe $
+/* $Id: entry.S,v 1.55 2000/03/05 01:48:58 gniibe Exp $
*
* linux/arch/sh/entry.S
*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
addr_limit = 12
need_resched = 20
-PF_TRACESYS = 0x20
+PF_TRACESYS = 0x00000020
+PF_USEDFPU = 0x00100000
ENOSYS = 38
1: .long SYMBOL_NAME(do_exception_error)
2: .long 0xefffffff ! BL=0
-reschedule:
- mova SYMBOL_NAME(ret_from_syscall),r0
- mov.l 1f,r1
- jmp @r1
- lds r0,pr
- .balign 4
-1: .long SYMBOL_NAME(schedule)
-
badsys: mov #-ENOSYS,r0
rts ! go to ret_from_syscall..
mov.l r0,@(R0,r15)
-signal_return:
- ! We can reach here from an interrupt handler,
- ! so, we need to unblock interrupt.
- /* STI */
- mov.l 1f,r1
- stc sr,r0
- and r1,r0
- ldc r0,sr
- !
- mov r15,r4
- mov #0,r5
- mov.l 2f,r1
- mova restore_all,r0
- jmp @r1
- lds r0,pr
- .balign 4
-1: .long 0xefffffff ! BL=0
-2: .long SYMBOL_NAME(do_signal)
-
!
!
!
ldc r2,sr
!
mov.l __n_sys,r1
- cmp/ge r1,r0
+ cmp/hs r1,r0
bt/s badsys
mov r0,r2
!
3: .long SYMBOL_NAME(syscall_trace)
2: .long 0xefffffff ! BL=0
1: .long TRA
+__n_sys: .long NR_syscalls
+__sct: .long SYMBOL_NAME(sys_call_table)
+__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
led: .long 0xa8000000 ! For my board -- gN
.section .fixup,"ax"
.long 8b,fixup_syscall_argerr
.previous
+reschedule:
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ mov.l 1f,r1
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long SYMBOL_NAME(schedule)
ENTRY(ret_from_irq)
- mov.l @(SR,r15),r0 ! get original stack
+ mov.l @(SR,r15),r0 ! get status register
shll r0
shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
- ! XXX: Is it better to run through bottom half?
- ! In such a case, we should go "ret_from_syscall" instead
+ ! STI
+ mov.l 1f, $r1
+ stc $sr, $r2
+ and $r1, $r2
+ ldc $r2, $sr
+ !
bra ret_with_reschedule
nop
+ENTRY(ret_from_exception)
+ mov.l @(SR,r15),r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
+ bt restore_all ! Yes, it's from kernel, go back soon
+ ! STI
+ mov.l 1f, $r1
+ stc $sr, $r2
+ and $r1, $r2
+ ldc $r2, $sr
+ !
+ bra ret_from_syscall
+ nop
+ .balign 4
+1: .long 0xefffffff ! BL=0
+
+ .balign 4
ret: add r8,r15 ! pop off the arguments
mov.l r0,@(R0,r15) ! save the return value
/* fall through */
ENTRY(ret_from_syscall)
- mov.l __bh_mask,r0
+ mov.l __softirq_state,r0
mov.l @r0,r1
- mov.l __bh_active,r0
- mov.l @r0,r2
+ mov.l @(4,r0),r2
tst r2,r1
bt ret_with_reschedule
-handle_bottom_half:
- mov.l __dbh,r0
+handle_softirq:
+ mov.l __do_softirq,r0
jsr @r0
nop
ret_with_reschedule:
bf reschedule
mov.l @(sigpending,r1),r0
tst #0xff,r0
- bf signal_return
- !
+ bt restore_all
+signal_return:
+ mov r15,r4
+ mov #0,r5
+ mov.l __do_signal,r1
+ mova restore_all,r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+__do_signal:
+ .long SYMBOL_NAME(do_signal)
+__softirq_state:
+ .long SYMBOL_NAME(softirq_state)
+__do_softirq:
+ .long SYMBOL_NAME(do_softirq)
+__minus8192:
+ .long -8192 ! offset from stackbase to tsk
+
+ .balign 4
restore_all:
- add #4,r15 ! skip syscall number
- mov.l @r15+,r11 ! SSR
+#if defined(__SH4__)
+ mov.l __fpu_prepare_fd, $r1
+ jsr @$r1
+ stc $sr, $r4
+#endif
+ add #4,r15 ! Skip syscall number
+ mov.l @r15+,r11 ! Got SSR into R11
+#if defined(__SH4__)
+ mov $r11, $r12
+#endif
+ !
+ mov.l 1f,r1
+ stc sr,r0
+ and r1,r0 ! Get IMASK+FD
+ mov.l 2f,r1
+ and r1,r11
+ or r0,r11 ! Inherit the IMASK+FD value of SR
+ !
mov.l @r15+,r10 ! original stack
mov.l @r15+,r0
mov.l @r15+,r1
ldc r14,sr ! here, change the register bank
mov r10,k0
mov r11,k1
+#if defined(__SH4__)
+ mov $r12, $k2
+#endif
mov.l @r15+,r8
mov.l @r15+,r9
mov.l @r15+,r10
lds.l @r15+,macl
lds.l @r15+,pr
ldc.l @r15+,spc
- mov k0,r15
ldc k1,ssr
+#if defined(__SH4__)
+ shll $k1
+ shll $k1
+ bf 9f ! user mode
+ /* Kernel to kernel transition */
+ mov.l 3f, $k1
+ tst $k1, $k2
+ bf 9f ! it hadn't FPU
+ ! Kernel to kernel and FPU was used
+ ! There's the case we don't get FPU now
+ stc $sr, $k2
+ tst $k1, $k2
+ bt 7f
+ ! We need to grab FPU here
+ xor $k1, $k2
+ ldc $k2, $sr ! Grab FPU
+ mov.l __init_task_flags, $k1
+ mov.l @$k1, $k2
+ mov.l __PF_USEDFPU, $k1
+ or $k1, $k2
+ mov.l __init_task_flags, $k1
+ mov.l $k2, @$k1 ! Set init_task.flags |= PF_USEDFPU
+ !
+ ! Restoring FPU...
+ !
+7: fmov.s @$r15+, $fr0
+ fmov.s @$r15+, $fr1
+ fmov.s @$r15+, $fr2
+ fmov.s @$r15+, $fr3
+ fmov.s @$r15+, $fr4
+ fmov.s @$r15+, $fr5
+ fmov.s @$r15+, $fr6
+ fmov.s @$r15+, $fr7
+ fmov.s @$r15+, $fr8
+ fmov.s @$r15+, $fr9
+ fmov.s @$r15+, $fr10
+ fmov.s @$r15+, $fr11
+ fmov.s @$r15+, $fr12
+ fmov.s @$r15+, $fr13
+ fmov.s @$r15+, $fr14
+ fmov.s @$r15+, $fr15
+ lds.l @$r15+, $fpscr
+ lds.l @$r15+, $fpul
+9:
+#endif
+ mov k0,r15
rte
nop
.balign 4
-__n_sys: .long NR_syscalls
-__sct: .long SYMBOL_NAME(sys_call_table)
-__bh_mask: .long SYMBOL_NAME(bh_mask)
-__bh_active: .long SYMBOL_NAME(bh_active)
-__dbh: .long SYMBOL_NAME(do_bottom_half)
__blrb_flags: .long 0x30000000
-__minus8192: .long -8192 ! offset from stackbase to tsk
-__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
-
+#if defined(__SH4__)
+__fpu_prepare_fd:
+ .long SYMBOL_NAME(fpu_prepare_fd)
+__init_task_flags:
+ .long SYMBOL_NAME(init_task_union)+4
+__PF_USEDFPU:
+ .long PF_USEDFPU
+#endif
+1: .long 0x000080f0 ! IMASK+FD
+2: .long 0xffff7f0f ! ~(IMASK+FD)
+3: .long 0x00008000 ! FD=1
! Exception Vector Base
!
bra handle_exception
mov.l @k2,k2
.balign 4
-2: .long SYMBOL_NAME(ret_from_syscall)
+2: .long SYMBOL_NAME(ret_from_exception)
1: .long EXPEVT
!
!
.balign 1024,0,1024
tlb_miss:
mov.l 1f,k2
- mov.l 3f,k3
+ mov.l 4f,k3
bra handle_exception
mov.l @k2,k2
!
.balign 512,0,512
interrupt:
mov.l 2f,k2
- mov.l 4f,k3
+ mov.l 3f,k3
bra handle_exception
mov.l @k2,k2
.balign 4
1: .long EXPEVT
2: .long INTEVT
-3: .long SYMBOL_NAME(ret_from_syscall)
-4: .long SYMBOL_NAME(ret_from_irq)
+3: .long SYMBOL_NAME(ret_from_irq)
+4: .long SYMBOL_NAME(ret_from_exception)
!
!
handle_exception:
- ! Using k0, k1 for scratch registers (r0_bank1, and r1_bank1),
+ ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
stc ssr,k0 ! from kernel space?
shll k0 ! Check MD bit (bit30)
shll k0
- bt/s 1f ! it's from kernel to kernel transition
+#if defined(__SH4__)
+ bf/s 8f ! it's from user to kernel transition
+ mov $r15, $k0 ! save original stack to k0
+ /* Kernel to kernel transition */
+ mov.l 2f, $k1
+ stc $ssr, $k0
+ tst $k1, $k0
+ bf/s 9f ! FPU is not used
+ mov $r15, $k0 ! save original stack to k0
+ ! FPU is used, save FPU
+ ! /* XXX: Need to save another bank of FPU if all FPU feature is used */
+ ! /* Currently it's not the case for GCC (only udivsi3_i4, divsi3_i4) */
+ sts.l $fpul, @-$r15
+ sts.l $fpscr, @-$r15
+ fmov.s $fr15, @-$r15
+ fmov.s $fr14, @-$r15
+ fmov.s $fr13, @-$r15
+ fmov.s $fr12, @-$r15
+ fmov.s $fr11, @-$r15
+ fmov.s $fr10, @-$r15
+ fmov.s $fr9, @-$r15
+ fmov.s $fr8, @-$r15
+ fmov.s $fr7, @-$r15
+ fmov.s $fr6, @-$r15
+ fmov.s $fr5, @-$r15
+ fmov.s $fr4, @-$r15
+ fmov.s $fr3, @-$r15
+ fmov.s $fr2, @-$r15
+ fmov.s $fr1, @-$r15
+ fmov.s $fr0, @-$r15
+ bra 9f
+ mov #0, $k1
+#else
+ bt/s 9f ! it's from kernel to kernel transition
mov r15,k0 ! save original stack to k0 anyway
- mov kernel_sp,r15 ! change to kernel stack
-1: stc.l spc,@-r15
+#endif
+8: /* User space to kernel */
+ mov kernel_sp, $r15 ! change to kernel stack
+#if defined(__SH4__)
+ mov.l 2f, $k1 ! let kernel release FPU
+#endif
+9: stc.l spc,@-r15
sts.l pr,@-r15
!
lds k3,pr ! Set the return address to pr
stc.l gbr,@-r15
mov.l r14,@-r15
!
- mov.l 2f,k1
- stc sr,r14 ! back to normal register bank, and
- and k1,r14 ! ..
+ stc sr,r14 ! Back to normal register bank, and
+#if defined(__SH4__)
+ or $k1, $r14 ! may release FPU
+#endif
+ mov.l 3f,k1
+ and k1,r14 ! ...
ldc r14,sr ! ...changed here.
!
mov.l r13,@-r15
mov.l @r15,r0 ! recovering r0..
.balign 4
1: .long SYMBOL_NAME(exception_handling_table)
-2: .long 0xdfffffff ! RB=0, BL=1
+2: .long 0x00008000 ! FD=1
+3: .long 0xdfffffff ! RB=0, leave BL=1
none:
rts
.long tlb_protection_violation_store
.long error ! address_error_load (filled by trap_init)
.long error ! address_error_store (filled by trap_init)
+#if defined(__SH4__)
+ .long SYMBOL_NAME(do_fpu_error)
+#else
.long error ! fpu_exception
+#endif
.long error
.long system_call ! Unconditional Trap
.long error ! reserved_instruction (filled by trap_init)
.long error
.long error
.long error
- .long error ! fpu
- .long error ! fpu
+ .long SYMBOL_NAME(do_fpu_state_restore)
+ .long SYMBOL_NAME(do_fpu_state_restore)
#endif
ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_time)
.long SYMBOL_NAME(sys_mknod)
.long SYMBOL_NAME(sys_chmod) /* 15 */
- .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_lchown16)
.long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
.long SYMBOL_NAME(sys_stat)
.long SYMBOL_NAME(sys_lseek)
.long SYMBOL_NAME(sys_getpid) /* 20 */
.long SYMBOL_NAME(sys_mount)
.long SYMBOL_NAME(sys_oldumount)
- .long SYMBOL_NAME(sys_setuid)
- .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_setuid16)
+ .long SYMBOL_NAME(sys_getuid16)
.long SYMBOL_NAME(sys_stime) /* 25 */
.long SYMBOL_NAME(sys_ptrace)
.long SYMBOL_NAME(sys_alarm)
.long SYMBOL_NAME(sys_times)
.long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
.long SYMBOL_NAME(sys_brk) /* 45 */
- .long SYMBOL_NAME(sys_setgid)
- .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_setgid16)
+ .long SYMBOL_NAME(sys_getgid16)
.long SYMBOL_NAME(sys_signal)
- .long SYMBOL_NAME(sys_geteuid)
- .long SYMBOL_NAME(sys_getegid) /* 50 */
+ .long SYMBOL_NAME(sys_geteuid16)
+ .long SYMBOL_NAME(sys_getegid16) /* 50 */
.long SYMBOL_NAME(sys_acct)
- .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
.long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
.long SYMBOL_NAME(sys_ioctl)
.long SYMBOL_NAME(sys_fcntl) /* 55 */
.long SYMBOL_NAME(sys_sigaction)
.long SYMBOL_NAME(sys_sgetmask)
.long SYMBOL_NAME(sys_ssetmask)
- .long SYMBOL_NAME(sys_setreuid) /* 70 */
- .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_setreuid16) /* 70 */
+ .long SYMBOL_NAME(sys_setregid16)
.long SYMBOL_NAME(sys_sigsuspend)
.long SYMBOL_NAME(sys_sigpending)
.long SYMBOL_NAME(sys_sethostname)
.long SYMBOL_NAME(sys_setrlimit) /* 75 */
- .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_old_getrlimit)
.long SYMBOL_NAME(sys_getrusage)
.long SYMBOL_NAME(sys_gettimeofday)
.long SYMBOL_NAME(sys_settimeofday)
- .long SYMBOL_NAME(sys_getgroups) /* 80 */
- .long SYMBOL_NAME(sys_setgroups)
- .long SYMBOL_NAME(sys_ni_syscall) /* old_select */
+ .long SYMBOL_NAME(sys_getgroups16) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups16)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_oldselect */
.long SYMBOL_NAME(sys_symlink)
.long SYMBOL_NAME(sys_lstat)
.long SYMBOL_NAME(sys_readlink) /* 85 */
.long SYMBOL_NAME(sys_swapon)
.long SYMBOL_NAME(sys_reboot)
.long SYMBOL_NAME(old_readdir)
- .long SYMBOL_NAME(sys_mmap) /* 90 */
+ .long SYMBOL_NAME(old_mmap) /* 90 */
.long SYMBOL_NAME(sys_munmap)
.long SYMBOL_NAME(sys_truncate)
.long SYMBOL_NAME(sys_ftruncate)
.long SYMBOL_NAME(sys_fchmod)
- .long SYMBOL_NAME(sys_fchown) /* 95 */
+ .long SYMBOL_NAME(sys_fchown16) /* 95 */
.long SYMBOL_NAME(sys_getpriority)
.long SYMBOL_NAME(sys_setpriority)
.long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
.long SYMBOL_NAME(sys_statfs)
.long SYMBOL_NAME(sys_fstatfs) /* 100 */
- .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
+ .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
.long SYMBOL_NAME(sys_socketcall)
.long SYMBOL_NAME(sys_syslog)
.long SYMBOL_NAME(sys_setitimer)
.long SYMBOL_NAME(sys_sysfs) /* 135 */
.long SYMBOL_NAME(sys_personality)
.long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
- .long SYMBOL_NAME(sys_setfsuid)
- .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_setfsuid16)
+ .long SYMBOL_NAME(sys_setfsgid16)
.long SYMBOL_NAME(sys_llseek) /* 140 */
.long SYMBOL_NAME(sys_getdents)
.long SYMBOL_NAME(sys_select)
.long SYMBOL_NAME(sys_sched_rr_get_interval)
.long SYMBOL_NAME(sys_nanosleep)
.long SYMBOL_NAME(sys_mremap)
- .long SYMBOL_NAME(sys_setresuid)
- .long SYMBOL_NAME(sys_getresuid) /* 165 */
- .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
+ .long SYMBOL_NAME(sys_setresuid16)
+ .long SYMBOL_NAME(sys_getresuid16) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
.long SYMBOL_NAME(sys_query_module)
.long SYMBOL_NAME(sys_poll)
.long SYMBOL_NAME(sys_nfsservctl)
- .long SYMBOL_NAME(sys_setresgid) /* 170 */
- .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_setresgid16) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid16)
.long SYMBOL_NAME(sys_prctl)
.long SYMBOL_NAME(sys_rt_sigreturn)
.long SYMBOL_NAME(sys_rt_sigaction)
.long SYMBOL_NAME(sys_rt_sigsuspend)
.long SYMBOL_NAME(sys_pread) /* 180 */
.long SYMBOL_NAME(sys_pwrite)
- .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_chown16)
.long SYMBOL_NAME(sys_getcwd)
.long SYMBOL_NAME(sys_capget)
.long SYMBOL_NAME(sys_capset) /* 185 */
.long SYMBOL_NAME(sys_sigaltstack)
.long SYMBOL_NAME(sys_sendfile)
- .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
- .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_vfork) /* 190 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_mmap2)
+ .long SYMBOL_NAME(sys_truncate64)
+ .long SYMBOL_NAME(sys_ftruncate64)
+ .long SYMBOL_NAME(sys_stat64) /* 195 */
+ .long SYMBOL_NAME(sys_lstat64)
+ .long SYMBOL_NAME(sys_fstat64)
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_getgid) /* 200 */
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_getgroups) /* 205 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_setresgid) /* 210 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_setfsuid) /* 215 */
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_pivot_root)
/*
* NOTE!! This doesn't have to be exact - we just have
* entries. Don't panic if you notice that this hasn't
* been shrunk every time we add a new system call.
*/
- .rept NR_syscalls-190
+ .rept NR_syscalls-217
.long SYMBOL_NAME(sys_ni_syscall)
.endr
--- /dev/null
+/* $Id: fpu.c,v 1.27 2000/03/05 01:48:34 gniibe Exp $
+ *
+ * linux/arch/sh/kernel/fpu.c
+ *
+ * Save/restore floating point context for signal handlers.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * FIXME! These routines can be optimized in big endian case.
+ */
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+void
+save_fpu(struct task_struct *tsk)
+{
+ asm volatile("sts.l $fpul, @-%0\n\t"
+ "sts.l $fpscr, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s $fr15, @-%0\n\t"
+ "fmov.s $fr14, @-%0\n\t"
+ "fmov.s $fr13, @-%0\n\t"
+ "fmov.s $fr12, @-%0\n\t"
+ "fmov.s $fr11, @-%0\n\t"
+ "fmov.s $fr10, @-%0\n\t"
+ "fmov.s $fr9, @-%0\n\t"
+ "fmov.s $fr8, @-%0\n\t"
+ "fmov.s $fr7, @-%0\n\t"
+ "fmov.s $fr6, @-%0\n\t"
+ "fmov.s $fr5, @-%0\n\t"
+ "fmov.s $fr4, @-%0\n\t"
+ "fmov.s $fr3, @-%0\n\t"
+ "fmov.s $fr2, @-%0\n\t"
+ "fmov.s $fr1, @-%0\n\t"
+ "fmov.s $fr0, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s $fr15, @-%0\n\t"
+ "fmov.s $fr14, @-%0\n\t"
+ "fmov.s $fr13, @-%0\n\t"
+ "fmov.s $fr12, @-%0\n\t"
+ "fmov.s $fr11, @-%0\n\t"
+ "fmov.s $fr10, @-%0\n\t"
+ "fmov.s $fr9, @-%0\n\t"
+ "fmov.s $fr8, @-%0\n\t"
+ "fmov.s $fr7, @-%0\n\t"
+ "fmov.s $fr6, @-%0\n\t"
+ "fmov.s $fr5, @-%0\n\t"
+ "fmov.s $fr4, @-%0\n\t"
+ "fmov.s $fr3, @-%0\n\t"
+ "fmov.s $fr2, @-%0\n\t"
+ "fmov.s $fr1, @-%0\n\t"
+ "fmov.s $fr0, @-%0"
+ : /* no output */
+ : "r" ((char *)(&tsk->thread.fpu.hard.status))
+ : "memory");
+
+ tsk->flags &= ~PF_USEDFPU;
+ release_fpu();
+}
+
+static void
+restore_fpu(struct task_struct *tsk)
+{
+ asm volatile("fmov.s @%0+, $fr0\n\t"
+ "fmov.s @%0+, $fr1\n\t"
+ "fmov.s @%0+, $fr2\n\t"
+ "fmov.s @%0+, $fr3\n\t"
+ "fmov.s @%0+, $fr4\n\t"
+ "fmov.s @%0+, $fr5\n\t"
+ "fmov.s @%0+, $fr6\n\t"
+ "fmov.s @%0+, $fr7\n\t"
+ "fmov.s @%0+, $fr8\n\t"
+ "fmov.s @%0+, $fr9\n\t"
+ "fmov.s @%0+, $fr10\n\t"
+ "fmov.s @%0+, $fr11\n\t"
+ "fmov.s @%0+, $fr12\n\t"
+ "fmov.s @%0+, $fr13\n\t"
+ "fmov.s @%0+, $fr14\n\t"
+ "fmov.s @%0+, $fr15\n\t"
+ "frchg\n\t"
+ "fmov.s @%0+, $fr0\n\t"
+ "fmov.s @%0+, $fr1\n\t"
+ "fmov.s @%0+, $fr2\n\t"
+ "fmov.s @%0+, $fr3\n\t"
+ "fmov.s @%0+, $fr4\n\t"
+ "fmov.s @%0+, $fr5\n\t"
+ "fmov.s @%0+, $fr6\n\t"
+ "fmov.s @%0+, $fr7\n\t"
+ "fmov.s @%0+, $fr8\n\t"
+ "fmov.s @%0+, $fr9\n\t"
+ "fmov.s @%0+, $fr10\n\t"
+ "fmov.s @%0+, $fr11\n\t"
+ "fmov.s @%0+, $fr12\n\t"
+ "fmov.s @%0+, $fr13\n\t"
+ "fmov.s @%0+, $fr14\n\t"
+ "fmov.s @%0+, $fr15\n\t"
+ "frchg\n\t"
+ "lds.l @%0+, $fpscr\n\t"
+ "lds.l @%0+, $fpul\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.fpu)
+ : "memory");
+}
+
+/*
+ * Load the FPU with signalling NANS. This bit pattern we're using
+ * has the property that no matter whether considered as single or as
+ * double precision represents signaling NANS.
+ */
+/* Double precision, NANS as NANS, rounding to nearest, no exceptions */
+#define FPU_DEFAULT 0x00080000
+
+void fpu_init(void)
+{
+ asm volatile("lds %0, $fpul\n\t"
+ "lds %1, $fpscr\n\t"
+ "fsts $fpul, $fr0\n\t"
+ "fsts $fpul, $fr1\n\t"
+ "fsts $fpul, $fr2\n\t"
+ "fsts $fpul, $fr3\n\t"
+ "fsts $fpul, $fr4\n\t"
+ "fsts $fpul, $fr5\n\t"
+ "fsts $fpul, $fr6\n\t"
+ "fsts $fpul, $fr7\n\t"
+ "fsts $fpul, $fr8\n\t"
+ "fsts $fpul, $fr9\n\t"
+ "fsts $fpul, $fr10\n\t"
+ "fsts $fpul, $fr11\n\t"
+ "fsts $fpul, $fr12\n\t"
+ "fsts $fpul, $fr13\n\t"
+ "fsts $fpul, $fr14\n\t"
+ "fsts $fpul, $fr15\n\t"
+ "frchg\n\t"
+ "fsts $fpul, $fr0\n\t"
+ "fsts $fpul, $fr1\n\t"
+ "fsts $fpul, $fr2\n\t"
+ "fsts $fpul, $fr3\n\t"
+ "fsts $fpul, $fr4\n\t"
+ "fsts $fpul, $fr5\n\t"
+ "fsts $fpul, $fr6\n\t"
+ "fsts $fpul, $fr7\n\t"
+ "fsts $fpul, $fr8\n\t"
+ "fsts $fpul, $fr9\n\t"
+ "fsts $fpul, $fr10\n\t"
+ "fsts $fpul, $fr11\n\t"
+ "fsts $fpul, $fr12\n\t"
+ "fsts $fpul, $fr13\n\t"
+ "fsts $fpul, $fr14\n\t"
+ "fsts $fpul, $fr15\n\t"
+ "frchg"
+ : /* no output */
+ : "r" (0), "r" (FPU_DEFAULT));
+}
+
+asmlinkage void
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct task_struct *tsk = current;
+
+ regs.syscall_nr = -1;
+ regs.pc += 2;
+
+ grab_fpu();
+ save_fpu(tsk);
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+ force_sig(SIGFPE, tsk);
+}
+
+asmlinkage void
+do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7, struct pt_regs regs)
+{
+ struct task_struct *tsk = current;
+
+ regs.syscall_nr = -1;
+
+	if (!user_mode(&regs)) {
+ if (tsk != &init_task) {
+ unlazy_fpu(tsk);
+ }
+ tsk = &init_task;
+ if (tsk->flags & PF_USEDFPU)
+ BUG();
+ }
+
+ grab_fpu();
+ if (tsk->used_math) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ tsk->used_math = 1;
+ }
+ tsk->flags |= PF_USEDFPU;
+ release_fpu();
+}
+
+/*
+ * Change current FD flag to set FD flag back to exception
+ */
+asmlinkage void
+fpu_prepare_fd(unsigned long sr, unsigned long r5, unsigned long r6,
+ unsigned long r7, struct pt_regs regs)
+{
+ __cli();
+	if (!user_mode(&regs)) {
+ if (init_task.flags & PF_USEDFPU)
+ grab_fpu();
+ else {
+ if (!(sr & SR_FD)) {
+ release_fpu();
+ BUG();
+ }
+ }
+ return;
+ }
+
+ if (sr & SR_FD) { /* Kernel doesn't grab FPU */
+ if (current->flags & PF_USEDFPU)
+ grab_fpu();
+ else {
+ if (init_task.flags & PF_USEDFPU) {
+ init_task.flags &= ~PF_USEDFPU;
+ BUG();
+ }
+ }
+ } else {
+ if (init_task.flags & PF_USEDFPU)
+ save_fpu(&init_task);
+ else {
+ release_fpu();
+ BUG();
+ }
+ }
+}
+
+/* Short cut for the FPU exception */
+asmlinkage void
+enable_fpu_in_danger(void)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk != &init_task)
+ unlazy_fpu(tsk);
+
+ tsk = &init_task;
+ if (tsk->used_math) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ tsk->used_math = 1;
+ }
+ tsk->flags |= PF_USEDFPU;
+}
-/* $Id: head.S,v 1.7 1999/10/27 09:41:42 gniibe Exp gniibe $
+/* $Id: head.S,v 1.16 2000/03/02 00:01:15 gniibe Exp $
*
* arch/sh/kernel/head.S
*
- * Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* Cache may or may not be initialized.
* Hardware (including on-chip modules) may or may not be initialized.
*
- * The register R4&R5 holds the address of the parameter block, which has
- * command-line data, etc.
- *
*/
ENTRY(_stext)
-#if defined(__SH4__)
- ! Initialize FPSCR
- /* GCC (as of 2.95.1) assumes FPU with double precision mode. */
- mov.l 7f,r0
- lds r0,fpscr
-#endif
! Initialize Status Register
- mov.l 1f,r0 ! MD=1, RB=0, BL=1
- ldc r0,sr
+ mov.l 1f, $r0 ! MD=1, RB=0, BL=1
+ ldc $r0, $sr
!
- mov.l 2f,r0
- mov r0,r15 ! Set initial r15 (stack pointer)
- ldc r0,r4_bank ! and stack base
+ mov.l 2f, $r0
+ mov $r0, $r15 ! Set initial r15 (stack pointer)
+ ldc $r0, $r4_bank ! and stack base
!
! Enable cache
- mov.l 6f,r0
- jsr @r0
+ mov.l 6f, $r0
+ jsr @$r0
nop
! Clear BSS area
- mov.l 3f,r1
- add #4,r1
- mov.l 4f,r2
- mov #0,r0
-9: cmp/hs r2,r1
+ mov.l 3f, $r1
+ add #4, $r1
+ mov.l 4f, $r2
+ mov #0, $r0
+9: cmp/hs $r2, $r1
bf/s 9b ! while (r1 < r2)
- mov.l r0,@-r2
+ mov.l $r0,@-$r2
! Start kernel
- mov.l 5f,r0
- jmp @r0
+ mov.l 5f, $r0
+ jmp @$r0
nop
.balign 4
-1: .long 0x50000000 ! MD=1, RB=0, BL=1
+1: .long 0x50000000 ! MD=1, RB=0, BL=1, FD=0
2: .long SYMBOL_NAME(stack)
3: .long SYMBOL_NAME(__bss_start)
4: .long SYMBOL_NAME(_end)
5: .long SYMBOL_NAME(start_kernel)
6: .long SYMBOL_NAME(cache_init)
-#if defined(__SH4__)
-7: .long 0x00080000
-#endif
-/* $Id: irq.c,v 1.4 1999/10/11 13:12:14 gniibe Exp $
+/* $Id: irq.c,v 1.11 2000/02/29 11:03:40 gniibe Exp $
*
* linux/arch/sh/kernel/irq.c
*
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/smp.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
/*
* Special irq handlers.
p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %s", action->name);
- for (action=action->next; action; action = action->next) {
+ for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
- }
*p++ = '\n';
}
return p - buf;
kstat.irqs[cpu][irq]++;
desc = irq_desc + irq;
spin_lock(&irq_controller_lock);
- irq_desc[irq].handler->ack(irq);
+ desc->handler->ack(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
spin_unlock(&irq_controller_lock);
}
desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED)){
- irq_desc[irq].handler->end(irq);
- }
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->end(irq);
spin_unlock(&irq_controller_lock);
- /*
- * This should be conditional: we should really get
- * a return code from the irq handler to tell us
- * whether the handler wants us to do software bottom
- * half handling or not..
- */
- if (1) {
- if (bh_active & bh_mask)
- do_bottom_half();
- }
+#if 1
+ __sti();
+#endif
+ if (softirq_state[cpu].active&softirq_state[cpu].mask)
+ do_softirq();
return 1;
}
kfree(action);
return retval;
}
-
+
void free_irq(unsigned int irq, void *dev_id)
{
struct irqaction **p;
irq_desc[irq].handler->shutdown(irq);
}
spin_unlock_irqrestore(&irq_controller_lock,flags);
-
- /* Wait to make sure it's not being used on another CPU */
- while (irq_desc[irq].status & IRQ_INPROGRESS)
- barrier();
kfree(action);
return;
}
{
unsigned int i;
unsigned long delay;
+ unsigned long val;
/*
* first, enable any unassigned irqs
/*
* Now filter out any obviously spurious interrupts
*/
+ val = 0;
spin_lock_irq(&irq_controller_lock);
for (i=0; i<NR_IRQS; i++) {
unsigned int status = irq_desc[i].status;
irq_desc[i].status = status & ~IRQ_AUTODETECT;
irq_desc[i].handler->shutdown(i);
}
+
+ if (i < 32)
+ val |= 1 << i;
}
spin_unlock_irq(&irq_controller_lock);
- return 0x12345678;
+ return val;
}
-int probe_irq_off(unsigned long unused)
+int probe_irq_off(unsigned long val)
{
int i, irq_found, nr_irqs;
- if (unused != 0x12345678)
- printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
-
nr_irqs = 0;
irq_found = 0;
spin_lock_irq(&irq_controller_lock);
--- /dev/null
+/* $Id: irq_imask.c,v 1.2 2000/02/11 04:57:40 gniibe Exp $
+ *
+ * linux/arch/sh/kernel/irq_imask.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Simple interrupt handling using IMASK of SR register.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/irq.h>
+
+/* Bitmap of IRQ masked */
+static unsigned long imask_mask = 0x7fff;
+static int interrupt_priority = 0;
+
+static void enable_imask_irq(unsigned int irq);
+static void disable_imask_irq(unsigned int irq);
+static void shutdown_imask_irq(unsigned int irq);
+static void mask_and_ack_imask(unsigned int);
+static void end_imask_irq(unsigned int irq);
+
+#define IMASK_PRIORITY 15
+
+static unsigned int startup_imask_irq(unsigned int irq)
+{
+ enable_imask_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type imask_irq_type = {
+ "Interrupt using IMASK of SR register",
+ startup_imask_irq,
+ shutdown_imask_irq,
+ enable_imask_irq,
+ disable_imask_irq,
+ mask_and_ack_imask,
+ end_imask_irq
+};
+
+void disable_imask_irq(unsigned int irq)
+{
+ unsigned long __dummy;
+
+ clear_bit(irq, &imask_mask);
+ if (interrupt_priority < IMASK_PRIORITY - irq)
+ interrupt_priority = IMASK_PRIORITY - irq;
+
+ asm volatile("stc sr,%0\n\t"
+ "and %1,%0\n\t"
+ "or %2,%0\n\t"
+ "ldc %0,sr"
+ : "=&r" (__dummy)
+ : "r" (0xffffff0f), "r" (interrupt_priority << 4));
+}
+
+static void enable_imask_irq(unsigned int irq)
+{
+ unsigned long __dummy;
+
+ set_bit(irq, &imask_mask);
+ interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
+
+ asm volatile("stc sr,%0\n\t"
+ "and %1,%0\n\t"
+ "or %2,%0\n\t"
+ "ldc %0,sr"
+ : "=&r" (__dummy)
+ : "r" (0xffffff0f), "r" (interrupt_priority << 4));
+}
+
+static void mask_and_ack_imask(unsigned int irq)
+{
+ disable_imask_irq(irq);
+}
+
+static void end_imask_irq(unsigned int irq)
+{
+ enable_imask_irq(irq);
+}
+
+static void shutdown_imask_irq(unsigned int irq)
+{
+ disable_imask_irq(irq);
+}
+
+void make_imask_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].handler = &imask_irq_type;
+ enable_irq(irq);
+}
-/* $Id: irq_onchip.c,v 1.5 1999/10/28 02:18:33 gniibe Exp $
+/* $Id: irq_onchip.c,v 1.7 2000-01-09 15:55:55+09 gniibe Exp $
*
* linux/arch/sh/kernel/irq_onchip.c
*
*/
#define INTC_IRR0 0xa4000004UL
-#define INTC_IPRC 0xa4000016UL
+#define INTC_IRR1 0xa4000006UL
+#define INTC_IRR2 0xa4000008UL
+
+#define INTC_ICR0 0xfffffee0
+#define INTC_ICR1 0xa4000010
+#define INTC_ICR2 0xa4000012
+#define INTC_INTER 0xa4000014
+#define INTC_IPRA 0xfffffee2
+#define INTC_IPRB 0xfffffee4
+#define INTC_IPRC 0xa4000016
+#define INTC_IPRD 0xa4000018
+#define INTC_IPRE 0xa400001a
#define IRQ0_IRQ 32
#define IRQ1_IRQ 33
}
#ifdef CONFIG_CPU_SUBTYPE_SH7709
+
+ /*
+ * Initialize the Interrupt Controller (INTC)
+ * registers to their power on values
+ */
+
+ ctrl_outb(0, INTC_IRR0);
+ ctrl_outb(0, INTC_IRR1);
+ ctrl_outb(0, INTC_IRR2);
+
+ ctrl_outw(0, INTC_ICR0);
+ ctrl_outw(0, INTC_ICR1);
+ ctrl_outw(0, INTC_ICR2);
+ ctrl_outw(0, INTC_INTER);
+ ctrl_outw(0, INTC_IPRA);
+ ctrl_outw(0, INTC_IPRB);
+ ctrl_outw(0, INTC_IPRC);
+ ctrl_outw(0, INTC_IPRD);
+ ctrl_outw(0, INTC_IPRE);
+
for (i = IRQ0_IRQ; i < NR_IRQS; i++) {
irq_desc[i].handler = &onChip2_irq_type;
}
set_ipr_data(IRQ3_IRQ, IRQ3_IRP_OFFSET, IRQ3_PRIORITY);
set_ipr_data(IRQ4_IRQ, IRQ4_IRP_OFFSET, IRQ4_PRIORITY);
set_ipr_data(IRQ5_IRQ, IRQ5_IRP_OFFSET, IRQ5_PRIORITY);
-
- ctrl_inb(INTC_IRR0);
- ctrl_outb(0, INTC_IRR0);
#endif /* CONFIG_CPU_SUBTYPE_SH7709 */
}
--- /dev/null
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+
+unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
+ unsigned long start, unsigned long size)
+{
+ return start;
+}
-/* $Id: process.c,v 1.8 1999/10/31 13:19:16 gniibe Exp $
+/* $Id: process.c,v 1.28 2000/03/05 02:16:15 gniibe Exp $
*
* linux/arch/sh/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
*
- * SuperH version: Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
+ * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*/
/*
#include <linux/irq.h>
-#if defined(__SH4__)
-struct task_struct *last_task_used_math = NULL;
-#endif
-
static int hlt_counter=0;
#define HARD_IDLE_TIMEOUT (HZ / 3)
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{ /* Don't use this in BL=1(cli). Or else, CPU resets! */
- register unsigned long __sc0 __asm__ ("r0") = __NR_clone;
- register unsigned long __sc4 __asm__ ("r4") = (long) flags | CLONE_VM;
- register unsigned long __sc5 __asm__ ("r5") = 0;
- register unsigned long __sc8 __asm__ ("r8") = (long) arg;
- register unsigned long __sc9 __asm__ ("r9") = (long) fn;
- __asm__ __volatile__(
- "trapa #0\n\t" /* Linux/SH system call */
- "tst #0xff,r0\n\t" /* child or parent? */
+ register unsigned long __sc0 __asm__ ("$r0") = __NR_clone;
+ register unsigned long __sc4 __asm__ ("$r4") = (long) flags | CLONE_VM;
+ register unsigned long __sc5 __asm__ ("$r5") = 0;
+ register unsigned long __sc8 __asm__ ("$r8") = (long) arg;
+ register unsigned long __sc9 __asm__ ("$r9") = (long) fn;
+
+ __asm__("trapa #0\n\t" /* Linux/SH system call */
+ "tst #0xff, $r0\n\t" /* child or parent? */
"bf 1f\n\t" /* parent - jump */
- "jsr @r9\n\t" /* call fn */
- " mov r8,r4\n\t" /* push argument */
- "mov r0,r4\n\t" /* return value to arg of exit */
- "mov %2,r0\n\t" /* exit */
+ "jsr @$r9\n\t" /* call fn */
+ " mov $r8, $r4\n\t" /* push argument */
+ "mov $r0, $r4\n\t" /* return value to arg of exit */
+ "mov %2, $r0\n\t" /* exit */
"trapa #0\n"
"1:"
- :"=z" (__sc0)
- :"0" (__sc0), "i" (__NR_exit),
- "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
- :"memory");
+ : "=z" (__sc0)
+ : "0" (__sc0), "i" (__NR_exit),
+ "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
+ : "memory");
return __sc0;
}
*/
void exit_thread(void)
{
-#if defined(__sh3__)
- /* nothing to do ... */
-#elif defined(__SH4__)
-#if 0 /* for the time being... */
- /* Forget lazy fpu state */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- write_system_register (fpscr, FPSCR_PR);
- last_task_used_math = NULL;
- }
-#endif
-#endif
+ /* Nothing to do. */
}
void flush_thread(void)
/* do nothing */
/* Possibly, set clear debug registers */
#elif defined(__SH4__)
-#if 0 /* for the time being... */
- /* Forget lazy fpu state */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- write_system_register (fpscr, FPSCR_PR);
- last_task_used_math = NULL;
- }
-#endif
+ struct task_struct *tsk = current;
+
+ /* Forget lazy FPU state */
+ clear_fpu(tsk);
+ tsk->used_math = 0;
#endif
}
}
/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#if defined(__SH4__)
-#if 0 /* for the time being... */
- /* We store the FPU info in the task->thread area. */
- if (! (regs->sr & SR_FD)) {
-	memcpy (r, &current->thread.fpu, sizeof (*r));
- return 1;
- }
-#endif
-#endif
+ int fpvalid;
+ struct task_struct *tsk = current;
+
+ fpvalid = tsk->used_math;
+ if (fpvalid) {
+ unlazy_fpu(tsk);
+ memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ }
+
+ return fpvalid;
+#else
return 0; /* Task didn't use the fpu at all. */
+#endif
}
asmlinkage void ret_from_fork(void);
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
+ struct task_struct *tsk = current;
childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p)) - 1;
- *childregs = *regs;
+ struct_cpy(childregs, regs);
#if defined(__SH4__)
-#if 0 /* for the time being... */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- sh4_save_fp (p);
+ if (tsk != &init_task) {
+ unlazy_fpu(tsk);
+		struct_cpy(&p->thread.fpu, &current->thread.fpu);
+ p->used_math = tsk->used_math;
}
- /* New tasks loose permission to use the fpu. This accelerates context
- switching for most programs since they don't use the fpu. */
- p->thread.sr = (read_control_register (sr) &~ SR_MD) | SR_FD;
- childregs->sr |= SR_FD;
-#endif
#endif
if (user_mode(regs)) {
childregs->sp = usp;
childregs->sp = (unsigned long)p+2*PAGE_SIZE;
}
childregs->regs[0] = 0; /* Set return value for child */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
*/
void dump_thread(struct pt_regs * regs, struct user * dump)
{
-/* changed the size calculations - should hopefully work better. lbt */
dump->magic = CMAGIC;
dump->start_code = current->mm->start_code;
dump->start_data = current->mm->start_data;
dump->regs = *regs;
-#if 0 /* defined(__SH4__) */
- /* FPU */
-	memcpy (&dump->regs[EF_SIZE/4], &current->thread.fpu,
- sizeof (current->thread.fpu));
-#endif
+ dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}
/*
*/
void __switch_to(struct task_struct *prev, struct task_struct *next)
{
+#if defined(__SH4__)
+ if (prev != &init_task)
+ unlazy_fpu(prev);
+#endif
/*
* Restore the kernel stack onto kernel mode register
* k4 (r4_bank1)
*/
- asm volatile("ldc %0,r4_bank"
+ asm volatile("ldc %0, $r4_bank"
: /* no output */
:"r" ((unsigned long)next+8192));
}
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
+
 	error = do_execve(filename, uargv, uenvp, &regs);
if (error == 0)
current->flags &= ~PF_DTRACE;
unlock_kernel();
return error;
}
+
+/*
+ * These bracket the sleeping functions..
+ */
+extern void scheduling_functions_start_here(void);
+extern void scheduling_functions_end_here(void);
+#define first_sched ((unsigned long) scheduling_functions_start_here)
+#define last_sched ((unsigned long) scheduling_functions_end_here)
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long schedule_frame;
+ unsigned long pc;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * The same comment as on the Alpha applies here, too ...
+ */
+ pc = thread_saved_pc(&p->thread);
+ if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
+ schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
+ return (unsigned long)((unsigned long *)schedule_frame)[1];
+ }
+ return pc;
+}
+
+asmlinkage void print_syscall(int x)
+{
+ unsigned long flags, sr;
+ asm("stc $sr, %0": "=r" (sr));
+ save_and_cli(flags);
+ printk("%c: %c %c, %c: SYSCALL\n", (x&63)+32,
+ (current->flags&PF_USEDFPU)?'C':' ',
+ (init_task.flags&PF_USEDFPU)?'K':' ', (sr&SR_FD)?' ':'F');
+ restore_flags(flags);
+}
*/
#include <linux/sched.h>
+#include <linux/wait.h>
+#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
/*
{
return waking_non_zero_trylock(sem);
}
+
+/* Called when someone has done an up that transitioned from
+ * negative to non-negative, meaning that the lock has been
+ * granted to whomever owned the bias.
+ */
+struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
+{
+ if (xchg(&sem->read_bias_granted, 1))
+ BUG();
+ wake_up(&sem->wait);
+ return sem;
+}
+
+struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
+{
+ if (xchg(&sem->write_bias_granted, 1))
+ BUG();
+ wake_up(&sem->write_bias_wait);
+ return sem;
+}
+
+struct rw_semaphore * __rwsem_wake(struct rw_semaphore *sem)
+{
+ if (atomic_read(&sem->count) == 0)
+ return rwsem_wake_writer(sem);
+ else
+ return rwsem_wake_readers(sem);
+}
+
+struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */
+
+ for (;;) {
+ if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (!sem->read_bias_granted)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
+
+ for (;;) {
+ if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (!sem->write_bias_granted)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->write_bias_wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /* if the lock is currently unbiased, awaken the sleepers
+ * FIXME: this wakes up the readers early in a bit of a
+ * stampede -> bad!
+ */
+ if (atomic_read(&sem->count) >= 0)
+ wake_up(&sem->wait);
+
+ return sem;
+}
+
+/* Wait for the lock to become unbiased. Readers
+ * are non-exclusive. =)
+ */
+struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ __up_read(sem); /* this takes care of granting the lock */
+
+ add_wait_queue(&sem->wait, &wait);
+
+ while (atomic_read(&sem->count) < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&sem->count) >= 0)
+ break;
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ __up_write(sem); /* this takes care of granting the lock */
+
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (atomic_read(&sem->count) < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (atomic_read(&sem->count) >= 0)
+			break;	/* we must attempt to acquire or bias the lock */
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry)
+{
+ if (carry) {
+ int saved, new;
+
+ do {
+ down_read_failed(sem);
+ saved = atomic_read(&sem->count);
+ if ((new = atomic_dec_return(&sem->count)) >= 0)
+ return sem;
+ } while (!(new < 0 && saved >=0));
+ }
+
+ return down_read_failed_biased(sem);
+}
+
+struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry)
+{
+ if (carry) {
+ int saved, new;
+
+ do {
+ down_write_failed(sem);
+ saved = atomic_read(&sem->count);
+ if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count) ) == 0)
+ return sem;
+ } while (!(new < 0 && saved >=0));
+ }
+
+ return down_write_failed_biased(sem);
+}
-/* $Id: setup.c,v 1.7 1999/10/23 01:34:50 gniibe Exp gniibe $
+/* $Id: setup.c,v 1.20 2000/03/05 02:44:41 gniibe Exp $
*
* linux/arch/sh/kernel/setup.c
*
extern int rd_image_start; /* starting block # of image */
#endif
+extern void fpu_init(void);
extern int root_mountflags;
extern int _text, _etext, _edata, _end;
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
- /*
- * partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(&_end)-__MEMORY_START);
-
/*
* Find the highest page frame number we have available
*/
- max_pfn = PFN_DOWN(__pa(memory_end)-__MEMORY_START);
+ max_pfn = PFN_DOWN(__pa(memory_end));
/*
* Determine low and high memory ranges:
*/
max_low_pfn = max_pfn;
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(&_end));
/*
- * Initialize the boot-time allocator (with low memory only):
- */
- bootmap_size = init_bootmem(start_pfn, max_low_pfn, __MEMORY_START);
-
- /*
- * FIXME: what about high memory?
+ * Find a proper area for the bootmem bitmap. After this
+ * bootstrap step all allocations (until the page allocator
+ * is intact) must be done via bootmem_alloc().
*/
- ram_resources[1].end = PFN_PHYS(max_low_pfn) + __MEMORY_START;
+ bootmap_size = init_bootmem_node(0, start_pfn,
+ __MEMORY_START>>PAGE_SHIFT,
+ max_low_pfn);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
{
- unsigned long curr_pfn, last_pfn, size;
+ unsigned long curr_pfn, last_pfn, pages;
/*
* We are rounding up the start address of usable memory:
*/
- curr_pfn = PFN_UP(0);
+ curr_pfn = PFN_UP(__MEMORY_START);
/*
* ... and at the end of the usable range downwards:
*/
- last_pfn = PFN_DOWN(memory_end-__MEMORY_START);
+ last_pfn = PFN_DOWN(__pa(memory_end));
if (last_pfn > max_low_pfn)
last_pfn = max_low_pfn;
- size = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ pages = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}
+
/*
* Reserve the kernel text and
- * Reserve the bootmem bitmap itself as well. We do this in two
- * steps (first step was init_bootmem()) because this catches
- * the (very unlikely) case of us accidentally initializing the
- * bootmem allocator with an invalid RAM area.
+	 * Reserve the bootmem bitmap. We do this in two steps (first step
+ * was init_bootmem()), because this catches the (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
*/
- reserve_bootmem(PAGE_SIZE, PFN_PHYS(start_pfn) + bootmap_size);
+ reserve_bootmem(__MEMORY_START+PAGE_SIZE, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - __MEMORY_START);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(0, PAGE_SIZE);
+ reserve_bootmem(__MEMORY_START, PAGE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
- if (LOADER_TYPE) {
+ if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE);
- initrd_start =
- INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
- initrd_end = initrd_start+INITRD_SIZE;
+ reserve_bootmem(INITRD_START+__MEMORY_START, INITRD_SIZE);
+ initrd_start =
+ INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+ initrd_end = initrd_start + INITRD_SIZE;
} else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- INITRD_START + INITRD_SIZE,
- max_low_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
- }
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
#endif
#if 0
conswitchp = &dummy_con;
#endif
#endif
+
+#if defined(__SH4__)
+ init_task.used_math = 1;
+ init_task.flags |= PF_USEDFPU;
+ grab_fpu();
+ fpu_init();
+#endif
+ paging_init();
}
/*
-/* $Id: signal.c,v 1.10 1999/09/27 23:25:44 gniibe Exp $
+/* $Id: signal.c,v 1.16 2000/01/29 11:31:31 gniibe Exp gniibe $
*
* linux/arch/sh/kernel/signal.c
*
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
-		if (do_signal(&regs,&saveset))
+		if (do_signal(&regs, &saveset))
return -EINTR;
}
}
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
-
 	spin_lock_irq(&current->sigmask_lock);
saveset = current->blocked;
current->blocked = newset;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
+
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
+
 	spin_lock_irq(&current->sigmask_lock);
current->blocked = set;
recalc_sigpending(current);
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
+
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
current->blocked = set;
recalc_sigpending(current);
 	spin_unlock_irq(&current->sigmask_lock);
-
+
 	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
} else {
- /* This is ; mov #__NR_sigreturn,r0 ; trapa #0 */
+ /* This is : mov #__NR_sigreturn,r0 ; trapa #0 */
#ifdef __LITTLE_ENDIAN__
unsigned long code = 0xc300e000 | (__NR_sigreturn);
#else
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
} else {
- /* This is ; mov #__NR_sigreturn,r0 ; trapa #0 */
+ /* This is : mov #__NR_rt_sigreturn,r0 ; trapa #0 */
#ifdef __LITTLE_ENDIAN__
- unsigned long code = 0xc300e000 | (__NR_sigreturn);
+ unsigned long code = 0xc300e000 | (__NR_rt_sigreturn);
#else
- unsigned long code = 0xe000c300 | (__NR_sigreturn << 16);
+ unsigned long code = 0xe000c300 | (__NR_rt_sigreturn << 16);
#endif
regs->pr = (unsigned long) frame->retcode;
siginfo_t info;
struct k_sigaction *ka;
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
if (!oldset)
 		oldset = &current->blocked;
/* NOTREACHED */
}
}
+
/* Whee! Actually deliver the signal. */
handle_signal(signr, ka, &info, oldset, regs);
return 1;
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
-asmlinkage int sys_pipe(unsigned long * fildes)
+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
{
int fd[2];
int error;
error = do_pipe(fd);
unlock_kernel();
if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
+ regs.regs[1] = fd[1];
+ return fd[0];
}
return error;
}
-asmlinkage unsigned long
-sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, int fd, unsigned long off)
+static inline long
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, int fd, unsigned long pgoff)
{
- int error = -EFAULT;
+ int error = -EBADF;
struct file *file = NULL;
-	down(&current->mm->mmap_sem);
- lock_kernel();
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
- error = -EBADF;
file = fget(fd);
if (!file)
goto out;
}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- error = do_mmap(file, addr, len, prot, flags, off);
- if (file)
- fput(file);
-out:
+	down(&current->mm->mmap_sem);
+ lock_kernel();
+
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
unlock_kernel();
 	up(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
return error;
}
+asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ int fd, unsigned long off)
+{
+ if (off & ~PAGE_MASK)
+ return -EINVAL;
+ return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
* This is really horribly ugly.
*/
-asmlinkage int sys_ipc (uint call, int first, int second,
- int third, void *ptr, long fifth)
+asmlinkage int sys_ipc(uint call, int first, int second,
+ int third, void *ptr, long fifth)
{
int version, ret;
-/* $Id: time.c,v 1.7 1999/11/06 02:00:37 gniibe Exp $
+/* $Id: time.c,v 1.20 2000/02/28 12:42:51 gniibe Exp $
*
* linux/arch/sh/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
-#include <linux/config.h>
-
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#define TMU0_TCNT 0xfffffe98 /* Long access */
#define TMU0_TCR 0xfffffe9c /* Word access */
-#define INTERVAL 37500 /* (1000000*CLOCK_MHZ/HZ/2) ??? for CqREEK */
-#if 0 /* Takeshi's board */
-#define INTERVAL 83333
-#endif
+#define FRQCR 0xffffff80
+
+#define RTC_IRQ 22
+#define RTC_IPR_OFFSET 0
/* SH-3 RTC */
#define R64CNT 0xfffffec0
#define TMU0_TCNT 0xffd8000c /* Long access */
#define TMU0_TCR 0xffd80010 /* Word access */
-#define INTERVAL 83333
+#define FRQCR 0xffc00000
+
+#define RTC_IRQ 22
+#define RTC_IPR_OFFSET 0
/* SH-4 RTC */
#define R64CNT 0xffc80000
static int set_rtc_time(unsigned long nowtime)
{
-#ifdef CONFIG_SH_CPU_RTC
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
- ctrl_outb(2, RCR2); /* reset pre-scaler & stop RTC */
+ ctrl_outb(0x02, RCR2); /* reset pre-scaler & stop RTC */
cmos_minutes = ctrl_inb(RMINCNT);
BCD_TO_BIN(cmos_minutes);
retval = -1;
}
- ctrl_outb(2, RCR2); /* start RTC */
+ ctrl_outb(0x01, RCR2); /* start RTC */
return retval;
-#else
- /* XXX should support other clock devices? */
- return -1;
-#endif
}
/* last time the RTC clock got updated */
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
-
#ifdef TAKESHI
{
unsigned long what_is_this=0xa4000124;
* locally disabled. -arca
*/
write_lock(&xtime_lock);
-
do_timer_interrupt(irq, NULL, regs);
-
write_unlock(&xtime_lock);
}
static unsigned long get_rtc_time(void)
{
-#ifdef CONFIG_SH_CPU_RTC
unsigned int sec, min, hr, wk, day, mon, yr, yr100;
again:
- ctrl_outb(1, RCR1); /* clear CF bit */
+ ctrl_outb(0x01, RCR1); /* clear CF bit */
do {
sec = ctrl_inb(RSECCNT);
min = ctrl_inb(RMINCNT);
hr > 23 || min > 59 || sec > 59) {
printk(KERN_ERR
"SH RTC: invalid value, resetting to 1 Jan 2000\n");
- ctrl_outb(2, RCR2); /* reset, stop */
+ ctrl_outb(0x02, RCR2); /* reset, stop */
ctrl_outb(0, RSECCNT);
ctrl_outb(0, RMINCNT);
ctrl_outb(0, RHRCNT);
#else
ctrl_outb(0, RYRCNT);
#endif
- ctrl_outb(1, RCR2); /* start */
+ ctrl_outb(0x01, RCR2); /* start */
goto again;
}
return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
+}
+
+static __init unsigned int get_cpu_mhz(void)
+{
+ unsigned int count;
+ unsigned long __dummy;
+
+ sti();
+ do {} while (ctrl_inb(R64CNT) != 0);
+ ctrl_outb(0x11, RCR1);
+ asm volatile(
+ "1:\t"
+ "tst %1,%1\n\t"
+ "bt/s 1b\n\t"
+ " add #1,%0"
+ : "=&r"(count), "=&z" (__dummy)
+ : "0" (0), "1" (0));
+ cli();
+ /*
+ * SH-3:
+ * CPU clock = 4 stages * loop
+ * tst rm,rm if id ex
+ * bt/s 1b if id ex
+ * add #1,rd if id ex
+ * (if) pipe line stole
+ * tst rm,rm if id ex
+ * ....
+ *
+ *
+ * SH-4:
+ * CPU clock = 6 stages * loop
+ * I don't know why.
+ * ....
+ */
+#if defined(__SH4__)
+ return count*6;
#else
- /* XXX should support other clock devices? */
- return 0;
+ return count*4;
#endif
}
+static void rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ ctrl_outb(0x01, RCR1);
+ regs->regs[0] = 1;
+}
+
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
+static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, 0, "rtc", NULL, NULL};
void __init time_init(void)
{
+ unsigned int cpu_clock, master_clock, module_clock;
+ unsigned short ifc, pfc;
+ unsigned long interval;
+#if defined(__sh3__)
+ static int ifc_table[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
+ static int pfc_table[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+#elif defined(__SH4__)
+ static int ifc_table[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
+ static int pfc_table[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
+#endif
+
xtime.tv_sec = get_rtc_time();
xtime.tv_usec = 0;
- set_ipr_data(TIMER_IRQ, TIMER_IRP_OFFSET, TIMER_PRIORITY);
+ set_ipr_data(TIMER_IRQ, TIMER_IPR_OFFSET, TIMER_PRIORITY);
setup_irq(TIMER_IRQ, &irq0);
+ set_ipr_data(RTC_IRQ, RTC_IPR_OFFSET, TIMER_PRIORITY);
+ setup_irq(RTC_IRQ, &irq1);
- /* Start TMU0 */
- ctrl_outb(TMU_TOCR_INIT,TMU_TOCR);
- ctrl_outw(TMU0_TCR_INIT,TMU0_TCR);
- ctrl_outl(INTERVAL,TMU0_TCOR);
- ctrl_outl(INTERVAL,TMU0_TCNT);
- ctrl_outb(TMU_TSTR_INIT,TMU_TSTR);
+ /* Check how fast it is.. */
+ cpu_clock = get_cpu_mhz();
+ disable_irq(RTC_IRQ);
-#if 0
- /* Start RTC */
- asm volatile("");
+ printk("CPU clock: %d.%02dMHz\n",
+ (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
+#if defined(__sh3__)
+ {
+ unsigned short tmp;
+ tmp = (ctrl_inw(FRQCR) & 0x000c) >> 2;
+ tmp |= (ctrl_inw(FRQCR) & 0x4000) >> 12;
+ ifc = ifc_table[tmp & 0x0007];
+ tmp = ctrl_inw(FRQCR) & 0x0003;
+ tmp |= (ctrl_inw(FRQCR) & 0x2000) >> 11;
+ pfc = pfc_table[ctrl_inw(FRQCR) & 0x0007];
+ }
+#elif defined(__SH4__)
+ ifc = ifc_table[(ctrl_inw(FRQCR)>> 6) & 0x0007];
+ pfc = pfc_table[ctrl_inw(FRQCR) & 0x0007];
#endif
+ master_clock = cpu_clock * ifc;
+ module_clock = master_clock/pfc;
+ printk("Module clock: %d.%02dMHz\n",
+ (module_clock/1000000), (module_clock % 1000000)/10000);
+ interval = (module_clock/400);
+
+ printk("Interval = %ld\n", interval);
+
+ /* Start TMU0 */
+ ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
+ ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+ ctrl_outl(interval, TMU0_TCOR);
+ ctrl_outl(interval, TMU0_TCNT);
+ ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
-/* $Id: traps.c,v 1.3 1999/09/21 14:37:19 gniibe Exp $
+/* $Id: traps.c,v 1.5 2000/02/27 08:27:55 gniibe Exp $
*
* linux/arch/sh/traps.c
*
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
+#include <asm/processor.h>
static inline void console_verbose(void)
{
{ \
unsigned long error_code; \
\
- asm volatile("stc r2_bank,%0": "=r" (error_code)); \
+ asm volatile("stc $r2_bank, %0": "=r" (error_code)); \
sti(); \
regs.syscall_nr = -1; \
tsk->thread.error_code = error_code; \
struct pt_regs regs)
{
long ex;
- asm volatile("stc r2_bank,%0" : "=r" (ex));
+ asm volatile("stc $r2_bank, %0" : "=r" (ex));
die_if_kernel("exception", ®s, ex);
}
(or P2, virtural "fixed" address space).
It's definitely should not in physical address. */
- asm volatile("ldc %0,vbr"
+ asm volatile("ldc %0, $vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
}
+
+void dump_stack(void)
+{
+ unsigned long *start;
+ unsigned long *end;
+ unsigned long *p;
+
+ asm("mov $r15, %0" : "=r" (start));
+ asm("stc $r4_bank, %0" : "=r" (end));
+
+ printk("%08lx:%08lx\n", (unsigned long)start, (unsigned long)end);
+ for (p=start; p < end; p++)
+ printk("%08lx\n", *p);
+}
-/* $Id: cache.c,v 1.7 1999/09/23 11:43:07 gniibe Exp $
+/* $Id: cache.c,v 1.9 2000/02/14 12:45:26 gniibe Exp $
*
* linux/arch/sh/mm/cache.c
*
}
#if defined(__SH4__)
-/* Write back data caches, and invalidates instructiin caches */
+void flush_icache_page(struct vm_area_struct *vma, struct page *pg)
+{
+ unsigned long flags, __dummy;
+ unsigned long addr, data, v;
+
+ save_and_cli(flags);
+ jump_to_p2(__dummy);
+
+ v = page_address(pg);
+
+ /* Write back O Cache */
+ asm volatile("ocbwb %0"
+ : /* no output */
+ : "m" (__m(v)));
+ /* Invalidate I Cache */
+ addr = CACHE_IC_ADDRESS_ARRAY |
+ (v&CACHE_IC_ENTRY_MASK) | 0x8 /* A-bit */;
+ data = (v&0xfffffc00); /* Valid=0 */
+ ctrl_outl(data,addr);
+
+ back_to_p1(__dummy);
+ restore_flags(flags);
+}
+
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long flags, __dummy;
flush_cache_range(vma->vm_mm, addr, addr+PAGE_SIZE);
}
-void flush_page_to_ram(unsigned long page)
+void __flush_page_to_ram(unsigned long page)
{ /* Page is in physical address */
/* XXX: for the time being... */
flush_cache_all();
-/* $Id: fault.c,v 1.5 1999/10/31 13:17:31 gniibe Exp $
+/* $Id: fault.c,v 1.12 2000/03/01 11:15:27 gniibe Exp $
*
* linux/arch/sh/mm/fault.c
* Copyright (C) 1999 Niibe Yutaka
#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
- page = (unsigned long)mm->pgd;
- page = ((unsigned long *) __va(page))[address >> 22];
+ asm volatile("mov.l %1,%0"
+ : "=r" (page)
+ : "m" (__m(MMU_TTB)));
+ page = ((unsigned long *) page)[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
- if (page & 1) {
+ if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
{
unsigned long flags;
unsigned long pteval;
+ unsigned long pteaddr;
save_and_cli(flags);
/*
pteval |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */
/* Set PTEL register */
ctrl_outl(pteval, MMU_PTEL);
+ /* Set PTEH register */
+ pteaddr = (address & MMU_VPN_MASK) | (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
+ ctrl_outl(pteaddr, MMU_PTEH);
/* Load the TLB */
asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
{
unsigned long addr, data, asid;
unsigned long saved_asid = MMU_NO_ASID;
+#if defined(__SH4__)
+ int i;
+#endif
if (mm->context == NO_CONTEXT)
return;
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
ctrl_outl(data, addr);
#elif defined(__SH4__)
- int i;
-
addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
data = page | asid; /* VALID bit is off */
ctrl_outl(data, addr);
for (i=0; i<4; i++) {
addr = MMU_ITLB_ADDRESS_ARRAY | (i<<8);
data = ctrl_inl(addr);
- data &= ~0x30;
+ data &= ~0x300;
if (data == (page | asid)) {
ctrl_outl(data, addr);
break;
-/* $Id: init.c,v 1.4 1999/10/23 01:37:02 gniibe Exp gniibe $
+/* $Id: init.c,v 1.16 2000/02/14 15:19:05 gniibe Exp $
*
* linux/arch/sh/mm/init.c
*
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
+#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
void __handle_bad_pmd(pmd_t *pmd)
{
pmd_ERROR(*pmd);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
}
void __handle_bad_pmd_kernel(pmd_t *pmd)
{
pmd_ERROR(*pmd);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
}
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
if (pmd_none(*pmd)) {
if (pte) {
clear_page(pte);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
return pte + offset;
}
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(get_bad_pte_table())));
return NULL;
}
free_page((unsigned long)pte);
if (pmd_none(*pmd)) {
if (pte) {
clear_page((void *)pte);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(pte);
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));
return (pte_t *)pte + offset;
}
- pmd_val(*pmd) = _PAGE_TABLE + __pa(get_bad_pte_table());
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(get_bad_pte_table())));
return NULL;
}
free_page(pte);
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
- if(pgtable_cache_size > high) {
+ if (pgtable_cache_size > high) {
do {
- if(pgd_quicklist)
+ if (pgd_quicklist)
free_pgd_slow(get_pgd_fast()), freed++;
- if(pmd_quicklist)
+ if (pmd_quicklist)
free_pmd_slow(get_pmd_fast()), freed++;
- if(pte_quicklist)
+ if (pte_quicklist)
free_pte_slow(get_pte_fast()), freed++;
- } while(pgtable_cache_size > low);
+ } while (pgtable_cache_size > low);
}
return freed;
}
pgd_t swapper_pg_dir[1024];
+/* It'd be good if these lines were in the standard header file. */
+#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
+#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
+
/*
* paging_init() sets up the page tables
*
mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
- free_area_init(max_low_pfn);
+ {
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned long max_dma, low, start_pfn;
+
+ start_pfn = START_PFN;
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ low = MAX_LOW_PFN;
+
+ if (low < max_dma)
+ zones_size[ZONE_DMA] = low - start_pfn;
+ else {
+ zones_size[ZONE_DMA] = max_dma - start_pfn;
+ zones_size[ZONE_NORMAL] = low - max_dma;
+ }
+ free_area_init_node(0, 0, zones_size, __MEMORY_START);
+ }
}
void __init mem_init(void)
{
- int codepages = 0;
- int reservedpages = 0;
- int datapages = 0;
- int initpages = 0;
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
- max_mapnr = num_physpages = max_low_pfn;
- high_memory = (void *) ((unsigned long)__va(max_low_pfn * PAGE_SIZE)+__MEMORY_START);
+ max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
+ high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
+ reservedpages = 0;
+ for (tmp = 0; tmp < num_physpages; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (PageReserved(mem_map+tmp))
+ reservedpages++;
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
- (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
+ codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10));
+ datasize >> 10,
+ initsize >> 10);
}
void free_initmem(void)
printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ unsigned long p;
+ for (p = start; p < end; p += PAGE_SIZE) {
+ ClearPageReserved(mem_map + MAP_NR(p));
+ set_page_count(mem_map+MAP_NR(p), 1);
+ free_page(p);
+ totalram_pages++;
+ }
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+}
+#endif
+
void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
val->sharedram = 0;
- val->freeram = nr_free_pages;
+ val->freeram = nr_free_pages();
val->bufferram = atomic_read(&buffermem_pages);
val->totalhigh = totalhigh_pages;
- val->freehigh = nr_free_highpages;
+ val->freehigh = nr_free_highpages();
val->mem_unit = PAGE_SIZE;
return;
}
-/* $Id: ioremap.c,v 1.1 1999/09/18 16:57:48 gniibe Exp $
+/* $Id: ioremap.c,v 1.2 1999/11/25 14:00:28 gniibe Exp $
*
* arch/sh/mm/ioremap.c
*
#include <linux/vmalloc.h>
#include <asm/io.h>
+#include <asm/pgalloc.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
-/* $Id: vmlinux.lds.S,v 1.3 1999/10/05 12:33:48 gniibe Exp $
+/* $Id: vmlinux.lds.S,v 1.4 1999/12/23 11:37:45 gniibe Exp $
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka
*/
SECTIONS
{
. = 0x80000000 + CONFIG_MEMORY_START + 0x1000;
- __text = .; /* Text and read-only data */
_text = .; /* Text and read-only data */
+ text = .; /* Text and read-only data */
.text : {
*(.empty_zero_page)
*(.text)
.kstrtab : { *(.kstrtab) }
. = ALIGN(16); /* Exception table */
- ___start___ex_table = .;
- ___ex_table : { *(__ex_table) }
- ___stop___ex_table = .;
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
- ___start___ksymtab = .; /* Kernel symbol table */
- ___ksymtab : { *(__ksymtab) }
- ___stop___ksymtab = .;
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
- __etext = .; /* End of text section */
+ _etext = .; /* End of text section */
.data : { /* Data */
*(.data)
CONSTRUCTORS
}
- __edata = .; /* End of data section */
+ _edata = .; /* End of data section */
. = ALIGN(8192); /* init_task */
.data.init_task : { *(.data.init_task) }
/* stack */
- .stack : { _stack = .; __stack = .; }
+ .stack : { stack = .; _stack = .; }
. = ALIGN(4096); /* Init code and data */
- ___init_begin = .;
+ __init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
. = ALIGN(16);
- ___setup_start = .;
+ __setup_start = .;
.setup.init : { *(.setup.init) }
- ___setup_end = .;
- ___initcall_start = .;
+ __setup_end = .;
+ __initcall_start = .;
.initcall.init : { *(.initcall.init) }
- ___initcall_end = .;
+ __initcall_end = .;
. = ALIGN(4096);
- ___init_end = .;
+ __init_end = .;
. = ALIGN(4096);
.data.page_aligned : { *(.data.idt) }
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
. = ALIGN(4);
- ___bss_start = .; /* BSS */
+ __bss_start = .; /* BSS */
.bss : {
*(.bss)
}
. = ALIGN(4);
- __end = . ;
+ _end = . ;
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
SERIAL =
endif
+ifeq ($(ARCH),sh)
+ KEYMAP =
+ KEYBD =
+ CONSOLE =
+ SERIAL =
+ ifeq ($(CONFIG_SERIAL),y)
+ SERIAL = generic_serial.o sh-sci.o
+ else
+ ifeq ($(CONFIG_SERIAL),m)
+ SERIAL = sh-sci.o
+ endif
+ endif
+endif
+
ifeq ($(CONFIG_DECSTATION),y)
KEYBD =
SERIAL =
--- /dev/null
+/* $Id: sh-sci.c,v 1.32 2000-03-05 13:56:18+09 gniibe Exp $
+ *
+ * linux/drivers/char/sh-sci.c
+ *
+ * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ *
+ * TTY code is based on sx.c (Specialix SX driver) by:
+ *
+ * (C) 1998 R.E.Wolff@BitWizard.nl
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+
+#include "generic_serial.h"
+#include "sh-sci.h"
+
+#ifdef CONFIG_DEBUG_KERNEL_WITH_GDB_STUB
+static void gdb_detach(void);
+#endif
+
+struct sci_port sci_ports[1];
+
+/* Function prototypes */
+static void sci_disable_tx_interrupts(void *ptr);
+static void sci_enable_tx_interrupts(void *ptr);
+static void sci_disable_rx_interrupts(void *ptr);
+static void sci_enable_rx_interrupts(void *ptr);
+static int sci_get_CD(void *ptr);
+static void sci_shutdown_port(void *ptr);
+static void sci_set_real_termios(void *ptr);
+static void sci_hungup(void *ptr);
+static void sci_close(void *ptr);
+static int sci_chars_in_buffer(void *ptr);
+static int sci_init_drivers(void);
+
+static struct tty_driver sci_driver, sci_callout_driver;
+
+#define SCI_NPORTS 1
+static struct tty_struct *sci_table[SCI_NPORTS] = { NULL, };
+static struct termios *sci_termios[2]; /* normal, locked */
+
+int sci_refcount;
+int sci_debug = 0;
+
+#ifdef MODULE
+MODULE_PARM(sci_debug, "i");
+#endif
+
+static struct real_driver sci_real_driver = {
+ sci_disable_tx_interrupts,
+ sci_enable_tx_interrupts,
+ sci_disable_rx_interrupts,
+ sci_enable_rx_interrupts,
+ sci_get_CD,
+ sci_shutdown_port,
+ sci_set_real_termios,
+ sci_chars_in_buffer,
+ sci_close,
+ sci_hungup,
+ NULL
+};
+
+static void sci_setsignals(struct sci_port *port, int dtr, int rts)
+{
+ /* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
+ /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
+ /* If you have signals for DTR and DCD, please implement here. */
+ ;
+}
+
+static int sci_getsignals(struct sci_port *port)
+{
+ /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
+ and CTS/RTS */
+
+ return TIOCM_DTR|TIOCM_RTS|TIOCM_DSR;
+/*
+ (((o_stat & OP_DTR)?TIOCM_DTR:0) |
+ ((o_stat & OP_RTS)?TIOCM_RTS:0) |
+ ((i_stat & IP_CTS)?TIOCM_CTS:0) |
+ ((i_stat & IP_DCD)?TIOCM_CAR:0) |
+ ((i_stat & IP_DSR)?TIOCM_DSR:0) |
+ ((i_stat & IP_RI) ?TIOCM_RNG:0)
+*/
+}
+
+static void sci_set_baud(struct sci_port *port)
+{
+ int t;
+
+ switch (port->gs.baud) {
+ case 0:
+ t = -1;
+ break;
+ case 2400:
+ t = BPS_2400;
+ break;
+ case 4800:
+ t = BPS_4800;
+ break;
+ case 9600:
+ t = BPS_9600;
+ break;
+ case 19200:
+ t = BPS_19200;
+ break;
+ case 38400:
+ t = BPS_38400;
+ break;
+ default:
+ printk(KERN_INFO "sci: unsupported baud rate: %d, use 115200 instead.\n", port->gs.baud);
+ case 115200:
+ t = BPS_115200;
+ break;
+ }
+
+ if (t > 0) {
+ sci_setsignals (port, 1, -1);
+ ctrl_outb(t, SCBRR);
+ ctrl_outw(0xa400, RFCR); /* Refresh counter clear */
+ while (ctrl_inw(RFCR) < WAIT_RFCR_COUNTER)
+ ;
+ } else {
+ sci_setsignals (port, 0, -1);
+ }
+}
+
+static void sci_set_termios_cflag(struct sci_port *port)
+{
+ unsigned short status;
+ unsigned short smr_val=0;
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ unsigned short fcr_val=6; /* TFRST=1, RFRST=1 */
+#endif
+
+ do
+ status = ctrl_in(SC_SR);
+ while (!(status & SCI_TEND));
+
+ port->old_cflag = port->gs.tty->termios->c_cflag;
+
+ ctrl_out(0x00, SCSCR); /* TE=0, RE=0, CKE1=0 */
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ ctrl_out(fcr_val, SCFCR);
+ fcr_val = 0;
+#endif
+
+ if ((port->gs.tty->termios->c_cflag & CSIZE) == CS7)
+ smr_val |= 0x40;
+ if (C_PARENB(port->gs.tty))
+ smr_val |= 0x20;
+ if (C_PARODD(port->gs.tty))
+ smr_val |= 0x10;
+ if (C_CSTOPB(port->gs.tty))
+ smr_val |= 0x08;
+ ctrl_out(smr_val, SCSMR);
+
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ if (C_CRTSCTS(port->gs.tty))
+ fcr_val |= 0x08;
+ ctrl_out(fcr_val, SCFCR);
+#endif
+
+ sci_set_baud(port);
+ ctrl_out(SCSCR_INIT, SCSCR); /* TIE=0,RIE=0,TE=1,RE=1 */
+#if 0 /* defined(CONFIG_SH_SCIF_SERIAL) */
+ ctrl_outw(0x0080, SCSPTR); /* Set RTS = 1 */
+#endif
+ sci_enable_rx_interrupts(port);
+}
+
+static void sci_set_real_termios(void *ptr)
+{
+ struct sci_port *port = ptr;
+
+ if (port->old_cflag != port->gs.tty->termios->c_cflag)
+ sci_set_termios_cflag(port);
+
+ /* Tell line discipline whether we will do input cooking */
+ if (I_OTHER(port->gs.tty))
+ clear_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
+ else
+ set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
+
+/* Tell line discipline whether we will do output cooking.
+ * If OPOST is set and no other output flags are set then we can do output
+ * processing. Even if only *one* other flag in the O_OTHER group is set
+ * we do cooking in software.
+ */
+ if (O_OPOST(port->gs.tty) && !O_OTHER(port->gs.tty))
+ set_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
+ else
+ clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
+}
+
+/* ********************************************************************** *
+ * the interrupt related routines *
+ * ********************************************************************** */
+
+static void sci_transmit_chars(struct sci_port *port)
+{
+ int count, i;
+ int txroom;
+ unsigned long flags;
+ unsigned short status;
+ unsigned short ctrl;
+ unsigned char c;
+
+ status = ctrl_in(SC_SR);
+ if (!(status & SCI_TD_E)) {
+ save_and_cli(flags);
+ ctrl = ctrl_in(SCSCR);
+ if (port->gs.xmit_cnt == 0) {
+ ctrl &= ~SCI_CTRL_FLAGS_TIE;
+ port->gs.flags &= ~GS_TX_INTEN;
+ } else
+ ctrl |= SCI_CTRL_FLAGS_TIE;
+ ctrl_out(ctrl, SCSCR);
+ restore_flags(flags);
+ return;
+ }
+
+ while (1) {
+ count = port->gs.xmit_cnt;
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ txroom = 16 - (ctrl_inw(SCFDR)>>8);
+#else
+ txroom = (ctrl_in(SC_SR)&SCI_TD_E)?1:0;
+#endif
+ if (count > txroom)
+ count = txroom;
+
+ /* Don't copy past the end of the source buffer */
+ if (count > SERIAL_XMIT_SIZE - port->gs.xmit_tail)
+ count = SERIAL_XMIT_SIZE - port->gs.xmit_tail;
+
+ /* If for one reason or another, we can't copy more data, we're done! */
+ if (count == 0)
+ break;
+
+ for (i=0; i<count; i++) {
+ c = port->gs.xmit_buf[port->gs.xmit_tail + i];
+ ctrl_outb(c, SC_TDR);
+ }
+ ctrl_out(SCI_TD_E_CLEAR, SC_SR);
+
+ /* Update the kernel buffer end */
+ port->gs.xmit_tail = (port->gs.xmit_tail + count) & (SERIAL_XMIT_SIZE-1);
+
+ /* This one last. (this is essential)
+ It would allow others to start putting more data into the buffer! */
+ port->gs.xmit_cnt -= count;
+ }
+
+ if (port->gs.xmit_cnt <= port->gs.wakeup_chars) {
+ if ((port->gs.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ port->gs.tty->ldisc.write_wakeup)
+ (port->gs.tty->ldisc.write_wakeup)(port->gs.tty);
+ wake_up_interruptible(&port->gs.tty->write_wait);
+ }
+
+ save_and_cli(flags);
+ ctrl = ctrl_in(SCSCR);
+ if (port->gs.xmit_cnt == 0) {
+ ctrl &= ~SCI_CTRL_FLAGS_TIE;
+ port->gs.flags &= ~GS_TX_INTEN;
+ } else {
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ ctrl_in(SC_SR); /* Dummy read */
+ ctrl_out(SCI_TD_E_CLEAR, SC_SR);
+#endif
+ ctrl |= SCI_CTRL_FLAGS_TIE;
+ }
+ ctrl_out(ctrl, SCSCR);
+ restore_flags(flags);
+}
+
+static inline void sci_receive_chars(struct sci_port *port)
+{
+ int i, count;
+ struct tty_struct *tty;
+ int copied=0;
+ unsigned short status;
+
+ status = ctrl_in(SC_SR);
+ if (!(status & SCI_RD_F))
+ return;
+
+ tty = port->gs.tty;
+ while (1) {
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ count = ctrl_inw(SCFDR)&0x001f;
+#else
+ count = (ctrl_in(SC_SR)&SCI_RD_F)?1:0;
+#endif
+
+ /* Don't copy more bytes than there is room for in the buffer */
+ if (tty->flip.count + count > TTY_FLIPBUF_SIZE)
+ count = TTY_FLIPBUF_SIZE - tty->flip.count;
+
+ /* If for one reason or another, we can't copy more data, we're done! */
+ if (count == 0)
+ break;
+
+ for (i=0; i<count; i++)
+ tty->flip.char_buf_ptr[i] = ctrl_inb(SC_RDR);
+ ctrl_in(SC_SR); /* dummy read */
+ ctrl_out(SCI_RDRF_CLEAR, SC_SR);
+
+ memset(tty->flip.flag_buf_ptr, TTY_NORMAL, count);
+
+ /* Update the kernel buffer end */
+ tty->flip.count += count;
+ tty->flip.char_buf_ptr += count;
+ tty->flip.flag_buf_ptr += count;
+
+ copied += count;
+ }
+
+ if (copied)
+ /* Tell the rest of the system the news. New characters! */
+ tty_flip_buffer_push(tty);
+}
+
+static void sci_rx_interrupt(int irq, void *ptr, struct pt_regs *regs)
+{
+ struct sci_port *port = ptr;
+
+ if (port->gs.flags & GS_ACTIVE)
+ if (!(port->gs.flags & SCI_RX_THROTTLE))
+ sci_receive_chars(port);
+}
+
+static void sci_tx_interrupt(int irq, void *ptr, struct pt_regs *regs)
+{
+ struct sci_port *port = ptr;
+
+ if (port->gs.flags & GS_ACTIVE)
+ if (port->gs.xmit_cnt) {
+ sci_transmit_chars(port);
+ }
+}
+
+static void sci_er_interrupt(int irq, void *ptr, struct pt_regs *regs)
+{
+ /* Handle errors */
+ if (ctrl_in(SC_SR) & SCI_ERRORS)
+ ctrl_out(SCI_ERROR_CLEAR, SC_SR);
+
+ /* Kick the transmission */
+ sci_tx_interrupt(irq, ptr, regs);
+}
+
+/* ********************************************************************** *
+ * Here are the routines that actually *
+ * interface with the generic_serial driver *
+ * ********************************************************************** */
+
+static void sci_disable_tx_interrupts(void *ptr)
+{
+ unsigned long flags;
+ unsigned short ctrl;
+
+ /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
+ save_and_cli(flags);
+ ctrl = ctrl_in(SCSCR);
+ ctrl &= ~SCI_CTRL_FLAGS_TIE;
+ ctrl_out(ctrl, SCSCR);
+ restore_flags(flags);
+}
+
+static void sci_enable_tx_interrupts(void *ptr)
+{
+ struct sci_port *port = ptr;
+
+ disable_irq(SCI_TXI_IRQ);
+ sci_transmit_chars(port);
+ enable_irq(SCI_TXI_IRQ);
+}
+
+static void sci_disable_rx_interrupts(void * ptr)
+{
+ unsigned long flags;
+ unsigned short ctrl;
+
+ /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
+ save_and_cli(flags);
+ ctrl = ctrl_in(SCSCR);
+ ctrl &= ~SCI_CTRL_FLAGS_RIE;
+ ctrl_out(ctrl, SCSCR);
+ restore_flags(flags);
+}
+
+static void sci_enable_rx_interrupts(void * ptr)
+{
+ unsigned long flags;
+ unsigned short ctrl;
+
+ /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
+ save_and_cli(flags);
+ ctrl = ctrl_in(SCSCR);
+ ctrl |= SCI_CTRL_FLAGS_RIE;
+ ctrl_out(ctrl, SCSCR);
+ restore_flags(flags);
+}
+
+static int sci_get_CD(void * ptr)
+{
+ /* If you have a signal for CD (Carrier Detect), please change this. */
+ return 1;
+}
+
+static int sci_chars_in_buffer(void * ptr)
+{
+#if defined(CONFIG_SH_SCIF_SERIAL)
+ return (ctrl_inw(SCFDR) >> 8) + ((ctrl_in(SC_SR) & SCI_TEND)? 0: 1);
+#else
+ return (ctrl_in(SC_SR) & SCI_TEND)? 0: 1;
+#endif
+}
+
+static void sci_shutdown_port(void * ptr)
+{
+ struct sci_port *port = ptr;
+
+ port->gs.flags &= ~ GS_ACTIVE;
+ if (port->gs.tty && port->gs.tty->termios->c_cflag & HUPCL)
+ sci_setsignals(port, 0, 0);
+}
+
+/* ********************************************************************** *
+ * Here are the routines that actually *
+ * interface with the rest of the system *
+ * ********************************************************************** */
+
+static int sci_open(struct tty_struct * tty, struct file * filp)
+{
+ struct sci_port *port;
+ int retval, line;
+
+ line = MINOR(tty->device) - SCI_MINOR_START;
+
+ if ((line < 0) || (line >= SCI_NPORTS))
+ return -ENODEV;
+
+ port = &sci_ports[line];
+
+ tty->driver_data = port;
+ port->gs.tty = tty;
+ port->gs.count++;
+
+ /*
+ * Start up serial port
+ */
+ retval = gs_init_port(&port->gs);
+ if (retval) {
+ port->gs.count--;
+ return retval;
+ }
+
+ port->gs.flags |= GS_ACTIVE;
+ sci_setsignals(port, 1,1);
+
+ if (port->gs.count == 1) {
+ MOD_INC_USE_COUNT;
+ }
+
+ retval = block_til_ready(port, filp);
+
+ if (retval) {
+ MOD_DEC_USE_COUNT;
+ port->gs.count--;
+ return retval;
+ }
+
+ if ((port->gs.count == 1) && (port->gs.flags & ASYNC_SPLIT_TERMIOS)) {
+ if (tty->driver.subtype == SERIAL_TYPE_NORMAL)
+ *tty->termios = port->gs.normal_termios;
+ else
+ *tty->termios = port->gs.callout_termios;
+ sci_set_real_termios(port);
+ }
+
+ sci_enable_rx_interrupts(port);
+
+ port->gs.session = current->session;
+ port->gs.pgrp = current->pgrp;
+
+ return 0;
+}
+
+static void sci_hungup(void *ptr)
+{
+ MOD_DEC_USE_COUNT;
+}
+
+static void sci_close(void *ptr)
+{
+ MOD_DEC_USE_COUNT;
+}
+
+static int sci_ioctl(struct tty_struct * tty, struct file * filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc;
+ struct sci_port *port = tty->driver_data;
+ int ival;
+
+ rc = 0;
+ switch (cmd) {
+ case TIOCGSOFTCAR:
+ rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
+ (unsigned int *) arg);
+ break;
+ case TIOCSSOFTCAR:
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(int))) == 0) {
+ get_user(ival, (unsigned int *) arg);
+ tty->termios->c_cflag =
+ (tty->termios->c_cflag & ~CLOCAL) |
+ (ival ? CLOCAL : 0);
+ }
+ break;
+ case TIOCGSERIAL:
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ gs_getserial(&port->gs, (struct serial_struct *) arg);
+ break;
+ case TIOCSSERIAL:
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct serial_struct))) == 0)
+ rc = gs_setserial(&port->gs,
+ (struct serial_struct *) arg);
+ break;
+ case TIOCMGET:
+ if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
+ sizeof(unsigned int))) == 0) {
+ ival = sci_getsignals(port);
+ put_user(ival, (unsigned int *) arg);
+ }
+ break;
+ case TIOCMBIS:
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(unsigned int))) == 0) {
+ get_user(ival, (unsigned int *) arg);
+ sci_setsignals(port, ((ival & TIOCM_DTR) ? 1 : -1),
+ ((ival & TIOCM_RTS) ? 1 : -1));
+ }
+ break;
+ case TIOCMBIC:
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(unsigned int))) == 0) {
+ get_user(ival, (unsigned int *) arg);
+ sci_setsignals(port, ((ival & TIOCM_DTR) ? 0 : -1),
+ ((ival & TIOCM_RTS) ? 0 : -1));
+ }
+ break;
+ case TIOCMSET:
+ if ((rc = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(unsigned int))) == 0) {
+ get_user(ival, (unsigned int *)arg);
+ sci_setsignals(port, ((ival & TIOCM_DTR) ? 1 : 0),
+ ((ival & TIOCM_RTS) ? 1 : 0));
+ }
+ break;
+
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+
+ return rc;
+}
+
+static void sci_throttle(struct tty_struct * tty)
+{
+ struct sci_port *port = (struct sci_port *)tty->driver_data;
+
+ /* If the port is using any type of input flow
+ * control then throttle the port.
+ */
+ if ((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty)) )
+ port->gs.flags |= SCI_RX_THROTTLE;
+}
+
+static void sci_unthrottle(struct tty_struct * tty)
+{
+ struct sci_port *port = (struct sci_port *)tty->driver_data;
+
+ /* Always unthrottle even if flow control is not enabled on
+ * this port in case we disabled flow control while the port
+ * was throttled
+ */
+ port->gs.flags &= ~SCI_RX_THROTTLE;
+ return;
+}
+
+/* ********************************************************************** *
+ * Here are the initialization routines. *
+ * ********************************************************************** */
+
+static int sci_init_drivers(void)
+{
+ int error;
+ struct sci_port *port;
+
+ memset(&sci_driver, 0, sizeof(sci_driver));
+ sci_driver.magic = TTY_DRIVER_MAGIC;
+ sci_driver.driver_name = "serial";
+ sci_driver.name = "ttyS";
+ sci_driver.major = TTY_MAJOR;
+ sci_driver.minor_start = SCI_MINOR_START;
+ sci_driver.num = 1;
+ sci_driver.type = TTY_DRIVER_TYPE_SERIAL;
+ sci_driver.subtype = SERIAL_TYPE_NORMAL;
+ sci_driver.init_termios = tty_std_termios;
+ sci_driver.init_termios.c_cflag =
+ B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+ sci_driver.flags = TTY_DRIVER_REAL_RAW;
+ sci_driver.refcount = &sci_refcount;
+ sci_driver.table = sci_table;
+ sci_driver.termios = &sci_termios[0];
+ sci_driver.termios_locked = &sci_termios[1];
+ sci_termios[0] = sci_termios[1] = NULL;
+
+ sci_driver.open = sci_open;
+ sci_driver.close = gs_close;
+ sci_driver.write = gs_write;
+ sci_driver.put_char = gs_put_char;
+ sci_driver.flush_chars = gs_flush_chars;
+ sci_driver.write_room = gs_write_room;
+ sci_driver.chars_in_buffer = gs_chars_in_buffer;
+ sci_driver.flush_buffer = gs_flush_buffer;
+ sci_driver.ioctl = sci_ioctl;
+ sci_driver.throttle = sci_throttle;
+ sci_driver.unthrottle = sci_unthrottle;
+ sci_driver.set_termios = gs_set_termios;
+ sci_driver.stop = gs_stop;
+ sci_driver.start = gs_start;
+ sci_driver.hangup = gs_hangup;
+
+ sci_callout_driver = sci_driver;
+ sci_callout_driver.name = "cua";
+ sci_callout_driver.major = TTYAUX_MAJOR;
+ sci_callout_driver.subtype = SERIAL_TYPE_CALLOUT;
+
+ if ((error = tty_register_driver(&sci_driver))) {
+ printk(KERN_ERR "sci: Couldn't register SCI driver, error = %d\n",
+ error);
+ return 1;
+ }
+ if ((error = tty_register_driver(&sci_callout_driver))) {
+ tty_unregister_driver(&sci_driver);
+ printk(KERN_ERR "sci: Couldn't register SCI callout driver, error = %d\n",
+ error);
+ return 1;
+ }
+
+ port = &sci_ports[0];
+ port->gs.callout_termios = tty_std_termios;
+ port->gs.normal_termios = tty_std_termios;
+ port->gs.magic = SCI_MAGIC;
+ port->gs.close_delay = HZ/2;
+ port->gs.closing_wait = 30 * HZ;
+ port->gs.rd = &sci_real_driver;
+ init_waitqueue_head(&port->gs.open_wait);
+ init_waitqueue_head(&port->gs.close_wait);
+ port->old_cflag = 0;
+
+ return 0;
+}
+
+#ifdef MODULE
+#define sci_init init_module
+#else
+#define sci_init rs_init
+#endif
+
+int __init sci_init(void)
+{
+ struct sci_port *port;
+ int i;
+
+ for (i=SCI_ERI_IRQ; i<SCI_IRQ_END; i++)
+ set_ipr_data(i, SCI_IPR_OFFSET, SCI_PRIORITY);
+
+ port = &sci_ports[0];
+
+ if (request_irq(SCI_ERI_IRQ, sci_er_interrupt, SA_INTERRUPT,
+ "serial", port)) {
+ printk(KERN_ERR "sci: Cannot allocate error irq.\n");
+ return -ENODEV;
+ }
+ if (request_irq(SCI_RXI_IRQ, sci_rx_interrupt, SA_INTERRUPT,
+ "serial", port)) {
+ printk(KERN_ERR "sci: Cannot allocate rx irq.\n");
+ return -ENODEV;
+ }
+ if (request_irq(SCI_TXI_IRQ, sci_tx_interrupt, SA_INTERRUPT,
+ "serial", port)) {
+ printk(KERN_ERR "sci: Cannot allocate tx irq.\n");
+ return -ENODEV;
+ }
+ /* XXX: How about BRI interrupt?? */
+
+ sci_init_drivers();
+
+#ifdef CONFIG_DEBUG_KERNEL_WITH_GDB_STUB
+ gdb_detach();
+#endif
+ return 0; /* Return -EIO when not detected */
+}
+
+#ifdef MODULE
+#undef func_enter
+#undef func_exit
+
+void cleanup_module(void)
+{
+ int i;
+
+ for (i=SCI_ERI_IRQ; i<SCI_TEI_IRQ; i++) /* XXX: irq_end?? */
+ free_irq(i, port);
+
+ tty_unregister_driver(&sci_driver);
+ tty_unregister_driver(&sci_callout_driver);
+}
+
+#include "generic_serial.c"
+#endif
+
+#ifdef CONFIG_SERIAL_CONSOLE
+/*
+ * ------------------------------------------------------------
+ * Serial console driver for SH-3/SH-4 SCI (with no FIFO)
+ * ------------------------------------------------------------
+ */
+
+static inline void put_char(char c)
+{
+ unsigned long flags;
+ unsigned short status;
+
+ save_and_cli(flags);
+
+ do
+ status = ctrl_in(SC_SR);
+ while (!(status & SCI_TD_E));
+
+ ctrl_outb(c, SC_TDR);
+ ctrl_out(SCI_TD_E_CLEAR, SC_SR);
+
+ restore_flags(flags);
+}
+
+#ifdef CONFIG_DEBUG_KERNEL_WITH_GDB_STUB
+static int in_gdb = 1;
+
+static inline void handle_error(void)
+{ /* Clear error flags */
+ ctrl_out(SCI_ERROR_CLEAR, SC_SR);
+}
+
+static inline int get_char(void)
+{
+ unsigned long flags;
+ unsigned short status;
+ int c;
+
+ save_and_cli(flags);
+ do {
+ status = ctrl_in(SC_SR);
+ if (status & SCI_ERRORS) {
+ handle_error();
+ continue;
+ }
+ } while (!(status & SCI_RD_F));
+ c = ctrl_inb(SC_RDR);
+ ctrl_out(SCI_RDRF_CLEAR, SC_SR);
+ restore_flags(flags);
+
+ return c;
+}
+
+/* Taken from sh-stub.c of GDB 4.18 */
+static const char hexchars[] = "0123456789abcdef";
+static char highhex(int x)
+{
+ return hexchars[(x >> 4) & 0xf];
+}
+
+static char lowhex(int x)
+{
+ return hexchars[x & 0xf];
+}
+
+static void gdb_detach(void)
+{
+ asm volatile("trapa #0xff");
+
+ if (in_gdb == 1) {
+ in_gdb = 0;
+ get_char();
+ put_char('\r');
+ put_char('\n');
+ }
+}
+#endif
+
+/* Send the packet in buffer. The host gets one chance to read it.
+ This routine does not wait for a positive acknowledge. */
+
+/*
+ * Write count bytes to the console.  While attached to GDB the text is
+ * wrapped in a remote-protocol 'O' (console output) packet:
+ *   $O<hex-encoded payload>#<two-digit checksum>
+ * and resent until the host acknowledges with '+'.  Once detached, the
+ * bytes go out raw, expanding LF to CR-LF.
+ */
+static void
+put_string(const char *buffer, int count)
+{
+	int i;
+	const unsigned char *p = buffer;
+#ifdef CONFIG_DEBUG_KERNEL_WITH_GDB_STUB
+	int checksum;
+
+if (in_gdb) {
+	/* $<packet info>#<checksum>. */
+	do {
+		unsigned char c;
+		put_char('$');
+		put_char('O'); /* 'O'utput to console */
+		checksum = 'O';
+
+		/* Payload is hex-encoded; the checksum covers the 'O'
+		 * and every hex digit sent. */
+		for (i=0; i<count; i++) { /* Don't use run length encoding */
+			int h, l;
+
+			c = *p++;
+			h = highhex(c);
+			l = lowhex(c);
+			put_char(h);
+			put_char(l);
+			checksum += h + l;
+		}
+		put_char('#');
+		put_char(highhex(checksum));
+		put_char(lowhex(checksum));
+	} while (get_char() != '+');	/* retransmit until host ACKs */
+} else
+#endif
+	for (i=0; i<count; i++) {
+		if (*p == 10)		/* LF -> CR-LF */
+			put_char('\r');
+		put_char(*p++);
+	}
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+/* Console write hook: emit the buffer via put_string(). */
+static void serial_console_write(struct console *co, const char *s,
+				 unsigned count)
+{
+	put_string(s, count);
+}
+
+/*
+ * Receive character from the serial port.
+ * Always returns 0 -- reading is not implemented yet.
+ */
+static int serial_console_wait_key(struct console *co)
+{
+	/* Not implemented yet */
+	return 0;
+}
+
+/* Map this console to its tty device node (ttyS<index>). */
+static kdev_t serial_console_device(struct console *c)
+{
+	return MKDEV(TTY_MAJOR, SCI_MINOR_START + c->index);
+}
+
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+/*
+ * Parse "console=ttySn,<baud><parity><bits>" options (e.g. "115200n8")
+ * and record the resulting termios cflag in the console structure for
+ * the first open.  The hardware itself is not yet programmed here
+ * (see the XXX at the bottom).  Returns 0; never fails at present.
+ */
+static int __init serial_console_setup(struct console *co, char *options)
+{
+	int baud = 115200;		/* defaults: 115200n8 */
+	int bits = 8;
+	int parity = 'n';
+	int cflag = CREAD | HUPCL | CLOCAL;
+	char *s;
+
+	if (options) {
+		baud = simple_strtoul(options, NULL, 10);
+		s = options;
+		while(*s >= '0' && *s <= '9')	/* skip the baud digits */
+			s++;
+		if (*s) parity = *s++;
+		if (*s) bits = *s - '0';
+	}
+
+	/*
+	 * Now construct a cflag setting.
+	 */
+	switch (baud) {
+	case 19200:
+		cflag |= B19200;
+		break;
+	case 38400:
+		cflag |= B38400;
+		break;
+	case 57600:
+		cflag |= B57600;
+		break;
+	case 115200:
+		cflag |= B115200;
+		break;
+	case 9600:
+	default:			/* unknown rates fall back to 9600 */
+		cflag |= B9600;
+		break;
+	}
+	switch (bits) {
+	case 7:
+		cflag |= CS7;
+		break;
+	default:
+	case 8:
+		cflag |= CS8;
+		break;
+	}
+	switch (parity) {
+	case 'o': case 'O':
+		cflag |= PARODD;
+		break;
+	case 'e': case 'E':
+		cflag |= PARENB;
+		break;
+	}
+	co->cflag = cflag;
+
+	/* XXX: set baud, char, and parity here. */
+	return 0;
+}
+
+/* Console descriptor; positional initializer -- the order must match
+ * struct console in <linux/console.h>. */
+static struct console sercons = {
+	"ttyS",				/* name */
+	serial_console_write,		/* write */
+	NULL,				/* read */
+	serial_console_device,		/* device */
+	serial_console_wait_key,	/* wait_key */
+	NULL,				/* unblank */
+	serial_console_setup,		/* setup */
+	CON_PRINTBUFFER,		/* flags: replay printk buffer */
+	-1,				/* index: any */
+	0,				/* cflag, filled by setup() */
+	NULL				/* next */
+};
+
+/*
+ * Register console.
+ */
+
+/* Register the SCI serial console with the console layer at boot. */
+void __init serial_console_init(void)
+{
+	register_console(&sercons);
+}
+#endif /* CONFIG_SERIAL_CONSOLE */
--- /dev/null
+/* $Id: sh-sci.h,v 1.5 2000-03-05 13:54:32+09 gniibe Exp $
+ *
+ * linux/drivers/char/sh-sci.h
+ *
+ * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2000 Greg Banks
+ *
+ */
+
+#if defined(CONFIG_SH_SCI_SERIAL)
+#if defined(__sh3__)
+#define SCSMR (volatile unsigned char *)0xfffffe80
+#define SCBRR 0xfffffe82
+#define SCSCR (volatile unsigned char *)0xfffffe84
+#define SC_TDR 0xfffffe86
+#define SC_SR (volatile unsigned char *)0xfffffe88
+#define SC_RDR 0xfffffe8a
+#define SCSPTR 0xffffff7c
+
+#define SCSCR_INIT 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
+
+#elif defined(__SH4__)
+Not yet.
+#endif
+
+#define SCI_TD_E 0x80
+#define SCI_RD_F 0x40
+#define SCI_ORER 0x20
+#define SCI_FER 0x10
+#define SCI_PER 0x08
+#define SCI_TEND 0x04
+
+#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
+#define SCI_TD_E_CLEAR 0x78
+#define SCI_RDRF_CLEAR 0xbc
+#define SCI_ERROR_CLEAR 0xc4
+
+#define SCI_CTRL_FLAGS_TIE 0x80
+#define SCI_CTRL_FLAGS_RIE 0x40
+#define SCI_CTRL_FLAGS_TE 0x20
+#define SCI_CTRL_FLAGS_RE 0x10
+/* TEIE=0x04 */
+#define SCI_CTRL_FLAGS_CKE1 0x02
+#define SCI_CTRL_FLAGS_CKE0 0x01
+
+#define RFCR 0xffffff74
+
+#define SCI_ERI_IRQ 23
+#define SCI_RXI_IRQ 24
+#define SCI_TXI_IRQ 25
+#define SCI_TEI_IRQ 26
+#define SCI_IRQ_END 27
+
+#define SCI_IPR_OFFSET (16+4)
+#endif
+\f
+#if defined(CONFIG_SH_SCIF_SERIAL)
+#if defined(__sh3__)
+#define SCSMR (volatile unsigned char *)0xA4000150
+#define SCBRR 0xA4000152
+#define SCSCR (volatile unsigned char *)0xA4000154
+#define SC_TDR 0xA4000156
+#define SC_SR (volatile unsigned short *)0xA4000158
+#define SC_RDR 0xA400015A
+#define SCFCR (volatile unsigned char *)0xA400015C
+#define SCFDR 0xA400015E
+#undef SCSPTR /* Is there any register for RTS?? */
+#undef SCLSR
+
+#define RFCR 0xffffff74
+
+#define SCSCR_INIT 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
+ /* 0x33 when external clock is used */
+#define SCI_IPR_OFFSET (64+4)
+
+#elif defined(__SH4__)
+#define SCSMR (volatile unsigned short *)0xFFE80000
+#define SCBRR 0xFFE80004
+#define SCSCR (volatile unsigned short *)0xFFE80008
+#define SC_TDR 0xFFE8000C
+#define SC_SR (volatile unsigned short *)0xFFE80010
+#define SC_RDR 0xFFE80014
+#define SCFCR (volatile unsigned short *)0xFFE80018
+#define SCFDR 0xFFE8001C
+#define SCSPTR 0xFFE80020
+#define SCLSR 0xFFE80024
+
+#define RFCR 0xFF800028
+
+#define SCSCR_INIT 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+#define SCI_IPR_OFFSET (32+4)
+
+#endif
+
+#define SCI_ER 0x0080
+#define SCI_TEND 0x0040
+#define SCI_TD_E 0x0020
+#define SCI_BRK 0x0010
+#define SCI_FER 0x0008
+#define SCI_PER 0x0004
+#define SCI_RD_F 0x0002
+#define SCI_DR 0x0001
+
+#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ER | SCI_BRK)
+#define SCI_TD_E_CLEAR 0x00df
+#define SCI_TEND_CLEAR 0x00bf
+#define SCI_RDRF_CLEAR 0x00fc
+#define SCI_ERROR_CLEAR 0x0063
+
+#define SCI_CTRL_FLAGS_TIE 0x80
+#define SCI_CTRL_FLAGS_RIE 0x40
+#define SCI_CTRL_FLAGS_TE 0x20
+#define SCI_CTRL_FLAGS_RE 0x10
+#define SCI_CTRL_FLAGS_REIE 0x08
+#define SCI_CTRL_FLAGS_CKE1 0x02
+
+#if defined(__sh3__)
+#define SCI_ERI_IRQ 56
+#define SCI_RXI_IRQ 57
+#define SCI_BRI_IRQ 58
+#define SCI_TXI_IRQ 59
+#define SCI_IRQ_END 60
+#elif defined(__SH4__)
+#define SCI_ERI_IRQ 40
+#define SCI_RXI_IRQ 41
+#define SCI_BRI_IRQ 42
+#define SCI_TXI_IRQ 43
+#define SCI_IRQ_END 44
+#endif
+#endif
+\f
+#define SCI_PRIORITY 3
+
+#define SCI_MINOR_START 64
+#define SCI_RX_THROTTLE 0x0000001
+
+#define O_OTHER(tty) \
+ ((O_OLCUC(tty)) ||\
+ (O_ONLCR(tty)) ||\
+ (O_OCRNL(tty)) ||\
+ (O_ONOCR(tty)) ||\
+ (O_ONLRET(tty)) ||\
+ (O_OFILL(tty)) ||\
+ (O_OFDEL(tty)) ||\
+ (O_NLDLY(tty)) ||\
+ (O_CRDLY(tty)) ||\
+ (O_TABDLY(tty)) ||\
+ (O_BSDLY(tty)) ||\
+ (O_VTDLY(tty)) ||\
+ (O_FFDLY(tty)))
+
+#define I_OTHER(tty) \
+ ((I_INLCR(tty)) ||\
+ (I_IGNCR(tty)) ||\
+ (I_ICRNL(tty)) ||\
+ (I_IUCLC(tty)) ||\
+ (L_ISIG(tty)))
+
+#define SCI_MAGIC 0xbabeface
+
+/* Per-port state: generic_serial port plus the last-seen termios cflag
+ * so set-mode work can be skipped when nothing changed. */
+struct sci_port {
+	struct gs_port gs;
+	unsigned int old_cflag;
+};
+
+#define WAIT_RFCR_COUNTER 200
+
+/*
+ * Values for the BitRate Register (SCBRR)
+ *
+ * The values are actually divisors for a frequency which can
+ * be internal to the SH3 (14.7456MHz) or derived from an external
+ * clock source. This driver assumes the internal clock is used;
+ * to support using an external clock source, config options or
+ * possibly command-line options would need to be added.
+ *
+ * Also, to support speeds below 2400 (why?) the lower 2 bits of
+ * the SCSMR register would also need to be set to non-zero values.
+ *
+ * -- Greg Banks 27Feb2000
+ */
+
+#if defined(__sh3__)
+#define BPS_2400 191
+#define BPS_4800 95
+#define BPS_9600 47
+#define BPS_19200 23
+#define BPS_38400 11
+#define BPS_115200 3
+#elif defined(__SH4__)
+/* Values for SH-4 please! */
+
+#define BPS_115200 8
+#endif
if (isapnp_reserve_irq[i] < 0) {
isapnp_reserve_irq[i] = irq;
#ifdef ISAPNP_DEBUG
- printk("IRQ %i is reserved now.\n", irq);
+ printk("isapnp: IRQ %i is reserved now.\n", irq);
#endif
return 0;
}
pci_for_each_dev(dev) {
#ifdef ISAPNP_DEBUG
- printk("PCI: reserved IRQ: %i\n", dev->irq);
+ printk("isapnp: PCI: reserved IRQ: %i\n", dev->irq);
#endif
if (dev->irq > 0)
isapnp_do_reserve_irq(dev->irq);
port3->min += 0x800;
port3->max += 0x800;
}
- printk(KERN_INFO "ISAPnP: AWE32 quirk - adding two ports\n");
+ printk(KERN_INFO "isapnp: AWE32 quirk - adding two ports\n");
}
while (isapnp_fixups[i].vendor != 0) {
if ((isapnp_fixups[i].vendor == dev->vendor) &&
(isapnp_fixups[i].device == dev->device)) {
- printk(KERN_DEBUG "PnP: Calling quirk for %02x:%02x\n",
+ printk(KERN_DEBUG "isapnp: Calling quirk for %02x:%02x\n",
dev->bus->number, dev->devfn);
isapnp_fixups[i].quirk_function(dev);
}
return(sb_dev);
}
+/*
+ * ISAPnP setup for the Diamond DT0197H sound card: locate and activate
+ * its logical devices (Sound Blaster core, MPU-401 MIDI, gameport and
+ * OPL3) on the given PnP bus and fill in the caller's address_info
+ * structures.  Returns the activated SB device, or NULL if the SB core
+ * could not be activated.  The "panic" wording in the error messages
+ * is only log text; the function keeps going for the optional devices.
+ */
+static struct pci_dev *sb_init_diamond(struct pci_bus *bus, struct pci_dev *card, struct address_info *hw_config, struct address_info *mpu_config)
+{
+	/*
+	 * Diamond's DT0197H
+	 * very similar to the CMI8330 above
+	 */
+
+	/* @@@0001:Soundblaster.
+	 */
+
+	if((sb_dev = isapnp_find_dev(bus,
+		ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), NULL)))
+	{
+		sb_dev->prepare(sb_dev);
+
+		if((sb_dev = activate_dev("DT0197H", "sb", sb_dev)))
+		{
+			hw_config->io_base 	= sb_dev->resource[0].start;
+			hw_config->irq 		= sb_dev->irq_resource[0].start;
+			hw_config->dma 		= sb_dev->dma_resource[0].start;
+			hw_config->dma2 	= -1;	/* no second DMA channel */
+
+			show_base("DT0197H", "sb", &sb_dev->resource[0]);
+		}
+
+		if(!sb_dev) return(NULL);
+
+	}
+	else
+		printk(KERN_ERR "sb: DT0197H panic: sb base not found\n");
+
+	/* @X@0001:mpu
+	 */
+
+#ifdef CONFIG_MIDI
+	if((mpu_dev = isapnp_find_dev(bus,
+		ISAPNP_VENDOR('@','X','@'), ISAPNP_FUNCTION(0x0001), NULL)))
+	{
+		mpu_dev->prepare(mpu_dev);
+
+		if((mpu_dev = activate_dev("DT0197H", "mpu", mpu_dev)))
+		{
+			show_base("DT0197H", "mpu", &mpu_dev->resource[0]);
+			mpu_config->io_base = mpu_dev->resource[0].start;
+		}
+	}
+	else
+		printk(KERN_ERR "sb: DT0197H panic: mpu not found\n");
+#endif
+
+
+	/* @P@:Gameport
+	 */
+
+	if((jp_dev = isapnp_find_dev(bus,
+		ISAPNP_VENDOR('@','P','@'), ISAPNP_FUNCTION(0x0001), NULL)))
+	{
+		jp_dev->prepare(jp_dev);
+
+		if((jp_dev = activate_dev("DT0197H", "gameport", jp_dev)))
+			show_base("DT0197H", "gameport", &jp_dev->resource[0]);
+	}
+	else
+		printk(KERN_ERR "sb: DT0197H panic: gameport not found\n");
+
+	/* @H@0001:OPL3
+	 */
+
+#if defined(CONFIG_SOUND_YM3812) || defined(CONFIG_SOUND_YM3812_MODULE)
+	if((wss_dev = isapnp_find_dev(bus,
+		ISAPNP_VENDOR('@','H','@'), ISAPNP_FUNCTION(0x0001), NULL)))
+	{
+		wss_dev->prepare(wss_dev);
+
+		/* Let's disable IRQ and DMA for WSS device */
+
+		wss_dev->irq_resource[0].flags = 0;
+		wss_dev->dma_resource[0].flags = 0;
+
+		if((wss_dev = activate_dev("DT0197H", "opl3", wss_dev)))
+			show_base("DT0197H", "opl3", &wss_dev->resource[0]);
+	}
+	else
+		printk(KERN_ERR "sb: DT0197H panic: opl3 not found\n");
+#endif
+
+	printk(KERN_INFO "sb: DT0197H mail reports to Torsten Werner <twerner@intercomm.de>\n");
+
+	return(sb_dev);
+}
+
/* Specific support for awe will be dropped when:
* a) The new awe_wawe driver with PnP support will be introduced in the kernel
* b) The joystick driver will support PnP - a little patch is available from me....hint, hint :-)
{ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x009D), 0, &sb_init_awe, "Sound Blaster AWE 64" },
{ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x00C5), 0, &sb_init_awe, "Sound Blaster AWE 64" },
{ISAPNP_VENDOR('C','T','L'), ISAPNP_FUNCTION(0x00E4), 0, &sb_init_awe, "Sound Blaster AWE 64" },
+ {ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x0968), SBF_DEV, &sb_init_ess, "ESS 1688" },
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1868), SBF_DEV, &sb_init_ess, "ESS 1868" },
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x8611), SBF_DEV, &sb_init_ess, "ESS 1868" },
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1869), SBF_DEV, &sb_init_ess, "ESS 1869" },
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1878), SBF_DEV, &sb_init_ess, "ESS 1878" },
{ISAPNP_VENDOR('E','S','S'), ISAPNP_FUNCTION(0x1879), SBF_DEV, &sb_init_ess, "ESS 1879" },
{ISAPNP_VENDOR('C','M','I'), ISAPNP_FUNCTION(0x0001), 0, &sb_init_cmi, "CMI 8330 SoundPRO" },
+ {ISAPNP_VENDOR('R','W','B'), ISAPNP_FUNCTION(0x1688), 0, &sb_init_diamond, "Diamond DT0197H" },
{0}
};
if [ "$ARCH" = "i386" ]; then
bool ' VESA VGA graphics console' CONFIG_FB_VESA
tristate ' VGA 16-color graphics console' CONFIG_FB_VGA16
+ tristate ' Hercules mono graphics console (EXPERIMENTAL)' CONFIG_FB_HGA
define_bool CONFIG_VIDEO_SELECT y
fi
if [ "$CONFIG_VISWS" = "y" ]; then
define_tristate CONFIG_FBCON_VGA_PLANES m
fi
fi
+ if [ "$CONFIG_FB_HGA" = "y" ]; then
+ define_tristate CONFIG_FBCON_HGA y
+ else
+ if [ "$CONFIG_FB_HGA" = "m" ]; then
+ define_tristate CONFIG_FBCON_HGA m
+ fi
+ fi
fi
bool ' Support only 8 pixels wide fonts' CONFIG_FBCON_FONTWIDTH8_ONLY
if [ "$ARCH" = "sparc" -o "$ARCH" = "sparc64" ]; then
fbcon-iplan2p8.o fbcon-vga-planes.o fbcon-cfb16.o \
fbcon-cfb2.o fbcon-cfb24.o fbcon-cfb32.o fbcon-cfb4.o \
fbcon-cfb8.o fbcon-mac.o fbcon-mfb.o fbcon-vga8-planes.o \
- matrox/matroxfb.o cyber2000fb.o
+ matrox/matroxfb.o cyber2000fb.o fbcon-hga.o
# Object file lists.
obj-y :=
obj-$(CONFIG_FB_CGFOURTEEN) += cgfourteenfb.o sbusfb.o
obj-$(CONFIG_FB_P9100) += p9100fb.o sbusfb.o
obj-$(CONFIG_FB_LEO) += leofb.o sbusfb.o
-obj-$(CONFIG_FB_SUN3) += sun3fb.o
-obj-$(CONFIG_FB_BWTWO) += bwtwofb.o
-obj-$(CONFIG_FB_VIRTUAL) += vfb.o
ifeq ($(CONFIG_FB_MATROX),y)
SUB_DIRS += matrox
endif
endif
+obj-$(CONFIG_FB_SUN3) += sun3fb.o
+obj-$(CONFIG_FB_BWTWO) += bwtwofb.o
+obj-$(CONFIG_FB_HGA) += hgafb.o
+obj-$(CONFIG_FB_VIRTUAL) += vfb.o
+
# Generic Low Level Drivers
obj-$(CONFIG_FBCON_AFB) += fbcon-afb.o
obj-$(CONFIG_FBCON_MAC) += fbcon-mac.o
obj-$(CONFIG_FBCON_MFB) += fbcon-mfb.o
obj-$(CONFIG_FBCON_VGA) += fbcon-vga.o
+obj-$(CONFIG_FBCON_HGA) += fbcon-hga.o
# Extract lists of the multi-part drivers.
# The 'int-*' lists are the intermediate files used to build the multi's.
--- /dev/null
+/*
+ * linux/drivers/video/fbcon-hga.c -- Low level frame buffer operations for
+ * the Hercules graphics adaptor
+ *
+ * Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
+ * Based on fbcon-mfb.c by Geert Uytterhoeven
+ *
+ * History:
+ *
+ * - Revision 0.1.0 (6 Dec 1999): comment changes
+ * - First release (25 Nov 1999)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/fb.h>
+
+#include <video/fbcon.h>
+#include <video/fbcon-hga.h>
+
+#if 0
+#define DPRINTK(args...) printk(KERN_DEBUG __FILE__": " ##args)
+#else
+#define DPRINTK(args...)
+#endif
+
+#define HGA_ROWADDR(row) ((row%4)*8192 + (row>>2)*90)
+
+ /*
+ * Hercules monochrome
+ */
+
+/*
+ * Address of pixel row `row` in HGA video memory.  HGA interleaves
+ * scanlines across four 8 KB banks (row%4 selects the bank) with
+ * 90 bytes (720 pixels) per row -- see HGA_ROWADDR above.
+ */
+static inline u8* rowaddr(struct display *p, u_int row)
+{
+	return p->screen_base + HGA_ROWADDR(row);
+}
+
+/* Console-layer setup hook: one plane, line pitch from the fb driver. */
+void fbcon_hga_setup(struct display *p)
+{
+	DPRINTK("fbcon_hga_setup: ll:%d\n", (int)p->line_length);
+
+	p->next_line = p->line_length;
+	p->next_plane = 0;
+}
+
+/*
+ * Move a character rectangle (coordinates in character cells, widths
+ * in bytes).  Because of the HGA bank interleave a flat memmove is not
+ * possible; each scanline is copied separately via rowaddr().  The copy
+ * direction is chosen so overlapping source/destination stay correct.
+ */
+void fbcon_hga_bmove(struct display *p, int sy, int sx, int dy, int dx,
+		     int height, int width)
+{
+	u8 *src, *dest;
+	u_int rows, y1, y2;
+
+#if 0
+	if (sx == 0 && dx == 0 && width == p->next_line) {
+		src = p->screen_base+sy*fontheight(p)*width;
+		dest = p->screen_base+dy*fontheight(p)*width;
+		fb_memmove(dest, src, height*fontheight(p)*width);
+	} else
+#endif
+	if (dy <= sy) {
+		/* moving up/left: copy top-down */
+		y1 = sy*fontheight(p);
+		y2 = dy*fontheight(p);
+		for (rows = height*fontheight(p); rows--; ) {
+			src = rowaddr(p, y1)+sx;
+			dest = rowaddr(p, y2)+dx;
+			fb_memmove(dest, src, width);
+			y1++;
+			y2++;
+		}
+	} else {
+		/* moving down: copy bottom-up to avoid clobbering source */
+		y1 = (sy+height)*fontheight(p)-1;
+		y2 = (dy+height)*fontheight(p)-1;
+		for (rows = height*fontheight(p); rows--;) {
+			src = rowaddr(p, y1)+sx;
+			dest = rowaddr(p, y2)+dx;
+			fb_memmove(dest, src, width);
+			y1--;
+			y2--;
+		}
+	}
+}
+
+/*
+ * Clear a character rectangle, scanline by scanline (bank interleave
+ * again rules out one big memset).  Fills with 0xff when the erase
+ * character is reverse video, 0x00 otherwise.
+ *
+ * NOTE(review): the disabled #if 0 fast path reads `dest` before it is
+ * ever assigned -- it must be fixed before being re-enabled.
+ */
+void fbcon_hga_clear(struct vc_data *conp, struct display *p, int sy, int sx,
+		     int height, int width)
+{
+	u8 *dest;
+	u_int rows, y;
+	int inverse = conp ? attr_reverse(p,conp->vc_video_erase_char) : 0;
+
+	DPRINTK("fbcon_hga_clear: sx:%d, sy:%d, height:%d, width:%d\n", sx, sy, height, width);
+
+	y = sy*fontheight(p);
+#if 0
+	if (sx == 0 && width == p->next_line) {
+		if (inverse) {
+			fb_memset255(dest, height*fontheight(p)*width);
+		} else {
+			fb_memclear(dest, height*fontheight(p)*width);
+		}
+	} else
+#endif
+	for (rows = height*fontheight(p); rows--; y++) {
+		dest = rowaddr(p, y)+sx;
+		if (inverse) {
+			fb_memset255(dest, width);
+		} else {
+			fb_memclear(dest, width);
+		}
+	}
+}
+
+/*
+ * Draw one character cell at (yy, xx).  The glyph bitmap comes from the
+ * font data; bold is faked by OR-ing each line with itself shifted right,
+ * underline forces the last scanline solid, reverse inverts the byte.
+ */
+void fbcon_hga_putc(struct vc_data *conp, struct display *p, int c, int yy,
+		    int xx)
+{
+	u8 *dest, *cdat;
+	u_int rows, y, bold, revs, underl;
+	u8 d;
+
+	cdat = p->fontdata+(c&p->charmask)*fontheight(p);
+	bold = attr_bold(p, c);
+	revs = attr_reverse(p, c);
+	underl = attr_underline(p, c);
+	y = yy*fontheight(p);
+
+	for (rows = fontheight(p); rows--; y++) {
+		d = *cdat++;
+		if (underl && !rows)		/* last scanline: underline */
+			d = 0xff;
+		else if (bold)
+			d |= d>>1;		/* fake bold by smearing */
+		if (revs)
+			d = ~d;
+		dest = rowaddr(p, y)+xx;
+		*dest = d;
+	}
+}
+
+/*
+ * Draw a run of characters starting at (yy, xx).  The bold/reverse/
+ * underline attributes are taken from the FIRST character and applied
+ * to the whole run, as all fbcon putcs implementations assume the run
+ * shares one attribute set.
+ */
+void fbcon_hga_putcs(struct vc_data *conp, struct display *p,
+		     const unsigned short *s, int count, int yy, int xx)
+{
+	u8 *dest, *cdat;
+	u_int rows, y, y0, bold, revs, underl;
+	u8 d;
+	u16 c;
+
+	bold = attr_bold(p,scr_readw(s));
+	revs = attr_reverse(p,scr_readw(s));
+	underl = attr_underline(p,scr_readw(s));
+	y0 = yy*fontheight(p);
+
+	while (count--) {
+		c = scr_readw(s++) & p->charmask;
+		cdat = p->fontdata+c*fontheight(p);
+		y = y0;
+		for (rows = fontheight(p); rows--; y++) {
+			d = *cdat++;
+			if (underl && !rows)
+				d = 0xff;
+			else if (bold)
+				d |= d>>1;
+			if (revs)
+				d = ~d;
+			dest = rowaddr(p, y)+xx;
+			*dest = d;
+		}
+		xx++;
+	}
+}
+
+/* Invert one character cell (used for the software cursor). */
+void fbcon_hga_revc(struct display *p, int xx, int yy)
+{
+	u8 *dest;
+	u_int rows, y;
+
+	y = yy*fontheight(p);
+	for (rows = fontheight(p); rows--; y++) {
+		dest = rowaddr(p, y)+xx;
+		*dest = ~*dest;
+	}
+}
+
+/*
+ * Clear the partial rows below the last full character row.  The right
+ * margin never needs clearing (720/8 = 90 bytes exactly fills a line).
+ *
+ * NOTE(review): `conp` is NULL-checked when computing `inverse` but then
+ * dereferenced unconditionally for vc_rows -- inconsistent; confirm
+ * whether fbcon ever calls clear_margins with conp == NULL.
+ */
+void fbcon_hga_clear_margins(struct vc_data *conp, struct display *p,
+			     int bottom_only)
+{
+	u8 *dest;
+	u_int height, y;
+	int inverse = conp ? attr_reverse(p,conp->vc_video_erase_char) : 0;
+
+	DPRINTK("fbcon_hga_clear_margins: enter\n");
+
+	/* No need to handle right margin. */
+
+	y = conp->vc_rows * fontheight(p);
+	for (height = p->var.yres - y; height-- > 0; y++) {
+		DPRINTK("fbcon_hga_clear_margins: y:%d, height:%d\n", y, height);
+		dest = rowaddr(p, y);
+		if (inverse) {
+			fb_memset255(dest, p->next_line);
+		} else {
+			fb_memclear(dest, p->next_line);
+		}
+	}
+}
+
+
+ /*
+ * `switch' for the low level operations
+ */
+
+/* Low-level ops table; positional initializer -- order must match
+ * struct display_switch in <video/fbcon.h>: setup, bmove, clear, putc,
+ * putcs, revc, cursor (NULL = software cursor), set_font (NULL),
+ * clear_margins, fontwidthmask. */
+struct display_switch fbcon_hga = {
+	fbcon_hga_setup, fbcon_hga_bmove, fbcon_hga_clear, fbcon_hga_putc,
+	fbcon_hga_putcs, fbcon_hga_revc, NULL, NULL, fbcon_hga_clear_margins,
+	FONTWIDTH(8)			/* HGA glyphs are 8 pixels wide */
+};
+
+
+#ifdef MODULE
+/* Module hooks: nothing to do -- the ops table is consumed via the
+ * exported symbols below. */
+int init_module(void)
+{
+	return 0;
+}
+
+void cleanup_module(void)
+{
+}
+#endif /* MODULE */
+
+
+ /*
+ * Visible symbols for modules
+ */
+
+EXPORT_SYMBOL(fbcon_hga);
+EXPORT_SYMBOL(fbcon_hga_setup);
+EXPORT_SYMBOL(fbcon_hga_bmove);
+EXPORT_SYMBOL(fbcon_hga_clear);
+EXPORT_SYMBOL(fbcon_hga_putc);
+EXPORT_SYMBOL(fbcon_hga_putcs);
+EXPORT_SYMBOL(fbcon_hga_revc);
+EXPORT_SYMBOL(fbcon_hga_clear_margins);
}
#endif
#if defined(CONFIG_FBCON_MFB) || defined(CONFIG_FBCON_AFB) || \
- defined(CONFIG_FBCON_ILBM)
+ defined(CONFIG_FBCON_ILBM) || defined(CONFIG_FBCON_HGA)
if (depth == 1 && (p->type == FB_TYPE_PACKED_PIXELS ||
p->type == FB_TYPE_PLANES ||
unsigned char inverse = p->inverse || p->visual == FB_VISUAL_MONO01
? 0x00 : 0xff;
+ int is_hga = !strncmp(p->fb_info->modename, "HGA", 3);
/* can't use simply memcpy because need to apply inverse */
for( y1 = 0; y1 < LOGO_H; y1++ ) {
src = logo + y1*LOGO_LINE;
- dst = fb + y1*line + x/8;
+ if (is_hga)
+ dst = fb + (y1%4)*8192 + (y1>>2)*line + x/8;
+ else
+ dst = fb + y1*line + x/8;
for( x1 = 0; x1 < LOGO_LINE; ++x1 )
fb_writeb(fb_readb(src++) ^ inverse, dst++);
}
extern int vesafb_setup(char*);
extern int vga16fb_init(void);
extern int vga16fb_setup(char*);
+extern int hgafb_init(void);
+extern int hgafb_setup(char*);
extern int matroxfb_init(void);
extern int matroxfb_setup(char*);
extern int hpfb_init(void);
#ifdef CONFIG_FB_VGA16
{ "vga16", vga16fb_init, vga16fb_setup },
#endif
+#ifdef CONFIG_FB_HGA
+ { "hga", hgafb_init, hgafb_setup },
+#endif
#ifdef CONFIG_FB_MATROX
{ "matrox", matroxfb_init, matroxfb_setup },
#endif
--- /dev/null
+/*
+ * linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device
+ *
+ * Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
+ * Based on skeletonfb.c by Geert Uytterhoeven and
+ * mdacon.c by Andrew Apted
+ *
+ * History:
+ *
+ * - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure
+ * screen is cleared after rmmod
+ * virtual resolutions
+ * kernel parameter 'video=hga:font:{fontname}'
+ * module parameter 'font={fontname}'
+ * module parameter 'nologo={0|1}'
+ * the most important: boot logo :)
+ * - Revision 0.1.0 (6 Dec 1999): faster scrolling and minor fixes
+ * - First release (25 Nov 1999)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/vga.h>
+#include <video/fbcon.h>
+#include <video/fbcon-hga.h>
+
+#ifdef MODULE
+
+#define INCLUDE_LINUX_LOGOBW
+#include <linux/linux_logo.h>
+
+#endif /* MODULE */
+
+#if 0
+#define DPRINTK(args...) printk(KERN_DEBUG __FILE__": " ##args)
+#else
+#define DPRINTK(args...)
+#endif
+
+#if 1
+#define CHKINFO(ret) if (info != &fb_info) { printk(KERN_DEBUG __FILE__": This should never happen, line:%d \n", __LINE__); return ret; }
+#else
+#define CHKINFO(ret)
+#endif
+
+/* Description of the hardware layout */
+
+static unsigned long hga_vram_base; /* Base of video memory */
+static unsigned long hga_vram_len; /* Size of video memory */
+
+#define HGA_TXT 0
+#define HGA_GFX 1
+
+static int hga_mode = -1; /* 0 = txt, 1 = gfx mode */
+
+static enum { TYPE_HERC, TYPE_HERCPLUS, TYPE_HERCCOLOR } hga_type;
+static char *hga_type_name;
+
+#define HGA_INDEX_PORT 0x3b4 /* Register select port */
+#define HGA_VALUE_PORT 0x3b5 /* Register value port */
+#define HGA_MODE_PORT 0x3b8 /* Mode control port */
+#define HGA_STATUS_PORT 0x3ba /* Status and Config port */
+#define HGA_GFX_PORT 0x3bf /* Graphics control port */
+
+/* HGA register values */
+
+#define HGA_CURSOR_BLINKING 0x00
+#define HGA_CURSOR_OFF 0x20
+#define HGA_CURSOR_SLOWBLINK 0x60
+
+#define HGA_MODE_GRAPHICS 0x02
+#define HGA_MODE_VIDEO_EN 0x08
+#define HGA_MODE_BLINK_EN 0x20
+#define HGA_MODE_GFX_PAGE1 0x80
+
+#define HGA_STATUS_HSYNC 0x01
+#define HGA_STATUS_VSYNC 0x80
+#define HGA_STATUS_VIDEO 0x08
+
+#define HGA_CONFIG_COL132 0x08
+#define HGA_GFX_MODE_EN 0x01
+#define HGA_GFX_PAGE_EN 0x02
+
+/* Framebuffer driver structures */
+
+static struct fb_var_screeninfo hga_default_var = {
+ 720, 348, /* xres, yres */
+ 720, 348, /* xres_virtual, yres_virtual */
+ 0, 0, /* xoffset, yoffset */
+ 1, /* bits_per_pixel */
+ 0, /* grayscale */
+ {0, 1, 0}, /* red */
+ {0, 1, 0}, /* green */
+ {0, 1, 0}, /* blue */
+ {0, 0, 0}, /* transp */
+ 0, /* nonstd (FB_NONSTD_HGA ?) */
+ 0, /* activate */
+ -1, -1, /* height, width */
+ 0, /* accel_flags */
+ /* pixclock */
+ /* left_margin, right_margin */
+ /* upper_margin, lower_margin */
+ /* hsync_len, vsync_len */
+ /* sync */
+ /* vmode */
+};
+
+static struct fb_fix_screeninfo hga_fix = {
+ "HGA", /* id */
+ (unsigned long) NULL, /* smem_start */
+ 0, /* smem_len */
+ FB_TYPE_PACKED_PIXELS, /* type (not sure) */
+ 0, /* type_aux (not sure) */
+ FB_VISUAL_MONO10, /* visual */
+ 8, /* xpanstep */
+ 8, /* ypanstep */
+ 0, /* ywrapstep */
+ 90, /* line_length */
+ 0, /* mmio_start */
+ 0, /* mmio_len */
+ FB_ACCEL_NONE /* accel */
+};
+
+static struct fb_info fb_info;
+static struct display disp;
+
+/* Don't assume that tty1 will be the initial current console. */
+static int currcon = -1;
+
+#ifdef MODULE
+static char *font = NULL;
+static int nologo = 0;
+#endif
+
+/* -------------------------------------------------------------------------
+ *
+ * Low level hardware functions
+ *
+ * ------------------------------------------------------------------------- */
+
+/* Write one byte to a 6845 CRTC register: select via the index port,
+ * then write the value port; interrupts off around the pair. */
+static void write_hga_b(unsigned int val, unsigned char reg)
+{
+	unsigned long flags;
+
+	save_flags(flags); cli();
+
+	outb_p(reg, HGA_INDEX_PORT);
+	outb_p(val, HGA_VALUE_PORT);
+
+	restore_flags(flags);
+}
+
+/* Write a 16-bit value to a CRTC register pair: high byte to reg,
+ * low byte to reg+1 (6845 convention, e.g. start address 0x0c/0x0d). */
+static void write_hga_w(unsigned int val, unsigned char reg)
+{
+	unsigned long flags;
+
+	save_flags(flags); cli();
+
+	outb_p(reg, HGA_INDEX_PORT); outb_p(val >> 8, HGA_VALUE_PORT);
+	outb_p(reg+1, HGA_INDEX_PORT); outb_p(val & 0xff, HGA_VALUE_PORT);
+
+	restore_flags(flags);
+}
+
+/* Probe helper: write val to CRTC register reg, wait briefly, and
+ * return nonzero if it reads back unchanged (i.e. a CRTC responded). */
+static int test_hga_b(unsigned char val, unsigned char reg)
+{
+	unsigned long flags;
+
+	save_flags(flags); cli();
+
+	outb_p(reg, HGA_INDEX_PORT);
+	outb  (val, HGA_VALUE_PORT);
+
+	udelay(20); val = (inb_p(HGA_VALUE_PORT) == val);
+
+	restore_flags(flags);
+
+	return val;
+}
+
+/*
+ * Blank video memory: zeros in graphics mode, spaces in text mode.
+ * NOTE(review): in text mode this also writes 0x20 into the attribute
+ * bytes (text RAM is char/attr pairs) -- presumably acceptable since
+ * the console rewrites the screen, but confirm.
+ */
+static void hga_clear_screen(void)
+{
+	if (hga_mode == HGA_TXT)
+		memset((char *)hga_vram_base, ' ', hga_vram_len);
+	else if (hga_mode == HGA_GFX)
+		memset((char *)hga_vram_base, 0, hga_vram_len);
+}
+
+
+/* Program the 6845 CRTC and HGA control ports for 80x25 text mode. */
+static void hga_txt_mode(void)
+{
+	outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_BLINK_EN, HGA_MODE_PORT);
+	outb_p(0x00, HGA_GFX_PORT);		/* graphics off */
+	outb_p(0x00, HGA_STATUS_PORT);
+
+	write_hga_b(0x61, 0x00);	/* horizontal total */
+	write_hga_b(0x50, 0x01);	/* horizontal displayed */
+	write_hga_b(0x52, 0x02);	/* horizontal sync pos */
+	write_hga_b(0x0f, 0x03);	/* horizontal sync width */
+
+	write_hga_b(0x19, 0x04);	/* vertical total */
+	write_hga_b(0x06, 0x05);	/* vertical total adjust */
+	write_hga_b(0x19, 0x06);	/* vertical displayed */
+	write_hga_b(0x19, 0x07);	/* vertical sync pos */
+
+	write_hga_b(0x02, 0x08);	/* interlace mode */
+	write_hga_b(0x0d, 0x09);	/* maximum scanline */
+	write_hga_b(0x0c, 0x0a);	/* cursor start */
+	write_hga_b(0x0d, 0x0b);	/* cursor end */
+
+	write_hga_w(0x0000, 0x0c);	/* start address */
+	write_hga_w(0x0000, 0x0e);	/* cursor location */
+
+	hga_mode = HGA_TXT;
+}
+
+/* Program the 6845 CRTC and HGA control ports for 720x348 graphics. */
+static void hga_gfx_mode(void)
+{
+	outb_p(0x00, HGA_STATUS_PORT);
+	outb_p(HGA_GFX_MODE_EN, HGA_GFX_PORT);	/* allow graphics mode */
+	outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_GRAPHICS, HGA_MODE_PORT);
+
+	write_hga_b(0x35, 0x00);	/* horizontal total */
+	write_hga_b(0x2d, 0x01);	/* horizontal displayed */
+	write_hga_b(0x2e, 0x02);	/* horizontal sync pos */
+	write_hga_b(0x07, 0x03);	/* horizontal sync width */
+
+	write_hga_b(0x5b, 0x04);	/* vertical total */
+	write_hga_b(0x02, 0x05);	/* vertical total adjust */
+	write_hga_b(0x57, 0x06);	/* vertical displayed */
+	write_hga_b(0x57, 0x07);	/* vertical sync pos */
+
+	write_hga_b(0x02, 0x08);	/* interlace mode */
+	write_hga_b(0x03, 0x09);	/* maximum scanline */
+	write_hga_b(0x00, 0x0a);	/* cursor start */
+	write_hga_b(0x00, 0x0b);	/* cursor end */
+
+	write_hga_w(0x0000, 0x0c);	/* start address */
+	write_hga_w(0x0000, 0x0e);	/* cursor location */
+
+	hga_mode = HGA_GFX;
+}
+
+#ifdef MODULE
+/*
+ * Paint the 80x80-pixel monochrome boot logo, inverted, centered-ish
+ * (x offset 40 bytes = 320 px, y 134).  The destination address uses
+ * the HGA 4-bank interleave, same layout as HGA_ROWADDR.
+ */
+static void hga_show_logo(void)
+{
+	int x, y;
+	char *dest = (char *)hga_vram_base;
+	char *logo = linux_logo_bw;
+	for (y = 134; y < 134 + 80 ; y++)	/* this needs some cleanup */
+		for (x = 0; x < 10 ; x++)
+			*(dest + (y%4)*8192 + (y>>2)*90 + x + 40) = ~*(logo++);
+}
+#endif /* MODULE */
+
+/*
+ * Pan the visible window by reprogramming the CRTC start address.
+ * yoffset is in pixels, stepped by 8 (matches ypanstep); xoffset is
+ * presumably in bytes given xpanstep == 8 pixels -- confirm units.
+ */
+static void hga_pan(unsigned int xoffset, unsigned int yoffset)
+{
+	unsigned int base;
+	base = (yoffset / 8) * 90 + xoffset;
+	write_hga_w(base, 0x0c);	/* start address */
+	DPRINTK("hga_pan: base:%d\n", base);
+}
+
+/*
+ * Probe for a Hercules adaptor.  Reserves the video memory and (best
+ * effort) the I/O ports, verifies the card latches test patterns at
+ * 0xb0000, checks the CRTC responds on the cursor-low register, and
+ * finally classifies the card (Herc / HercPlus / HercColor) by watching
+ * the vsync status bit toggle for ~1/10 s.
+ *
+ * Returns 1 on success, 0 if no card was found.  Fixed: the failure
+ * paths used to return 0 while keeping the memory region (and possibly
+ * the I/O ports) reserved, leaking them on every failed probe.
+ */
+static int hga_card_detect(void)
+{
+	int count = 0;
+	int release_io_ports = 0;	/* did request_region() succeed? */
+	u16 *p, p_save;
+	u16 *q, q_save;
+
+	hga_vram_base = VGA_MAP_MEM(0xb0000);
+	hga_vram_len  = 0x08000;
+
+	if (!request_mem_region(hga_vram_base, hga_vram_len, "hgafb")) {
+		printk(KERN_ERR "hgafb: cannot reserve video memory at 0x%lX\n",
+				hga_vram_base);
+		return 0;
+	}
+	/* Port reservation stays best-effort (another MDA-range driver may
+	 * hold them), but remember the outcome so errors can undo it. */
+	if (!request_region(0x3b0, 16, "hgafb"))
+		printk(KERN_ERR "hgafb: cannot reserve io ports\n");
+	else
+		release_io_ports = 1;
+
+	/* do a memory check */
+
+	p = (u16 *) hga_vram_base;
+	q = (u16 *) (hga_vram_base + 0x01000);
+
+	p_save = scr_readw(p); q_save = scr_readw(q);
+
+	scr_writew(0xaa55, p); if (scr_readw(p) == 0xaa55) count++;
+	scr_writew(0x55aa, p); if (scr_readw(p) == 0x55aa) count++;
+	scr_writew(p_save, p);
+
+	if (count != 2)
+		goto error;
+
+	/* Ok, there is definitely a card registering at the correct
+	 * memory location, so now we do an I/O port test.
+	 */
+
+	if (!test_hga_b(0x66, 0x0f))	/* cursor low register */
+		goto error;
+	if (!test_hga_b(0x99, 0x0f))	/* cursor low register */
+		goto error;
+
+	/* See if the card is a Hercules, by checking whether the vsync
+	 * bit of the status register is changing. This test lasts for
+	 * approximately 1/10th of a second.
+	 */
+
+	p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
+
+	for (count=0; count < 50000 && p_save == q_save; count++) {
+		q_save = inb(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
+		udelay(2);
+	}
+
+	if (p_save != q_save) {
+		switch (inb_p(HGA_STATUS_PORT) & 0x70) {
+			case 0x10:
+				hga_type = TYPE_HERCPLUS;
+				hga_type_name = "HerculesPlus";
+				break;
+			case 0x50:
+				hga_type = TYPE_HERCCOLOR;
+				hga_type_name = "HerculesColor";
+				break;
+			default:
+				hga_type = TYPE_HERC;
+				hga_type_name = "Hercules";
+				break;
+		}
+	}
+	return 1;
+
+error:
+	/* Undo the reservations taken above before reporting "no card". */
+	if (release_io_ports)
+		release_region(0x3b0, 16);
+	release_mem_region(hga_vram_base, hga_vram_len);
+	return 0;
+}
+
+/* ------------------------------------------------------------------------- *
+ *
+ * dispsw functions
+ *
+ * ------------------------------------------------------------------------- */
+
+
+ /*
+ * Open/Release the frame buffer device
+ */
+
+/* Open hook: no state to set up, just pin the module. */
+static int hgafb_open(struct fb_info *info, int user)
+{
+	/* Nothing, only a usage count for the moment */
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Release hook: drop the module reference taken in hgafb_open(). */
+static int hgafb_release(struct fb_info *info, int user)
+{
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+
+ /*
+ * Get the Fixed Part of the Display
+ */
+
+/* Return the fixed display parameters; this driver has exactly one
+ * fb_info, which CHKINFO asserts. */
+int hga_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
+{
+	CHKINFO(-EINVAL);
+	DPRINTK("hga_get_fix: con:%d, info:%x, fb_info:%x\n", con, (unsigned)info, (unsigned)&fb_info);
+
+	*fix = info->fix;
+	return 0;
+}
+
+
+	/*
+	 *  Get the User Defined Part of the Display
+	 */
+
+/* Return the current variable display parameters. */
+int hga_get_var(struct fb_var_screeninfo *var, int con, struct fb_info *info)
+{
+	CHKINFO(-EINVAL);
+	DPRINTK("hga_get_var: con:%d, info:%x, fb_info:%x\n", con, (unsigned)info, (unsigned)&fb_info);
+
+	*var = info->var;
+	return 0;
+}
+
+ /*
+ * Set the User Defined Part of the Display
+ * This is the most mystical function (at least for me).
+ * What is the exact specification of xxx_set_var?
+ * Should it handle xoffset, yoffset? Should it do panning?
+ * What does vmode mean?
+ */
+
+/*
+ * Validate and (on FB_ACTIVATE_NOW) apply a variable-parameter request.
+ * Only the one native mode is accepted: 720x348x1, virtual width 720,
+ * virtual height 348..364 (348 + 2 pan steps of 8).  Returns -EINVAL
+ * for anything else; never touches the hardware.
+ */
+int hga_set_var(struct fb_var_screeninfo *var, int con, struct fb_info *info)
+{
+	CHKINFO(-EINVAL);
+	DPRINTK("hga_set_var: con:%d, activate:%x, info:0x%x, fb_info:%x\n", con, var->activate, (unsigned)info, (unsigned)&fb_info);
+
+	if (var->xres != 720 || var->yres != 348 ||
+	    var->xres_virtual != 720 ||
+	    var->yres_virtual < 348 || var->yres_virtual > 348 + 16 ||
+	    var->bits_per_pixel != 1 || var->grayscale != 0) {
+		return -EINVAL;
+	}
+	if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
+		info->var = *var;
+		if (info->changevar)
+			(*info->changevar)(con);	/* notify console layer */
+	}
+	return 0;
+}
+
+
+ /*
+ * Get the Colormap
+ */
+
+/* Two fixed palette entries: 0 = black, 1 = dim white (0xaaaa).
+ * Returns 1 (error) for any other register. */
+static int hga_getcolreg(u_int regno, u_int *red, u_int *green, u_int *blue,
+			 u_int *transp, struct fb_info *info)
+{
+	if (regno == 0) {
+		*red = *green = *blue = 0x0000;
+		*transp = 0;
+	} else if (regno == 1) {
+		*red = *green = *blue = 0xaaaa;
+		*transp = 0;
+	} else
+		return 1;
+	return 0;
+}
+
+/* Read the (fixed) colormap via the generic fbcon helper. */
+int hga_get_cmap(struct fb_cmap *cmap, int kspc, int con,
+		 struct fb_info *info)
+{
+	CHKINFO(-EINVAL);
+	DPRINTK("hga_get_cmap: con:%d\n", con);
+	return fb_get_cmap(cmap, kspc, hga_getcolreg, info);
+}
+
+	/*
+	 *  Set the Colormap
+	 */
+
+/* The palette is not programmable: accept writes to entries 0/1 as
+ * no-ops, reject anything else. */
+static int hga_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+			 u_int transp, struct fb_info *info)
+{
+	if (regno > 1)
+		return 1;
+	return 0;
+}
+
+/* Store a colormap via the generic helper (values are ignored above). */
+int hga_set_cmap(struct fb_cmap *cmap, int kspc, int con,
+		 struct fb_info *info)
+{
+	CHKINFO(-EINVAL);
+	DPRINTK("hga_set_cmap: con:%d\n", con);
+	return fb_set_cmap(cmap, kspc, hga_setcolreg, info);
+}
+
+ /*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ */
+
+int hga_pan_display(struct fb_var_screeninfo *var, int con,
+ struct fb_info *info)
+{
+ CHKINFO(-EINVAL);
+ DPRINTK("pan_disp: con:%d, wrap:%d, xoff:%d, yoff:%d\n", con, var->vmode & FB_VMODE_YWRAP, var->xoffset, var->yoffset);
+
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset < 0 ||
+ var->yoffset >= info->var.yres_virtual ||
+ var->xoffset)
+ return -EINVAL;
+ } else {
+ if (var->xoffset + var->xres > info->var.xres_virtual
+ || var->yoffset + var->yres > info->var.yres_virtual
+ || var->yoffset % 8)
+ return -EINVAL;
+ }
+
+ hga_pan(var->xoffset, var->yoffset);
+
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
+}
+
+
+ /*
+ * Frame Buffer Specific ioctls
+ */
+
+int hga_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg, int con, struct fb_info *info)
+{
+ CHKINFO(-EINVAL);
+ DPRINTK("hga_ioctl: con:%d\n", con);
+ return -EINVAL;
+}
+
+
+
+
+static struct fb_ops hgafb_ops = {
+ hgafb_open, hgafb_release, hga_get_fix, hga_get_var, hga_set_var,
+ hga_get_cmap, hga_set_cmap, hga_pan_display, hga_ioctl
+};
+
+
+/* ------------------------------------------------------------------------- *
+ *
+ * Functions in fb_info
+ *
+ * ------------------------------------------------------------------------- */
+
+static int hgafbcon_switch(int con, struct fb_info *info)
+{
+ CHKINFO(-EINVAL);
+ DPRINTK("hgafbcon_switch: currcon:%d, con:%d, info:%x, fb_info:%x\n", currcon, con, (unsigned)info, (unsigned)&fb_info);
+
+ /* Save the colormap and video mode */
+#if 0 /* Not necessary in hgafb, we use fixed colormap */
+ fb_copy_cmap(&info->cmap, &fb_display[currcon].cmap, 0);
+#endif
+
+ if (currcon != -1) /* this check is absolutely necessary! */
+ memcpy(&fb_display[currcon].var, &info->var,
+ sizeof(struct fb_var_screeninfo));
+
+ /* Install a new colormap and change the video mode. By default fbcon
+ * sets all the colormaps and video modes to the default values at
+ * bootup.
+ */
+#if 0
+ fb_copy_cmap(&fb_display[con].cmap, &info->cmap, 0);
+ fb_set_cmap(&info->cmap, 1, hga_setcolreg, info);
+#endif
+
+ memcpy(&info->var, &fb_display[con].var,
+ sizeof(struct fb_var_screeninfo));
+ /* hga_set_var(&info->var, con, &fb_info); is it necessary? */
+ currcon = con;
+
+ /* Hack to work correctly with XF86_Mono */
+ hga_gfx_mode();
+ return 0;
+}
+
+static int hgafbcon_updatevar(int con, struct fb_info *info)
+{
+ CHKINFO(-EINVAL);
+ DPRINTK("hga_update_var: con:%d, info:%x, fb_info:%x\n", con, (unsigned)info, (unsigned)&fb_info);
+ return (con < 0) ? -EINVAL : hga_pan_display(&fb_display[con].var, con, info);
+}
+
+static void hgafbcon_blank(int blank_mode, struct fb_info *info)
+{
+ /*
+ * Blank the screen if blank_mode != 0, else unblank.
+ * Implements VESA suspend and powerdown modes on hardware
+ * that supports disabling hsync/vsync:
+ * blank_mode == 2: suspend vsync
+ * blank_mode == 3: suspend hsync
+ * blank_mode == 4: powerdown
+ */
+
+ CHKINFO( );
+ DPRINTK("hga_blank: blank_mode:%d, info:%x, fb_info:%x\n", blank_mode, (unsigned)info, (unsigned)&fb_info);
+
+ if (blank_mode) {
+ outb_p(0x00, HGA_MODE_PORT); /* disable video */
+ } else {
+ outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_GRAPHICS, HGA_MODE_PORT);
+ }
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+ /*
+ * Initialization
+ */
+
+int __init hgafb_init(void)
+{
+ if (! hga_card_detect()) {
+ printk(KERN_ERR "hgafb: HGA card not detected.\n");
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
+ hga_type_name, hga_vram_len/1024);
+
+ hga_gfx_mode();
+ hga_clear_screen();
+#ifdef MODULE
+ if (!nologo) hga_show_logo();
+#endif /* MODULE */
+
+ hga_fix.smem_start = hga_vram_base;
+ hga_fix.smem_len = hga_vram_len;
+
+ disp.var = hga_default_var;
+/* disp.cmap = ???; */
+ disp.screen_base = (char*)hga_fix.smem_start;
+ disp.visual = hga_fix.visual;
+ disp.type = hga_fix.type;
+ disp.type_aux = hga_fix.type_aux;
+ disp.ypanstep = hga_fix.ypanstep;
+ disp.ywrapstep = hga_fix.ywrapstep;
+ disp.line_length = hga_fix.line_length;
+ disp.can_soft_blank = 1;
+ disp.inverse = 0;
+ disp.dispsw = &fbcon_hga;
+ disp.dispsw_data = NULL;
+
+ disp.scrollmode = SCROLL_YREDRAW;
+
+ strcpy (fb_info.modename, hga_fix.id);
+ fb_info.node = -1;
+ fb_info.flags = FBINFO_FLAG_DEFAULT;
+/* fb_info.open = ??? */
+ fb_info.var = hga_default_var;
+ fb_info.fix = hga_fix;
+ fb_info.monspecs.hfmin = 0;
+ fb_info.monspecs.hfmax = 0;
+ fb_info.monspecs.vfmin = 10000;
+ fb_info.monspecs.vfmax = 10000;
+ fb_info.monspecs.dpms = 0;
+ fb_info.fbops = &hgafb_ops;
+ fb_info.screen_base = (char *)hga_fix.smem_start;
+ fb_info.disp = &disp;
+/* fb_info.display_fg = ??? */
+/* fb_info.fontname initialized later */
+ fb_info.changevar = NULL;
+ fb_info.switch_con = hgafbcon_switch;
+ fb_info.updatevar = hgafbcon_updatevar;
+ fb_info.blank = hgafbcon_blank;
+ fb_info.pseudo_palette = NULL; /* ??? */
+ fb_info.par = NULL;
+
+ if (register_framebuffer(&fb_info) < 0)
+ return -EINVAL;
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n",
+ GET_FB_IDX(fb_info.node), fb_info.modename);
+
+ return 0;
+}
+
+ /*
+ * Setup
+ */
+
+#ifndef MODULE
+int __init hgafb_setup(char *options)
+{
+ /*
+ * Parse user specified options
+ * `video=hga:font:VGA8x16' or
+ * `video=hga:font:SUN8x16' recommended
+ * Other supported fonts: VGA8x8, Acorn8x8, PEARL8x8
+ * More different fonts can be used with the `setfont' utility.
+ */
+
+ char *this_opt;
+
+ fb_info.fontname[0] = '\0';
+
+ if (!options || !*options)
+ return 0;
+
+ for (this_opt = strtok(options, ","); this_opt;
+ this_opt = strtok(NULL, ",")) {
+ if (!strncmp(this_opt, "font:", 5))
+ strcpy(fb_info.fontname, this_opt+5);
+ }
+ return 0;
+}
+#endif /* !MODULE */
+
+
+ /*
+ * Cleanup
+ */
+
+#ifdef MODULE
+static void hgafb_cleanup(struct fb_info *info)
+{
+ hga_txt_mode();
+ hga_clear_screen();
+ unregister_framebuffer(info);
+ release_region(0x3b0, 16);
+ release_mem_region(hga_vram_base, hga_vram_len);
+}
+#endif /* MODULE */
+
+
+
+/* -------------------------------------------------------------------------
+ *
+ * Modularization
+ *
+ * ------------------------------------------------------------------------- */
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (font)
+ strncpy(fb_info.fontname, font, sizeof(fb_info.fontname)-1);
+ else
+ fb_info.fontname[0] = '\0';
+
+ return hgafb_init();
+}
+
+void cleanup_module(void)
+{
+ hgafb_cleanup(&fb_info);
+}
+
+MODULE_AUTHOR("Ferenc Bakonyi (fero@drama.obuda.kando.hu)");
+MODULE_DESCRIPTION("FBDev driver for Hercules Graphics Adaptor");
+
+MODULE_PARM(font, "s");
+MODULE_PARM_DESC(font, "Specifies one of the compiled-in fonts (VGA8x8, VGA8x16, SUN8x16, Acorn8x8, PEARL8x8) (default=none)");
+MODULE_PARM(nologo, "i");
+MODULE_PARM_DESC(nologo, "Disables startup logo if != 0 (default=0)");
+
+#endif /* MODULE */
* region already (FIXME) */
request_region(0x3c0, 32, "vesafb");
- if (mtrr)
- mtrr_add(video_base, video_size, MTRR_TYPE_WRCOMB, 1);
+ if (mtrr) {
+ int temp_size = video_size;
+ while (mtrr_add(video_base, temp_size, MTRR_TYPE_WRCOMB, 1)==-EINVAL) {
+ temp_size >>= 1;
+ }
+ }
strcpy(fb_info.modename, "VESA VGA");
fb_info.changevar = NULL;
static inline void vga_set_mem_top(struct vc_data *c)
{
write_vga(12, (c->vc_visible_origin-vga_vram_base)/2);
+ clear_status_line();
}
static void vgacon_deinit(struct vc_data *c)
vgacon_build_attr,
vgacon_invert_region
};
+
+
+int inited = 0;
+
+void
+clear_status_line( void )
+{
+#if CONFIG_COMMENT_INT==3
+ u16 *org;
+ int i;
+ int currcons = fg_console;
+ struct vc_data *c = vc_cons[fg_console].d;
+ if (!inited) return;
+ if (vga_is_gfx) return;
+ if (c->vc_origin != c->vc_visible_origin) return;
+ org = screen_pos( fg_console, video_num_lines*video_num_columns, 1 );
+ for (i=0; i<video_num_columns; i++)
+ scr_writew( (0x0f << 8) + ' ', org++ );
+#endif
+}
+
+#if CONFIG_COMMENT_INT==3
+void
+paint_status_line( int timer )
+{
+ u16 *org;
+ int i,j;
+ int currcons = fg_console;
+ struct vc_data *c = vc_cons[fg_console].d;
+
+ if (!inited) return;
+ if (vga_is_gfx) /* We don't play origin tricks in graphic modes */
+ return;
+ if (c->vc_origin != c->vc_visible_origin) return;
+
+ org = screen_pos( fg_console, video_num_lines*video_num_columns, 1 );
+
+ /* Are we busy? */
+ {
+ i = current->pid;
+ j = 0;
+ if (i)
+ j = (i>16) ? ((current->priority < DEF_PRIORITY) ? 0xA0:0xE0) : 0xC0;
+ scr_writew( (j<<8) + ' ', org++ );
+
+#if 0
+ org++;
+
+#define DISP( x ) scr_writew( (((i&x) ? 0x90:0) << 8) + ' ', org++ );
+ DISP( 0x80 );
+ DISP( 0x40 );
+ DISP( 0x20 );
+ DISP( 0x10 );
+ DISP( 0x08 );
+ DISP( 0x04 );
+ DISP( 0x02 );
+ DISP( 0x01 );
+#endif
+ }
+
+ if (!timer) return;
+
+ org++;
+ /* Serial? */
+ {
+ j = (ledflags & 0x10) ? 0x90 : 0x00;
+ scr_writew( (j<<8) + 'S', org++ );
+ }
+
+ org++;
+ /* NE2000? */
+ {
+ j = (ledflags & 0x20) ? 0x90 : 0x00;
+ scr_writew( (j<<8) + 'N', org++ );
+ }
+}
+#endif
PIPE_BASE(*inode) = (char *) page;
PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 0;
+ PIPE_WAITING_WRITERS(*inode) = PIPE_WAITING_READERS(*inode) = 0;
}
switch (filp->f_mode) {
schedule();
remove_wait_queue(PIPE_WAIT(*inode), &wait);
current->state = TASK_RUNNING;
+ down(PIPE_SEM(*inode));
}
static ssize_t
PIPE_WAITING_READERS(*inode)--;
ret = -ERESTARTSYS;
if (signal_pending(current))
- goto out_nolock;
- if (down_interruptible(PIPE_SEM(*inode)))
- goto out_nolock;
+ goto out;
ret = 0;
if (!PIPE_EMPTY(*inode))
break;
PIPE_WAITING_WRITERS(*inode)--;
ret = -ERESTARTSYS;
if (signal_pending(current))
- goto out_nolock;
- if (down_interruptible(PIPE_SEM(*inode)))
- goto out_nolock;
+ goto out;
if (!PIPE_READERS(*inode))
goto sigpipe;
pipe_wait(inode);
PIPE_WAITING_WRITERS(*inode)--;
if (signal_pending(current))
- goto out_nolock;
- if (down_interruptible(PIPE_SEM(*inode)))
- goto out_nolock;
+ goto out;
if (!PIPE_READERS(*inode))
goto sigpipe;
} while (!PIPE_FREE(*inode));
ret = -EFAULT;
}
- if (count && PIPE_WAITING_READERS(*inode) &&
- !(filp->f_flags & O_NONBLOCK)) {
- wake_up_interruptible_sync(PIPE_WAIT(*inode));
- goto do_more_write;
- }
/* Signal readers asynchronously that there is more data. */
wake_up_interruptible(PIPE_WAIT(*inode));
-/*
+/* $Id: cache.h,v 1.3 1999/12/11 12:31:51 gniibe Exp $
+ *
* include/asm-sh/cache.h
+ *
* Copyright 1999 (C) Niibe Yutaka
*/
#ifndef __ASM_SH_CACHE_H
#define L1_CACHE_BYTES 32
#endif
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
- __attribute__((__aligned__(L1_CACHE_BYTES), \
- __section__(".data.cacheline_aligned")))
-#endif
-
extern void cache_flush_area(unsigned long start, unsigned long end);
extern void cache_purge_area(unsigned long start, unsigned long end);
extern void cache_wback_area(unsigned long start, unsigned long end);
--- /dev/null
+#ifndef __ASM_SH_DIV64
+#define __ASM_SH_DIV64
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+#endif /* __ASM_SH_DIV64 */
#ifndef __ASM_SH_DMA_H
#define __ASM_SH_DMA_H
-/* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
- occurrence should be flagged as an error. */
+#include <asm/io.h> /* need byte IO */
#define MAX_DMA_CHANNELS 8
/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
+ occurrence should be flagged as an error. */
+/* But... */
/* XXX: This is not applicable to SuperH, just needed for alloc_bootmem */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
+#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x10000000)
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-/* Though SH-3 has no floating point regs.. */
-#define ELF_NFPREG 34
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+typedef struct user_fpu_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
extern unsigned int local_irq_count[NR_CPUS];
#define in_interrupt() (local_irq_count[smp_processor_id()] != 0)
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
#define hardirq_endlock(cpu) do { } while (0)
-#define hardirq_enter(cpu) (local_irq_count[cpu]++)
-#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+#define irq_enter(cpu, irq) (local_irq_count[cpu]++)
+#define irq_exit(cpu, irq) (local_irq_count[cpu]--)
#define synchronize_irq() barrier()
#ifndef __ASM_SH_HDREG_H
#define __ASM_SH_HDREG_H
-typedef unsigned short ide_ioreg_t;
+typedef unsigned int ide_ioreg_t;
#endif /* __ASM_SH_HDREG_H */
+++ /dev/null
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual adresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#include <linux/init.h>
-
-/* undef for production */
-#define HIGHMEM_DEBUG 1
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-
-extern void kmap_init(void) __init;
-
-/* kmap helper functions necessary to access the highmem pages in kernel */
-#include <asm/pgtable.h>
-#include <asm/kmap_types.h>
-
-extern inline unsigned long kmap(struct page *page, enum km_type type)
-{
- if (page < highmem_start_page)
- return page_address(page);
- {
- enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
- unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
-
-#if HIGHMEM_DEBUG
- if (!pte_none(*(kmap_pte-idx)))
- {
- __label__ here;
- here:
- printk(KERN_ERR "not null pte on CPU %d from %p\n",
- smp_processor_id(), &&here);
- }
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
-
- return vaddr;
- }
-}
-
-extern inline void kunmap(unsigned long vaddr, enum km_type type)
-{
-#if HIGHMEM_DEBUG
- enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
- if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
- {
- /* force other mappings to Oops if they'll try to access
- this pte without first remap it */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
- }
-#endif
-}
-
-extern inline void kmap_check(void)
-{
-#if HIGHMEM_DEBUG
- int idx_base = KM_TYPE_NR*smp_processor_id(), i;
- for (i = idx_base; i < idx_base+KM_TYPE_NR; i++)
- if (!pte_none(*(kmap_pte-i)))
- BUG();
-#endif
-}
-#endif /* _ASM_HIGHMEM_H */
#include <linux/config.h>
#ifndef MAX_HWIFS
-#define MAX_HWIFS 10
+#define MAX_HWIFS 1 /* XXX: For my board -- gniibe */
#endif
#define ide__sti() __sti()
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- case 0x1e0: return 8;
- case 0x160: return 12;
+ case 0xba0001f0: return 14;
+ case 0xba000170: return 14;
default:
return 0;
}
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
+ case 0:
+ return 0xba0001f0;
+ case 1:
+ return 0xba000170;
default:
return 0;
}
#include <asm/cache.h>
+#define inb_p inb
+#define outb_p outb
+
+#define inw_p inw
+#define outw_p outw
+
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
extern __inline__ unsigned long readb(unsigned long addr)
{
- return *(volatile unsigned char*)addr;
+ return *(volatile unsigned char*)addr;
}
extern __inline__ unsigned long readw(unsigned long addr)
{
- return *(volatile unsigned short*)addr;
+ return *(volatile unsigned short*)addr;
}
extern __inline__ unsigned long readl(unsigned long addr)
{
- return *(volatile unsigned long*)addr;
+ return *(volatile unsigned long*)addr;
}
extern __inline__ void writeb(unsigned char b, unsigned long addr)
{
- *(volatile unsigned char*)addr = b;
+ *(volatile unsigned char*)addr = b;
}
extern __inline__ void writew(unsigned short b, unsigned long addr)
{
- *(volatile unsigned short*)addr = b;
+ *(volatile unsigned short*)addr = b;
}
extern __inline__ void writel(unsigned int b, unsigned long addr)
extern __inline__ unsigned long inb_local(unsigned long addr)
{
- return readb(addr);
+ return readb(addr);
}
extern __inline__ void outb_local(unsigned char b, unsigned long addr)
{
- return writeb(b,addr);
+ return writeb(b,addr);
}
extern __inline__ unsigned long inb(unsigned long addr)
{
- return readb(addr);
+ return readb(addr);
}
extern __inline__ unsigned long inw(unsigned long addr)
{
- return readw(addr);
+ return readw(addr);
}
extern __inline__ unsigned long inl(unsigned long addr)
{
- return readl(addr);
+ return readl(addr);
}
extern __inline__ void insb(unsigned long addr, void *buffer, int count)
while(count--) outl(*buf++, addr);
}
+#define ctrl_in(addr) *(addr)
+#define ctrl_out(data,addr) *(addr) = (data)
+
extern __inline__ unsigned long ctrl_inb(unsigned long addr)
{
return *(volatile unsigned char*)addr;
extern __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
{
- *(volatile unsigned short*)addr = b;
+ *(volatile unsigned short*)addr = b;
}
extern __inline__ void ctrl_outl(unsigned int b, unsigned long addr)
*(volatile unsigned long*)addr = b;
}
-#define inb_p inb
-#define outb_p outb
-
#ifdef __KERNEL__
-#define IO_SPACE_LIMIT 0xffff
+#define IO_SPACE_LIMIT 0xffffffff
#include <asm/addrspace.h>
--- /dev/null
+#ifndef __ASM_SH_IPCBUF_H__
+#define __ASM_SH_IPCBUF_H__
+
+/*
+ * The ipc64_perm structure for the SuperH architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __ASM_SH_IPCBUF_H__ */
#include <linux/config.h>
#define TIMER_IRQ 16 /* Hard-wired */
-#define TIMER_IRP_OFFSET 12
-#define TIMER_PRIORITY 1
+#define TIMER_IPR_OFFSET 12
+#define TIMER_PRIORITY 2
#if defined(__SH4__)
/*
*/
extern void set_ipr_data(unsigned int irq, int offset, int priority);
extern void make_onChip_irq(unsigned int irq);
+extern void make_imask_irq(unsigned int irq);
#endif /* __ASM_SH_IRQ_H */
(b) ASID (Address Space IDentifier)
*/
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
-{
-}
/*
* Cache of MMU context last used.
*/
/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID 0x100
+/*
+ * Virtual Page Number mask
+ */
+#define MMU_VPN_MASK 0xfffff000
+
extern __inline__ void
get_new_mmu_context(struct mm_struct *mm)
{
extern __inline__ void set_asid(unsigned long asid)
{
- __asm__ __volatile__ ("mov.l %0,%1"
- : /* no output */
- : "r" (asid), "m" (__m(MMU_PTEH)));
+ unsigned long __dummy;
+
+ __asm__ __volatile__ ("mov.l %2, %0\n\t"
+ "and %3, %0\n\t"
+ "or %1, %0\n\t"
+ "mov.l %0, %2"
+ : "=&r" (__dummy)
+ : "r" (asid), "m" (__m(MMU_PTEH)),
+ "r" (0xffffff00));
}
extern __inline__ unsigned long get_asid(void)
{
unsigned long asid;
- __asm__ __volatile__ ("mov.l %1,%0"
+ __asm__ __volatile__ ("mov.l %1, %0"
: "=r" (asid)
: "m" (__m(MMU_PTEH)));
asid &= MMU_CONTEXT_ASID_MASK;
struct mm_struct *next,
struct task_struct *tsk, unsigned int cpu)
{
+ set_bit(cpu, &next->cpu_vm_mask);
if (prev != next) {
unsigned long __pgdir = (unsigned long)next->pgd;
- __asm__ __volatile__("mov.l %0,%1"
+ __asm__ __volatile__("mov.l %0, %1"
: /* no output */
: "r" (__pgdir), "m" (__m(MMU_TTB)));
activate_context(next);
clear_bit(cpu, &prev->cpu_vm_mask);
}
- set_bit(cpu, &next->cpu_vm_mask);
}
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL,smp_processor_id())
+extern __inline__ void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
#endif /* __ASM_SH_MMU_CONTEXT_H */
--- /dev/null
+#ifndef __ASM_SH_MSGBUF_H
+#define __ASM_SH_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for the SuperH architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* __ASM_SH_MSGBUF_H */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PTE_MASK PAGE_MASK
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#endif /* !__ASSEMBLY__ */
--- /dev/null
+#ifndef __ASM_SH_PCI_H
+#define __ASM_SH_PCI_H
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#define pcibios_assign_all_busses() 0
+
+#ifdef __KERNEL__
+
+/* Dynamic DMA mapping stuff.
+ * SuperH has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what as passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+ size_t size)
+{
+ return virt_to_bus(ptr);
+}
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size)
+{
+ /* Nothing to do */
+}
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ return nents;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents)
+{
+ /* Nothing to do */
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to tear down the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
+ dma_addr_t dma_handle,
+ size_t size)
+{
+ /* Nothing to do */
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
+ struct scatterlist *sg,
+ int nelems)
+{
+ /* Nothing to do */
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg) ((sg)->length)
+
+#endif /* __KERNEL__ */
+
+
+#endif /* __ASM_SH_PCI_H */
+
--- /dev/null
+#ifndef __ASM_SH_PGALLOC_2LEVEL_H
+#define __ASM_SH_PGALLOC_2LEVEL_H
+
+/*
+ * traditional two-level paging, page table allocation routines:
+ */
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ return (pmd_t *)0;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
+extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ if (!pgd)
+ BUG();
+ return (pmd_t *) pgd;
+}
+
+#endif /* __ASM_SH_PGALLOC_2LEVEL_H */
--- /dev/null
+#ifndef __ASM_SH_PGALLOC_H
+#define __ASM_SH_PGALLOC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <linux/threads.h>
+
+#define pgd_quicklist (current_cpu_data.pgd_quick)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (current_cpu_data.pte_quick)
+#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+
+#include <asm/pgalloc-2level.h>
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (ret) {
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = 0;
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+#define pte_free_kernel(pte) free_pte_slow(pte)
+#define pte_free(pte) free_pte_slow(pte)
+#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
+{
+ if (!pmd)
+ BUG();
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t * page = (pte_t *) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(page)));
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __handle_bad_pmd_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+
+ if (pmd_none(*pmd))
+ goto getnew;
+ if (pmd_bad(*pmd))
+ goto fix;
+ return (pte_t *)pmd_page(*pmd) + address;
+getnew:
+{
+ unsigned long page = (unsigned long) get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(page)));
+ return (pte_t *)page + address;
+}
+fix:
+ __handle_bad_pmd(pmd);
+ return NULL;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+extern inline void pmd_free(pmd_t * pmd)
+{
+}
+
+#define pmd_free_kernel pmd_free
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[address >> PGDIR_SHIFT] = entry;
+}
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ *
+ */
+
+extern void flush_tlb(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+#endif /* __ASM_SH_PGALLOC_H */
extern inline int pgd_present(pgd_t pgd) { return 1; }
#define pgd_clear(xp) do { } while (0)
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
+
#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
return (pmd_t *) dir;
}
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- return (pmd_t *) pgd;
-}
-
#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
#include <linux/threads.h>
extern pgd_t swapper_pg_dir[1024];
+extern void paging_init(void);
#if defined(__sh3__)
/* Cache flushing:
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
#elif defined(__SH4__)
/*
* Caches are broken on SH-4, so we need them.
extern void flush_cache_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_page_to_ram(unsigned long page);
+extern void __flush_page_to_ram(unsigned long page_va);
+#define flush_page_to_ram(page) __flush_page_to_ram(page_address(page))
extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
#endif
-/* TLB flushing:
- *
- * - flush_tlb_all() flushes all processes TLB entries
- * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(mm, vmaddr) flushes a single page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
- */
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
/*
* Basically we have the same two-level (which is the logical three level
* Linux page table layout folded) page tables as the i386.
*/
-#endif /* !__ASSEMBLY__ */
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+#endif /* !__ASSEMBLY__ */
#include <asm/pgtable-2level.h>
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-
#define __beep() asm("")
#define PMD_SIZE (1UL << PMD_SHIFT)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END P4SEG
-#define _PAGE_READ 0x001 /* software: read access alowed */
+#define _PAGE_READ 0x001 /* software: read access allowed */
#define _PAGE_ACCESSED 0x002 /* software: page referenced */
#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
-/* 0x008 */
+#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
/* 0x010 */
#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */
#if defined(__sh3__)
/* Mask which drop software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff164
-/* Flags defalult: SZ=1 (4k-byte), C=1 (cachable), SH=0 (not shared) */
-#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000018
+#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff16c
+/* Flags default: SZ=1 (4k-byte), C=0 (non-cachable), SH=0 (not shared) */
+#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000010
#elif defined(__SH4__)
/* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff164
-/* Flags defalult: SZ=01 (4k-byte), C=1 (cachable), SH=0 (not shared), WT=0 */
-#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000018
+#define _PAGE_FLAGS_HARDWARE_MASK 0x1ffff16c
+/* Flags default: SZ=01 (4k-byte), C=0 (non-cachable), SH=0 (not shared), WT=0 */
+#define _PAGE_FLAGS_HARDWARE_DEFAULT 0x00000010
#endif
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
* As i386 and MIPS, SuperH can't do page protection for execute, and
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
-/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern pte_t __bad_page(void);
-extern pte_t * __bad_pagetable(void);
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
-
/*
* Handling allocation failures during page table setup.
*/
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
#define pte_pagenr(x) ((unsigned long)(((pte_val(x) -__MEMORY_START) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
/*
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
-#define page_address(page) ({ PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT) + __MEMORY_START; })
+#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
extern inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-
-extern inline pte_t pte_rdprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_exprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_USER; return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte){ pte_val(pte) &= ~_PAGE_RW; return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+
+extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
+ *
+ * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
-extern inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
- pte_t __pte;
-
- pte_val(__pte) = (page-mem_map)*(unsigned long long)PAGE_SIZE +
- __MEMORY_START + pgprot_val(pgprot);
- return __pte;
-}
+#define mk_pte(page,pgprot) \
+({ pte_t __pte; \
+ \
+ set_pte(&__pte, __pte(((page)-mem_map) * \
+ (unsigned long long)PAGE_SIZE + pgprot_val(pgprot) + \
+ __MEMORY_START)); \
+ __pte; \
+})
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+({ pte_t __pte; set_pte(&__pte, __pte(physpage + pgprot_val(pgprot))); __pte; })
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define pmd_page(pmd) \
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
__pte_offset(address))
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret) {
- /* Clear User space */
- memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-
- /* XXX: Copy vmalloc-ed space??? */
- memcpy(ret + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- }
- return ret;
-}
-
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
- unsigned long *ret;
-
- if ((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
-{
- free_page((unsigned long)pgd);
-}
-
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-extern __inline__ pte_t *get_pte_fast(void)
-{
- unsigned long *ret;
-
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
-extern __inline__ void free_pte_fast(pte_t *pte)
-{
- *(unsigned long *)pte = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pte;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pte_slow(pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-#define pte_free_kernel(pte) free_pte_slow(pte)
-#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
-
-extern __inline__ pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_pte_fast();
-
- if (!page)
- return get_pte_kernel_slow(pmd, address);
- pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
-extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd))
- goto getnew;
- if (pmd_bad(*pmd))
- goto fix;
- return (pte_t *)pmd_page(*pmd) + address;
-getnew:
-{
- unsigned long page = (unsigned long) get_pte_fast();
-
- if (!page)
- return get_pte_slow(pmd, address);
- pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
- return (pte_t *)page + address;
-}
-fix:
- __handle_bad_pmd(pmd);
- return NULL;
-}
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free(pmd_t * pmd)
-{
-}
-
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
-
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-}
-
-extern pgd_t swapper_pg_dir[1024];
-
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
typedef long __kernel_clock_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
} __kernel_fsid_t;
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
/*
* include/asm-sh/processor.h
*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*/
#ifndef __ASM_SH_PROCESSOR_H
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
-#define current_text_addr() ({ void *pc; __asm__("mova 1f,%0\n1:":"=z" (pc)); pc; })
+#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; })
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+/*
+ * FPU structure and data
+ */
+/* FD-bit of SR register.
+ * When it's set, it means the processor doesn't have right to use FPU,
+ * and it results exception when the floating operation is executed.
+ */
+#define SR_FD 0x00008000
+
#define NUM_FPU_REGS 16
struct sh_fpu_hard_struct {
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[NUM_FPU_REGS];
- unsigned long xf_regs[NUM_FPU_REGS];
unsigned long fpscr;
unsigned long fpul;
+ unsigned long xf_regs[NUM_FPU_REGS];
unsigned char lookahead;
unsigned long entry_pc;
#define INIT_THREAD { \
sizeof(init_stack) + (long) &init_stack, /* sp */ \
0, /* pc */ \
- 0, 0, \
- 0, \
- {{{0,}},} \
+ 0, 0, \
+ 0, \
+ {{{0,}},} /* fpu state */ \
}
/*
#define forget_segments() do { } while (0)
/*
- * FPU lazy state save handling..
+ * FPU lazy state save handling.
*/
-#define SR_FD 0x00008000
extern __inline__ void release_fpu(void)
{
unsigned long __dummy;
/* Set FD flag in SR */
- __asm__ __volatile__ ("stc sr,%0\n\t"
- "or %1,%0\n\t"
- "ldc %0,sr"
- : "=&r" (__dummy)
- : "r" (SR_FD));
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "or %1, %0\n\t"
+ "ldc %0, $sr"
+ : "=&r" (__dummy)
+ : "r" (SR_FD));
}
extern __inline__ void grab_fpu(void)
unsigned long __dummy;
/* Clear out FD flag in SR */
- __asm__ __volatile__ ("stc sr,%0\n\t"
- "and %1,%0\n\t"
- "ldc %0,sr"
- : "=&r" (__dummy)
- : "r" (~SR_FD));
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "and %1, %0\n\t"
+ "ldc %0, $sr"
+ : "=&r" (__dummy)
+ : "r" (~SR_FD));
}
extern void save_fpu(struct task_struct *__tsk);
-#define unlazy_fpu(tsk) do { \
- if (tsk->flags & PF_USEDFPU) \
- save_fpu(tsk); \
+#define unlazy_fpu(tsk) do { \
+ if ((tsk)->flags & PF_USEDFPU) { \
+ grab_fpu(); \
+ save_fpu(tsk); \
+ } \
} while (0)
-#define clear_fpu(tsk) do { \
- if (tsk->flags & PF_USEDFPU) { \
- tsk->flags &= ~PF_USEDFPU; \
- release_fpu(); \
- } \
+#define clear_fpu(tsk) do { \
+ if ((tsk)->flags & PF_USEDFPU) \
+ (tsk)->flags &= ~PF_USEDFPU; \
} while (0)
/*
return t->pc;
}
-static inline unsigned long get_wchan(struct task_struct *p)
-{
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- /* FIXME: here the actual wchan calculation should sit */
- return 0;
-}
+extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#define THREAD_SIZE (2*PAGE_SIZE)
extern struct task_struct * alloc_task_struct(void);
extern void free_task_struct(struct task_struct *);
+#define get_task_struct(tsk) atomic_inc(&mem_map[MAP_NR(tsk)].count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
#define RLIM_NLIMITS 10
+#ifdef __KERNEL__
+
/*
* SuS says limits have to be unsigned.
* Which makes a ton more sense anyway.
*/
#define RLIM_INFINITY (~0UL)
-#ifdef __KERNEL__
-
#define INIT_RLIMITS \
{ \
{ RLIM_INFINITY, RLIM_INFINITY }, \
--- /dev/null
+#ifndef __ASM_SH_SCATTERLIST_H
+#define __ASM_SH_SCATTERLIST_H
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of actual if address is a
+ * dma indirect buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#define ISA_DMA_THRESHOLD (0x1fffffff)
+
+#endif /* !(__ASM_SH_SCATTERLIST_H) */
*/
static __inline__ void wake_one_more(struct semaphore * sem)
{
- atomic_inc((atomic_t *)&sem->waking);
+ atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
- sem->waking--;
+ if (sem->sleepers > 0) {
+ sem->sleepers--;
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
int ret = 1;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking <= 0)
+ if (sem->sleepers <= 0)
atomic_inc(&sem->count);
else {
- sem->waking--;
+ sem->sleepers--;
ret = 0;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
* (C) Copyright 1996 Linus Torvalds
*
* SuperH verison by Niibe Yutaka
- *
+ * (Currently no asm implementation but generic C code...)
*/
#include <linux/spinlock.h>
struct semaphore {
atomic_t count;
- int waking;
+ int sleepers;
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
* GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
*/
atomic_set(&sem->count, val);
- sem->waking = 0;
+ sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
sema_init(sem, 0);
}
+#if 0
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
+#endif
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
+extern struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry);
+extern struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry);
+asmlinkage struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);
extern spinlock_t semaphore_wake_lock;
__up(sem);
}
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * SuperH version by Niibe Yutaka
+ */
+struct rw_semaphore {
+ atomic_t count;
+ volatile unsigned char write_bias_granted;
+ volatile unsigned char read_bias_granted;
+ volatile unsigned char pad1;
+ volatile unsigned char pad2;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#define RW_LOCK_BIAS 0x01000000
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ atomic_set(&sem->count, RW_LOCK_BIAS);
+ sem->read_bias_granted = 0;
+ sem->write_bias_granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+ if (sem->__magic != (long)&sem->__magic)
+ BUG();
+#endif
+ if ((new = atomic_dec_return(&sem->count)) < 0)
+ __down_read(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+#if WAITQUEUE_DEBUG
+ if (sem->__magic != (long)&sem->__magic)
+ BUG();
+#endif
+ if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count)) != 0)
+ __down_write(sem, (new < 0 && saved >=0));
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant
+ * case is when there was a writer waiting, and we've
+ * bumped the count to 0: we must wake the writer up.
+ */
+extern inline void __up_read(struct rw_semaphore *sem)
+{
+ if (atomic_inc_return(&sem->count) == 0)
+ __rwsem_wake(sem);
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void __up_write(struct rw_semaphore *sem)
+{
+ int saved = atomic_read(&sem->count), new;
+
+ new = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+ if (saved < 0 && new >= 0)
+ __rwsem_wake(sem);
+}
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+ __up_read(sem);
+}
+
+extern inline void up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+ __up_write(sem);
+}
+
#endif /* __ASM_SH_SEMAPHORE_H */
--- /dev/null
+#ifndef __ASM_SH_SEMBUF_H
+#define __ASM_SH_SEMBUF_H
+
+/*
+ * The semid64_ds structure for the SuperH architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH_SEMBUF_H */
--- /dev/null
+#ifndef __ASM_SH_SHMBUF_H
+#define __ASM_SH_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for the SuperH architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_SH_SHMBUF_H */
#define CLD_TRAPPED 4 /* traced child has trapped */
#define CLD_STOPPED 5 /* child has stopped */
#define CLD_CONTINUED 6 /* stopped child has continued */
-#define NSIGCHLD
+#define NSIGCHLD 6
/*
* SIGPOLL si_codes
#ifndef __ASM_SH_SOFTIRQ_H
#define __ASM_SH_SOFTIRQ_H
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
extern unsigned int local_bh_count[NR_CPUS];
#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(x) atomic_clear_mask((x),(atomic_t *)&bh_active)
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* __ASM_SH_SOFTIRQ_H */
unsigned long __unused5;
};
+/* This matches struct stat64 in glibc2.1, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+ unsigned short st_dev;
+ unsigned char __pad0[10];
+
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned short st_rdev;
+ unsigned char __pad3[10];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long __pad4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long __pad5;
+
+ unsigned long st_mtime;
+ unsigned long __pad6;
+
+ unsigned long st_ctime;
+ unsigned long __pad7; /* will be high 32 bits of ctime someday */
+
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
#endif /* __ASM_SH_STAT_H */
#define __ASM_SH_SYSTEM_H
/*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*/
/*
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
register struct task_struct *__last; \
- register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
- register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
- register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
- register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
- register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
- register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
+ register unsigned long *__ts1 __asm__ ("$r1") = &prev->thread.sp; \
+ register unsigned long *__ts2 __asm__ ("$r2") = &prev->thread.pc; \
+ register unsigned long *__ts4 __asm__ ("$r4") = (unsigned long *)prev; \
+ register unsigned long *__ts5 __asm__ ("$r5") = (unsigned long *)next; \
+ register unsigned long *__ts6 __asm__ ("$r6") = &next->thread.sp; \
+ register unsigned long __ts7 __asm__ ("$r7") = next->thread.pc; \
__asm__ __volatile__ (".balign 4\n\t" \
- "stc.l gbr,@-r15\n\t" \
- "sts.l pr,@-r15\n\t" \
- "mov.l r8,@-r15\n\t" \
- "mov.l r9,@-r15\n\t" \
- "mov.l r10,@-r15\n\t" \
- "mov.l r11,@-r15\n\t" \
- "mov.l r12,@-r15\n\t" \
- "mov.l r13,@-r15\n\t" \
- "mov.l r14,@-r15\n\t" \
- "mov.l r15,@r1 ! save SP\n\t" \
- "mov.l @r6,r15 ! change to new stack\n\t" \
- "mov.l %0,@-r15 ! push R0 onto new stack\n\t" \
- "mova 1f,%0\n\t" \
- "mov.l %0,@r2 ! save PC\n\t" \
- "mov.l 2f,%0\n\t" \
+ "stc.l $gbr, @-$r15\n\t" \
+ "sts.l $pr, @-$r15\n\t" \
+ "mov.l $r8, @-$r15\n\t" \
+ "mov.l $r9, @-$r15\n\t" \
+ "mov.l $r10, @-$r15\n\t" \
+ "mov.l $r11, @-$r15\n\t" \
+ "mov.l $r12, @-$r15\n\t" \
+ "mov.l $r13, @-$r15\n\t" \
+ "mov.l $r14, @-$r15\n\t" \
+ "mov.l $r15, @$r1 ! save SP\n\t" \
+ "mov.l @$r6, $r15 ! change to new stack\n\t" \
+ "mov.l %0, @-$r15 ! push R0 onto new stack\n\t" \
+ "mova 1f, %0\n\t" \
+ "mov.l %0, @$r2 ! save PC\n\t" \
+ "mov.l 2f, %0\n\t" \
"jmp @%0 ! call __switch_to\n\t" \
- " lds r7,pr ! with return to new PC\n\t" \
+ " lds $r7, $pr ! with return to new PC\n\t" \
".balign 4\n" \
"2:\n\t" \
- ".long " "_" "__switch_to\n" \
+ ".long " "__switch_to\n" \
"1:\n\t" \
- "mov.l @r15+,%0 ! pop R0 from new stack\n\t" \
- "mov.l @r15+,r14\n\t" \
- "mov.l @r15+,r13\n\t" \
- "mov.l @r15+,r12\n\t" \
- "mov.l @r15+,r11\n\t" \
- "mov.l @r15+,r10\n\t" \
- "mov.l @r15+,r9\n\t" \
- "mov.l @r15+,r8\n\t" \
- "lds.l @r15+,pr\n\t" \
- "ldc.l @r15+,gbr\n\t" \
+ "mov.l @$r15+, %0 ! pop R0 from new stack\n\t" \
+ "mov.l @$r15+, $r14\n\t" \
+ "mov.l @$r15+, $r13\n\t" \
+ "mov.l @$r15+, $r12\n\t" \
+ "mov.l @$r15+, $r11\n\t" \
+ "mov.l @$r15+, $r10\n\t" \
+ "mov.l @$r15+, $r9\n\t" \
+ "mov.l @$r15+, $r8\n\t" \
+ "lds.l @$r15+, $pr\n\t" \
+ "ldc.l @$r15+, $gbr\n\t" \
:"=&z" (__last) \
:"0" (prev), \
"r" (__ts1), "r" (__ts2), \
{
unsigned long __dummy;
- __asm__ __volatile__("stc sr,%0\n\t"
- "and %1,%0\n\t"
- "ldc %0,sr"
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "and %1, %0\n\t"
+ "ldc %0, $sr"
: "=&r" (__dummy)
: "r" (0xefffffff)
: "memory");
extern __inline__ void __cli(void)
{
unsigned long __dummy;
- __asm__ __volatile__("stc sr,%0\n\t"
- "or %1,%0\n\t"
- "ldc %0,sr"
+ __asm__ __volatile__("stc $sr, %0\n\t"
+ "or %1, %0\n\t"
+ "ldc %0, $sr"
: "=&r" (__dummy)
: "r" (0x10000000)
: "memory");
}
-#define __save_flags(x) \
-__asm__ __volatile__("stc sr,%0":"=r" (x): /* no inputs */ :"memory")
+#define __save_flags(x) \
+x = (__extension__ ({ unsigned long __sr; \
+ __asm__ __volatile__( \
+ "stc $sr, %0" \
+ : "=r" (__sr) \
+ : /* no inputs */ \
+ : "memory"); \
+ (__sr & 0xffff7f0f);}))
#define __save_and_cli(x) \
x = (__extension__ ({ unsigned long __dummy,__sr; \
__asm__ __volatile__( \
- "stc sr,%1\n\t" \
- "or %0,%1\n\t" \
- "stc sr,%0\n\t" \
- "ldc %1,sr" \
+ "stc $sr, %1\n\t" \
+ "or %0, %1\n\t" \
+ "stc $sr, %0\n\t" \
+ "ldc %1, $sr" \
: "=r" (__sr), "=&r" (__dummy) \
: "0" (0x10000000) \
- : "memory"); __sr; }))
-
-#define __restore_flags(x) \
-__asm__ __volatile__("ldc %0,sr": /* no output */: "r" (x):"memory")
+ : "memory"); (__sr & 0xffff7f0f); }))
+
+#define __restore_flags(x) do { \
+ unsigned long __dummy; \
+ __asm__ __volatile__( \
+ "stc $sr, %0\n\t" \
+ "and %1, %0\n\t" \
+ "or %2, %0\n\t" \
+ "ldc %0, $sr" \
+ : "=&r" (__dummy) \
+ : "r" (0x000080f0), /* IMASK+FD */ \
+ "r" (x) \
+ : "memory"); \
+} while (0)
/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)
return retval;
}
+extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ save_and_cli(flags);
+ retval = *m;
+ *m = val & 0xff;
+ restore_flags(flags);
+ return retval;
+}
+
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return xchg_u32(ptr, x);
break;
+ case 1:
+ return xchg_u8(ptr, x);
+ break;
}
__xchg_called_with_bad_pointer();
return x;
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#define BITS_PER_LONG 32
+/* Dma addresses are 32-bits wide. */
+
+typedef u32 dma_addr_t;
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_TYPES_H */
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
-#define __NR_oldumount 22
+#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
-#define __NR_umount 52
+#define __NR_umount2 52
#define __NR_lock 53
#define __NR_ioctl 54
#define __NR_fcntl 55
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
-#define __NR_getrlimit 76
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_streams1 188 /* some people actually want it */
#define __NR_streams2 189 /* some people actually want it */
#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
/* user-visible error numbers are in the range -1 - -125: see <asm-sh/errno.h> */
#define _syscall0(type,name) \
type name(void) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0) \
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4) \
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5) \
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
-register long __sc0 __asm__ ("r0") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc6 __asm__ ("r7") = (long) arg4; \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
+register long __sc7 __asm__ ("$r7") = (long) arg4; \
__asm__ __volatile__ ("trapa #0" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \
__syscall_return(type,__sc0); \
}
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+register long __sc0 __asm__ ("$r0") = __NR_##name; \
+register long __sc4 __asm__ ("$r4") = (long) arg1; \
+register long __sc5 __asm__ ("$r5") = (long) arg2; \
+register long __sc6 __asm__ ("$r6") = (long) arg3; \
+register long __sc7 __asm__ ("$r7") = (long) arg4; \
+__asm__ __volatile__ ("mov.l %2, @-$r15\n\t" \
+ "trapa #0" \
+ : "=z" (__sc0) \
+ : "0" (__sc0), "r" (arg5), \
+ "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7) \
+ : "memory" ); \
+__syscall_return(type,__sc0); \
+}
+
#ifdef __KERNEL_SYSCALLS__
/*
#define __ASM_SH_USER_H
#include <linux/types.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/page.h>
* current->start_stack, so we round each of these in order to be able
* to write an integer number of pages.
*/
+
+struct user_fpu_struct {
+ unsigned long fp_regs[NUM_FPU_REGS];
+ unsigned long xf_regs[NUM_FPU_REGS];
+ unsigned long fpscr;
+ unsigned long fpul;
+};
+
struct user {
struct pt_regs regs; /* entire machine state */
+ struct user_fpu_struct fpu; /* Math Co-processor registers. */
+ int u_fpvalid; /* True if math co-processor being used. */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
struct regs * u_ar0; /* help gdb find registers */
+ struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_PCF8583 25 /* real time clock */
+#define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */
#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
#define I2C_DRIVERID_EXP1 0xF1
#define asmlinkage CPP_ASMLINKAGE
#endif
-#ifdef __sh__
-#define STRINGIFY(X) #X
-#define SYMBOL_NAME_STR(X) STRINGIFY(SYMBOL_NAME(X))
-#ifdef __STDC__
-#define SYMBOL_NAME(X) _##X
-#define SYMBOL_NAME_LABEL(X) _##X##:
-#else
-#define SYMBOL_NAME(X) _/**/X
-#define SYMBOL_NAME_LABEL(X) _/**/X/**/:
-#endif
-#else
#define SYMBOL_NAME_STR(X) #X
#define SYMBOL_NAME(X) X
#ifdef __STDC__
#else
#define SYMBOL_NAME_LABEL(X) X/**/:
#endif
-#endif
#ifdef __arm__
#define __ALIGN .align 0
schedule();
}
tasklet_unlock_wait(t);
+ clear_bit(TASKLET_STATE_SCHED, &t->state);
}
info->indev, info->outdev, &elem,
info->okfn);
}
+ br_read_unlock_bh(BR_NETPROTO_LOCK);
switch (verdict) {
case NF_ACCEPT: