S: (ask for current address)
S: Finland
+N: Dragos Acostachioaie
+E: dragos@iname.com
+W: http://www.arbornet.org/~dragos
+D: /proc/sysvipc
+S: C. Negri 6, bl. D3
+S: Iasi 6600
+S: Romania
+
N: Dave Airlie
E: airlied@linux.ie
W: http://www.csn.ul.ie/~airlied
N: Arnaldo Carvalho de Melo
E: acme@conectiva.com.br
+W: http://www.conectiva.com.br/~acme
D: wanrouter hacking
-D: cyclades 2X sync card driver (still in early devel stage)
+D: Cyclom 2X synchronous card driver
+D: i18n for minicom, net-tools, util-linux, fetchmail, etc
+S: Conectiva Informatica LTDA
S: R. Prof. Rubens Elke Braga, 558 - Parolin
S: 80220-320 Curitiba - Parana
S: Brazil
S: Santa Clara, California 95051
S: USA
+N: Marcelo W. Tosatti
+E: marcelo@conectiva.com.br
+W: http://lie-br.conectiva.com.br/~marcelo/
+D: Miscellaneous kernel hacker
+D: Cyclom 2X driver hacker
+D: linuxconf apache & proftpd module maintainer
+S: Conectiva Informatica LTDA
+S: R. Prof. Rubens Elke Braga, 558 - Parolin
+S: 80220-320 Curitiba - Parana
+S: Brazil
+
N: Stefan Traby
E: stefan@quant-x.com
D: Minor Alpha kernel hacks
module, say M here and read Documentation/modules.txt as well as
Documentation/networking/net-modules.txt.
+SKnet MCA support
+CONFIG_SKMC
+ These are Micro Channel Ethernet adapters. You need to set CONFIG_MCA
+ to use this driver. It's both available as an in-kernel driver and
+ as a module ( = code which can be inserted in and removed from the
+ running kernel whenever you want). If you want to compile it as a module,
+ say M here and read Documentation/modules.txt as well as
+ Documentation/networking/net-modules.txt. If you plan to use more than
+ one network card under Linux, read the Multiple-Ethernet-mini-HOWTO,
+ available from sunsite.unc.edu:/pub/Linux/docs/HOWTO/mini. Supported
+ cards are the SKnet Junior MC2 and the SKnet MC2(+). Distinguishing
+ both cards is done automatically. Note that using multiple boards
+ of different type hasn't been tested with this driver.
+
EISA, VLB, PCI and on board controllers
CONFIG_NET_EISA
This is another class of network cards which attach directly to the
-Read/Write HPFS 1.99b
+Read/Write HPFS 2.00
1998-1999, Mikulas Patocka
email: mikulas@artax.karlin.mff.cuni.cz
file
Now it tries to truncate the file if there's not enough space when deleting
Removed a lot of redundant code
+2.00 Fixed a bug in rename (it was there since 1.96)
+ Better anti-fragmentation strategy
vim: set textwidth=80:
MTRR (Memory Type Range Register) control
-16 May 1999
+3 Jun 1999
Richard Gooch
<rgooch@atnf.csiro.au>
- On Intel Pentium Pro/Pentium II systems the Memory Type Range
- Registers (MTRRs) may be used to control processor access to memory
- ranges. This is most useful when you have a video (VGA) card on a
- PCI or AGP bus. Enabling write-combining allows bus write transfers
- to be combined into a larger transfer before bursting over the
- PCI/AGP bus. This can increase performance of image write operations
- 2.5 times or more.
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+ processor access to memory ranges. This is most useful when you have
+ a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+ allows bus write transfers to be combined into a larger transfer
+ before bursting over the PCI/AGP bus. This can increase performance
+ of image write operations 2.5 times or more.
+
+ The Cyrix 6x86, 6x86MX and M II processors have Address Range
+ Registers (ARRs) which provide a similar functionality to MTRRs. For
+ these, the ARRs are used to emulate the MTRRs.
+
+ The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+ MTRRs. These are supported.
+
+ The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
+ are supported.
The CONFIG_MTRR option creates a /proc/mtrr file which may be used
to manipulate your MTRRs. Typically the X server should use
M: jam@acm.org
S: Maintained
+CYCLADES 2X SYNC CARD DRIVER
+P: Arnaldo Carvalho de Melo
+M: acme@conectiva.com.br
+W: http://www.conectiva.com.br/~acme
+L: cycsyn-devel@bazar.conectiva.com.br
+S: Maintained
+
CYCLADES ASYNC MUX DRIVER
P: Ivan Passos
M: Ivan Passos <ivan@cyclades.com>
W: http://www.rustcorp.com/linux/ipchains
S: Supported
+IP MASQUERADING:
+P: Juanjo Ciarlante
+M: jjciarla@raiz.uncu.edu.ar
+S: Maintained
+
IPX/SPX NETWORK LAYER
P: Jay Schulist
M: Jay Schulist <Jay.Schulist@spacs.k12.wi.us>
VERSION = 2
PATCHLEVEL = 3
-SUBLEVEL = 5
+SUBLEVEL = 6
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
flush_tlb();
}
-static struct vm_area_struct *
-find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
* with a macro so that we can fix it up later..
*/
-#define ALIGN_DEST_TO8(d,s,n) \
+#define ALIGN_DEST_TO8_UP(d,s,n) \
while (d & 7) { \
if (n <= 0) return; \
n--; \
*(char *) d = *(char *) s; \
d++; s++; \
}
+#define ALIGN_DEST_TO8_DN(d,s,n) \
+ while (d & 7) { \
+ if (n <= 0) return; \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
/*
* This should similarly be done with ldq_u*2/mask/stq. The destination
* is aligned, but we don't fill in a full quad-word
*/
-#define DO_REST(d,s,n) \
+#define DO_REST_UP(d,s,n) \
while (n > 0) { \
n--; \
*(char *) d = *(char *) s; \
d++; s++; \
}
+#define DO_REST_DN(d,s,n) \
+ while (n > 0) { \
+ n--; \
+ d--; s--; \
+ *(char *) d = *(char *) s; \
+ }
/*
* This should be done with ldq/mask/stq. The source and destination are
* aligned, but we don't fill in a full quad-word
*/
-#define DO_REST_ALIGNED(d,s,n) DO_REST(d,s,n)
+#define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n)
+#define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n)
/*
* This does unaligned memory copies. We want to avoid storing to
*
* Note the ordering to try to avoid load (and address generation) latencies.
*/
-static inline void __memcpy_unaligned(unsigned long d, unsigned long s, long n)
+static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s,
+ long n)
{
- ALIGN_DEST_TO8(d,s,n);
+ ALIGN_DEST_TO8_UP(d,s,n);
n -= 8; /* to avoid compare against 8 in the loop */
if (n >= 0) {
unsigned long low_word, high_word;
} while (n >= 0);
}
n += 8;
- DO_REST(d,s,n);
+ DO_REST_UP(d,s,n);
+}
+
+static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ /* I don't understand AXP assembler well enough for this. -Tim */
+ s += n;
+ d += n;
+ while (n--)
+ * (char *) --d = * (char *) --s;
}
/*
*
* Note the ordering to try to avoid load (and address generation) latencies.
*/
-static inline void __memcpy_aligned(unsigned long d, unsigned long s, long n)
+static inline void __memcpy_aligned_up (unsigned long d, unsigned long s,
+ long n)
{
- ALIGN_DEST_TO8(d,s,n);
+ ALIGN_DEST_TO8_UP(d,s,n);
n -= 8;
while (n >= 0) {
unsigned long tmp;
d += 8;
}
n += 8;
- DO_REST_ALIGNED(d,s,n);
+ DO_REST_ALIGNED_UP(d,s,n);
+}
+static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s,
+ long n)
+{
+ s += n;
+ d += n;
+ ALIGN_DEST_TO8_DN(d,s,n);
+ n -= 8;
+ while (n >= 0) {
+ unsigned long tmp;
+ s -= 8;
+ __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s));
+ n -= 8;
+ d -= 8;
+ *(unsigned long *) d = tmp;
+ }
+ n += 8;
+ DO_REST_ALIGNED_DN(d,s,n);
}
void * memcpy(void * dest, const void *src, size_t n)
{
if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
- __memcpy_aligned((unsigned long) dest, (unsigned long) src, n);
+ __memcpy_aligned_up ((unsigned long) dest, (unsigned long) src,
+ n);
return dest;
}
- __memcpy_unaligned((unsigned long) dest, (unsigned long) src, n);
+ __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
return dest;
}
/* For backward modules compatibility, define __memcpy. */
asm("__memcpy = memcpy; .globl __memcpy");
+
+void *memmove (void *dest, const void *src, size_t n)
+{
+ if (dest <= src) {
+ if (!(((unsigned long) dest ^ (unsigned long) src) & 7))
+ __memcpy_aligned_up ((unsigned long) dest,
+ (unsigned long) src, n);
+ else
+ __memcpy_unaligned_up ((unsigned long) dest,
+ (unsigned long) src, n);
+ }
+ else {
+ if (!(((unsigned long) dest ^ (unsigned long) src) & 7))
+ __memcpy_aligned_dn ((unsigned long) dest,
+ (unsigned long) src, n);
+ else
+ __memcpy_unaligned_dn ((unsigned long) dest,
+ (unsigned long) src, n);
+ }
+ return dest;
+}
flush_tlb();
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
endmenu
+source drivers/i2o/Config.in
+
source drivers/pnp/Config.in
source drivers/block/Config.in
# CONFIG_PARPORT is not set
# CONFIG_APM is not set
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+# CONFIG_I2O_PCI is not set
+# CONFIG_I2O_BLOCK is not set
+# CONFIG_I2O_LAN is not set
+# CONFIG_I2O_SCSI is not set
+# CONFIG_I2O_PROC is not set
+
#
# Plug and Play support
#
flush_tlb();
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
default:
lock_kernel();
sigaddset(&current->signal, signr);
+ recalc_sigpending(current);
current->flags |= PF_SIGNALED;
do_exit(exit_code);
/* NOTREACHED */
{
void * addr;
struct vm_struct * area;
- unsigned long offset;
+ unsigned long offset, last_addr;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && (phys_addr+size) <= 0x100000)
+ if (phys_addr >= 0xA0000 && last_addr < 0x100000)
return phys_to_virt(phys_addr);
/*
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
-
- /*
- * Don't allow mappings that wrap..
- */
- if (!size || size > phys_addr + size)
- return NULL;
+ size = PAGE_ALIGN(last_addr) - phys_addr;
/*
* Ok, go for it..
flush_tlb_all();
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm, addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
end = PAGE_ALIGN(end);
if (end <= start)
return;
- do_mmap(NULL, start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
+ do_brk(start, end - start);
}
/* Map the last of the bss segment */
if (last_bss > len) {
- do_mmap(NULL, len, (last_bss - len),
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(len, (last_bss - len));
}
kfree(elf_phdata);
unsigned long v;
struct prda *pp;
- v = do_mmap (NULL, PRDA_ADDRESS, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
+ v = do_brk (PRDA_ADDRESS, PAGE_SIZE);
if (v < 0)
return;
len = (elf_phdata->p_filesz + elf_phdata->p_vaddr+ 0xfff) & 0xfffff000;
bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
if (bss > len)
- do_mmap(NULL, len, bss-len,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(len, bss-len);
kfree(elf_phdata);
return 0;
}
flush_tlb_page(vma, addr);
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm, addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
* Check against existing mmap mappings.
*/
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
* Check if we have enough memory..
*/
if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
- return -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
* Ok, looks good - let it rip.
*/
mm->brk = brk;
- do_mmap(NULL, oldbrk, newbrk-oldbrk,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
-
+ do_brk(oldbrk, newbrk-oldbrk);
ret = 0;
out:
$(CC) -D__ASSEMBLY__ -traditional -c -o $*.o $<
CFLAGS = -O -fno-builtin -DSTDC_HEADERS -I$(TOPDIR)/include
-LD_ARGS = -T ../vmlinux.lds -Ttext 0x00800000
+LD_ARGS = -Ttext 0x00400000
OBJCOPY = $(CROSS_COMPILE)objcopy
OBJS = crt0.o start.o main.o misc.o ../coffboot/string.o ../coffboot/zlib.o image.o # initrd.o
initrd.o: ramdisk.image.gz piggyback
./piggyback initrd < ramdisk.image.gz | $(AS) -o initrd.o
-zImage: $(OBJS) no_initrd.o
+zImage: $(OBJS) no_initrd.o mknote
$(LD) $(LD_ARGS) -o $@ $(OBJS) no_initrd.o $(LIBS)
- objcopy zImage zImage
+ ./mknote > note
+ $(OBJCOPY) $@ $@ --add-section=.note=note -R .comment
zImage.initrd: $(OBJS) initrd.o
$(LD) $(LD_ARGS) -o $@ $(OBJS) initrd.o $(LIBS)
clean:
- rm -f piggyback
- rm -f $(OBJS) zImage
+ rm -f piggyback note mknote $(OBJS) zImage
fastdep:
$(TOPDIR)/scripts/mkdep *.[Sch] > .depend
#define get_32be(x) (*(unsigned *)(x))
#define RAM_START 0x00000000
-#define RAM_END 0x00800000 /* only 8M mapped with BATs */
+#define RAM_END (8<<20)
-#define RAM_FREE 0x00540000 /* after image of chrpboot */
+#define RAM_FREE (6<<20) /* after image of chrpboot */
#define PROG_START 0x00010000
char *avail_ram;
void *dst;
unsigned char *im;
unsigned initrd_start, initrd_size;
+ extern char _start;
- printf("chrpboot starting\n\r");
- /* setup_bats(); */
+ printf("chrpboot starting: loaded at 0x%x\n\r", &_start);
if (initrd_len) {
initrd_size = initrd_len;
initrd_start = (RAM_END - initrd_size) & ~0xFFF;
a1 = initrd_start;
a2 = initrd_size;
- printf("initial ramdisk at %x (%u bytes)\n\r", initrd_start,
+ printf("initial ramdisk at 0x%x (%u bytes)\n\r", initrd_start,
initrd_size);
memcpy((char *)initrd_start, initrd_data, initrd_size);
end_avail = (char *)initrd_start;
dst = (void *) PROG_START;
if (im[0] == 0x1f && im[1] == 0x8b) {
- void *cp = (void *) RAM_FREE;
- avail_ram = (void *) (RAM_FREE + ((len + 7) & -8));
- memcpy(cp, im, len);
- printf("gunzipping... ");
- gunzip(dst, 0x400000, cp, &len);
- printf("done\n\r");
-
+ avail_ram = (char *)RAM_FREE;
+ printf("gunzipping (0x%x <- 0x%x:0x%0x)...", dst, im, im+len);
+ gunzip(dst, 0x400000, im, &len);
+ printf("done %u bytes\n\r", len);
} else {
memmove(dst, im, len);
}
flush_cache(dst, len);
-
- sa = PROG_START+12;
+
+ sa = *(unsigned long *)PROG_START+PROG_START;
printf("start address = 0x%x\n\r", sa);
-#if 0
- pause();
-#endif
(*(void (*)())sa)(a1, a2, prom, 0, 0);
printf("returned?\n\r");
s.avail_out = dstlen;
r = inflate(&s, Z_FINISH);
if (r != Z_OK && r != Z_STREAM_END) {
- printf("inflate returned %d\n\r", r);
+ printf("inflate returned %d msg: %s\n\r", r, s.msg);
exit();
}
*lenp = s.next_out - (unsigned char *) dst;
--- /dev/null
+/*
+ * Copyright (C) Cort Dougan 1999.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Generate a note section as per the CHRP specification.
+ *
+ */
+
+#include <stdio.h>
+
+#define PL(x) printf("%c%c%c%c", ((x)>>24)&0xff, ((x)>>16)&0xff, ((x)>>8)&0xff, (x)&0xff );
+
+int main(void)
+{
+/* header */
+ /* namesz */
+ PL(strlen("PowerPC")+1);
+ /* descrsz */
+ PL(6*4);
+ /* type */
+ PL(0x1275);
+ /* name */
+ printf("PowerPC"); printf("%c", 0);
+
+/* descriptor */
+ /* real-mode */
+ PL(0xffffffff);
+ /* real-base */
+ PL(0x00c00000);
+ /* real-size */
+ PL(0xffffffff);
+ /* virt-base */
+ PL(0xffffffff);
+ /* virt-size */
+ PL(0xffffffff);
+ /* load-base */
+ PL(0x4000);
+ return 0;
+}
* - added Z_PACKET_FLUSH (see zlib.h for details)
* - added inflateIncomp
*
- * $Id: zlib.c,v 1.2 1998/09/03 17:40:53 cort Exp $
+ * $Id: zlib.c,v 1.3 1999/05/27 22:22:54 cort Exp $
*/
/*+++++*/
/* load local pointers */
#define LOAD {LOADIN LOADOUT}
+/*
+ * The IBM 150 firmware munges the data right after _etext[]. This
+ * protects it. -- Cort
+ */
+local uInt protect_mask[] = {0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0};
/* And'ing with mask[n] masks the lower n bits */
local uInt inflate_mask[] = {
0x0000,
CONFIG_6xx=y
# CONFIG_PPC64 is not set
# CONFIG_8xx is not set
-CONFIG_PMAC=y
+# CONFIG_PMAC is not set
# CONFIG_PREP is not set
# CONFIG_CHRP is not set
-# CONFIG_ALL_PPC is not set
+CONFIG_ALL_PPC=y
# CONFIG_APUS is not set
# CONFIG_MBX is not set
# CONFIG_SMP is not set
-CONFIG_MACH_SPECIFIC=y
CONFIG_6xx=y
#
# CONFIG_TOTALMP is not set
CONFIG_BOOTX_TEXT=y
# CONFIG_MOTOROLA_HOTSWAP is not set
+# CONFIG_CMDLINE_BOOL is not set
#
# Plug and Play support
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_IDE=y
#
# CONFIG_BLK_DEV_CMD640 is not set
# CONFIG_BLK_DEV_RZ1000 is not set
# CONFIG_BLK_DEV_IDEPCI is not set
-# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_SL82C105=y
CONFIG_BLK_DEV_IDE_PMAC=y
CONFIG_BLK_DEV_IDEDMA_PMAC=y
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
CONFIG_IP_ALIAS=y
-# CONFIG_SYN_COOKIES is not set
+CONFIG_SYN_COOKIES=y
#
# (it is safe to leave these untouched)
# CONFIG_SCSI_SYM53C416 is not set
# CONFIG_SCSI_NCR53C7xx is not set
# CONFIG_SCSI_NCR53C8XX is not set
-# CONFIG_SCSI_SYM53C8XX is not set
+CONFIG_SCSI_SYM53C8XX=y
+CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
+CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
+CONFIG_SCSI_NCR53C8XX_SYNC=20
+# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
+# CONFIG_SCSI_NCR53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_NCR53C8XX_PQS_PDS is not set
+# CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_ACENIC is not set
# CONFIG_NET_ISA is not set
CONFIG_NET_EISA=y
-# CONFIG_PCNET32 is not set
+CONFIG_PCNET32=y
# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
CONFIG_FB_CONTROL=y
CONFIG_FB_PLATINUM=y
CONFIG_FB_VALKYRIE=y
-CONFIG_FB_ATY=y
+# CONFIG_FB_ATY is not set
CONFIG_FB_IMSTT=y
CONFIG_FB_CT65550=y
# CONFIG_FB_S3TRIO is not set
-# CONFIG_FB_MATROX is not set
-CONFIG_FB_ATY=y
+CONFIG_FB_MATROX=y
+# CONFIG_FB_MATROX_MILLENIUM is not set
+CONFIG_FB_MATROX_MYSTIQUE=y
+# CONFIG_FB_MATROX_G100 is not set
+# CONFIG_FB_MATROX_MULTIHEAD is not set
+# CONFIG_FB_ATY is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FBCON_ADVANCED is not set
CONFIG_FBCON_CFB8=y
#
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
-# CONFIG_SERIAL is not set
+CONFIG_SERIAL=m
# CONFIG_SERIAL_EXTENDED is not set
# CONFIG_SERIAL_NONSTANDARD is not set
CONFIG_UNIX98_PTYS=y
CONFIG_UNIX98_PTY_COUNT=256
-# CONFIG_MOUSE is not set
+CONFIG_MOUSE=y
+
+#
+# Mice
+#
+# CONFIG_ATIXL_BUSMOUSE is not set
+# CONFIG_BUSMOUSE is not set
+# CONFIG_MS_BUSMOUSE is not set
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
# CONFIG_QIC02_TAPE is not set
# CONFIG_WATCHDOG is not set
CONFIG_NVRAM=y
# CONFIG_SOUND_SONICVIBES is not set
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
-# CONFIG_SOUND_OSS is not set
+CONFIG_SOUND_OSS=y
+# CONFIG_SOUND_DMAP is not set
+# CONFIG_SOUND_PAS is not set
+# CONFIG_SOUND_SB is not set
+# CONFIG_SOUND_ADLIB is not set
+# CONFIG_SOUND_GUS is not set
+# CONFIG_SOUND_MPU401 is not set
+# CONFIG_SOUND_PSS is not set
+# CONFIG_SOUND_MSS is not set
+# CONFIG_SOUND_SSCAPE is not set
+# CONFIG_SOUND_TRIX is not set
+# CONFIG_SOUND_MAD16 is not set
+# CONFIG_SOUND_WAVEFRONT is not set
+CONFIG_SOUND_CS4232=m
+# CONFIG_SOUND_OPL3SA2 is not set
+# CONFIG_SOUND_MAUI is not set
+# CONFIG_SOUND_SGALAXY is not set
+# CONFIG_SOUND_AD1816 is not set
+# CONFIG_SOUND_OPL3SA1 is not set
+# CONFIG_SOUND_SOFTOSS is not set
+# CONFIG_SOUND_YM3812 is not set
+# CONFIG_SOUND_VMIDI is not set
+# CONFIG_SOUND_UART6850 is not set
+
+#
+# Additional low level sound drivers
+#
+# CONFIG_LOWLEVEL_SOUND is not set
#
# Kernel hacking
CONFIG_6xx=y
# CONFIG_PPC64 is not set
# CONFIG_8xx is not set
-CONFIG_PMAC=y
+# CONFIG_PMAC is not set
# CONFIG_PREP is not set
# CONFIG_CHRP is not set
-# CONFIG_ALL_PPC is not set
+CONFIG_ALL_PPC=y
# CONFIG_APUS is not set
# CONFIG_MBX is not set
# CONFIG_SMP is not set
-CONFIG_MACH_SPECIFIC=y
CONFIG_6xx=y
#
# CONFIG_TOTALMP is not set
CONFIG_BOOTX_TEXT=y
# CONFIG_MOTOROLA_HOTSWAP is not set
+# CONFIG_CMDLINE_BOOL is not set
#
# Plug and Play support
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_IDE=y
#
# CONFIG_BLK_DEV_CMD640 is not set
# CONFIG_BLK_DEV_RZ1000 is not set
# CONFIG_BLK_DEV_IDEPCI is not set
-# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_SL82C105=y
CONFIG_BLK_DEV_IDE_PMAC=y
CONFIG_BLK_DEV_IDEDMA_PMAC=y
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
CONFIG_IP_ALIAS=y
-# CONFIG_SYN_COOKIES is not set
+CONFIG_SYN_COOKIES=y
#
# (it is safe to leave these untouched)
# CONFIG_SCSI_SYM53C416 is not set
# CONFIG_SCSI_NCR53C7xx is not set
# CONFIG_SCSI_NCR53C8XX is not set
-# CONFIG_SCSI_SYM53C8XX is not set
+CONFIG_SCSI_SYM53C8XX=y
+CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
+CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32
+CONFIG_SCSI_NCR53C8XX_SYNC=20
+# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
+# CONFIG_SCSI_NCR53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_NCR53C8XX_PQS_PDS is not set
+# CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_ACENIC is not set
# CONFIG_NET_ISA is not set
CONFIG_NET_EISA=y
-# CONFIG_PCNET32 is not set
+CONFIG_PCNET32=y
# CONFIG_AC3200 is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
CONFIG_FB_CONTROL=y
CONFIG_FB_PLATINUM=y
CONFIG_FB_VALKYRIE=y
-CONFIG_FB_ATY=y
+# CONFIG_FB_ATY is not set
CONFIG_FB_IMSTT=y
CONFIG_FB_CT65550=y
# CONFIG_FB_S3TRIO is not set
-# CONFIG_FB_MATROX is not set
-CONFIG_FB_ATY=y
+CONFIG_FB_MATROX=y
+# CONFIG_FB_MATROX_MILLENIUM is not set
+CONFIG_FB_MATROX_MYSTIQUE=y
+# CONFIG_FB_MATROX_G100 is not set
+# CONFIG_FB_MATROX_MULTIHEAD is not set
+# CONFIG_FB_ATY is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FBCON_ADVANCED is not set
CONFIG_FBCON_CFB8=y
#
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
-# CONFIG_SERIAL is not set
+CONFIG_SERIAL=m
# CONFIG_SERIAL_EXTENDED is not set
# CONFIG_SERIAL_NONSTANDARD is not set
CONFIG_UNIX98_PTYS=y
CONFIG_UNIX98_PTY_COUNT=256
-# CONFIG_MOUSE is not set
+CONFIG_MOUSE=y
+
+#
+# Mice
+#
+# CONFIG_ATIXL_BUSMOUSE is not set
+# CONFIG_BUSMOUSE is not set
+# CONFIG_MS_BUSMOUSE is not set
+CONFIG_PSMOUSE=y
+# CONFIG_82C710_MOUSE is not set
+# CONFIG_PC110_PAD is not set
# CONFIG_QIC02_TAPE is not set
# CONFIG_WATCHDOG is not set
CONFIG_NVRAM=y
return PCIBIOS_SUCCESSFUL;
}
+
+int rtas_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char *val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 1 ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int rtas_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short *val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 2 ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+int rtas_pcibios_read_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int *val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 4 ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int rtas_pcibios_write_config_byte(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned char val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 1, (ulong)val ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int rtas_pcibios_write_config_word(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned short val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 2, (ulong)val ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+int rtas_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
+ unsigned char offset, unsigned int val)
+{
+ unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
+ if ( call_rtas( "write-pci-config", 3, 1, NULL, addr, 4, (ulong)val ) != 0 )
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+
/*
* Temporary fixes for PCI devices. These should be replaced by OF query
* code -- Geert
decl_config_access_method(grackle);
decl_config_access_method(indirect);
+decl_config_access_method(rtas);
void __init
chrp_setup_pci_ptrs(void)
{
/* find out how many pythons */
while ( (py = py->next) ) python_busnr++;
- set_config_access_method(python);
+ set_config_access_method(python);
/*
* We base these values on the machine type but should
* try to read them from the python controller itself.
}
else
{
- pci_dram_offset = 0;
- isa_mem_base = 0xf7000000;
- isa_io_base = 0xf8000000;
- set_config_access_method(gg2);
+ if ( !strncmp("IBM,7043-150", get_property(find_path_device("/"), "name", NULL),12) )
+ {
+ pci_dram_offset = 0;
+ isa_mem_base = 0x80000000;
+ isa_io_base = 0xfe000000;
+ pci_config_address = (unsigned int *)0xfec00000;
+ pci_config_data = (unsigned char *)0xfee00000;
+ set_config_access_method(indirect);
+ }
+ else
+ {
+ pci_dram_offset = 0;
+ isa_mem_base = 0xf7000000;
+ isa_io_base = 0xf8000000;
+ set_config_access_method(gg2);
+ }
}
}
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/openpic.h>
+#include <linux/version.h>
#include <asm/mmu.h>
#include <asm/processor.h>
unsigned long chrp_get_rtc_time(void);
int chrp_set_rtc_time(unsigned long nowtime);
+unsigned long rtas_event_scan_rate = 0, rtas_event_scan_ct = 0;
void chrp_calibrate_decr(void);
void chrp_time_init(void);
chrp_setup_arch(unsigned long * memory_start_p, unsigned long * memory_end_p))
{
extern char cmd_line[];
+ struct device_node *device;
/* init to some ~sane value until calibrate_delay() runs */
loops_per_sec = 50000000;
find_path_device("/"), "platform-open-pic", NULL);
OpenPIC = ioremap((unsigned long)OpenPIC, sizeof(struct OpenPIC));
}
-
+
/*
* Fix the Super I/O configuration
*/
- sio_init();
+ /*sio_init();*/
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
- /* my starmax 6000 needs this but the longtrail shouldn't do it -- Cort */
- if ( !strncmp("MOT", get_property(find_path_device("/"),
- "model", NULL),3) )
- *memory_start_p = pmac_find_bridges(*memory_start_p, *memory_end_p);
+ *memory_start_p = pmac_find_bridges(*memory_start_p, *memory_end_p);
+
+ /* Get the event scan rate for the rtas so we know how
+ * often it expects a heartbeat. -- Cort
+ */
+ if ( rtas_data )
+ {
+ struct property *p;
+ device = find_devices("rtas");
+ for ( p = device->properties;
+ strncmp(p->name, "rtas-event-scan-rate", 20) && p ;
+ p = p->next )
+ /* nothing */ ;
+ if ( p && *(unsigned long *)p->value )
+ {
+ rtas_event_scan_rate = (HZ/(*(unsigned long *)p->value)*30)-1;
+ rtas_event_scan_ct = 1;
+ printk("RTAS Event Scan Rate: %lu (%lu jiffies)\n",
+ *(unsigned long *)p->value, rtas_event_scan_rate );
+ }
+ }
}
+void
+chrp_event_scan(void)
+{
+ unsigned char log[1024];
+ if ( rtas_event_scan_rate && (rtas_event_scan_ct-- <= 0) )
+ {
+ call_rtas( "event-scan", 4, 1, NULL, 0x0, 1, __pa(log), 1024 );
+ rtas_event_scan_ct = rtas_event_scan_rate;
+ }
+}
+
void
chrp_restart(char *cmd)
{
-#if 0
- extern unsigned int rtas_entry, rtas_data, rtas_size;
printk("RTAS system-reboot returned %d\n",
call_rtas("system-reboot", 0, 1, NULL));
- printk("rtas_entry: %08lx rtas_data: %08lx rtas_size: %08lx\n",
- rtas_entry,rtas_data,rtas_size);
for (;;);
-#else
- printk("System Halted\n");
- while(1);
-#endif
}
void
chrp_power_off(void)
{
- /* RTAS doesn't seem to work on Longtrail.
- For now, do it the same way as the PReP. */
-#if 0
- extern unsigned int rtas_entry, rtas_data, rtas_size;
+ /* allow power on only with power button press */
+#define PWR_FIELD(x) (0x8000000000000000 >> ((x)-96))
printk("RTAS power-off returned %d\n",
- call_rtas("power-off", 2, 1, NULL, 0, 0));
- printk("rtas_entry: %08lx rtas_data: %08lx rtas_size: %08lx\n",
- rtas_entry,rtas_data,rtas_size);
+ call_rtas("power-off", 2, 1, NULL,
+ ((PWR_FIELD(96)|PWR_FIELD(97))>>32)&0xffffffff,
+ (PWR_FIELD(96)|PWR_FIELD(97))&0xffffffff));
+#undef PWR_FIELD
for (;;);
-#else
- chrp_restart(NULL);
-#endif
}
void
chrp_halt(void)
{
- chrp_restart(NULL);
+ chrp_power_off();
}
u_int
ppc_ide_md.ide_init_hwif = chrp_ide_init_hwif_ports;
ppc_ide_md.io_base = _IO_BASE;
-#endif
+#endif
+ /*
+ * Print the banner, then scroll down so boot progress
+ * can be printed. -- Cort
+ */
+ chrp_progress("Linux/PPC "UTS_RELEASE"\n");
+}
+
+void chrp_progress(char *s)
+{
+ extern unsigned int rtas_data;
+
+ if ( (_machine != _MACH_chrp) || !rtas_data )
+ return;
+ call_rtas( "display-character", 1, 1, NULL, '\r' );
+ while ( *s )
+ call_rtas( "display-character", 1, 1, NULL, *s++ );
}
/*
* arch/ppc/kernel/head.S
*
- * $Id: head.S,v 1.131 1999/05/14 22:37:21 cort Exp $
+ * $Id: head.S,v 1.133 1999/05/20 05:13:08 cort Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*/
.globl enter_rtas
enter_rtas:
- stwu r1,-16(r1)
mflr r0
stw r0,20(r1)
lis r4,rtas_data@ha
andi. r9,r9,MSR_ME|MSR_RI
sync /* disable interrupts so SRR0/1 */
mtmsr r0 /* don't get trashed */
- li r6,0
mtlr r6
mtspr SPRG2,r7
mtspr SRR0,r8
/*
- * $Id: idle.c,v 1.61 1999/03/18 04:15:45 cort Exp $
+ * $Id: idle.c,v 1.62 1999/05/24 05:43:18 cort Exp $
*
* Idle daemon for PowerPC. Idle daemon will handle any action
* that needs to be taken when the system becomes idle.
/* endless loop with no priority at all */
current->priority = 0;
current->counter = -100;
+ init_idle();
for (;;)
{
__sti();
/*
- * $Id: irq.c,v 1.105 1999/03/25 19:51:51 cort Exp $
+ * $Id: irq.c,v 1.106 1999/05/25 21:16:04 cort Exp $
*
* arch/ppc/kernel/irq.c
*
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);
-/* Fixme - Need to figure out a way to get rid of this - Corey */
volatile unsigned char *chrp_int_ack_special;
#ifdef CONFIG_APUS
.long sys_getresuid /* 165 */
.long sys_query_module
.long sys_poll
+#ifdef CONFIG_NFS
.long sys_nfsservctl
+#else
+ .long sys_ni_syscall
+#endif
.long sys_setresgid
.long sys_getresgid /* 170 */
.long sys_prctl
/*
- * $Id: prom.c,v 1.54 1999/05/10 04:43:46 cort Exp $
+ * $Id: prom.c,v 1.60 1999/05/25 01:42:41 cort Exp $
*
* Procedures for interfacing to the Open Firmware PROM on
* Power Macintosh computers.
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/bootx.h>
+#include <asm/system.h>
/*
* Properties whose value is longer than this get excluded from our
mem = copy_device_tree(mem, mem + (1<<20));
prom_print(RELOC("done\n"));
+
+ RELOC(klimit) = (char *) (mem - offset);
+
prom_rtas = call_prom(RELOC("finddevice"), 1, 1, RELOC("/rtas"));
if (prom_rtas != (void *) -1) {
RELOC(rtas_size) = 0;
if (RELOC(rtas_size) == 0) {
RELOC(rtas_data) = 0;
} else {
- mem = (mem + 4095) & -4096; /* round to page bdry */
+ /*
+ * We do _not_ want the rtas_data inside the klimit
+	 * boundary since it'll be squashed when we do the
+ * relocate of the kernel on chrp right after prom_init()
+ * in head.S. So, we just pick a spot in memory.
+ * -- Cort
+ */
+#if 0
+ mem = (mem + 4095) & -4096;
RELOC(rtas_data) = mem + KERNELBASE;
mem += RELOC(rtas_size);
+#endif
+ RELOC(rtas_data) = (6<<20) + KERNELBASE;
}
prom_rtas = call_prom(RELOC("open"), 1, 1, RELOC("/rtas"));
{
else
prom_print(RELOC(" done\n"));
}
- RELOC(klimit) = (char *) (mem - offset);
+
#ifdef CONFIG_SMP
/*
* With CHRP SMP we need to use the OF to start the other
unsigned long *outputs, ...)
{
va_list list;
- int i;
+ int i, s;
struct device_node *rtas;
int *tokp;
union {
printk(KERN_ERR "No RTAS service called %s\n", service);
return -1;
}
- u.words[0] = __pa(*tokp);
+ u.words[0] = *tokp;
u.words[1] = nargs;
u.words[2] = nret;
va_start(list, outputs);
for (i = 0; i < nargs; ++i)
u.words[i+3] = va_arg(list, unsigned long);
va_end(list);
+
+ s = _disable_interrupts();
spin_lock(&rtas_lock);
enter_rtas((void *)__pa(&u));
spin_unlock(&rtas_lock);
+ _enable_interrupts(s);
if (nret > 1 && outputs != NULL)
for (i = 0; i < nret-1; ++i)
outputs[i] = u.words[i+nargs+4];
abort()
{
#ifdef CONFIG_XMON
- extern void xmon(void *);
- xmon(0);
+ xmon(NULL);
#endif
prom_exit();
}
flush_tlb_all();
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
/*
- * $Id: smp.c,v 1.49 1999/03/18 04:16:31 cort Exp $
+ * $Id: smp.c,v 1.52 1999/05/23 22:43:51 cort Exp $
*
* Smp support for ppc.
*
void __init smp_callin(void)
{
+ int i;
+
printk("SMP %d: smp_callin()\n",current->processor);
smp_store_cpu_info(current->processor);
set_dec(decrementer_count);
+
#if 0
current->mm->mmap->vm_page_prot = PAGE_SHARED;
current->mm->mmap->vm_start = PAGE_OFFSET;
int fd[2];
int error;
- error = verify_area(VERIFY_WRITE, fildes, 8);
- if (error)
- return error;
lock_kernel();
error = do_pipe(fd);
unlock_kernel();
- if (error)
- return error;
- if (__put_user(fd[0],0+fildes)
- || __put_user(fd[1],1+fildes))
- return -EFAULT; /* should we close the fds? */
- return 0;
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
}
asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
/*
- * $Id: time.c,v 1.47 1999/03/18 05:11:11 cort Exp $
+ * $Id: time.c,v 1.48 1999/05/22 19:35:57 cort Exp $
* Common time routines among all ppc machines.
*
* Written by Cort Dougan (cort@cs.nmt.edu) to merge
smp_local_timer_interrupt(regs);
#endif
- /* Fixme - make this more generic - Corey */
#ifdef CONFIG_APUS
{
extern void apus_heartbeat (void);
apus_heartbeat ();
}
#endif
+#if defined(CONFIG_ALL_PPC) || defined(CONFIG_CHRP)
+ if ( _machine == _MACH_chrp )
+ chrp_event_scan();
+#endif
+
hardirq_exit(cpu);
}
/*
- * $Id: init.c,v 1.165 1999/05/14 22:37:29 cort Exp $
+ * $Id: init.c,v 1.166 1999/05/22 18:18:30 cort Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
for (i = 0; i < size; i += PAGE_SIZE)
map_page(&init_task, v+i, p+i, flags);
out:
- return (void *) (v + (p & ~PAGE_MASK));
+ return (void *) (v + (addr & ~PAGE_MASK));
}
void iounmap(void *addr)
int c;
c = inchar();
- if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
+ if ((isxdigit(c) && (c != 'f') && (c != 'd')) || (c == '\n'))
termch = c;
scanhex(&adrs);
if( termch != '\n')
flush_tlb_page(vma, addr);
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk,
- unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
unsigned long newbrk, oldbrk;
down(¤t->mm->mmap_sem);
- lock_kernel();
if(ARCH_SUN4C_SUN4) {
if(brk >= 0x20000000 && brk < 0xe0000000) {
goto out;
* Ok, we have probably got enough memory - let it rip.
*/
current->mm->brk = brk;
- do_mmap(NULL, oldbrk, newbrk-oldbrk,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+	do_brk(oldbrk, newbrk-oldbrk);
retval = 0;
out:
- unlock_kernel();
up(¤t->mm->mmap_sem);
return retval;
}
end = PAGE_ALIGN(end);
if (end <= start)
return;
- do_mmap(NULL, start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
+ do_brk(start, end - start);
}
/*
current->flags &= ~PF_FORKNOEXEC;
if (N_MAGIC(ex) == NMAGIC) {
/* Fuck me plenty... */
- error = do_mmap(NULL, N_TXTADDR(ex), ex.a_text,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ error = do_brk(N_TXTADDR(ex), ex.a_text);
read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
ex.a_text, 0);
- error = do_mmap(NULL, N_DATADDR(ex), ex.a_data,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ error = do_brk(N_DATADDR(ex), ex.a_data);
read_exec(bprm->dentry, fd_offset + ex.a_text, (char *) N_DATADDR(ex),
ex.a_data, 0);
goto beyond_if;
}
if (N_MAGIC(ex) == OMAGIC) {
- do_mmap(NULL, N_TXTADDR(ex) & PAGE_MASK,
- ex.a_text+ex.a_data + PAGE_SIZE - 1,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(N_TXTADDR(ex) & PAGE_MASK,
+ ex.a_text+ex.a_data + PAGE_SIZE - 1);
read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
ex.a_text+ex.a_data, 0);
} else {
if (!file->f_op || !file->f_op->mmap) {
sys_close(fd);
- do_mmap(NULL, 0, ex.a_text+ex.a_data,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(0, ex.a_text+ex.a_data);
read_exec(bprm->dentry, fd_offset,
(char *) N_TXTADDR(ex), ex.a_text+ex.a_data, 0);
goto beyond_if;
len = PAGE_ALIGN(ex.a_text + ex.a_data);
bss = ex.a_text + ex.a_data + ex.a_bss;
if (bss > len) {
- error = do_mmap(NULL, start_addr + len, bss - len,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_FIXED, 0);
+ error = do_brk(start_addr + len, bss - len);
retval = error;
if (error != start_addr + len)
goto out_putf;
flush_tlb_page(vma, addr);
}
-static struct vm_area_struct * find_extend_vma(struct task_struct * tsk,
- unsigned long addr)
-{
- struct vm_area_struct * vma;
-
- addr &= PAGE_MASK;
- vma = find_vma(tsk->mm,addr);
- if (!vma)
- return NULL;
- if (vma->vm_start <= addr)
- return vma;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- return NULL;
- if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
- return NULL;
- vma->vm_offset -= vma->vm_start - addr;
- vma->vm_start = addr;
- return vma;
-}
-
/*
* This routine checks the page boundaries, and that the offset is
* within the task area. It then calls get_long() to read a long.
unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;
down(¤t->mm->mmap_sem);
- lock_kernel();
if (brk < current->mm->end_code)
goto out;
newbrk = PAGE_ALIGN(brk);
goto out;
/* Ok, we have probably got enough memory - let it rip. */
current->mm->brk = brk;
- do_mmap(NULL, oldbrk, newbrk-oldbrk,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(oldbrk, newbrk-oldbrk);
retval = 0;
out:
- unlock_kernel();
up(¤t->mm->mmap_sem);
return retval;
}
endif
endif
+ifeq ($(CONFIG_I2O),y)
+SUB_DIRS += i2o
+MOD_SUB_DIRS += i2o
+else
+ ifeq ($(CONFIG_I2O),m)
+ MOD_SUB_DIRS += i2o
+ endif
+endif
+
# If CONFIG_SCSI is set, the core of SCSI support will be added to the kernel,
# but some of the low-level things may also be modules.
ifeq ($(CONFIG_SCSI),y)
__initfunc(void ide_init_cy82c693(ide_hwif_t *hwif))
{
hwif->chipset = ide_cy82c693;
- hwif->dmaproc = &cy82c693_dmaproc;
+ if (hwif->dma_base)
+ hwif->dmaproc = &cy82c693_dmaproc;
hwif->tuneproc = &cy82c693_tune_drive;
init_cy82c693_chip(hwif->pci_dev);
ide_ioreg_t ctrl_port,
int *irq)
{
- ide_ioreg_t reg = ide_ioreg_t data_port;
int i, r;
if (data_port == 0)
r = check_media_bay_by_base(data_port, MB_CD);
if (r == -EINVAL)
return;
-
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = reg * 0x10;
- reg += 1;
- }
- if (ctrl_port) {
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
- } else {
- hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x160;
- }
+
+ for ( i = 0; i < 8 ; ++i )
+ hw->io_ports[i] = data_port + i * 0x10;
+ hw->io_ports[8] = data_port + 0x160;
+
if (irq != NULL) {
*irq = 0;
for (i = 0; i < MAX_HWIFS; ++i) {
- if (base == pmac_ide_regbase[i]) {
+ if (data_port == pmac_ide_regbase[i]) {
*irq = pmac_ide_irq[i];
break;
}
np->full_name);
continue;
}
-
+
base = (unsigned long) ioremap(np->addrs[0].address, 0x200);
/* XXX This is bogus. Should be fixed in the registry by checking
case IDE3_MAJOR:
case IDE4_MAJOR:
case IDE5_MAJOR:
+ case IDE6_MAJOR:
+ case IDE7_MAJOR:
case ACSI_MAJOR:
case MFM_ACORN_MAJOR:
/*
case SCSI_DISK6_MAJOR:
case SCSI_DISK7_MAJOR:
case SCSI_CDROM_MAJOR:
+ case I2O_MAJOR:
do {
if (req->sem)
#endif
}
- outb(0x60, hwif->dma_base + 2);
+ if (hwif->dma_base)
+ outb(0x60, hwif->dma_base + 2);
if (!using_inta)
hwif->irq = hwif->channel ? 15 : 14; /* legacy mode */
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
hwif->irq = hwif->mate->irq; /* share IRQ with mate */
- hwif->dmaproc = &ns87415_dmaproc;
+ if (hwif->dma_base)
+ hwif->dmaproc = &ns87415_dmaproc;
hwif->selectproc = &ns87415_selectproc;
}
/*
- * linux/drivers/block/piix.c Version 0.22 March 29, 1999
+ * linux/drivers/block/piix.c Version 0.23 May 29, 1999
*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-1999 Andre Hedrick, Author and Maintainer
* 41
* 43
*
- * | PIO 0 | c0 | 80 | 0 |
- * | PIO 2 | SW2 | d0 | 90 | 4 |
- * | PIO 3 | MW1 | e1 | a1 | 9 |
- * | PIO 4 | MW2 | e3 | a3 | b |
+ * | PIO 0 | c0 | 80 | 0 | piix_tune_drive(drive, 0);
+ * | PIO 2 | SW2 | d0 | 90 | 4 | piix_tune_drive(drive, 2);
+ * | PIO 3 | MW1 | e1 | a1 | 9 | piix_tune_drive(drive, 3);
+ * | PIO 4 | MW2 | e3 | a3 | b | piix_tune_drive(drive, 4);
*
* sitre = word40 & 0x4000; primary
* sitre = word42 & 0x4000; secondary
#include "ide_modes.h"
-#define PIIX_DMA_PROC 0
-#define PIIX_DEBUG_SET_XFER 0
#define PIIX_DEBUG_DRIVE_INFO 0
+extern char *ide_xfer_verbose (byte xfer_rate);
+
+/*
+ *
+ */
+static byte piix_dma_2_pio (byte xfer_rate) {
+ switch(xfer_rate) {
+ case XFER_UDMA_4:
+ case XFER_UDMA_3:
+ case XFER_UDMA_2:
+ case XFER_UDMA_1:
+ case XFER_UDMA_0:
+ case XFER_MW_DMA_2:
+ case XFER_PIO_4:
+ return 4;
+ case XFER_MW_DMA_1:
+ case XFER_PIO_3:
+ return 3;
+ case XFER_SW_DMA_2:
+ case XFER_PIO_2:
+ return 2;
+ case XFER_MW_DMA_0:
+ case XFER_SW_DMA_1:
+ case XFER_SW_DMA_0:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ case XFER_PIO_SLOW:
+ default:
+ return 0;
+ }
+}
+
/*
* Based on settings done by AMI BIOS
* (might be usefull if drive is not registered in CMOS for any reason).
{
unsigned long flags;
u16 master_data;
- byte slave_data, speed;
- int err;
- int is_slave = (&HWIF(drive)->drives[1] == drive);
- int master_port = HWIF(drive)->index ? 0x42 : 0x40;
- int slave_port = 0x44;
- /* ISP RTC */
- byte timings[][2] = { { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
+ byte slave_data;
+ int is_slave = (&HWIF(drive)->drives[1] == drive);
+ int master_port = HWIF(drive)->index ? 0x42 : 0x40;
+ int slave_port = 0x44;
+ /* ISP RTC */
+ byte timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+#if 1
+ pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
+#else
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+#endif
pci_read_config_word(HWIF(drive)->pci_dev, master_port, &master_data);
if (is_slave) {
master_data = master_data | 0x4000;
if (is_slave)
pci_write_config_byte(HWIF(drive)->pci_dev, slave_port, slave_data);
restore_flags(flags);
-
- switch(pio) {
- case 4: speed = XFER_PIO_4;break;
- case 3: speed = XFER_PIO_3;break;
- case 2: speed = XFER_PIO_2;break;
- case 1: speed = XFER_PIO_1;break;
- default:
- speed = (!drive->id->tPIO) ? XFER_PIO_0 : XFER_PIO_SLOW;
- break;
- }
-
- err = ide_wait_cmd(drive, WIN_SETFEATURES, speed, SETFEATURES_XFER, 0, NULL);
}
-extern char *ide_xfer_verbose (byte xfer_rate);
-
static int piix_config_drive_for_dma(ide_drive_t *drive, int ultra)
{
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
+ unsigned long flags;
int sitre;
short reg4042, reg44, reg48, reg4a;
byte speed;
pci_read_config_word(dev, 0x48, ®48);
pci_read_config_word(dev, 0x4a, ®4a);
-#if PIIX_DEBUG_SET_XFER
- printk("PIIX%s: DMA enable ",
- (dev->device == PCI_DEVICE_ID_INTEL_82371FB_0) ? "a" :
- (dev->device == PCI_DEVICE_ID_INTEL_82371FB_1) ? "b" :
- (dev->device == PCI_DEVICE_ID_INTEL_82371SB_1) ? "3" :
- (dev->device == PCI_DEVICE_ID_INTEL_82371AB) ? "4" : " UNKNOWN" );
-#endif /* PIIX_DEBUG_SET_XFER */
+ save_flags(flags);
+ cli();
if (id->dma_ultra && (ultra)) {
if (!(reg48 & u_flag)) {
pci_write_config_word(dev, 0x48, reg48|u_flag);
}
} else {
- pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
+ if (reg48 & u_flag) {
+ pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
+ }
}
if ((id->dma_ultra & 0x0004) && (ultra)) {
+ drive->id->dma_mword &= ~0x0F00;
+ drive->id->dma_1word &= ~0x0F00;
if (!((id->dma_ultra >> 8) & 4)) {
drive->id->dma_ultra &= ~0x0F00;
drive->id->dma_ultra |= 0x0404;
}
speed = XFER_UDMA_2;
} else if ((id->dma_ultra & 0x0002) && (ultra)) {
+ drive->id->dma_mword &= ~0x0F00;
+ drive->id->dma_1word &= ~0x0F00;
if (!((id->dma_ultra >> 8) & 2)) {
drive->id->dma_ultra &= ~0x0F00;
drive->id->dma_ultra |= 0x0202;
}
speed = XFER_UDMA_1;
} else if ((id->dma_ultra & 0x0001) && (ultra)) {
+ drive->id->dma_mword &= ~0x0F00;
+ drive->id->dma_1word &= ~0x0F00;
if (!((id->dma_ultra >> 8) & 1)) {
drive->id->dma_ultra &= ~0x0F00;
drive->id->dma_ultra |= 0x0101;
}
speed = XFER_UDMA_0;
} else if (id->dma_mword & 0x0004) {
- drive->id->dma_ultra &= ~0x0F0F;
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ drive->id->dma_ultra &= ~0x0F00;
+ drive->id->dma_1word &= ~0x0F00;
if (!((id->dma_mword >> 8) & 4)) {
drive->id->dma_mword &= ~0x0F00;
drive->id->dma_mword |= 0x0404;
}
speed = XFER_MW_DMA_2;
} else if (id->dma_mword & 0x0002) {
- drive->id->dma_ultra &= ~0x0F0F;
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ drive->id->dma_ultra &= ~0x0F00;
+ drive->id->dma_1word &= ~0x0F00;
if (!((id->dma_mword >> 8) & 2)) {
drive->id->dma_mword &= ~0x0F00;
drive->id->dma_mword |= 0x0202;
}
speed = XFER_MW_DMA_1;
} else if (id->dma_1word & 0x0004) {
- drive->id->dma_ultra &= ~0x0F0F;
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ drive->id->dma_ultra &= ~0x0F00;
+ drive->id->dma_mword &= ~0x0F00;
if (!((id->dma_1word >> 8) & 4)) {
drive->id->dma_1word &= ~0x0F00;
drive->id->dma_1word |= 0x0404;
}
speed = XFER_SW_DMA_2;
} else {
- return ide_dma_off_quietly;
+#if 0
+ speed = XFER_PIO_0;
+#else
+ speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
+#endif
}
+ restore_flags(flags);
+ piix_tune_drive(drive, piix_dma_2_pio(speed));
+
(void) ide_wait_cmd(drive, WIN_SETFEATURES, speed, SETFEATURES_XFER, 0, NULL);
#if PIIX_DEBUG_DRIVE_INFO
int ultra = (HWIF(drive)->pci_dev->device == PCI_DEVICE_ID_INTEL_82371AB) ? 1 : 0;
switch (func) {
case ide_dma_check:
- return piix_config_drive_for_dma(drive, ultra);
+ return ide_dmaproc((ide_dma_action_t) piix_config_drive_for_dma(drive, ultra), drive);
default :
break;
}
void ide_init_piix (ide_hwif_t *hwif)
{
hwif->tuneproc = &piix_tune_drive;
-#if PIIX_DMA_PROC
- hwif->dmaproc = &piix_dmaproc;
-#endif /* PIIX_DMA_PROC */
+ if (hwif->dma_base) {
+ hwif->dmaproc = &piix_dmaproc;
+ }
}
#ifdef CONFIG_BLK_DEV_INITRD
__initfunc(void initrd_load(void))
{
- rd_load_image(MKDEV(MAJOR_NR, INITRD_MINOR),0,0);
+ rd_load_image(MKDEV(MAJOR_NR, INITRD_MINOR),rd_image_start,0);
}
#endif
{
mouse.active = 0;
mouse.ready = 0;
- mouse.wait = NULL;
+ init_waitqueue_head(&mouse.wait);
#ifdef __powerpc__
if ( (_machine != _MACH_chrp) && (_machine != _MACH_Pmac) )
--- /dev/null
+mainmenu_option next_comment
+comment 'I2O device support'
+
+tristate 'I2O support' CONFIG_I2O
+
+dep_tristate 'I2O PCI support' CONFIG_I2O_PCI $CONFIG_I2O
+dep_tristate 'I2O Block OSM' CONFIG_I2O_BLOCK $CONFIG_I2O
+dep_tristate 'I2O LAN OSM' CONFIG_I2O_LAN $CONFIG_I2O
+dep_tristate 'I2O SCSI OSM' CONFIG_I2O_SCSI $CONFIG_I2O
+dep_tristate 'I2O /proc support' CONFIG_I2O_PROC $CONFIG_I2O
+
+endmenu
--- /dev/null
+#
+# Makefile for the kernel I2O OSM.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now inherited from the
+# parent makefile.
+#
+
+#
+# Note : at this point, these files are compiled on all systems.
+# In the future, some of these should be built conditionally.
+#
+
+SUB_DIRS :=
+MOD_SUB_DIRS := $(SUB_DIRS)
+ALL_SUB_DIRS := $(SUB_DIRS)
+
+
+L_TARGET := i2o.a
+L_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_I2O_PCI),y)
+L_OBJS += i2o_pci.o
+else
+ ifeq ($(CONFIG_I2O_PCI),m)
+ M_OBJS += i2o_pci.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O),y)
+LX_OBJS += i2o_core.o i2o_config.o
+else
+ ifeq ($(CONFIG_I2O),m)
+ MX_OBJS += i2o_core.o i2o_config.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_BLOCK),y)
+LX_OBJS += i2o_block.o
+else
+ ifeq ($(CONFIG_I2O_BLOCK),m)
+ MX_OBJS += i2o_block.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_LAN),y)
+LX_OBJS += i2o_lan.o
+else
+ ifeq ($(CONFIG_I2O_LAN),m)
+ MX_OBJS += i2o_lan.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_SCSI),y)
+LX_OBJS += i2o_scsi.o
+else
+ ifeq ($(CONFIG_I2O_SCSI),m)
+ MX_OBJS += i2o_scsi.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_PROC),y)
+LX_OBJS += i2o_proc.o
+else
+ ifeq ($(CONFIG_I2O_PROC),m)
+ MX_OBJS += i2o_proc.o
+ endif
+endif
+
+include $(TOPDIR)/Rules.make
+
--- /dev/null
+
+ Linux I2O Support (c) Copyright 1999 Red Hat Software
+ and others.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+AUTHORS (so far)
+
+Alan Cox, Building Number Three Ltd.
+ Core code, SCSI and Block OSMs
+
+Steve Ralston, LSI Logic Corp.
+ Debugging SCSI and Block OSM
+
+Deepak Saxena, Intel Corp.
+ /proc interface, bug fixes
+ Ioctl interfaces for control
+
+Philip Rumpf
+ Fixed assorted dumb SMP locking bugs
+
+Juha Sievanen, University of Helsinki, Finland
+ LAN OSM
+ Bug fixes
+ Core code extensions
+
+CREDITS
+
+ This work was made possible by
+
+Red Hat Software
+ Funding for the Building #3 part of the project
+
+Symbios Logic (Now LSI)
+ Host adapters, hints, known to work platforms when I hit
+ compatibility problems
+
+BoxHill Corporation
+ Loan of initial FibreChannel disk array used for development work.
+
+STATUS:
+
+o The core setup works within limits.
+o The scsi layer seems to almost work. I'm still chasing down the hang
+ bug.
+o The block OSM is fairly minimal but does seem to work.
+
+
+TO DO:
+
+General:
+o Support multiple IOP's and tell them about each other
+o Provide hidden address space if asked
+o Long term message flow control
+o PCI IOP's without interrupts are not supported yet
+o Push FAIL handling into the core
+o DDM control interfaces for module load etc
+
+Block:
+o Real error handler
+o Multiple major numbers
+o Read ahead and cache handling stuff. Talk to Ingo and people
+o Power management
+o Finish Media changers
+
+SCSI:
+o Find the right way to associate drives/luns/busses
+
+Net:
+o Port the existing RCPCI work to the frame work or write a new
+ driver. This one is with the Finns
+
+Tape:
+o Anyone seen anything implementing this ?
+
--- /dev/null
+
+Linux I2O User Space Interface
+rev 0.3 - 04/20/99
+
+=============================================================================
+Originally written by Deepak Saxena(deepak.saxena@intel.com)
+Currently maintained by Deepak Saxena(deepak.saxena@intel.com)
+=============================================================================
+
+I. Introduction
+
+The Linux I2O subsystem provides a set of ioctl() commands that can be
+utilized by user space applications to communicate with IOPs and devices
+on individual IOPs. This document defines the specific ioctl() commands
+that are available to the user and provides examples of their uses.
+
+This document assumes the reader is familiar with or has access to the
+I2O specification as no I2O message parameters are outlined. For information
+on the specification, see http://www.i2osig.org
+
+This document and the I2O user space interface are currently maintained
+by Deepak Saxena. Please send all comments, errata, and bug fixes to
+deepak.saxena@intel.com
+
+II. IOP Access
+
+Access to the I2O subsystem is provided through the device file named
+/dev/i2octl. This file is a character file with major number 10 and minor
+number 166. It can be created through the following command:
+
+ mknod /dev/i2octl c 10 166
+
+III. Determining the IOP Count
+
+ SYNOPSIS
+
+ ioctl(fd, I2OGETIOPS, int *count);
+
+ u8 count[MAX_I2O_CONTROLLERS];
+
+ DESCRIPTION
+
+ This function returns the system's active IOP table. count should
+ point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon
+ returning, each entry will contain a non-zero value if the given
+ IOP unit is active, and NULL if it is inactive or non-existent.
+
+ RETURN VALUE.
+
+ Returns 0 if no errors occur, and -1 otherwise. If an error occurs,
+ errno is set appropriately:
+
+ EIO Unknown error
+
+IV. ExecHrtGet Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts an ExecHrtGet message to the IOP specified by
+ hrt->iop and returns the data in the buffer pointed to by hrt->resbuf.
+ The size of the data written is placed into the memory pointed to
+ by hrt->reslen.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(hrt->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+V. ExecLctNotify Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts an ExecLctGet message to the IOP specified by
+ lct->iop and returns the data in the buffer pointed to by lct->resbuf.
+ The size of the data written is placed into the memory pointed to
+ by lct->reslen.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(lct->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+VI. UtilParamsSet Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops);
+
+ struct i2o_cmd_psetget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts a UtilParamsSet message to the device identified
+ by ops->iop and ops->tid. The operation list for the message is
+ sent through the ops->oplen buffer, and the result list is written
+ into the buffer pointed to by ops->oplen. The number of bytes
+ written is placed into *(ops->reslen).
+
+ RETURNS
+
+ The return value is the size in bytes of the data written into
+ ops->resbuf if no errors occur. If an error occurs, -1 is returned
+ and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+ A return value of 0 does not mean that the value was actually
+ changed properly on the IOP. The user should check the result
+ list to determine the specific status of the transaction.
+
+VII. UtilParamsGet Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops);
+
+ struct i2o_parm_setget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts a UtilParamsGet message to the device identified
+ by ops->iop and ops->tid. The operation list for the message is
+ sent through the ops->opbuf buffer, and the result list is written
+ into the buffer pointed to by ops->resbuf. The actual size of data
+ written is placed into *(ops->reslen).
+
+ RETURNS
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+ A return value of 0 does not mean that the value was actually
+ properly retrieved. The user should check the result list
+ to determine the specific status of the transaction.
+
+VIII. ExecSwDownload Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 dl_flags; /* DownLoadFlags field */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length of software data */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+ This function downloads the software pointed to by sw->buf to the
+ iop identified by sw->iop. The DownloadFlags, SwID, and SwType fields
+ of the ExecSwDownload message are filled in with the values of
+ sw->dl_flags, sw->sw_id, and sw->sw_type.
+
+ Once the ioctl() is called and software transfer begins, the
+ user can read the value *(sw->maxfrag) and *(sw->curfrag) to
+ determine the status of the software transfer. As the IOP
+ is very slow when it comes to SW transfers, this can be
+ used by a separate thread to report status to the user. The
+ user _should not_ write to this memory location until the ioctl()
+ has returned.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+IX. ExecSwUpload Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* Unused */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length in bytes of software */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+ This function uploads software from the IOP identified by sw->iop
+ and places it in the buffer pointed to by sw->buf. The SwID, SwType
+ and SwSize fields of the ExecSwUpload message are filled in
+ with the values of sw->sw_id, sw->sw_type, and *(sw->swlen). The
+ actual size of the module is written into *(sw->swlen).
+
+ Once the ioctl() is called and software transfer begins, the
+ user can read the value *(sw->maxfrag) and *(sw->curfrag) to
+ determine the status of the software transfer. As the IOP
+ is very slow when it comes to SW transfers, this can be
+ used by a separate thread to report status to the user. The
+ user _should not_ write to this memory location until the ioctl()
+ has returned.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+X. ExecSwRemove Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* Unused */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Unused */
+ u32 *swlen; /* Length in bytes of software data */
+ u32 *maxfrag; /* Unused */
+ u32 *curfrag; /* Unused */
+ };
+
+ DESCRIPTION
+
+ This function removes the software identified by sw->sw_id and
+ sw->sw_type from the IOP identified by sw->iop. The SwID, SwType
+ and SwSize fields of the ExecSwRemove message are filled in
+ with the values of sw->sw_id, sw->sw_type, and *(sw->swlen).
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+X. UtilConfigDialog Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OHTML, struct i2o_html *htquery);
+
+ struct i2o_html
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device ID */
+ u32 page; /* HTML page */
+ void *resbuf; /* Buffer for reply HTML page */
+ u32 *reslen; /* Length in bytes of reply buffer */
+ void *qbuf; /* Pointer to HTTP query string */
+ u32 qlen; /* Length in bytes of query string buffer */
+ };
+
+ DESCRIPTION
+
+ This function posts an UtilConfigDialog message to the device identified
+ by htquery->iop and htquery->tid. The requested HTML page number is
+ provided by the htquery->page field, and the resultant data is stored
+ in the buffer pointed to by htquery->resbuf. If there is an HTTP query
+ string that is to be sent to the device, it should be sent in the buffer
+ pointed to by htquery->qbuf. If there is no query string, this field
+ should be set to NULL. The actual size of the reply received is written
+ into *(htquery->reslen)
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ EIO Unknown error
+
+XI. Events
+
+ We are still in the process of determining this. The current idea
+ is to use the select() interface to allow user apps to poll
+ the /dev/i2octl device for events. When select() notifies the user
+ that an event is available, the user would call read() to retrieve
+ a list of all the events that are pending for the specific device.
+
+=============================================================================
+Revision History
+=============================================================================
+
+Rev 0.1 - 04/01/99
+- Initial revision
+
+Rev 0.2 - 04/06/99
+- Changed return values to match UNIX ioctl() standard. Only return values
+ are 0 and -1. All errors are reported through errno.
+- Added summary of proposed possible event interfaces
+
+Rev 0.3 - 04/20/99
+- Changed all ioctls() to use pointers to user data instead of actual data
+- Updated error values to match the code
+
+
--- /dev/null
+
+ Linux I2O LAN OSM
+ (c) University of Helsinki, Department of Computer Science
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+AUTHORS
+Auvo Häkkinen, Auvo.Hakkinen@cs.Helsinki.FI
+Juha Sievänen, Juha.Sievanen@cs.Helsinki.FI
+
+CREDITS
+
+ This work was made possible by
+
+European Committee
+ Funding for the project
+
+SysKonnect
+ Loaning of FDDI cards
+
+ASUSTeK
+ I2O motherboard
+
+STATUS:
+o The FDDI part of LAN OSM is working to some extent.
+o Only packet per bucket is now supported.
+
+TO DO:
+
+LAN:
+o Add support for batches
+o Find out why big packets flow out of the I2O box, but fail to come in
+o Find the bug in i2o_set_multicast_list(), which kills interrupt
+ handler in i2o_wait_reply()
+o Add support for Ethernet, Token Ring, AnyLAN, Fibre Channel
--- /dev/null
+/*
+ * I2O block device driver.
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is an initial test release. Most of the good code was taken
+ * from the nbd driver by Pavel Machek, who in turn took some of it
+ * from loop.c. Isn't free software great for reusability 8)
+ *
+ * Fixes:
+ * Steve Ralston: Multiple device handling error fixes,
+ * Added a queue depth.
+ */
+
+#include <linux/major.h>
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/ioctl.h>
+#include <linux/i2o.h>
+#include <linux/blkdev.h>
+#include <linux/malloc.h>
+#include <linux/hdreg.h>
+
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#define MAJOR_NR I2O_MAJOR
+
+#include <linux/blk.h>
+
+#define MAX_I2OB 16
+
+#define MAX_I2OB_DEPTH 4
+
+/*
+ * Some of these can be made smaller later
+ */
+
+static int i2ob_blksizes[MAX_I2OB<<4];
+static int i2ob_hardsizes[MAX_I2OB<<4];
+static int i2ob_sizes[MAX_I2OB<<4];
+static int i2ob_media_change_flag[MAX_I2OB];
+static u32 i2ob_max_sectors[MAX_I2OB<<4];
+
+static int i2ob_context;
+
+#ifdef __SMP__
+static spinlock_t i2ob_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+struct i2ob_device
+{
+ struct i2o_controller *controller;
+ struct i2o_device *i2odev;
+ int tid;
+ int flags;
+ int refcnt;
+ struct request *head, *tail;
+ int done_flag;
+};
+
+/*
+ * Each I2O disk is one of these.
+ */
+
+static struct i2ob_device i2ob_dev[MAX_I2OB<<4];
+static int i2ob_devices = 0;
+static struct hd_struct i2ob[MAX_I2OB<<4];
+static struct gendisk i2ob_gendisk; /* Declared later */
+
+static atomic_t queue_depth; /* For flow control later on */
+
+#define DEBUG( s )
+/* #define DEBUG( s ) printk( s )
+ */
+
+static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
+static void i2ob_end_request(struct request *);
+static void do_i2ob_request(void);
+
+/*
+ * Get a message
+ */
+
+static u32 i2ob_get(struct i2ob_device *dev)
+{
+ struct i2o_controller *c=dev->controller;
+ return I2O_POST_READ32(c);
+}
+
+/*
+ * Turn a Linux block request into an I2O block read/write.
+ */
+
+static int i2ob_send(u32 m, struct i2ob_device *dev, struct request *req, u32 base, int unit)
+{
+ struct i2o_controller *c = dev->controller;
+ int tid = dev->tid;
+ u32 *msg;
+ u32 *mptr;
+ u64 offset;
+ struct buffer_head *bh = req->bh;
+ static int old_qd = 2;
+ int count = req->nr_sectors<<9;
+
+ /*
+ * Build a message
+ */
+
+ msg = bus_to_virt(c->mem_offset + m);
+
+ msg[2] = i2ob_context|(unit<<8);
+ msg[3] = (u32)req; /* 64bit issue again here */
+ msg[5] = req->nr_sectors << 9;
+
+ /* This can be optimised later - just want to be sure its right for
+ starters */
+ offset = ((u64)(req->sector+base)) << 9;
+ msg[6] = offset & 0xFFFFFFFF;
+ msg[7] = (offset>>32);
+ mptr=msg+8;
+
+ if(req->cmd == READ)
+ {
+ msg[1] = I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid;
+ /* We don't yet do cache/readahead and other magic */
+ msg[4] = 1<<16;
+ while(bh!=NULL)
+ {
+ *mptr++ = 0x10000000|(bh->b_size);
+ *mptr++ = virt_to_bus(bh->b_data);
+ count -= bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ else if(req->cmd == WRITE)
+ {
+ msg[1] = I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid;
+ msg[4] = 1<<16;
+ while(bh!=NULL)
+ {
+ *mptr++ = 0x14000000|(bh->b_size);
+ count -= bh->b_size;
+ *mptr++ = virt_to_bus(bh->b_data);
+ bh = bh->b_reqnext;
+ }
+ }
+ mptr[-2]|= 0xC0000000;
+ msg[0] = I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8;
+
+ if(req->current_nr_sectors > 8)
+ printk("Gathered sectors %ld.\n",
+ req->current_nr_sectors);
+
+ if(count != 0)
+ {
+ printk("Request count botched by %d.\n", count);
+ msg[5] -= count;
+ }
+
+// printk("Send for %p\n", req);
+
+ i2o_post_message(c,m);
+ atomic_inc(&queue_depth);
+ if(atomic_read(&queue_depth)>old_qd)
+ {
+ old_qd=atomic_read(&queue_depth);
+ printk("Depth now %d.\n", old_qd);
+ }
+ return 0;
+}
+
+/*
+ * Remove a request from the _locked_ request list. We update both the
+ * list chain and if this is the last item the tail pointer.
+ */
+
+static void i2ob_unhook_request(struct i2ob_device *dev, struct request *req)
+{
+ struct request **p = &dev->head;
+ struct request *nt = NULL;
+ static int crap = 0;
+
+ while(*p!=NULL)
+ {
+ if(*p==req)
+ {
+ if(dev->tail==req)
+ dev->tail = nt;
+ *p=req->next;
+ return;
+ }
+ nt=*p;
+ p=&(nt->next);
+ }
+ if(!crap++)
+ printk("i2o_block: request queue corrupt!\n");
+}
+
+/*
+ * Request completion handler
+ */
+
+static void i2ob_end_request(struct request *req)
+{
+ /*
+ * Loop until all of the buffers that are linked
+ * to this request have been marked updated and
+ * unlocked.
+ */
+ while (end_that_request_first( req, !req->errors, "i2o block" ));
+
+ /*
+ * It is now ok to complete the request.
+ */
+ end_that_request_last( req );
+}
+
+
+/*
+ * OSM reply handler. This gets all the message replies
+ */
+
+static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
+{
+ struct request *req;
+ u8 st;
+ u32 *m = (u32 *)msg;
+ u8 unit = (m[2]>>8)&0xF0; /* low 4 bits are partition */
+
+ if(m[0] & (1<<13))
+ {
+ printk("IOP fail.\n");
+ printk("From %d To %d Cmd %d.\n",
+ (m[1]>>12)&0xFFF,
+ m[1]&0xFFF,
+ m[1]>>24);
+ printk("Failure Code %d.\n", m[4]>>24);
+ if(m[4]&(1<<16))
+ printk("Format error.\n");
+ if(m[4]&(1<<17))
+ printk("Path error.\n");
+ if(m[4]&(1<<18))
+ printk("Path State.\n");
+ if(m[4]&(1<<19)) /* bit 19, not 18: each flag has its own bit */
+ printk("Congestion.\n");
+
+ m=(u32 *)bus_to_virt(m[7]);
+ printk("Failing message is %p.\n", m);
+
+ /* We need to up the request failure count here and maybe
+ abort it */
+ req=(struct request *)m[3];
+ /* Now flush the message by making it a NOP */
+ m[0]&=0x00FFFFFF;
+ m[0]|=(I2O_CMD_UTIL_NOP)<<24;
+ i2o_post_message(c,virt_to_bus(m));
+
+ }
+ else
+ {
+ if(m[2]&0x80000000)
+ {
+ int * ptr = (int *)m[3];
+ if(m[4]>>24)
+ *ptr = -1;
+ else
+ *ptr = 1;
+ return;
+ }
+ /*
+ * Lets see what is cooking. We stuffed the
+ * request in the context.
+ */
+
+ req=(struct request *)m[3];
+ st=m[4]>>24;
+
+ if(st!=0)
+ {
+ printk(KERN_ERR "i2ob: error %08X\n", m[4]);
+ /*
+ * Now error out the request block
+ */
+ req->errors++;
+ }
+ }
+ /*
+ * Dequeue the request.
+ */
+
+ spin_lock(&io_request_lock);
+ spin_lock(&i2ob_lock);
+ i2ob_unhook_request(&i2ob_dev[unit], req);
+ i2ob_end_request(req);
+
+ /*
+ * We may be able to do more I/O
+ */
+
+ atomic_dec(&queue_depth);
+ do_i2ob_request();
+ spin_unlock(&i2ob_lock);
+ spin_unlock(&io_request_lock);
+}
+
+static struct i2o_handler i2o_block_handler =
+{
+ i2o_block_reply,
+ "I2O Block OSM",
+ 0
+};
+
+
+/*
+ * Flush all pending requests as errors. Must call with the queue
+ * locked.
+ */
+
+#if 0
+static void i2ob_clear_queue(struct i2ob_device *dev)
+{
+ struct request *req;
+
+ while (1) {
+ req = dev->tail;
+ if (!req)
+ return;
+ req->errors++;
+ i2ob_end_request(req);
+
+ if (dev->tail == dev->head)
+ dev->head = NULL;
+ dev->tail = dev->tail->next;
+ }
+}
+#endif
+
+/*
+ * The I2O block driver is listed as one of those that pulls the
+ * front entry off the queue before processing it. This is important
+ * to remember here. If we drop the io lock then CURRENT will change
+ * on us. We must unlink CURRENT in this routine before we return, if
+ * we use it.
+ */
+
+static void do_i2ob_request(void)
+{
+ struct request *req;
+ int unit;
+ struct i2ob_device *dev;
+ u32 m;
+
+ while (CURRENT) {
+ /*
+ * On an IRQ completion if there is an inactive
+ * request on the queue head it means it isnt yet
+ * ready to dispatch.
+ */
+ if(CURRENT->rq_status == RQ_INACTIVE)
+ return;
+
+ /*
+ * Queue depths probably belong with some kind of
+ * generic IOP commit control. Certainly its not right
+ * its global!
+ */
+ if(atomic_read(&queue_depth)>=MAX_I2OB_DEPTH)
+ break;
+
+ req = CURRENT;
+ unit = MINOR(req->rq_dev);
+ dev = &i2ob_dev[(unit&0xF0)];
+ /* Get a message */
+ m = i2ob_get(dev);
+ /* No messages -> punt
+ FIXME: if we have no messages, and there are no messages
+ we deadlock now. Need a timer/callback ?? */
+ if(m==0xFFFFFFFF)
+ {
+ printk("i2ob: no messages!\n");
+ break;
+ }
+ req->errors = 0;
+ CURRENT = CURRENT->next;
+ req->next = NULL;
+
+ if (dev->head == NULL) {
+ dev->head = req;
+ dev->tail = req;
+ } else {
+ dev->tail->next = req;
+ dev->tail = req;
+ }
+ i2ob_send(m, dev, req, i2ob[unit].start_sect, (unit&0xF0));
+ }
+}
+
+static void i2ob_request(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&i2ob_lock, flags);
+ do_i2ob_request();
+ spin_unlock_irqrestore(&i2ob_lock, flags);
+}
+
+/*
+ * SCSI-CAM for ioctl geometry mapping
+ * Duplicated with SCSI - this should be moved into somewhere common
+ * perhaps genhd ?
+ */
+
+static void i2o_block_biosparam(
+ unsigned long capacity,
+ unsigned short *cyls,
+ unsigned char *hds,
+ unsigned char *secs)
+{
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If no remainder, done! */
+ heads++; /* Else, increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If no remainder, done! */
+ sectors++; /* Else, increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp;/* Compute number of cylinders */
+ }
+ }
+ /* if something went wrong, then apparently we have to return
+ a geometry with more than 1024 cylinders */
+ if (cylinders == 0 || heads > 255 || sectors > 63 || cylinders >1023)
+ {
+ unsigned long temp_cyl;
+
+ heads = 64;
+ sectors = 32;
+ temp_cyl = capacity / (heads * sectors);
+ if (temp_cyl > 1024)
+ {
+ heads = 255;
+ sectors = 63;
+ }
+ cylinders = capacity / (heads * sectors);
+ }
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+}
+
+/*
+ * Rescan the partition tables
+ */
+
+static int do_i2ob_revalidate(kdev_t dev, int maxu)
+{
+ int minor=MINOR(dev);
+ int i;
+
+ minor&=0xF0;
+
+ i2ob_dev[minor].refcnt++;
+ if(i2ob_dev[minor].refcnt>maxu+1)
+ {
+ i2ob_dev[minor].refcnt--;
+ return -EBUSY;
+ }
+
+ for( i = 15; i>=0 ; i--)
+ {
+ int m = minor+i;
+ kdev_t d = MKDEV(MAJOR_NR, m);
+ struct super_block *sb = get_super(d);
+
+ sync_dev(d);
+ if(sb)
+ invalidate_inodes(sb);
+ invalidate_buffers(d);
+ i2ob_gendisk.part[m].start_sect = 0;
+ i2ob_gendisk.part[m].nr_sects = 0;
+ }
+
+ /*
+ * Do a physical check and then reconfigure
+ */
+
+ i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,
+ minor);
+ i2ob_dev[minor].refcnt--;
+ return 0;
+}
+
+/*
+ * Issue device specific ioctl calls.
+ */
+
+static int i2ob_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct i2ob_device *dev;
+ int minor;
+
+ /* Anyone capable of this syscall can do *real bad* things */
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!inode)
+ return -EINVAL;
+ minor = MINOR(inode->i_rdev);
+ if (minor >= (MAX_I2OB<<4))
+ return -ENODEV;
+
+ dev = &i2ob_dev[minor];
+ switch (cmd) {
+ case BLKRASET:
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ if (!arg) return -EINVAL;
+ return put_user(read_ahead[MAJOR(inode->i_rdev)],
+ (long *) arg);
+ case BLKGETSIZE:
+ return put_user(i2ob[minor].nr_sects, (long *) arg);
+
+ case BLKFLSBUF:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case HDIO_GETGEO:
+ {
+ struct hd_geometry g;
+ int u=minor&0xF0;
+ i2o_block_biosparam(i2ob_sizes[u]<<1,
+ &g.cylinders, &g.heads, &g.sectors);
+ g.start = i2ob[minor].start_sect;
+ return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
+ }
+
+ case BLKRRPART:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return do_i2ob_revalidate(inode->i_rdev,1);
+
+ default:
+ return blk_ioctl(inode->i_rdev, cmd, arg);
+ }
+}
+
+/*
+ * Issue UTIL_CLAIM messages
+ */
+
+static int i2ob_claim_device(struct i2ob_device *dev, int onoff)
+{
+ return i2o_issue_claim(dev->controller, dev->tid, i2ob_context, onoff, &dev->done_flag);
+}
+
+/*
+ * Close the block device down
+ */
+
+static int i2ob_release(struct inode *inode, struct file *file)
+{
+ struct i2ob_device *dev;
+ int minor;
+
+ minor = MINOR(inode->i_rdev);
+ if (minor >= (MAX_I2OB<<4))
+ return -ENODEV;
+ sync_dev(inode->i_rdev);
+ dev = &i2ob_dev[(minor&0xF0)];
+ if (dev->refcnt <= 0)
+ printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
+ dev->refcnt--;
+ if(dev->refcnt==0)
+ {
+ /*
+ * Flush the onboard cache on unmount
+ */
+ u32 msg[5];
+ int *query_done = &dev->done_flag;
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = 60<<16;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ /*
+ * Unlock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+
+ /*
+ * Now unclaim the device.
+ */
+ if (i2ob_claim_device(dev, 0)<0)
+ printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");
+
+ }
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Open the block device.
+ */
+
+static int i2ob_open(struct inode *inode, struct file *file)
+{
+ int minor;
+ struct i2ob_device *dev;
+
+ if (!inode)
+ return -EINVAL;
+ minor = MINOR(inode->i_rdev);
+ if (minor >= MAX_I2OB<<4)
+ return -ENODEV;
+ dev=&i2ob_dev[(minor&0xF0)];
+
+ if(dev->refcnt++==0)
+ {
+ u32 msg[6];
+ int *query_done;
+
+
+ if(i2ob_claim_device(dev, 1)<0)
+ {
+ dev->refcnt--;
+ return -EBUSY;
+ }
+
+ query_done = &dev->done_flag;
+ /*
+ * Mount the media if needed. Note that we don't use
+ * the lock bit. Since we have to issue a lock if it
+ * refuses a mount (quite possible) then we might as
+ * well just send two messages out.
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ msg[5] = 0;
+ i2o_post_wait(dev->controller, dev->tid, msg, 24, query_done,2);
+ /*
+ * Lock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ }
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Issue a device query
+ */
+
+static int i2ob_query_device(struct i2ob_device *dev, int table,
+ int field, void *buf, int buflen)
+{
+ return i2o_query_scalar(dev->controller, dev->tid, i2ob_context,
+ table, field, buf, buflen, &dev->done_flag);
+}
+
+
+/*
+ * Install the I2O block device we found.
+ */
+
+static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
+{
+ u64 size;
+ u32 blocksize;
+ u32 limit;
+ u8 type;
+ u32 flags, status;
+ struct i2ob_device *dev=&i2ob_dev[unit];
+ int i;
+
+ /*
+ * Ask for the current media data. If that isn't supported
+ * then we ask for the device capacity data
+ */
+
+ if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
+ || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )
+ {
+ i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);
+ i2ob_query_device(dev, 0x0000, 4, &size, 8);
+ }
+
+ i2ob_query_device(dev, 0x0000, 5, &flags, 4);
+ i2ob_query_device(dev, 0x0000, 6, &status, 4);
+ i2ob_sizes[unit] = (int)(size>>10);
+ i2ob_hardsizes[unit] = blocksize;
+ i2ob_gendisk.part[unit].nr_sects = i2ob_sizes[unit];
+
+ /* Setting this higher than 1024 breaks the symbios for some reason */
+
+ limit=4096; /* 8 deep scatter gather */
+
+ printk("Byte limit is %d.\n", limit);
+
+ for(i=unit;i<=unit+15;i++)
+ i2ob_max_sectors[i]=(limit>>9);
+
+ i2ob[unit].nr_sects = (int)(size>>9);
+
+ i2ob_query_device(dev, 0x0000, 0, &type, 1);
+
+ sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));
+
+ printk("%s: ", d->dev_name);
+ if(status&(1<<10))
+ printk("RAID ");
+ switch(type)
+ {
+ case 0: printk("Disk Storage");break;
+ case 4: printk("WORM");break;
+ case 5: printk("CD-ROM");break;
+ case 7: printk("Optical device");break;
+ default:
+ printk("Type %d", type);
+ }
+ if(((flags & (1<<3)) && !(status & (1<<3))) ||
+ ((flags & (1<<4)) && !(status & (1<<4))))
+ {
+ printk(" Not loaded.\n");
+ return 0;
+ }
+ printk(" %dMb, %d byte sectors",
+ (int)(size>>20), blocksize);
+ if(status&(1<<0))
+ {
+ u32 cachesize;
+ i2ob_query_device(dev, 0x0003, 0, &cachesize, 4);
+ cachesize>>=10;
+ if(cachesize>4095)
+ printk(", %dMb cache", cachesize>>10);
+ else
+ printk(", %dKb cache", cachesize);
+ }
+ printk(".\n");
+ printk("%s: Maximum sectors/read set to %d.\n",
+ d->dev_name, i2ob_max_sectors[unit]);
+ resetup_one_dev(&i2ob_gendisk, unit>>4);
+ return 0;
+}
+
+static void i2ob_probe(void)
+{
+ int i;
+ int unit = 0;
+ int warned = 0;
+
+ for(i=0; i< MAX_I2O_CONTROLLERS; i++)
+ {
+ struct i2o_controller *c=i2o_find_controller(i);
+ struct i2o_device *d;
+
+ if(c==NULL)
+ continue;
+
+ for(d=c->devices;d!=NULL;d=d->next)
+ {
+ if(d->class!=I2O_CLASS_RANDOM_BLOCK_STORAGE)
+ continue;
+
+ if(unit<MAX_I2OB<<4)
+ {
+ /*
+ * Get the device and fill in the
+ * Tid and controller.
+ */
+ struct i2ob_device *dev=&i2ob_dev[unit];
+ dev->i2odev = d;
+ dev->controller = c;
+ dev->tid = d->id;
+
+ /*
+ * Ensure the device can be claimed
+ * before installing it.
+ */
+ if(i2ob_claim_device(dev, 1)==0)
+ {
+ printk(KERN_INFO "Claimed Dev %p Tid %d Unit %d\n",dev,dev->tid,unit);
+ i2ob_install_device(c,d,unit);
+ unit+=16;
+
+ /*
+ * Now that the device has been
+ * installed, unclaim it so that
+ * it can be claimed by either
+ * the block or scsi driver.
+ */
+ if (i2ob_claim_device(dev, 0)<0)
+ printk(KERN_INFO "Could not unclaim Dev %p Tid %d\n",dev,dev->tid);
+
+ }
+ else
+ printk(KERN_INFO "TID %d not claimed\n",dev->tid);
+ }
+ else
+ {
+ if(!warned++)
+ printk("i2o_block: too many controllers, registering only %d.\n", unit>>4);
+ }
+ }
+ }
+ i2ob_devices = unit;
+}
+
+/*
+ * Have we seen a media change ?
+ */
+
+static int i2ob_media_change(kdev_t dev)
+{
+ int i=MINOR(dev);
+ i>>=4;
+ if(i2ob_media_change_flag[i])
+ {
+ i2ob_media_change_flag[i]=0;
+ return 1;
+ }
+ return 0;
+}
+
+static int i2ob_revalidate(kdev_t dev)
+{
+ return do_i2ob_revalidate(dev, 0);
+}
+
+static int i2ob_reboot_event(struct notifier_block *n, unsigned long code, void *p)
+{
+ int i;
+
+ if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+ for(i=0;i<MAX_I2OB;i++)
+ {
+ struct i2ob_device *dev=&i2ob_dev[(i<<4)];
+
+ if(dev->refcnt!=0)
+ {
+ /*
+ * Flush the onboard cache on power down
+ * also unlock the media
+ */
+ u32 msg[5];
+ int *query_done = &dev->done_flag;
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = 60<<16;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ /*
+ * Unlock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+struct notifier_block i2ob_reboot_notifier =
+{
+ i2ob_reboot_event,
+ NULL,
+ 0
+};
+
+static struct file_operations i2ob_fops =
+{
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ i2ob_ioctl, /* ioctl */
+ NULL, /* mmap */
+ i2ob_open, /* open */
+ NULL, /* flush */
+ i2ob_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ i2ob_media_change, /* Media Change */
+ i2ob_revalidate, /* Revalidate */
+ NULL /* File locks */
+};
+
+/*
+ * Partitioning
+ */
+
+static void i2ob_geninit(struct gendisk *gd)
+{
+}
+
+static struct gendisk i2ob_gendisk =
+{
+ MAJOR_NR,
+ "i2ohd",
+ 4,
+ 1<<4,
+ MAX_I2OB,
+ i2ob_geninit,
+ i2ob,
+ i2ob_sizes,
+ 0,
+ NULL,
+ NULL
+};
+
+/*
+ * And here should be modules and kernel interface
+ * (Just smiley confuses emacs :-)
+ */
+
+#ifdef MODULE
+#define i2ob_init init_module
+#endif
+
+int i2ob_init(void)
+{
+ int i;
+
+ printk("I2O block device OSM v0.06. (C) 1999 Red Hat Software.\n");
+
+ /*
+ * Register the block device interfaces
+ */
+
+ if (register_blkdev(MAJOR_NR, "i2o_block", &i2ob_fops)) {
+ printk("Unable to get major number %d for i2o_block\n",
+ MAJOR_NR);
+ return -EIO;
+ }
+#ifdef MODULE
+ printk("i2o_block: registered device at major %d\n", MAJOR_NR);
+#endif
+
+ /*
+ * Now fill in the boiler plate
+ */
+
+ blksize_size[MAJOR_NR] = i2ob_blksizes;
+ hardsect_size[MAJOR_NR] = i2ob_hardsizes;
+ blk_size[MAJOR_NR] = i2ob_sizes;
+ max_sectors[MAJOR_NR] = i2ob_max_sectors;
+
+ blk_dev[MAJOR_NR].request_fn = i2ob_request;
+ for (i = 0; i < MAX_I2OB << 4; i++) {
+ i2ob_dev[i].refcnt = 0;
+ i2ob_dev[i].flags = 0;
+ i2ob_dev[i].controller = NULL;
+ i2ob_dev[i].i2odev = NULL;
+ i2ob_dev[i].tid = 0;
+ i2ob_dev[i].head = NULL;
+ i2ob_dev[i].tail = NULL;
+ i2ob_blksizes[i] = 1024;
+ i2ob_max_sectors[i] = 2;
+ }
+
+ /*
+ * Register the OSM handler as we will need this to probe for
+ * drives, geometry and other goodies.
+ */
+
+ if(i2o_install_handler(&i2o_block_handler)<0)
+ {
+ unregister_blkdev(MAJOR_NR, "i2o_block");
+ printk(KERN_ERR "i2o_block: unable to register OSM.\n");
+ return -EINVAL;
+ }
+ i2ob_context = i2o_block_handler.context;
+
+ /*
+ * Finally see what is actually plugged in to our controllers
+ */
+
+ i2ob_probe();
+
+ register_reboot_notifier(&i2ob_reboot_notifier);
+ return 0;
+}
+
+#ifdef MODULE
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Block Device OSM");
+
+void cleanup_module(void)
+{
+ struct gendisk **gdp;
+
+ unregister_reboot_notifier(&i2ob_reboot_notifier);
+
+ /*
+ * Flush the OSM
+ */
+
+ i2o_remove_handler(&i2o_block_handler);
+
+ /*
+ * Return the block device
+ */
+ if (unregister_blkdev(MAJOR_NR, "i2o_block") != 0)
+ printk("i2o_block: cleanup_module failed\n");
+ else
+ printk("i2o_block: module cleaned up.\n");
+
+ /*
+ * Why isnt register/unregister gendisk in the kernel ???
+ */
+
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ if (*gdp == &i2ob_gendisk)
+ break;
+
+}
+#endif
--- /dev/null
+/*
+ * I2O Configuration Interface Driver
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * Modified 04/20/199 by Deepak Saxena
+ * - Added basic ioctl() support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/spinlock.h>
+
+#include "i2o_proc.h"
+
+static int i2o_cfg_token = 0;
+static int i2o_cfg_context = -1;
+static void *page_buf;
+static void *i2o_buffer;
+static int i2o_ready;
+static int i2o_pagelen;
+static int i2o_error;
+static int cfg_inuse;
+static int i2o_eof;
+static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;
+struct wait_queue *i2o_wait_queue;
+
+static int ioctl_getiops(unsigned long);
+static int ioctl_gethrt(unsigned long);
+static int ioctl_getlct(unsigned long);
+static int ioctl_parms(unsigned long, unsigned int);
+static int ioctl_html(unsigned long);
+static int ioctl_swdl(unsigned long);
+static int ioctl_swul(unsigned long);
+static int ioctl_swdel(unsigned long);
+
+/*
+ * This is the callback for any message we have posted. The message itself
+ * will be returned to the message pool when we return from the IRQ
+ *
+ * This runs in irq context so be short and sweet.
+ */
+static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m)
+{
+ i2o_cfg_token = I2O_POST_WAIT_OK;
+
+ return;
+}
+
+/*
+ * Each of these describes an i2o message handler. They are
+ * multiplexed by the i2o_core code
+ */
+
+struct i2o_handler cfg_handler=
+{
+ i2o_cfg_reply,
+ "Configuration",
+ 0
+};
+
+static long long cfg_llseek(struct file *file, long long offset, int origin)
+{
+ return -ESPIPE;
+}
+
+/* i2ocontroller/i2odevice/page/?data */
+
+static ssize_t cfg_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+{
+ printk(KERN_INFO "i2o_config write not yet supported\n");
+
+ return 0;
+}
+
+/* To be written for event management support */
+static ssize_t cfg_read(struct file *file, char *buf, size_t count, loff_t *ptr)
+{
+ return 0;
+}
+
+static int cfg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ /* Only 1 token, so lock... */
+ spin_lock(&i2o_config_lock);
+
+ switch(cmd)
+ {
+ case I2OGETIOPS:
+ ret = ioctl_getiops(arg);
+ break;
+
+ case I2OHRTGET:
+ ret = ioctl_gethrt(arg);
+ break;
+
+ case I2OLCTGET:
+ ret = ioctl_getlct(arg);
+ break;
+
+ case I2OPARMSET:
+ ret = ioctl_parms(arg, I2OPARMSET);
+ break;
+
+ case I2OPARMGET:
+ ret = ioctl_parms(arg, I2OPARMGET);
+ break;
+
+ case I2OSWDL:
+ ret = ioctl_swdl(arg);
+ break;
+
+ case I2OSWUL:
+ ret = ioctl_swul(arg);
+ break;
+
+ case I2OSWDEL:
+ ret = ioctl_swdel(arg);
+ break;
+
+ case I2OHTML:
+ ret = ioctl_html(arg);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock(&i2o_config_lock);
+ return ret;
+}
+
+int ioctl_getiops(unsigned long arg)
+{
+ u8 *user_iop_table = (u8*)arg;
+ struct i2o_controller *c = NULL;
+ int i;
+ u8 foo[MAX_I2O_CONTROLLERS];
+
+ if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS))
+ return -EFAULT;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c = i2o_find_controller(i);
+ if(c)
+ {
+ printk(KERN_INFO "ioctl: iop%d found\n", i);
+ foo[i] = 1;
+ i2o_unlock_controller(c);
+ }
+ else
+ {
+ printk(KERN_INFO "ioctl: iop%d not found\n", i);
+ foo[i] = 0;
+ }
+ }
+
+ __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS);
+ return 0;
+}
+
+/*
+ * ioctl_gethrt - fetch the Hardware Resource Table from an IOP.
+ *
+ * 'arg' points to a user struct i2o_cmd_hrtlct describing the target
+ * IOP and the result buffer.  The actual table length is always written
+ * back through kcmd.reslen so the caller can retry with a bigger buffer.
+ *
+ * Returns 0 on success, -EFAULT on bad user pointers, -ENXIO if the IOP
+ * does not exist, -ENOMEM, -ETIMEDOUT, or -ENOBUFS if the user buffer
+ * is too small.
+ */
+int ioctl_gethrt(unsigned long arg)
+{
+	struct i2o_controller *c;
+	struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
+	struct i2o_cmd_hrtlct kcmd;
+	pi2o_hrt hrt;
+	u32 msg[6];
+	u32 *workspace;
+	int len;
+	int token;
+	u32 reslen;
+	int ret = 0;
+
+	if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+		return -EFAULT;
+
+	if(get_user(reslen, kcmd.reslen) < 0)
+		return -EFAULT;
+
+	if(kcmd.resbuf == NULL)
+		return -EFAULT;
+
+	c = i2o_find_controller(kcmd.iop);
+	if(!c)
+		return -ENXIO;
+
+	workspace = kmalloc(8192, GFP_KERNEL);
+	hrt = (pi2o_hrt)workspace;
+	if(workspace==NULL)
+	{
+		/* Don't leak the controller reference taken above */
+		i2o_unlock_controller(c);
+		return -ENOMEM;
+	}
+
+	memset(workspace, 0, 8192);
+
+	msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+	msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+	msg[2]= (u32)cfg_handler.context;
+	msg[3]= 0;
+	msg[4]= (0xD0000000 | 8192);
+	msg[5]= virt_to_phys(workspace);
+
+	token = i2o_post_wait(c, ADAPTER_TID, msg, 6*4, &i2o_cfg_token,2);
+	if(token == I2O_POST_WAIT_TIMEOUT)
+	{
+		kfree(workspace);
+		i2o_unlock_controller(c);
+		return -ETIMEDOUT;
+	}
+	i2o_unlock_controller(c);
+
+	/* 8 header bytes plus entry_len (32bit words) * num_entries */
+	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
+	put_user(len, kcmd.reslen);
+	if(len > reslen)
+		ret = -ENOBUFS;		/* Caller's buffer too small */
+	else if(copy_to_user(kcmd.resbuf, (void*)hrt, len))
+		ret = -EFAULT;
+
+	kfree(workspace);
+	return ret;
+}
+
+/*
+ * ioctl_getlct - fetch the Logical Configuration Table from an IOP.
+ *
+ * Mirrors ioctl_gethrt(): 'arg' points to a user struct i2o_cmd_hrtlct,
+ * the real table length is always written back through kcmd.reslen, and
+ * the table is copied out only if it fits in the caller's buffer.
+ *
+ * Returns 0 on success, -EFAULT on bad user pointers, -ENXIO if the IOP
+ * does not exist, -ENOMEM, -ETIMEDOUT, or -ENOBUFS if the user buffer
+ * is too small.
+ */
+int ioctl_getlct(unsigned long arg)
+{
+	struct i2o_controller *c;
+	struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
+	struct i2o_cmd_hrtlct kcmd;
+	pi2o_lct lct;
+	u32 msg[9];
+	u32 *workspace;
+	int len;
+	int token;
+	int ret = 0;
+	u32 reslen;
+
+	if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+		return -EFAULT;
+
+	if(get_user(reslen, kcmd.reslen) < 0)
+		return -EFAULT;
+
+	if(kcmd.resbuf == NULL)
+		return -EFAULT;
+
+	c = i2o_find_controller(kcmd.iop);
+	if(!c)
+		return -ENXIO;
+
+	workspace = kmalloc(8192, GFP_KERNEL);
+	lct = (pi2o_lct)workspace;
+	if(workspace==NULL)
+	{
+		/* Don't leak the controller reference taken above */
+		i2o_unlock_controller(c);
+		return -ENOMEM;
+	}
+
+	memset(workspace, 0, 8192);
+
+	msg[0]= EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6;
+	msg[1]= I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+	msg[2]= (u32)cfg_handler.context;
+	msg[3]= 0;
+	msg[4]= 0xFFFFFFFF;	/* All device classes */
+	msg[5]= 0;		/* Report now, don't wait for a change */
+	msg[6]= (0xD0000000 | 8192);
+	msg[7]= virt_to_phys(workspace);
+
+	token = i2o_post_wait(c, ADAPTER_TID, msg, 8*4, &i2o_cfg_token,2);
+	if(token == I2O_POST_WAIT_TIMEOUT)
+	{
+		kfree(workspace);
+		i2o_unlock_controller(c);
+		return -ETIMEDOUT;
+	}
+	i2o_unlock_controller(c);
+
+	len = (unsigned int)lct->table_size << 2;
+	put_user(len, kcmd.reslen);
+	if(len > reslen)
+		ret = -ENOBUFS;
+	else if(copy_to_user(kcmd.resbuf, (void*)lct, len))
+		ret = -EFAULT;
+
+	kfree(workspace);
+	return ret;
+}
+
+/*
+ * ioctl_parms - run a UtilParamsGet/UtilParamsSet transaction against a
+ * device on behalf of user space.
+ *
+ * 'arg' points to a user struct i2o_cmd_psetget; 'type' selects between
+ * I2OPARMGET and I2OPARMSET.  The operation list is copied in from
+ * kcmd.opbuf, the result block is copied out to kcmd.resbuf, and the
+ * real result length is written back through kcmd.reslen.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int ioctl_parms(unsigned long arg, unsigned int type)
+{
+	int ret = 0;
+	struct i2o_controller *c;
+	struct i2o_cmd_psetget *cmd = (struct i2o_cmd_psetget*)arg;
+	struct i2o_cmd_psetget kcmd;
+	u32 msg[9];
+	u32 reslen;
+	int token;
+	u8 *ops;
+	u8 *res;
+	u16 *res16;
+	u32 *res32;
+	u16 count;
+	int len;
+	int i,j;
+
+	u32 i2o_cmd = (type == I2OPARMGET ?
+				I2O_CMD_UTIL_PARAMS_GET :
+				I2O_CMD_UTIL_PARAMS_SET);
+
+	if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
+		return -EFAULT;
+
+	if(get_user(reslen, kcmd.reslen))
+		return -EFAULT;
+
+	c = i2o_find_controller(kcmd.iop);
+	if(!c)
+		return -ENXIO;
+
+	ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL);
+	if(!ops)
+	{
+		i2o_unlock_controller(c);
+		return -ENOMEM;
+	}
+
+	if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen))
+	{
+		i2o_unlock_controller(c);
+		kfree(ops);
+		return -EFAULT;
+	}
+
+	/*
+	 * It's possible to have a _very_ large table
+	 * and that the user asks for all of it at once...
+	 */
+	res = (u8*)kmalloc(65536, GFP_KERNEL);
+	if(!res)
+	{
+		i2o_unlock_controller(c);
+		kfree(ops);
+		return -ENOMEM;
+	}
+
+	res16 = (u16*)res;
+
+	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+	/* Use the copied kcmd.tid - 'cmd' is a user pointer and must not
+	   be dereferenced in the kernel */
+	msg[1]=i2o_cmd<<24|HOST_TID<<12|kcmd.tid;
+	msg[2]=(u32)cfg_handler.context;
+	msg[3]=0;
+	msg[4]=0;
+	msg[5]=0x54000000|kcmd.oplen;	/* OperationList */
+	msg[6]=virt_to_bus(ops);
+	msg[7]=0xD0000000|(65536);	/* ResultList */
+	msg[8]=virt_to_bus(res);
+
+	/*
+	 * Parm set sometimes takes a little while for some reason
+	 */
+	token = i2o_post_wait(c, kcmd.tid, msg, 9*4, &i2o_cfg_token,10);
+	i2o_unlock_controller(c);
+	if(token == I2O_POST_WAIT_TIMEOUT)
+	{
+		kfree(ops);
+		kfree(res);
+		return -ETIMEDOUT;
+	}
+
+	kfree(ops);
+
+	/*
+	 * Determine required size...there's got to be a quicker way?
+	 * Dump data to syslog for debugging failures
+	 */
+	count = res16[0];
+	printk(KERN_INFO "%0#6x\n%0#6x\n", res16[0], res16[1]);
+	len = 4;
+	res16 += 2;
+	for(i = 0; i < count; i++ )
+	{
+		len += res16[0] << 2;	/* BlockSize field in ResultBlock */
+		res32 = (u32*)res16;
+		for(j = 0; j < res16[0]; j++)
+			printk(KERN_INFO "%0#10x\n", res32[j]);
+		res16 += res16[0] << 1;	/* Shift to next block */
+	}
+
+	put_user(len, kcmd.reslen);
+	if(len > reslen)
+		ret = -ENOBUFS;
+	else if(copy_to_user(kcmd.resbuf, res, len))
+		ret = -EFAULT;
+
+	kfree(res);
+
+	return ret;
+}
+
+/*
+ * ioctl_html - run a UtilConfigDialog transaction against a device and
+ * return the resulting HTML page to user space.  Optional POST data
+ * may be supplied through kcmd.qbuf/kcmd.qlen.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int ioctl_html(unsigned long arg)
+{
+	struct i2o_html *cmd = (struct i2o_html*)arg;
+	struct i2o_html kcmd;
+	struct i2o_controller *c;
+	u8 *res = NULL;
+	void *query = NULL;
+	int ret = 0;
+	int token;
+	u32 len;
+	u32 reslen;
+	u32 msg[MSG_FRAME_SIZE/4];
+
+	if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
+	{
+		printk(KERN_INFO "i2o_config: can't copy html cmd\n");
+		return -EFAULT;
+	}
+
+	if(get_user(reslen, kcmd.reslen) < 0)
+	{
+		printk(KERN_INFO "i2o_config: can't copy html reslen\n");
+		return -EFAULT;
+	}
+
+	if(!kcmd.resbuf)
+	{
+		printk(KERN_INFO "i2o_config: NULL html buffer\n");
+		return -EFAULT;
+	}
+
+	c = i2o_find_controller(kcmd.iop);
+	if(!c)
+		return -ENXIO;
+
+	if(kcmd.qlen) /* Check for post data */
+	{
+		query = kmalloc(kcmd.qlen, GFP_KERNEL);
+		if(!query)
+		{
+			i2o_unlock_controller(c);
+			return -ENOMEM;
+		}
+		if(copy_from_user(query, kcmd.qbuf, kcmd.qlen))
+		{
+			printk(KERN_INFO "i2o_config: could not get query\n");
+			i2o_unlock_controller(c);
+			kfree(query);
+			return -EFAULT;
+		}
+	}
+
+	res = kmalloc(4096, GFP_KERNEL);
+	if(!res)
+	{
+		/* Don't leak the controller reference or the query buffer */
+		i2o_unlock_controller(c);
+		if(kcmd.qlen)
+			kfree(query);
+		return -ENOMEM;
+	}
+
+	msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid;
+	msg[2] = i2o_cfg_context;
+	msg[3] = 0;
+	msg[4] = kcmd.page;
+	msg[5] = 0xD0000000|4096;
+	msg[6] = virt_to_bus(res);
+	if(!kcmd.qlen) /* Check for post data */
+		msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
+	else
+	{
+		msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+		msg[5] = 0x50000000|4096;
+		msg[7] = 0xD4000000|(kcmd.qlen);
+		msg[8] = virt_to_phys(query);
+	}
+
+	/* Use the copied kcmd.tid - 'cmd' is a user pointer and must not
+	   be dereferenced in the kernel */
+	token = i2o_post_wait(c, kcmd.tid, msg, 9*4, &i2o_cfg_token, 10);
+	i2o_unlock_controller(c);
+	if(token == I2O_POST_WAIT_TIMEOUT)
+	{
+		kfree(res);
+		if(kcmd.qlen) kfree(query);
+
+		return -ETIMEDOUT;
+	}
+
+	/* res is only 4096 bytes; never scan beyond it */
+	len = strnlen(res, 4096);
+	put_user(len, kcmd.reslen);
+	if(len > reslen)
+		ret = -ENOBUFS;
+	else if(copy_to_user(kcmd.resbuf, res, len))
+		ret = -EFAULT;
+
+	kfree(res);
+	if(kcmd.qlen)
+		kfree(query);
+
+	return ret;
+}
+
+/* Software download to the IOP - not yet implemented */
+int ioctl_swdl(unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+/* Software upload from the IOP - not yet implemented.
+   Return -ENOSYS like ioctl_swdl, not -EINVAL, so callers can tell
+   "unimplemented" from "bad argument". */
+int ioctl_swul(unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+/* Software removal on the IOP - not yet implemented.
+   The stub used to return 0, which told user space the delete had
+   succeeded when nothing was done; report -ENOSYS instead. */
+int ioctl_swdel(unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+/* Open the /dev/i2octl control device.  Just pins the module. */
+static int cfg_open(struct inode *inode, struct file *file)
+{
+	/*
+	 * Should support multiple management users
+	 */
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+/* Release the control device - drop the module use count. */
+static int cfg_release(struct inode *inode, struct file *file)
+{
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+
+/* Positional initializer - slot order must match struct file_operations
+   of this kernel version.  TODO confirm slot labels against the running
+   kernel's <linux/fs.h>. */
+static struct file_operations config_fops =
+{
+	cfg_llseek,		/* llseek */
+	cfg_read,		/* read */
+	cfg_write,		/* write */
+	NULL,			/* readdir - not a directory */
+	NULL /*cfg_poll*/,
+	cfg_ioctl,		/* ioctl */
+	NULL, /* No mmap */
+	cfg_open,
+	NULL, /* No flush */
+	cfg_release
+};
+
+/* Misc character device "i2octl" on minor I2O_MINOR */
+static struct miscdevice i2o_miscdev = {
+	I2O_MINOR,
+	"i2octl",
+	&config_fops
+};
+
+#ifdef MODULE
+int init_module(void)
+#else
+int i2o_config_init(void)
+#endif
+/*
+ * Initialise the configuration manager: allocate the page buffer,
+ * register the misc device and install the message handler.  All
+ * resources acquired so far are released on each failure path.
+ */
+{
+	printk(KERN_INFO "i2o configuration manager v 0.02\n");
+
+	if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL)
+	{
+		printk(KERN_ERR "i2o_config: no memory for page buffer.\n");
+		return -ENOBUFS;
+	}
+	/* misc_register() returns a negative errno on failure, which is
+	   not always -1 - test the sign, not the value */
+	if(misc_register(&i2o_miscdev) < 0)
+	{
+		printk(KERN_ERR "i2o_config: can't register device.\n");
+		kfree(page_buf);
+		return -EBUSY;
+	}
+	/*
+	 * Install our handler
+	 */
+	if(i2o_install_handler(&cfg_handler)<0)
+	{
+		kfree(page_buf);
+		printk(KERN_ERR "i2o_config: handler register failed.\n");
+		misc_deregister(&i2o_miscdev);
+		return -EBUSY;
+	}
+	/*
+	 * The low 16bits of the transaction context must match this
+	 * for everything we post. Otherwise someone else gets our mail
+	 */
+	i2o_cfg_context = cfg_handler.context;
+	return 0;
+}
+
+#ifdef MODULE
+
+/*
+ * Tear down in reverse order of init_module().  i2o_cfg_context is -1
+ * when the handler was never installed, so the remove is conditional.
+ */
+void cleanup_module(void)
+{
+	misc_deregister(&i2o_miscdev);
+
+	if(page_buf)
+		kfree(page_buf);
+	if(i2o_cfg_context != -1)
+		i2o_remove_handler(&cfg_handler);
+	if(i2o_buffer)
+		kfree(i2o_buffer);
+}
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Configuration");
+
+#endif
--- /dev/null
+/*
+ * Core I2O structure management
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * A lot of the I2O message side code from this is taken from the
+ * Red Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ * Some fixes and cleanup by Philipp Rumpf
+ *
+ * Additional fixes by Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+
+#include <asm/io.h>
+#include <asm/spinlock.h>
+
+#include "i2o_lan.h"
+
+/*
+ * Size of the I2O module table
+ */
+
+
+static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
+static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
+int i2o_num_controllers = 0;
+
+
+extern int i2o_online_controller(struct i2o_controller *c);
+
+/*
+ * I2O configuration spinlock. This isnt a big deal for contention
+ * so we have one only
+ */
+
+#ifdef __SMP__
+static spinlock_t i2o_configuration_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+/*
+ * Install an I2O handler - these handle the asynchronous messaging
+ * from the card once it has initialised.
+ */
+
+/*
+ * Register an I2O message handler in the first free module slot.  The
+ * slot index is stored in h->context so replies can be routed back.
+ * Returns 0 on success or -ENOSPC when the table is full.
+ */
+int i2o_install_handler(struct i2o_handler *h)
+{
+	int slot;
+
+	spin_lock(&i2o_configuration_lock);
+	for(slot = 0; slot < MAX_I2O_MODULES; slot++)
+	{
+		if(i2o_handlers[slot] != NULL)
+			continue;
+		h->context = slot;
+		i2o_handlers[slot] = h;
+		spin_unlock(&i2o_configuration_lock);
+		return 0;
+	}
+	spin_unlock(&i2o_configuration_lock);
+	return -ENOSPC;
+}
+
+/* Unregister a handler by clearing its slot.  NOTE(review): done
+   without i2o_configuration_lock, unlike i2o_install_handler - a
+   single pointer store, but confirm this is intentional. */
+int i2o_remove_handler(struct i2o_handler *h)
+{
+	i2o_handlers[h->context]=NULL;
+	return 0;
+}
+
+
+/*
+ * Each I2O controller has a chain of devices on it - these match
+ * the useful parts of the LCT of the board.
+ */
+
+/*
+ * Link device 'd' onto the head of controller 'c's device list and
+ * initialise its ownership and name fields.  Always returns 0.
+ */
+int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
+{
+	spin_lock(&i2o_configuration_lock);
+	d->controller=c;
+	d->owner=NULL;
+	d->next=c->devices;
+	c->devices=d;
+	*d->dev_name = 0;	/* empty string until a driver names it */
+	spin_unlock(&i2o_configuration_lock);
+	return 0;
+}
+
+/* we need this version to call out of i2o_delete_controller */
+
+/*
+ * Unlink and free device 'd' from its controller's list.  Caller must
+ * hold i2o_configuration_lock (hence the double-underscore name); the
+ * locked wrapper is i2o_delete_device().
+ *
+ * Returns 0 on success, -EBUSY if a driver owns the device, -EINVAL
+ * if 'd' is not on its controller's list.
+ */
+int __i2o_delete_device(struct i2o_device *d)
+{
+	struct i2o_device **p;
+
+	p=&(d->controller->devices);
+
+	/*
+	 * Hey we have a driver!
+	 */
+
+	if(d->owner)
+		return -EBUSY;
+
+	/*
+	 * Seek, locate
+	 */
+
+	while(*p!=NULL)
+	{
+		if(*p==d)
+		{
+			/*
+			 * Destroy
+			 */
+			*p=d->next;
+			kfree(d);
+			return 0;
+		}
+		p=&((*p)->next);
+	}
+	printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
+	return -EINVAL;
+}
+
+/*
+ * Locked wrapper around __i2o_delete_device(): remove a device from
+ * its controller's list under the configuration lock.
+ */
+int i2o_delete_device(struct i2o_device *d)
+{
+	int result;
+
+	spin_lock(&i2o_configuration_lock);
+	result = __i2o_delete_device(d);
+	spin_unlock(&i2o_configuration_lock);
+	return result;
+}
+
+/*
+ * Add and remove controllers from the I2O controller list
+ */
+
+/*
+ * Register a controller in the first free slot of i2o_controllers[],
+ * push it on the global chain and name it "i2o/iopN".
+ * Returns 0 on success or -EBUSY when all slots are taken.
+ */
+int i2o_install_controller(struct i2o_controller *c)
+{
+	int i;
+	spin_lock(&i2o_configuration_lock);
+	for(i=0;i<MAX_I2O_CONTROLLERS;i++)
+	{
+		if(i2o_controllers[i]==NULL)
+		{
+			i2o_controllers[i]=c;
+			c->next=i2o_controller_chain;
+			i2o_controller_chain=c;
+			c->unit = i;	/* slot number doubles as unit id */
+			sprintf(c->name, "i2o/iop%d", i);
+			i2o_num_controllers++;
+			spin_unlock(&i2o_configuration_lock);
+			return 0;
+		}
+	}
+	printk(KERN_ERR "No free i2o controller slots.\n");
+	spin_unlock(&i2o_configuration_lock);
+	return -EBUSY;
+}
+
+/*
+ * Remove a controller: refuse if it still has users, destroy all of
+ * its devices, call its destructor and unlink it from the chain.
+ * Returns 0 on success, -EBUSY if in use, -ENOENT if not chained.
+ */
+int i2o_delete_controller(struct i2o_controller *c)
+{
+	struct i2o_controller **p;
+
+	spin_lock(&i2o_configuration_lock);
+	if(atomic_read(&c->users))
+	{
+		spin_unlock(&i2o_configuration_lock);
+		return -EBUSY;
+	}
+	while(c->devices)
+	{
+		if(__i2o_delete_device(c->devices)<0)
+		{
+			/* Shouldnt happen */
+			spin_unlock(&i2o_configuration_lock);
+			return -EBUSY;
+		}
+	}
+	c->destructor(c);
+
+	p=&i2o_controller_chain;
+
+	while(*p)
+	{
+		if(*p==c)
+		{
+			/* Prepare for restart */
+//			i2o_clear_controller(c);
+
+			*p=c->next;
+			spin_unlock(&i2o_configuration_lock);
+			/*
+			 * Bug fix: this used to read "if(c->page_frame);" -
+			 * the stray semicolon made the kfree unconditional.
+			 */
+			if(c->page_frame)
+				kfree(c->page_frame);
+			i2o_controllers[c->unit]=NULL;
+			kfree(c);
+			i2o_num_controllers--;
+			return 0;
+		}
+		p=&((*p)->next);
+	}
+	spin_unlock(&i2o_configuration_lock);
+	printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
+	return -ENOENT;
+}
+
+/* Drop the reference taken by i2o_find_controller() */
+void i2o_unlock_controller(struct i2o_controller *c)
+{
+	atomic_dec(&c->users);
+}
+
+/*
+ * Look up controller number 'n'.  On success the controller's user
+ * count is raised; the caller must balance it with
+ * i2o_unlock_controller().  Returns NULL for bad or empty slots.
+ */
+struct i2o_controller *i2o_find_controller(int n)
+{
+	struct i2o_controller *c;
+
+	if(n<0 || n>=MAX_I2O_CONTROLLERS)
+		return NULL;
+
+	spin_lock(&i2o_configuration_lock);
+	c=i2o_controllers[n];
+	if(c!=NULL)
+		atomic_inc(&c->users);
+	spin_unlock(&i2o_configuration_lock);
+	return c;
+}
+
+
+/*
+ * Track if a device is being used by a driver
+ */
+
+/*
+ * Mark device 'd' as owned by driver 'r' and pin its controller.
+ * Returns 0 on success or -EBUSY if another driver already owns it.
+ */
+int i2o_claim_device(struct i2o_device *d, struct i2o_driver *r)
+{
+	spin_lock(&i2o_configuration_lock);
+	if(d->owner)
+	{
+		spin_unlock(&i2o_configuration_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&d->controller->users);
+	d->owner=r;
+	spin_unlock(&i2o_configuration_lock);
+	return 0;
+}
+
+/*
+ * Release an owned device and unpin its controller.
+ * Returns 0 on success or -EINVAL if the device was not claimed.
+ */
+int i2o_release_device(struct i2o_device *d)
+{
+	spin_lock(&i2o_configuration_lock);
+	if(d->owner==NULL)
+	{
+		spin_unlock(&i2o_configuration_lock);
+		return -EINVAL;
+	}
+	atomic_dec(&d->controller->users);
+	d->owner=NULL;
+	spin_unlock(&i2o_configuration_lock);
+	return 0;
+}
+
+/*
+ * This is called by the bus specific driver layer when an interrupt
+ * or poll of this card interface is desired.
+ */
+
+/*
+ * Drain the controller's reply FIFO and dispatch each message to the
+ * handler selected by the low bits of the initiator context.  Called
+ * by the bus layer on interrupt or poll.
+ */
+void i2o_run_queue(struct i2o_controller *c)
+{
+	struct i2o_message *m;
+	u32 mv;
+
+	/* 0xFFFFFFFF from the reply port means the FIFO is empty */
+	while((mv=I2O_REPLY_READ32(c))!=0xFFFFFFFF)
+	{
+		struct i2o_handler *i;
+		m=(struct i2o_message *)bus_to_virt(mv);
+		/*
+		 * Temporary Debugging
+		 */
+		if(((m->function_addr>>24)&0xFF)==0x15)
+			printk("UTFR!\n");
+//		printk("dispatching.\n");
+		i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
+		if(i)
+			i->reply(i,c,m);
+		else
+			printk("Spurious reply\n");
+		/* hand the frame back to the IOP's free list */
+		i2o_flush_reply(c,mv);
+		mb();
+	}
+}
+
+
+/*
+ * Do i2o class name lookup
+ */
+/*
+ * Map an I2O class code (low 12 bits) to a human readable name.
+ * Unrecognised codes return the "Unknown" entry.
+ */
+const char *i2o_get_class_name(int class)
+{
+	int idx = 16;		/* default: "Unknown" */
+	static char *i2o_class_name[] = {
+		"Executive",
+		"Device Driver Module",
+		"Block Device",
+		"Tape Device",
+		"LAN Interface",	/* was misspelt "LAN Inteface" */
+		"WAN Interface",
+		"Fibre Channel Port",
+		"Fibre Channel Device",
+		"SCSI Device",
+		"ATE Port",
+		"ATE Device",
+		"Floppy Controller",
+		"Floppy Device",
+		"Secondary Bus Port",
+		"Peer Transport Agent",
+		"Peer Transport",
+		"Unknown"
+	};
+
+	switch(class&0xFFF)
+	{
+		case I2O_CLASS_EXECUTIVE:
+			idx = 0; break;
+		case I2O_CLASS_DDM:
+			idx = 1; break;
+		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+			idx = 2; break;
+		case I2O_CLASS_SEQUENTIAL_STORAGE:
+			idx = 3; break;
+		case I2O_CLASS_LAN:
+			idx = 4; break;
+		case I2O_CLASS_WAN:
+			idx = 5; break;
+		case I2O_CLASS_FIBRE_CHANNEL_PORT:
+			idx = 6; break;
+		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
+			idx = 7; break;
+		case I2O_CLASS_SCSI_PERIPHERAL:
+			idx = 8; break;
+		case I2O_CLASS_ATE_PORT:
+			idx = 9; break;
+		case I2O_CLASS_ATE_PERIPHERAL:
+			idx = 10; break;
+		case I2O_CLASS_FLOPPY_CONTROLLER:
+			idx = 11; break;
+		case I2O_CLASS_FLOPPY_DEVICE:
+			idx = 12; break;
+		case I2O_CLASS_BUS_ADAPTER_PORT:
+			idx = 13; break;
+		case I2O_CLASS_PEER_TRANSPORT_AGENT:
+			idx = 14; break;
+		case I2O_CLASS_PEER_TRANSPORT:
+			idx = 15; break;
+	}
+
+	return i2o_class_name[idx];
+}
+
+
+/*
+ * Wait up to 5 seconds for a message slot to be available.
+ */
+
+/*
+ * Busy-wait (with schedule()) up to 5 seconds for a free inbound
+ * message frame.  Returns the frame offset, or 0xFFFFFFFF on timeout;
+ * 'why' is used only for the timeout log message.
+ */
+u32 i2o_wait_message(struct i2o_controller *c, char *why)
+{
+	long time=jiffies;
+	u32 m;
+	while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
+	{
+		if((jiffies-time)>=5*HZ)
+		{
+			printk(KERN_ERR "%s: Timeout waiting for message to send %s.\n",
+					c->name, why);
+			return 0xFFFFFFFF;
+		}
+		schedule();
+		barrier();
+	}
+	return m;
+}
+
+
+/*
+ * Wait up to 5 seconds for a reply to be available.
+ */
+
+/*
+ * Busy-wait (with schedule()) up to 'timeout' seconds for a reply
+ * frame.  Returns the reply bus address, or 0xFFFFFFFF on timeout.
+ */
+u32 i2o_wait_reply(struct i2o_controller *c, char *why, int timeout)
+{
+	u32 m;
+	long time=jiffies;
+
+	while((m=I2O_REPLY_READ32(c))==0xFFFFFFFF)
+	{
+		if(jiffies-time >= timeout*HZ )
+		{
+			printk(KERN_ERR "%s: timeout waiting for %s reply.\n",
+				c->name, why);
+			return 0xFFFFFFFF;
+		}
+		schedule();
+	}
+	return m;
+}
+
+
+
+/* Quiesce and clear IOP */
+/*
+ * Send SysQuiesce to the IOP so it stops accepting new work.
+ * Returns 0 if a reply arrived, -ETIMEDOUT otherwise.  The reply's
+ * status code is not yet checked (see comment below).
+ */
+int i2o_quiesce_controller(struct i2o_controller *c)
+{
+	u32 m;
+	u32 *msg;
+
+	/* now we stop receiving messages to this IOP */
+	m=i2o_wait_message(c, "Quiesce IOP");
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1]=I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2]=0;
+	msg[3]=0;
+
+	printk(KERN_DEBUG "Sending SysQuiesce to %s\n", c->name);
+	i2o_post_message(c,m);
+
+	m=i2o_wait_reply(c, "System Quiesce", 20);
+
+	if (m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+	/* Someday we should check return status... */
+
+	return 0;
+}
+
+/*
+ * Send AdapterClear to reset the IOP's outstanding state.
+ * Returns 0 if a reply arrived, -ETIMEDOUT otherwise.
+ */
+int i2o_clear_controller(struct i2o_controller *c)
+{
+	u32 m;
+	u32 *msg;
+
+	m=i2o_wait_message(c, "IOP Clear");
+	if (m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2]=0;
+	msg[3]=0;
+
+	printk(KERN_DEBUG "Sending IOPClear to %s\n", c->name);
+	i2o_post_message(c, m);
+
+	m=i2o_wait_reply(c, "IOP Clear timeout", 5);
+
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+
+/*
+ * i2o table walking. We just provide a single element retrieve. You can
+ * all sorts of fancy lookups in I2O but we have no performance critical
+ * lookups so why write all the code for it.
+ */
+
+#if 0
+/*
+ * Polled UtilParamsGet LIST_GET: read one field of one table row,
+ * selected by 'key'.  Currently compiled out.
+ *
+ * NOTE(review): 'op' is 128 bytes and the key is copied to offset 14,
+ * so keylen must not exceed 114 bytes - no check is made; confirm all
+ * callers obey this before enabling.
+ */
+static int i2o_query_table_polled(struct i2o_controller *c, int tid, void *buf, int buflen,
+		int group, int field, u32 *key, int keylen)
+{
+	u32 m;
+	u32 *msg;
+	u16 op[64];
+	u32 *p;
+	int i;
+	u32 *rbuf;
+
+	op[0]=1;			/* One Operation */
+	op[1]=0;			/* PAD */
+	op[2]=2;			/* LIST_GET */
+	op[3]=group;			/* group number */
+	op[4]=1;			/* 1 field */
+	op[5]=field;			/* Field number */
+	op[6]=1;			/* Key count */
+	memcpy(op+7, key, keylen);	/* Key */
+
+	m=i2o_wait_message(c, "I2O query table.");
+	if(m==0xFFFFFFFF)
+	{
+		return -ETIMEDOUT;
+	}
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	rbuf=kmalloc(buflen+32, GFP_KERNEL);
+	if(rbuf==NULL)
+	{
+		printk(KERN_ERR "No free memory for table read.\n");
+		return -ENOMEM;
+	}
+	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+	msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+	msg[2]=0;			/* Context */
+	msg[3]=0;
+	msg[4]=0;
+	msg[5]=0x54000000|(14);
+	msg[6]=virt_to_bus(op);
+	msg[7]=0xD0000000|(32+buflen);
+	msg[8]=virt_to_bus(rbuf);
+
+	i2o_post_message(c,m);
+	barrier();
+
+	/*
+	 * Now wait for a reply
+	 */
+
+
+	m=i2o_wait_reply(c, "Table read timeout", 5);
+
+	if(m==0xFFFFFFFF)
+	{
+		kfree(rbuf);
+		return -ETIMEDOUT;
+	}
+
+	msg = (u32 *)bus_to_virt(m);
+
+	if(msg[4]>>24)
+	{
+		i2o_report_status(KERN_WARNING, "i2o_core",
+				(msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+				msg[4]&0xFFFF);
+	}
+
+	p=rbuf;
+
+	/* Ok 'p' is the reply block - lets see what happened */
+	/* p0->p2 are the header */
+
+	/* FIXME: endians - turn p3 to little endian */
+
+	i=(p[0]&0xFFFF)<<2;		/* Message size */
+	if(i<buflen)
+		buflen=i;
+
+	/* Do we have an error block ? */
+	if(p[0]&0xFF000000)
+	{
+		printk(KERN_ERR "%s: error in field read.\n",
+			c->name);
+		kfree(rbuf);
+		return -EBADR;
+	}
+
+	/* p[1] holds the more flag and row count - we dont care */
+
+	/* Ok it worked p[2]-> hold the data */
+	memcpy(buf, p+2, buflen);
+
+	kfree(rbuf);
+
+	/* Finally return the message */
+	I2O_REPLY_WRITE32(c,m);
+	return buflen;
+}
+#endif
+
+/*
+ * Polled UtilParamsGet FIELD_GET: read a single scalar field of a
+ * parameter group into 'buf'.  Returns the number of bytes copied
+ * (at most buflen) or a negative errno.  Polled because this runs
+ * during early controller bring-up, before interrupts are wired up.
+ */
+static int i2o_query_scalar_polled(struct i2o_controller *c, int tid, void *buf, int buflen,
+		int group, int field)
+{
+	u32 m;
+	u32 *msg;
+	u16 op[8];
+	u32 *p;
+	int i;
+	u32 *rbuf;
+
+	op[0]=1;			/* One Operation */
+	op[1]=0;			/* PAD */
+	op[2]=1;			/* FIELD_GET */
+	op[3]=group;			/* group number */
+	op[4]=1;			/* 1 field */
+	op[5]=field;			/* Field number */
+
+	m=i2o_wait_message(c, "I2O query scalar.");
+	if(m==0xFFFFFFFF)
+	{
+		return -ETIMEDOUT;
+	}
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	/* 32 extra bytes leave room for the result-block header */
+	rbuf=kmalloc(buflen+32, GFP_KERNEL);
+	if(rbuf==NULL)
+	{
+		printk(KERN_ERR "No free memory for scalar read.\n");
+		return -ENOMEM;
+	}
+
+	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+	msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+	msg[2]=0;			/* Context */
+	msg[3]=0;
+	msg[4]=0;
+	msg[5]=0x54000000|12;
+	msg[6]=virt_to_bus(op);
+	msg[7]=0xD0000000|(32+buflen);
+	msg[8]=virt_to_bus(rbuf);
+
+	i2o_post_message(c,m);
+	barrier();
+
+	/*
+	 * Now wait for a reply
+	 */
+
+
+	m=i2o_wait_reply(c, "Scalar read timeout", 5);
+
+	if(m==0xFFFFFFFF)
+	{
+		kfree(rbuf);
+		return -ETIMEDOUT;
+	}
+
+	msg = (u32 *)bus_to_virt(m);
+	if(msg[4]>>24)
+	{
+		i2o_report_status(KERN_WARNING, "i2o_core",
+				(msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+				msg[4]&0xFFFF);
+	}
+
+	p=rbuf;
+
+	/* Ok 'p' is the reply block - lets see what happened */
+	/* p0->p2 are the header */
+
+	/* FIXME: endians - turn p3 to little endian */
+
+	if((p[0]&0xFFFF)!=1)
+		printk(KERN_WARNING "Suspicious field read return 0x%08X\n", p[0]);
+
+	i=(p[1]&0xFFFF)<<2;		/* Message size */
+	if(i<buflen)
+		buflen=i;
+
+	/* Do we have an error block ? */
+	if(p[1]&0xFF000000)
+	{
+		printk(KERN_ERR "%s: error in field read.\n",
+			c->name);
+		kfree(rbuf);
+		return -EBADR;
+	}
+
+	/* p[1] holds the more flag and row count - we dont care */
+
+	/* Ok it worked p[2]-> hold the data */
+	memcpy(buf, p+2, buflen);
+
+	kfree(rbuf);
+
+	/* Finally return the message */
+	I2O_REPLY_WRITE32(c,m);
+	return buflen;
+}
+
+/*
+ * Dump the information block associated with a given unit (TID)
+ */
+
+/*
+ * Print the vendor, device and revision strings of TID 'unit' by
+ * reading group 0xF100 (Device Identity) fields 3, 4 and 6.  Purely
+ * informational; query failures are silently skipped.
+ */
+void i2o_report_controller_unit(struct i2o_controller *c, int unit)
+{
+	char buf[64];
+
+	if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 3)>=0)
+	{
+		buf[16]=0;	/* force NUL termination */
+		printk(KERN_INFO "     Vendor: %s\n", buf);
+	}
+	if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 4)>=0)
+	{
+		buf[16]=0;
+		printk(KERN_INFO "     Device: %s\n", buf);
+	}
+#if 0
+	if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 5)>=0)
+	{
+		buf[16]=0;
+		printk(KERN_INFO "Description: %s\n", buf);
+	}
+#endif
+	if(i2o_query_scalar_polled(c, unit, buf, 8, 0xF100, 6)>=0)
+	{
+		buf[8]=0;
+		printk(KERN_INFO "        Rev: %s\n", buf);
+	}
+}
+
+
+/*
+ * Parse the hardware resource table. Right now we print it out
+ * and don't do a lot with it. We should collate these and then
+ * interact with the Linux resource allocation block.
+ *
+ * Lets prove we can read it first eh ?
+ *
+ * This is full of endianisms!
+ */
+
+/*
+ * Walk a Hardware Resource Table at 'p' and log each adapter entry:
+ * TID, state flags and bus-specific location.  Only HRT format
+ * version 0 (p[3]==0) is understood.  Returns 0 on success, -1 for
+ * an unrecognised table version.
+ */
+static int i2o_parse_hrt(struct i2o_controller *c, u8 *p)
+{
+	u32 *rows=(u32 *)p;
+	u8 *d;
+	int count;
+	int length;
+	int i;
+	int state;
+
+	if(p[3]!=0)
+	{
+		printk(KERN_ERR "i2o: HRT table for controller is too new a version.\n");
+		return -1;
+	}
+
+	count=p[0]|(p[1]<<8);	/* little-endian entry count */
+	length = p[2];		/* entry length in 32bit words */
+
+	printk(KERN_INFO "HRT has %d entries of %d bytes each.\n",
+		count, length<<2);
+
+	rows+=2;		/* skip the 8-byte header */
+
+	for(i=0;i<count;i++)
+	{
+		printk(KERN_INFO "Adapter %08X: ", rows[0]);
+		p=(u8 *)(rows+1);
+		d=(u8 *)(rows+2);
+		state=p[1]<<8|p[0];
+
+		printk("TID %04X:[", state&0xFFF);
+		state>>=12;
+		if(state&(1<<0))
+			printk("H");		/* Hidden */
+		if(state&(1<<2))
+		{
+			printk("P");		/* Present */
+			if(state&(1<<1))
+				printk("C");	/* Controlled */
+		}
+		if(state>9)
+			printk("*");		/* Hard */
+
+		printk("]:");
+
+		switch(p[3]&0xFFFF)
+		{
+			case 0:
+				/* Adapter private bus - easy */
+				printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
+					p[2], d[1]<<8|d[0], *(u32 *)(d+4));
+				break;
+			case 1:
+				/* ISA bus */
+				printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
+					p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
+				break;
+
+			case 2: /* EISA bus */
+				printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+					p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
+				break;
+
+			case 3: /* MCA bus */
+				printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+					p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
+				break;
+
+			case 4: /* PCI bus */
+				printk("PCI %d: Bus %d Device %d Function %d",
+					p[2], d[2], d[1], d[0]);
+				break;
+
+			case 0x80: /* Other */
+			default:
+				printk("Unsupported bus type.");
+				break;
+		}
+		printk("\n");
+		rows+=length;	/* next entry */
+	}
+	return 0;
+}
+
+/*
+ * The logical configuration table tells us what we can talk to
+ * on the board. Most of the stuff isn't interesting to us.
+ */
+
+/*
+ * Walk a Logical Configuration Table and create an i2o_device for
+ * every entry, attaching it to controller 'c'.  Entries are assumed
+ * to be 9 words each after the 3-word header (see max computation).
+ * Returns 0 on success or -ENOMEM; devices installed before a
+ * failure are left in place.
+ */
+static int i2o_parse_lct(struct i2o_controller *c, u32 *lct)
+{
+	int i;
+	int max;
+	int tid;
+	u32 *p;
+	struct i2o_device *d;
+	char str[22];
+
+	max=lct[0]&0xFFFF;	/* table size in 32bit words */
+
+	max-=3;			/* minus header ... */
+	max/=9;			/* ... divided by entry size */
+
+	printk(KERN_INFO "LCT has %d entries.\n", max);
+
+	if(max > 128)
+	{
+		printk(KERN_INFO "LCT was truncated.\n");
+		max=128;
+	}
+
+	if(lct[1]&(1<<0))
+		printk(KERN_WARNING "Configuration dialog desired.\n");
+
+	p=lct+3;
+
+	for(i=0;i<max;i++)
+	{
+		d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
+		if(d==NULL)
+		{
+			printk("i2o_core: Out of memory for LCT data.\n");
+			return -ENOMEM;
+		}
+
+		d->controller = c;
+		d->next = NULL;
+
+		d->id = tid = (p[0]>>16)&0xFFF;
+		d->class = p[3]&0xFFF;
+		d->subclass = p[4]&0xFFF;
+		d->parent = (p[5]>>12)&0xFFF;
+		d->flags = 0;
+
+		printk(KERN_INFO "TID %d.\n", tid);
+
+		i2o_report_controller_unit(c, tid);
+
+		i2o_install_device(c, d);
+
+		printk(KERN_INFO "     Class: ");
+
+		sprintf(str, "%-21s", i2o_get_class_name(d->class));
+		printk("%s", str);
+
+		printk("  Subclass: 0x%03X   Flags: ",
+			d->subclass);
+
+		if(p[2]&(1<<0))
+			printk("C");		// ConfigDialog requested
+		if(p[2]&(1<<1))
+			printk("M");		// Multi-user capable
+		if(!(p[2]&(1<<4)))
+			printk("P");		// Peer service enabled!
+		if(!(p[2]&(1<<5)))
+			printk("m");		// Mgmt service enabled!
+		printk("\n");
+		p+=9;			/* next LCT entry */
+	}
+	return 0;
+}
+
+#if 0
+/* Reset the IOP to sane state */
+/* I think we need handler for core (or executive class in I2O terms) */
+/*
+ * Quiesce the IOP, then issue AdapterReset and poll the 4-byte status
+ * block at 'work8' for completion.  Currently compiled out.
+ */
+static int i2o_reset_adapter(struct i2o_controller *c)
+{
+	u32 m;
+	u8 *work8;
+	u32 *msg;
+	long time;
+
+	/* First stop extral operations */
+	m=i2o_wait_message(c, "quiesce IOP");
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1]=I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2]=0;
+	msg[3]=0;
+
+	i2o_post_message(c,m);
+
+	m=i2o_wait_reply(c, "System Quiesce timeout", 5);
+
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	/* Then reset the IOP */
+	m=i2o_wait_message(c, "reset IOP");
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	work8=(void *)kmalloc(4, GFP_KERNEL);
+	if(work8==NULL) {
+		printk(KERN_ERR "IOP reset failed - no free memory.\n");
+		return -ENOMEM;
+	}
+
+	memset(work8, 0, 4);
+
+	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2]=0;
+	msg[3]=0;
+	msg[4]=0;
+	msg[5]=0;
+	msg[6]=virt_to_phys(work8);
+	msg[7]=0;	/* 64bit host FIXME */
+
+	i2o_post_message(c,m);
+
+	/* Wait for a reply */
+	time=jiffies;
+
+	while(work8[0]==0x01) {
+		if((jiffies-time)>=5*HZ) {
+			printk(KERN_ERR "IOP reset timeout.\n");
+			kfree(work8);
+			return -ETIMEDOUT;
+		}
+		schedule();
+		barrier();
+	}
+
+	if (work8[0]==0x02)
+		printk(KERN_WARNING "IOP Reset rejected\n");
+
+	/* Bug fix: work8 used to leak on this path */
+	kfree(work8);
+
+	return 0;
+}
+#endif
+
+/*
+ * Bring an I2O controller into HOLD state. See the 1.5
+ * spec. Basically we go
+ *
+ * Wait for the message queue to initialise.
+ * If it didnt -> controller is dead
+ *
+ * Send a get status using the message queue
+ * Poll for a reply block 88 bytes long
+ *
+ * Send an initialise outbound queue
+ * Poll for a reply
+ *
+ * Post our blank messages to the queue FIFO
+ *
+ * Send GetHRT, Parse it
+ */
+
+/*
+ * Bring a controller to HOLD state, following the sequence in the
+ * comment block above: StatusGet, OutboundInit, prime the reply FIFO
+ * with message frames, then fetch and parse the HRT.  On success it
+ * chains into i2o_online_controller().  Returns 0 or a negative errno.
+ */
+int i2o_activate_controller(struct i2o_controller *c)
+{
+	long time;
+	u32 m;
+	u8 *workspace;
+	u32 *msg;
+	int i;
+
+	printk(KERN_INFO "Configuring I2O controller at 0x%08X.\n", (u32)c->mem_phys);
+
+	/* First reset the IOP to sane state */
+//	i2o_reset_adapter(c)
+
+	m=i2o_wait_message(c, "initialise");
+	if(m==0xFFFFFFFF)
+		return -ETIMEDOUT;
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	/* 88 bytes = size of the StatusGet reply block */
+	workspace = (void *)kmalloc(88, GFP_KERNEL);
+	if(workspace==NULL)
+	{
+		printk(KERN_ERR "IOP initialisation failed - no free memory.\n");
+		return -ENOMEM;
+	}
+
+	memset(workspace, 0, 88);
+
+	msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2]=0;
+	msg[3]=0;
+	msg[4]=0;
+	msg[5]=0;
+	msg[6]=virt_to_phys(workspace);
+	msg[7]=0;	/* 64bit host FIXME */
+	msg[8]=88;
+
+	i2o_post_message(c,m);
+
+	/*
+	 * Wait for a reply
+	 */
+
+	time=jiffies;
+
+	/* the IOP writes 0xFF to the last byte when the block is valid */
+	while(workspace[87]!=0xFF)
+	{
+		if((jiffies-time)>=5*HZ)
+		{
+			printk(KERN_ERR "IOP get status timeout.\n");
+			kfree(workspace);
+			return -ETIMEDOUT;
+		}
+		schedule();
+		barrier();
+	}
+
+	/*
+	 * Ok the reply has arrived. Fill in the important stuff
+	 */
+
+	c->status = workspace[10];
+	c->i2oversion = (workspace[9]>>4)&0xFF;
+	c->inbound_size = (workspace[12]|(workspace[13]<<8))*4;	/* 32bit words */
+
+	/*
+	 * If the board is running, reset it - we have no idea
+	 * what kind of a mess the previous owner left it in.
+	 */
+
+//	if(c->status == ADAPTER_STATE_OPERATIONAL)
+//		i2o_reset_device(c);
+
+
+	m=i2o_wait_message(c, "initqueue");
+	if(m==0xFFFFFFFF)
+	{
+		kfree(workspace);
+		return -ETIMEDOUT;
+	}
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
+	msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
+	msg[2]= 0;
+	msg[3]= 0x0106;				/* Transaction context */
+	msg[4]= 4096;				/* Host page frame size */
+	msg[5]= MSG_FRAME_SIZE<<16|0x80;	/* Outbound msg frame size and Initcode */
+	msg[6]= 0xD0000004;			/* Simple SG LE, EOB */
+	msg[7]= virt_to_phys(workspace);
+	*((u32 *)workspace)=0;	/* reused as the OutboundInit status word */
+
+	/*
+	 * Post it
+	 */
+
+	i2o_post_message(c,m);
+
+	barrier();
+
+	time=jiffies;
+
+	while(workspace[0]!=I2O_CMD_OUTBOUND_INIT_COMPLETE)
+	{
+		if((jiffies-time)>=5*HZ)
+		{
+			printk(KERN_ERR "IOP outbound initialise failed.\n");
+			kfree(workspace);
+			return -ETIMEDOUT;
+		}
+		schedule();
+		barrier();
+	}
+
+	kfree(workspace);
+
+	c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
+	if(c->page_frame==NULL)
+	{
+		printk(KERN_ERR "IOP init failed: no memory for message page.\n");
+		return -ENOMEM;
+	}
+
+	m=virt_to_phys(c->page_frame);
+
+	/* hand every frame of the pool to the IOP's reply free-list */
+	for(i=0; i< NMBR_MSG_FRAMES; i++)
+	{
+		I2O_REPLY_WRITE32(c,m);
+		mb();
+		m+=MSG_FRAME_SIZE;
+	}
+
+	/*
+	 * The outbound queue is initialised and loaded,
+	 *
+	 * Now we need the Hardware Resource Table. We must ask for
+	 * this next we can't issue random messages yet.
+	 */
+
+
+	workspace=kmalloc(2048, GFP_KERNEL);
+	if(workspace==NULL)
+	{
+		printk(KERN_ERR "IOP init failed; no memory.\n");
+		return -ENOMEM;
+	}
+
+	m=i2o_wait_message(c, "I2O HRT timeout.");
+	if(m==0xFFFFFFFF)
+	{
+		kfree(workspace);
+		return -ETIMEDOUT;
+	}
+
+	msg=(u32 *)(c->mem_offset+m);
+
+	msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+	msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+	msg[2]= 0x0;
+	msg[3]= 0x0;				/* Transaction context */
+	msg[4]= (0xD0000000 | 2048);		/* Simple transaction , 2K */
+	msg[5]= virt_to_phys(workspace);	/* Dump it here */
+	*((u32 *)workspace)=0xFFFFFFFF;
+
+	i2o_post_message(c,m);
+
+	barrier();
+
+	/*
+	 * Now wait for a reply
+	 */
+
+	m=i2o_wait_reply(c, "HRT table", 5);
+
+	if(m==0xFFFFFFFF)
+	{
+		kfree(workspace);
+		return -ETIMEDOUT;
+	}
+
+	msg=(u32 *)bus_to_virt(m);
+
+	if(msg[4]>>24)
+	{
+		i2o_report_status(KERN_WARNING, "i2o_core",
+			(msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+			msg[4]&0xFFFF);
+	}
+	I2O_REPLY_WRITE32(c,m);
+
+	i2o_parse_hrt(c, workspace);
+
+	kfree(workspace);
+
+	return i2o_online_controller(c);
+//	i2o_report_controller_unit(c, ADAPTER_TID);
+}
+
+
+/*
+ * Bring a controller online. Needs completing for multiple controllers
+ */
+
+int i2o_online_controller(struct i2o_controller *c)
+{
+ u32 m;
+ u32 *msg;
+ u32 systab[32];
+ u32 privmem[2];
+ u32 privio[2];
+ u32 *workspace;
+
+ systab[0]=1;
+ systab[1]=0;
+ systab[2]=0;
+ systab[3]=0;
+ systab[4]=0; /* Organisation ID */
+ systab[5]=2; /* Ident 2 for now */
+ systab[6]=0<<24|0<<16|I2OVERSION<<12|1; /* Memory mapped, IOPState, v1.5, segment 1 */
+ systab[7]=MSG_FRAME_SIZE>>2; /* Message size */
+ systab[8]=0; /* LastChanged */
+ systab[9]=0; /* Should be IOP capabilities */
+ systab[10]=virt_to_phys(c->post_port);
+ systab[11]=0;
+
+ privmem[0]=c->priv_mem; /* Private memory space base address */
+ privmem[1]=c->priv_mem_size;
+ privio[0]=c->priv_io; /* Private I/O address */
+ privio[1]=c->priv_io_size;
+
+ m=i2o_wait_message(c, "SetSysTab");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ /* Now we build the systab */
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+ msg[4] = (1<<16)|(2<<12); /* Host 1 I2O 2 */
+ msg[5] = 1; /* Segment 1 */
+
+ /*
+ * Scatter Gather List
+ */
+
+ msg[6] = 0x54000000|48; /* One table for now */
+ msg[7] = virt_to_phys(systab);
+ msg[8] = 0xD4000000|48; /* One table for now */
+ msg[9] = virt_to_phys(privmem);
+/* msg[10] = virt_to_phys(privio); */
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Systab read", 5);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+ I2O_REPLY_WRITE32(c,m);
+
+ /*
+ * Finally we go online
+ */
+
+ m=i2o_wait_message(c, "No message for SysEnable");
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SYS_ENABLE<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Enable", 240);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+ I2O_REPLY_WRITE32(c,m);
+
+ /*
+ * Grab the LCT, see what is attached
+ */
+
+ m=i2o_wait_message(c, "No message for LCT");
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ if(workspace==NULL)
+ {
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]= HOST_TID<<12|ADAPTER_TID; /* NOP */
+ i2o_post_message(c,m);
+ printk(KERN_ERR "No free memory for i2o controller buffer.\n");
+ return -ENOMEM;
+ }
+
+ memset(workspace, 0, 8192);
+
+	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+ msg[4] = 0xFFFFFFFF; /* All devices */
+ msg[5] = 0x00000000; /* Report now */
+ msg[6] = 0xD0000000|8192;
+ msg[7] = virt_to_bus(workspace);
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+ m=i2o_wait_reply(c, "LCT", 5);
+
+ if(m==0xFFFFFFFF)
+ {
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+
+ i2o_parse_lct(c, workspace);
+ kfree(workspace);
+
+ I2O_REPLY_WRITE32(c,m);
+
+ return 0;
+}
+
+/*
+ * Run time support routines
+ */
+
+/*
+ * Generic "post and forget" helpers. This is less efficient - we do
+ * a memcpy for example that isnt strictly needed, but for most uses
+ * this is simply not worth optimising
+ */
+
+int i2o_post_this(struct i2o_controller *c, int tid, u32 *data, int len)
+{
+ u32 m;
+ u32 *msg;
+ unsigned long t=jiffies;
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF && (jiffies-t)<HZ);
+
+
+ if(m==0xFFFFFFFF)
+ {
+ printk(KERN_ERR "i2o: controller not responding.\n");
+ return -1;
+ }
+ msg = bus_to_virt(c->mem_offset + m);
+ memcpy(msg, data, len);
+ i2o_post_message(c,m);
+ return 0;
+}
+
+/*
+ * Post a message and wait for a response flag to be set. This API will
+ * change to use wait_queue's one day
+ */
+
+int i2o_post_wait(struct i2o_controller *c, int tid, u32 *data, int len, int *flag, int timeout)
+{
+ unsigned long t=jiffies;
+
+ *flag = 0;
+
+ if(i2o_post_this(c, tid, data, len))
+ return -1;
+
+ while(!*flag && (jiffies-t)<timeout*HZ)
+ {
+ schedule();
+ mb();
+ }
+ if(*flag <= 0)
+ return -1;
+ return 0;
+}
+
+/*
+ * Issue UTIL_CLAIM messages
+ */
+
+int i2o_issue_claim(struct i2o_controller *c, int tid, int context, int onoff, int *flag)
+{
+ u32 msg[6];
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ if(onoff)
+ msg[1] = I2O_CMD_UTIL_CLAIM << 24 | HOST_TID<<12 | tid;
+ else
+ msg[1] = I2O_CMD_UTIL_RELEASE << 24 | HOST_TID << 12 | tid;
+
+ /* The 0x80000000 convention for flagging is assumed by this helper */
+
+ msg[2] = 0x80000000|context;
+ msg[3] = (u32)flag;
+ msg[4] = 0x01<<24; /* Primary user */
+
+ return i2o_post_wait(c, tid, msg, 20, flag,2);
+}
+
+/*
+ * Query a scalar value
+ */
+
+int i2o_query_scalar(struct i2o_controller *c, int tid, int context,
+ int group, int field, void *buf, int buflen, int *flag)
+{
+ u16 *op;
+ u32 *bl;
+ u32 msg[9];
+
+ bl=kmalloc(buflen+64, GFP_KERNEL); /* Enough space for error replys */
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for query buffer.\n");
+ return -ENOMEM;
+ }
+
+ op = (u16*)bl;
+ op[0]=1; /* One Operation */
+ op[1]=0; /* PAD */
+ op[2]=1; /* FIELD_GET */
+ op[3]=group; /* group number */
+ op[4]=1; /* field count, default = 1 */
+ op[5]=field; /* field index */
+
+ if(field == -1)
+ /* Single value or the whole group? */
+ {
+ op[4]=-1;
+ op[5]=0;
+ }
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|12;
+ msg[6]=virt_to_bus(bl);
+ /*
+ * There are 8 bytes of "overhead" required to pull in
+ * a Params ResultsList; 2 bytes for ResultCount
+ * (which should have value=1), plus 2 bytes for pad,
+ * plus 2 bytes for BlockSize, plus 1 byte BlockStatus,
+ * plus 1 byte ErrorInfoSize (8 bytes total overhead).
+ * This is followed finally by actual result value(s).
+ *
+ * Tell the IOP to return 8 + buflen bytes.
+ */
+ msg[7]=0xD0000000|(8+buflen);
+ msg[8]=virt_to_bus(bl+3);
+
+ bl[3]=0xFCFCFCFC; // Pad,ResultCount
+ bl[4]=0xFAFAFCFC; // ErrorInfoSize,BlockStatus,BlockSize
+
+ /*
+ * Post the message and await a reply
+ */
+
+ if (i2o_post_wait(c, tid, msg, sizeof(msg), flag,2) < 0)
+ {
+ kfree(bl);
+ return -1;
+ }
+
+	if(bl[4]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_query_scalar - Error\n"
+ "ErrorInfoSize = 0x%02x, BlockStatus = 0x%02x, "
+ "BlockSize = 0x%04x\n",
+ bl[4]>>24, (bl[4]>>16)&0xFF, bl[4]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+ if((bl[3] & 0xFFFF) != 1)
+ {
+ printk(KERN_ERR "i2o: query ResultCount = 0x%04x\n", bl[3]&0xFFFF);
+ }
+
+ memcpy(buf, bl+5, buflen);
+ kfree(bl);
+ return 0;
+}
+
+
+#if 0
+/*
+ * Query a table field
+ * FIXME: NOT TESTED!
+ */
+int i2o_query_table(struct i2o_controller *c, int tid, int context,
+ void *buf, int buflen,
+ int table,
+ int *field, int fieldlen,
+ u32 *key, int keylen,
+ int *flag)
+{
+ static u16 op[32];
+ u32 *bl;
+ u32 msg[9];
+ int i;
+
+ bl=kmalloc(buflen+64, GFP_KERNEL);
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for query buffer.\n");
+ return -ENOMEM;
+ }
+
+ op[0]=1; /* Operation count */
+ op[1]=0; /* Reserved */
+ op[2]=I2O_PARAMS_LIST_GET; /* Operation */
+ op[3]=table; /* Group */
+ /* Specific fields or the whole group? */
+ if(*field != -1)
+ { /* FIXME: Fields can be variable size */
+ op[4]=fieldlen;
+ for (i=0; i < fieldlen; i++)
+ op[4+i]=field[i];
+ }
+ else
+ {
+ op[4]=-1;
+ op[5]=0;
+ }
+
+ memcpy(bl, op, 12);
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|12;
+ msg[6]=virt_to_bus(bl);
+
+ msg[7]=0xD0000000|(buflen+48);
+ msg[8]=virt_to_bus(bl+4);
+
+ /*
+ * Post the message and await a reply
+ */
+
+ if(i2o_post_wait(c, tid, msg, sizeof(msg), flag,2)<0)
+ return -1;
+
+	if(bl[5]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_query_table - Error\n"
+ "ErrorInfoSize = 0x%02x, BlockStatus = 0x%02x, "
+ "BlockSize = 0x%04x\n",
+ bl[5]>>24, (bl[5]>>16)&0xFF, bl[5]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+
+ if((bl[4]&0xFFFF)!=1)
+ printk(KERN_ERR "i2o: query ResultCount = %0#4x\n",
+ bl[4]&0xFFFF);
+
+ memcpy(buf, bl+6, buflen);
+ kfree(bl);
+ return 0;
+}
+#endif
+
+/*
+ * Set (for now) scalar value
+ *
+ * TODO: Add support for table groups
+ */
+
+int i2o_params_set(struct i2o_controller *c, int tid, int context, int table,
+ int field, void *buf, int buflen, int *flag)
+{
+ static u16 opdata[]={1,0,6,0,1,4,0};
+ u32 *bl;
+ u32 msg[9];
+
+ bl=kmalloc(buflen+64, GFP_KERNEL);
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for set buffer.\n");
+ return -ENOMEM;
+ }
+
+ opdata[3]=table;
+ /* Single value or the whole group? */
+ if(field != -1) {
+ opdata[4]=1;
+ opdata[5]=field;
+ opdata[6]=*(u16 *)buf;
+ }
+ else {
+ opdata[4]=-1;
+ opdata[5]=0;
+ }
+
+ memcpy(bl, opdata, 14);
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_SET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|14;
+ msg[6]=virt_to_bus(bl);
+ msg[7]=0xD0000000|(buflen+48);
+ msg[8]=virt_to_bus(bl+4);
+
+ /* Post the message and wait for a reply */
+ if(i2o_post_wait(c, tid, msg, 36, flag, 5)<0)
+ {
+ kfree(bl);
+ return -1;
+ }
+
+ /* Perhaps we should check errors, eh? */
+	if(bl[5]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_params_set - Error\n"
+ "ErrorInfoSize = %0#2x, BlockStatus = %0#2x, "
+ "BlockSize = %0#4x\n",
+ bl[5]>>24, (bl[5]>>16)&0xFF, bl[5]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+
+ if((bl[4] & 0xFFFF) != 1)
+ {
+ printk(KERN_ERR "i2o: params set ResultCount = %0#4x\n",
+ bl[4]&0xFFFF);
+ }
+
+ kfree(bl);
+ return 0;
+}
+
+
+void report_common_status(u8 req_status)
+{
+ /* the following reply status strings are common to all classes */
+
+ static char *REPLY_STATUS[] = {
+ "SUCCESS",
+ "ABORT_DIRTY",
+ "ABORT_NO_DATA_TRANSFER",
+ "ABORT_PARTIAL_TRANSFER",
+ "ERROR_DIRTY",
+ "ERROR_NO_DATA_TRANSFER",
+ "ERROR_PARTIAL_TRANSFER",
+ "PROCESS_ABORT_DIRTY",
+ "PROCESS_ABORT_NO_DATA_TRANSFER",
+ "PROCESS_ABORT_PARTIAL_TRANSFER",
+ "TRANSACTION_ERROR",
+ "PROGRESS_REPORT"
+ };
+
+ if (req_status > I2O_REPLY_STATUS_PROGRESS_REPORT)
+ printk("%0#4x / ", req_status);
+ else
+ printk("%s / ", REPLY_STATUS[req_status]);
+
+ return;
+}
+
+static void report_common_dsc(u16 detailed_status)
+{
+ /* The following detailed statuscodes are valid
+ - for executive class, utility class, DDM class and
+ - for transaction error replies
+ */
+
+ static char *COMMON_DSC[] = {
+ "SUCCESS",
+ "0x01", // not used
+ "BAD_KEY",
+ "TCL_ERROR",
+ "REPLY_BUFFER_FULL",
+ "NO_SUCH_PAGE",
+ "INSUFFICIENT_RESOURCE_SOFT",
+ "INSUFFICIENT_RESOURCE_HARD",
+ "0x08", // not used
+ "CHAIN_BUFFER_TOO_LARGE",
+ "UNSUPPORTED_FUNCTION",
+ "DEVICE_LOCKED",
+ "DEVICE_RESET",
+ "INAPPROPRIATE_FUNCTION",
+ "INVALID_INITIATOR_ADDRESS",
+ "INVALID_MESSAGE_FLAGS",
+ "INVALID_OFFSET",
+ "INVALID_PARAMETER",
+ "INVALID_REQUEST",
+ "INVALID_TARGET_ADDRESS",
+ "MESSAGE_TOO_LARGE",
+ "MESSAGE_TOO_SMALL",
+ "MISSING_PARAMETER",
+ "TIMEOUT",
+ "UNKNOWN_ERROR",
+ "UNKNOWN_FUNCTION",
+ "UNSUPPORTED_VERSION",
+ "DEVICE_BUSY",
+ "DEVICE_NOT_AVAILABLE"
+ };
+
+ if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
+ printk("%0#4x.\n", detailed_status);
+ else
+ printk("%s.\n", COMMON_DSC[detailed_status]);
+
+ return;
+}
+
+void report_lan_dsc(u16 detailed_status)
+{
+ static char *LAN_DSC[] = { // Lan detailed status code strings
+ "SUCCESS",
+ "DEVICE_FAILURE",
+ "DESTINATION_NOT_FOUND",
+ "TRANSMIT_ERROR",
+ "TRANSMIT_ABORTED",
+ "RECEIVE_ERROR",
+ "RECEIVE_ABORTED",
+ "DMA_ERROR",
+ "BAD_PACKET_DETECTED",
+ "OUT_OF_MEMORY",
+ "BUCKET_OVERRUN",
+ "IOP_INTERNAL_ERROR",
+ "CANCELED",
+ "INVALID_TRANSACTION_CONTEXT",
+ "DEST_ADDRESS_DETECTED",
+ "DEST_ADDRESS_OMITTED",
+ "PARTIAL_PACKET_RETURNED",
+ "TEMP_SUSPENDED_STATE"
+ };
+
+ if (detailed_status > I2O_LAN_DSC_TEMP_SUSPENDED_STATE)
+ printk("%0#4x.\n", detailed_status);
+ else
+ printk("%s.\n", LAN_DSC[detailed_status]);
+
+ return;
+}
+
+static void report_util_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_UTIL_NOP:
+ printk("UTIL_NOP, ");
+ break;
+ case I2O_CMD_UTIL_ABORT:
+ printk("UTIL_ABORT, ");
+ break;
+ case I2O_CMD_UTIL_CLAIM:
+ printk("UTIL_CLAIM, ");
+ break;
+ case I2O_CMD_UTIL_RELEASE:
+ printk("UTIL_CLAIM_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_CONFIG_DIALOG:
+ printk("UTIL_CONFIG_DIALOG, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RESERVE:
+ printk("UTIL_DEVICE_RESERVE, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RELEASE:
+ printk("UTIL_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_ACK:
+ printk("UTIL_EVENT_ACKNOWLEDGE, ");
+ break;
+ case I2O_CMD_UTIL_EVT_REGISTER:
+ printk("UTIL_EVENT_REGISTER, ");
+ break;
+ case I2O_CMD_UTIL_LOCK:
+ printk("UTIL_LOCK, ");
+ break;
+ case I2O_CMD_UTIL_LOCK_RELEASE:
+ printk("UTIL_LOCK_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_GET:
+ printk("UTIL_PARAMS_GET, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_SET:
+ printk("UTIL_PARAMS_SET, ");
+ break;
+ case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
+ printk("UTIL_REPLY_FAULT_NOTIFY, ");
+ break;
+ default:
+ printk("%0#2x, ",cmd);
+ }
+
+ return;
+}
+
+
+static void report_exec_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_ADAPTER_ASSIGN:
+ printk("EXEC_ADAPTER_ASSIGN, ");
+ break;
+ case I2O_CMD_ADAPTER_READ:
+ printk("EXEC_ADAPTER_READ, ");
+ break;
+ case I2O_CMD_ADAPTER_RELEASE:
+ printk("EXEC_ADAPTER_RELEASE, ");
+ break;
+ case I2O_CMD_BIOS_INFO_SET:
+ printk("EXEC_BIOS_INFO_SET, ");
+ break;
+ case I2O_CMD_BOOT_DEVICE_SET:
+ printk("EXEC_BOOT_DEVICE_SET, ");
+ break;
+ case I2O_CMD_CONFIG_VALIDATE:
+ printk("EXEC_CONFIG_VALIDATE, ");
+ break;
+ case I2O_CMD_CONN_SETUP:
+ printk("EXEC_CONN_SETUP, ");
+ break;
+ case I2O_CMD_DDM_DESTROY:
+ printk("EXEC_DDM_DESTROY, ");
+ break;
+ case I2O_CMD_DDM_ENABLE:
+ printk("EXEC_DDM_ENABLE, ");
+ break;
+ case I2O_CMD_DDM_QUIESCE:
+ printk("EXEC_DDM_QUIESCE, ");
+ break;
+ case I2O_CMD_DDM_RESET:
+ printk("EXEC_DDM_RESET, ");
+ break;
+ case I2O_CMD_DDM_SUSPEND:
+ printk("EXEC_DDM_SUSPEND, ");
+ break;
+ case I2O_CMD_DEVICE_ASSIGN:
+ printk("EXEC_DEVICE_ASSIGN, ");
+ break;
+ case I2O_CMD_DEVICE_RELEASE:
+ printk("EXEC_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_HRT_GET:
+ printk("EXEC_HRT_GET, ");
+ break;
+ case I2O_CMD_ADAPTER_CLEAR:
+ printk("EXEC_IOP_CLEAR, ");
+ break;
+ case I2O_CMD_ADAPTER_CONNECT:
+ printk("EXEC_IOP_CONNECT, ");
+ break;
+ case I2O_CMD_ADAPTER_RESET:
+ printk("EXEC_IOP_RESET, ");
+ break;
+ case I2O_CMD_LCT_NOTIFY:
+ printk("EXEC_LCT_NOTIFY, ");
+ break;
+ case I2O_CMD_OUTBOUND_INIT:
+ printk("EXEC_OUTBOUND_INIT, ");
+ break;
+ case I2O_CMD_PATH_ENABLE:
+ printk("EXEC_PATH_ENABLE, ");
+ break;
+ case I2O_CMD_PATH_QUIESCE:
+ printk("EXEC_PATH_QUIESCE, ");
+ break;
+ case I2O_CMD_PATH_RESET:
+ printk("EXEC_PATH_RESET, ");
+ break;
+ case I2O_CMD_STATIC_MF_CREATE:
+ printk("EXEC_STATIC_MF_CREATE, ");
+ break;
+ case I2O_CMD_STATIC_MF_RELEASE:
+ printk("EXEC_STATIC_MF_RELEASE, ");
+ break;
+ case I2O_CMD_STATUS_GET:
+ printk("EXEC_STATUS_GET, ");
+ break;
+ case I2O_CMD_SW_DOWNLOAD:
+ printk("EXEC_SW_DOWNLOAD, ");
+ break;
+ case I2O_CMD_SW_UPLOAD:
+ printk("EXEC_SW_UPLOAD, ");
+ break;
+ case I2O_CMD_SW_REMOVE:
+ printk("EXEC_SW_REMOVE, ");
+ break;
+ case I2O_CMD_SYS_ENABLE:
+ printk("EXEC_SYS_ENABLE, ");
+ break;
+ case I2O_CMD_SYS_MODIFY:
+ printk("EXEC_SYS_MODIFY, ");
+ break;
+ case I2O_CMD_SYS_QUIESCE:
+ printk("EXEC_SYS_QUIESCE, ");
+ break;
+ case I2O_CMD_SYS_TAB_SET:
+ printk("EXEC_SYS_TAB_SET, ");
+ break;
+ default:
+ printk("%02x, ",cmd);
+ }
+
+ return;
+}
+
+static void report_lan_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case LAN_PACKET_SEND:
+ printk("LAN_PACKET_SEND, ");
+ break;
+ case LAN_SDU_SEND:
+ printk("LAN_SDU_SEND, ");
+ break;
+ case LAN_RECEIVE_POST:
+ printk("LAN_RECEIVE_POST, ");
+ break;
+ case LAN_RESET:
+ printk("LAN_RESET, ");
+ break;
+ case LAN_SUSPEND:
+ printk("LAN_SUSPEND, ");
+ break;
+ default:
+ printk("%02x, ",cmd);
+ }
+
+ return;
+}
+
+/* TODO: Add support for other classes */
+void i2o_report_status(const char *severity, const char *module, u8 cmd,
+ u8 req_status, u16 detailed_status)
+{
+ printk("%s", severity);
+ printk("%s: ", module);
+
+ if (cmd < 0x1F) { // Utility Class
+ report_util_cmd(cmd);
+ report_common_status(req_status);
+ report_common_dsc(detailed_status);
+ return;
+ }
+
+ if (cmd >= 0x30 && cmd <= 0x3F) { // LAN class
+ report_lan_cmd(cmd);
+ report_common_status(req_status);
+ report_lan_dsc(detailed_status);
+ return;
+ }
+
+ if (cmd >= 0xA0 && cmd <= 0xEF) { // Executive class
+ report_exec_cmd(cmd);
+ report_common_status(req_status);
+ report_common_dsc(detailed_status);
+ return;
+ }
+
+ printk("%02x, %02x / %04x.\n", cmd, req_status, detailed_status);
+ return;
+}
+
+
+EXPORT_SYMBOL(i2o_install_handler);
+EXPORT_SYMBOL(i2o_remove_handler);
+EXPORT_SYMBOL(i2o_install_device);
+EXPORT_SYMBOL(i2o_delete_device);
+EXPORT_SYMBOL(i2o_quiesce_controller);
+EXPORT_SYMBOL(i2o_clear_controller);
+EXPORT_SYMBOL(i2o_install_controller);
+EXPORT_SYMBOL(i2o_delete_controller);
+EXPORT_SYMBOL(i2o_unlock_controller);
+EXPORT_SYMBOL(i2o_find_controller);
+EXPORT_SYMBOL(i2o_num_controllers);
+EXPORT_SYMBOL(i2o_claim_device);
+EXPORT_SYMBOL(i2o_release_device);
+EXPORT_SYMBOL(i2o_run_queue);
+EXPORT_SYMBOL(i2o_report_controller_unit);
+EXPORT_SYMBOL(i2o_activate_controller);
+EXPORT_SYMBOL(i2o_online_controller);
+EXPORT_SYMBOL(i2o_get_class_name);
+
+EXPORT_SYMBOL(i2o_query_scalar);
+EXPORT_SYMBOL(i2o_params_set);
+EXPORT_SYMBOL(i2o_post_this);
+EXPORT_SYMBOL(i2o_post_wait);
+EXPORT_SYMBOL(i2o_issue_claim);
+
+EXPORT_SYMBOL(i2o_report_status);
+EXPORT_SYMBOL(report_common_status);
+EXPORT_SYMBOL(report_lan_dsc);
+
+EXPORT_SYMBOL(i2o_wait_message);
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Core");
--- /dev/null
+/*
+ * linux/drivers/i2o/i2o_lan.c
+ *
+ * I2O LAN CLASS OSM Prototyping, May 7th 1999
+ *
+ * (C) Copyright 1999 University of Helsinki,
+ * Department of Computer Science
+ *
+ * This code is still under development / test.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *
+ * Tested: in FDDI environment (using SysKonnect's DDM)
+ * in ETH environment (using Intel 82558 DDM proto)
+ *
+ * TODO: batch mode networking
+ * - this one assumes that we always get one packet in a bucket
+ * - we've not been able to test batch replies and batch receives
+ * error checking / timeouts
+ * - code/test for other LAN classes
+ */
+
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/malloc.h>
+#include <linux/trdevice.h>
+#include <asm/io.h>
+
+#include <linux/errno.h>
+
+#include <linux/i2o.h>
+#include "i2o_lan.h"
+
+//#define DRIVERDEBUG
+#ifdef DRIVERDEBUG
+#define dprintk(s, args...) printk(s, ## args)
+#else
+#define dprintk(s, args...)
+#endif
+
+#define MAX_LAN_CARDS 4
+static struct device *i2o_landevs[MAX_LAN_CARDS+1];
+static int unit = -1; /* device unit number */
+
+struct i2o_lan_local {
+ u8 unit;
+ struct i2o_device *i2o_dev;
+ int reply_flag; // needed by scalar/table queries
+ struct fddi_statistics stats;
+/* first fields are same as in struct net_device_stats stats; */
+ unsigned short (*type_trans)(struct sk_buff *, struct device *);
+};
+
+/* function prototypes */
+static int i2o_lan_receive_post(struct device *dev);
+static int i2o_lan_receive_post_reply(struct device *dev, struct i2o_message *m);
+
+
+static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
+ struct i2o_message *m)
+{
+ u32 *msg = (u32 *)m;
+ u8 unit = (u8)(msg[2]>>16); // InitiatorContext
+ struct device *dev = i2o_landevs[unit];
+
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, "i2o_lan", msg[1]>>24, msg[4]>>24,
+ msg[4]&0xFFFF);
+#endif
+ if (msg[0] & (1<<13)) // Fail bit is set
+ {
+ printk(KERN_INFO "IOP failed to process the msg\n");
+ printk("From tid=%d to tid=%d",(msg[1]>>12)&0xFFF,msg[1]&0xFFF);
+ return;
+ }
+
+ switch (msg[1] >> 24) {
+ case LAN_RECEIVE_POST:
+ if (dev->start)
+ i2o_lan_receive_post_reply(dev,m);
+ else {
+ // we are getting unused buckets back
+ u8 trl_count = msg[3] & 0x000000FF;
+ struct i2o_bucket_descriptor *bucket =
+ (struct i2o_bucket_descriptor *)&msg[6];
+ struct sk_buff *skb;
+ do {
+ dprintk("Releasing unused bucket\n");
+ skb = (struct sk_buff *)bucket->context;
+ dev_kfree_skb(skb);
+ bucket++;
+ } while (--trl_count);
+ }
+ break;
+
+ case LAN_PACKET_SEND:
+ case LAN_SDU_SEND:
+ {
+ u8 trl_count = msg[3] & 0x000000FF;
+
+ if (msg[4] >> 24) // ReqStatus != SUCCESS
+ {
+ printk(KERN_WARNING "%s: ",dev->name);
+ report_common_status(msg[4]>>24);
+ report_lan_dsc(msg[4]&0xFFFF);
+ }
+
+ do { // The HDM has handled the outgoing packet
+ dev_kfree_skb((struct sk_buff *)msg[4 + trl_count]);
+ dprintk(KERN_INFO "%s: Request skb freed (trl_count=%d).\n",
+ dev->name,trl_count);
+ } while (--trl_count);
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* inform upper layers */
+ }
+ break;
+
+ default:
+ if (msg[2] & 0x80000000) // reply to a util get/set
+ { // flag for the i2o_post_wait
+ int *flag = (int *)msg[3];
+ // ReqStatus != I2O_REPLY_STATUS_SUCCESS
+ *flag = (msg[4] >> 24) ? I2O_POST_WAIT_TIMEOUT
+ : I2O_POST_WAIT_OK ;
+ }
+ }
+}
+
+static struct i2o_handler i2o_lan_handler =
+{
+ i2o_lan_reply,
+ "I2O Lan OSM",
+ 0 // context
+};
+static int lan_context;
+
+
+static int i2o_lan_receive_post_reply(struct device *dev, struct i2o_message *m)
+{
+ u32 *msg = (u32 *)m;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
+ struct i2o_packet_info *packet;
+
+ u8 trl_count = msg[3] & 0x000000FF;
+ struct sk_buff *skb;
+
+#if 0
+ dprintk(KERN_INFO "TrlFlags = 0x%02X, TrlElementSize = %d, TrlCount = %d\n"
+ "msgsize = %d, buckets_remaining = %d\n",
+ msg[3]>>24, msg[3]&0x0000FF00, trl_count, msg[0]>>16, msg[5]);
+#endif
+
+/*
+ * NOTE: here we assume that also in batch mode we will get only
+ * one packet per bucket. This can be ensured by setting the
+ * PacketOrphanLimit to MaxPacketSize, as well as the bucket size.
+ */
+ do {
+ /* packet is not at all needed here */
+ packet = (struct i2o_packet_info *)bucket->packet_info;
+#if 0
+ dprintk(KERN_INFO "flags = 0x%02X, offset = 0x%06X, status = 0x%02X, length = %d\n",
+ packet->flags, packet->offset, packet->status, packet->len);
+#endif
+ skb = (struct sk_buff *)(bucket->context);
+ skb_put(skb,packet->len);
+ skb->dev = dev;
+ skb->protocol = priv->type_trans(skb, dev);
+ netif_rx(skb);
+
+ dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
+ "to upper level.\n",dev->name,packet->len);
+
+ bucket++; // to next Packet Descriptor Block
+
+ } while (--trl_count);
+
+ if (msg[5] <= I2O_BUCKET_THRESH) // BucketsRemaining
+ i2o_lan_receive_post(dev);
+
+ return 0;
+}
+
+/* ====================================================
+ * Interface to i2o: functions to send lan class request
+ */
+
+/*
+ * i2o_lan_receive_post(): Post buckets to receive packets.
+ */
+static int i2o_lan_receive_post(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ struct sk_buff *skb;
+ u32 m; u32 *msg;
+
+ u32 bucket_len = (dev->mtu + dev->hard_header_len);
+ u32 bucket_count;
+ int n_elems = (iop->inbound_size - 16 ) / 12; // msg header + SGLs
+ u32 total = 0;
+ int i;
+
+ dprintk(KERN_INFO "%s: Allocating %d buckets (size %d).\n",
+ dev->name, I2O_BUCKET_COUNT, bucket_len);
+
+ while (total < I2O_BUCKET_COUNT)
+ {
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ bucket_count = (total + n_elems < I2O_BUCKET_COUNT)
+ ? n_elems
+ : I2O_BUCKET_COUNT - total;
+
+ msg[0] = I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | 1<<12 | SGL_OFFSET_4;
+ msg[1] = LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = bucket_count; // BucketCount
+
+ for (i = 0; i < bucket_count; i++)
+ {
+ skb = dev_alloc_skb(bucket_len + 2);
+ if (skb == NULL)
+ return -ENOMEM;
+ skb_reserve(skb, 2);
+ msg[4 + 3*i] = 0x51000000 | bucket_len;
+ msg[5 + 3*i] = (u32)skb;
+ msg[6 + 3*i] = virt_to_bus(skb->data);
+ }
+ msg[4 + 3*i - 3] |= 0x80000000; // set LE flag
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN HDM.\n",
+ dev->name,bucket_count,bucket_len);
+
+ total += bucket_count;
+ }
+ return 0;
+}
+
+/*
+ * i2o_lan_reset(): Reset the LAN adapter into the operational state and
+ * restore it to full operation.
+ */
+static int i2o_lan_reset(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = 0; // TransactionContext
+ msg[4] = 1 << 16; // return posted buckets
+
+ i2o_post_message(iop,m);
+
+ return 0;
+}
+
+/*
+ * i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
+ * Reply to any LAN class message with status error_no_data_transfer
+ * / suspended.
+ */
+static int i2o_lan_suspend(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = 0; // TransactionContext
+ msg[4] = 1 << 16; // return posted buckets
+
+ i2o_post_message(iop,m);
+
+ return 0;
+}
+
+/*
+ * Set DDM into batch mode.
+ */
+static void i2o_set_batch_mode(struct device *dev)
+{
+
+/*
+ * NOTE: we have not been able to test batch mode
+ * since HDMs we have, don't implement it
+ */
+
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 val;
+
+ /* set LAN_BATCH_CONTROL attributes */
+
+ // enable batch mode, toggle automatically
+ val = 0x00000000;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 0,
+ &val, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "Unable to enter I2O LAN batch mode.\n");
+ else
+ dprintk(KERN_INFO "%s: I2O LAN batch mode enabled.\n",dev->name);
+
+ /*
+ * When PacketOrphanlimit is same as the maximum packet length,
+ * the packets will never be split into two separate buckets
+ */
+
+ /* set LAN_OPERATION attributes */
+
+ val = dev->mtu + dev->hard_header_len; // PacketOrphanLimit
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0004, 2,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "i2o_lan: Unable to set PacketOrphanLimit.\n");
+ else
+ dprintk(KERN_INFO "PacketOrphanLimit set to %d\n",val);
+
+#if 0
+/*
+ * I2O spec 2.0: there should be proper default values for other attributes
+ * used in batch mode.
+ */
+
+ /* set LAN_RECEIVE_INFO attributes */
+
+ val = 10; // RxMaxBucketsReply
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0008, 3,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set RxMaxBucketsReply.\n",
+ dev->name);
+
+ val = 10; // RxMaxPacketsBuckets
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0008, 4,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
+ dev->name);
+
+ /* set LAN_BATCH_CONTROL attributes */
+
+ val = 10; // MaxRxBatchCount
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 5,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set MaxRxBatchCount.\n",
+ dev->name);
+
+ val = 10; // MaxTxBatchCount
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 8,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s Unable to set MaxTxBatchCount.\n",
+ dev->name);
+#endif
+
+ return;
+}
+
+/*
+ * i2o_lan_open(): Open the device to send/receive packets via
+ * the network device.
+ */
+static int i2o_lan_open(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+
+ i2o_lan_reset(dev);
+
+ if (i2o_issue_claim(iop, i2o_dev->id, lan_context, 1,
+ &priv->reply_flag) < 0)
+ {
+ printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
+ return -EAGAIN;
+ }
+ dprintk(KERN_INFO "%s: I2O LAN device claimed (tid=%d).\n", dev->name, i2o_dev->id);
+
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ i2o_set_batch_mode(dev);
+ i2o_lan_receive_post(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * i2o_lan_close(): End the transfering.
+ */
+static int i2o_lan_close(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ if (i2o_issue_claim(iop, i2o_dev->id, lan_context, 0,
+ &priv->reply_flag) < 0)
+ {
+ printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device (tid=%d)\n",
+ dev->name, i2o_dev->id);
+ }
+
+ i2o_lan_suspend(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * i2o_lan_sdu_send(): Send a packet, MAC header added by the HDM.
+ * Must be supported by Fibre Channel, optional for Ethernet/802.3,
+ * Token Ring, FDDI
+ */
+static int i2o_lan_sdu_send(struct sk_buff *skb, struct device *dev)
+{
+#if 0
+/* not yet tested */
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ dprintk(KERN_INFO "LanSDUSend called, skb->len = %d\n", skb->len);
+
+ m = *iop->post_port;
+ if (m == 0xFFFFFFFF)
+ {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_4;
+ msg[1] = LAN_SDU_SEND<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // IntiatorContext
+ msg[3] = 1<<4; // TransmitControlWord: suppress CRC generation
+
+ // create a simple SGL, see fig. 3-26
+ // D7 = 1101 0111 = LE eob 0 1 LA dir bc1 bc0
+
+ msg[4] = 0xD7000000 | (skb->len); // no MAC hdr included
+ msg[5] = (u32)skb; // TransactionContext
+ memcpy(&msg[6], skb->data, 8); // Destination MAC Addr ??
+ msg[7] &= 0x0000FFFF; // followed by two bytes zeros
+ msg[8] = virt_to_bus(skb->data);
+ dev->trans_start = jiffies;
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Packet (%d bytes) sent to network.\n",
+ dev->name,skb->len);
+#endif
+ return 0;
+}
+
+/*
+ * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
+ *
+ * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
+ * Fibre Channel
+ *
+ * Returns 0 on success, -1 (dropping the skb) if no inbound message
+ * frame is currently available from the IOP.
+ */
+static int i2o_lan_packet_send(struct sk_buff *skb, struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ /* Reading the post port claims a free inbound frame; all-ones means
+ none is available right now. */
+ m = *iop->post_port;
+ if (m == 0xFFFFFFFF) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4;
+ msg[1] = LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = 1 << 4; // TransmitControlWord
+
+ // create a simple SGL, see fig. 3-26
+ // D5 = 1101 0101 = LE eob 0 1 LA dir bc1 bc0
+
+ msg[4] = 0xD5000000 | skb->len; // MAC hdr included
+ msg[5] = (u32)skb; // TransactionContext (FIXME: truncates on 64-bit)
+ msg[6] = virt_to_bus(skb->data);
+
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Packet (%d bytes) sent to network.\n",
+ dev->name, skb->len);
+
+ return 0;
+}
+
+/*
+ * i2o_lan_get_stats(): Return statistical information.
+ *
+ * Fills priv->stats from the device's LAN_HISTORICAL_STATS parameter
+ * group and returns a pointer to it.
+ * NOTE(review): the return value of i2o_query_scalar() is not checked,
+ * so stale/garbage values may be copied if the query fails -- confirm
+ * against the i2o core whether val[] is zeroed on failure.
+ */
+static struct net_device_stats *i2o_lan_get_stats(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u64 val[16];
+
+ /* query LAN_HISTORICAL_STATS scalar parameter group 0x0100 */
+
+ i2o_query_scalar(iop, i2o_dev->id, lan_context, 0x0100, -1,
+ &val, 16*8, &priv->reply_flag);
+ priv->stats.tx_packets = val[0];
+ priv->stats.tx_bytes = val[1];
+ priv->stats.rx_packets = val[2];
+ priv->stats.rx_bytes = val[3];
+ priv->stats.tx_errors = val[4];
+ priv->stats.rx_errors = val[5];
+ priv->stats.rx_dropped = val[6];
+
+ // other net_device_stats and FDDI class specific fields follow ...
+
+ return (struct net_device_stats *)&priv->stats;
+}
+
+/*
+ * i2o_lan_set_multicast_list(): Enable a network device to receive packets
+ * not sent to this interface's protocol address.
+ *
+ * Maps dev->flags (IFF_PROMISC) and the multicast list onto the I2O
+ * filter-mask scalar (param group 0x0001, field 3).
+ *
+ * NOTE: currently short-circuited -- the unconditional "return" below
+ * makes everything after it dead code until the i2o_post_wait problem
+ * described in the FIXME is resolved.
+ */
+static void i2o_lan_set_multicast_list(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 filter_mask;
+
+ dprintk(KERN_INFO "Entered i2o_lan_set_multicast_list().\n");
+
+return;
+
+/*
+ * FIXME: For some reason this kills interrupt handler in i2o_post_wait :-(
+ *
+ */
+ dprintk(KERN_INFO "dev->flags = 0x%08X, dev->mc_count = 0x%08X\n",
+ dev->flags,dev->mc_count);
+
+ /* Read the current filter mask so we can OR bits into it. */
+ if (i2o_query_scalar(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) < 0 )
+ printk(KERN_WARNING "i2o_lan: Unable to query filter mask.\n");
+
+ dprintk(KERN_INFO "filter_mask = 0x%08X\n",filter_mask);
+
+ if (dev->flags & IFF_PROMISC)
+ {
+ // Enable promiscuous mode
+
+ filter_mask |= 0x00000002;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "i2o_lan: Unable to enable promiscuous multicast mode.\n");
+ else
+ dprintk(KERN_INFO "i2o_lan: Promiscuous multicast mode enabled.\n");
+
+ return;
+ }
+
+// if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+// {
+// // Disable promiscuous mode, use normal mode.
+// hardware_set_filter(NULL);
+//
+// dprintk(KERN_INFO "i2o_lan: Disabled promiscuous mode, uses normal mode\n");
+//
+// filter_mask = 0x00000000;
+// i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+// &filter_mask, 4, &priv->reply_flag);
+//
+// return;
+// }
+
+ if (dev->mc_count)
+ {
+ // Walk the address list, and load the filter
+// hardware_set_filter(dev->mc_list);
+
+ /* NOTE(review): 0x00000004 assigned (not OR-ed), discarding the
+ mask read above -- confirm this is intentional. */
+ filter_mask = 0x00000004;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "i2o_lan: Unable to enable Promiscuous multicast mode.\n");
+ else
+ dprintk(KERN_INFO "i2o_lan: Promiscuous multicast mode enabled.\n");
+
+ return;
+ }
+
+ // Unicast
+
+ filter_mask |= 0x00000300; // Broadcast, Multicast disabled
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "i2o_lan: Unable to enable unicast mode.\n");
+ else
+ dprintk(KERN_INFO "i2o_lan: Unicast mode enabled.\n");
+
+ return;
+}
+
+/*
+ * i2o_lan_register_device(): Allocate and register a struct device for
+ * one I2O LAN class device, dispatching on its subclass (Ethernet,
+ * Token Ring, FDDI).
+ *
+ * Returns the registered device, or NULL on failure.
+ */
+struct device *i2o_lan_register_device(struct i2o_device *i2o_dev)
+{
+ struct device *dev = NULL;
+ struct i2o_lan_local *priv = NULL;
+ u8 hw_addr[8];
+ unsigned short (*type_trans)(struct sk_buff *, struct device *);
+
+ switch (i2o_dev->subclass)
+ {
+ case I2O_LAN_ETHERNET:
+ /* Note: init_etherdev calls
+ ether_setup() and register_netdevice()
+ and allocates the priv structure */
+
+ dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
+ if (dev == NULL)
+ return NULL;
+ type_trans = eth_type_trans;
+ break;
+
+/*
+#ifdef CONFIG_ANYLAN
+ case I2O_LAN_100VG:
+ printk(KERN_WARNING "i2o_lan: 100base VG not yet supported\n");
+ break;
+#endif
+*/
+
+#ifdef CONFIG_TR
+ case I2O_LAN_TR:
+ dev = init_trdev(NULL, sizeof(struct i2o_lan_local));
+ if(dev==NULL)
+ return NULL;
+ type_trans = tr_type_trans;
+ break;
+#endif
+
+#ifdef CONFIG_FDDI
+ case I2O_LAN_FDDI:
+ {
+ /* device struct, priv area and name buffer in one allocation */
+ int size = sizeof(struct device) + sizeof(struct i2o_lan_local)
+ + sizeof("fddi%d ");
+
+ dev = (struct device *) kmalloc(size, GFP_KERNEL);
+ if (dev == NULL) /* was dereferenced unchecked */
+ return NULL;
+ memset((char *)dev, 0, size);
+ dev->priv = (void *)(dev + 1);
+ dev->name = (char *)(dev + 1) + sizeof(struct i2o_lan_local);
+
+ if (dev_alloc_name(dev,"fddi%d") < 0)
+ {
+ printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
+ kfree(dev);
+ return NULL;
+ }
+ type_trans = fddi_type_trans;
+
+ fddi_setup(dev);
+ register_netdev(dev);
+ }
+ break;
+#endif
+
+/*
+#ifdef CONFIG_FIBRE_CHANNEL
+ case I2O_LAN_FIBRE_CHANNEL:
+ printk(KERN_WARNING "i2o_lan: Fibre Channel not yet supported\n");
+ break;
+#endif
+*/
+ case I2O_LAN_UNKNOWN:
+ default:
+ printk(KERN_WARNING "i2o_lan: LAN type 0x%08X not supported\n",
+ i2o_dev->subclass);
+ return NULL;
+ }
+
+ priv = (struct i2o_lan_local *)dev->priv;
+ priv->i2o_dev = i2o_dev;
+ priv->type_trans = type_trans;
+
+ if (i2o_query_scalar(i2o_dev->controller, i2o_dev->id, lan_context,
+ 0x0001, 0, &hw_addr, 8, &priv->reply_flag) < 0)
+ {
+ /* NOTE(review): dev stays registered/allocated on this path --
+ the caller cannot free it; confirm whether an unregister is
+ needed here. */
+ printk("%s: Unable to query hardware address.\n",
+ dev->name);
+ return NULL;
+ }
+
+ dprintk("%s hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name,hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
+ hw_addr[4], hw_addr[5]);
+
+ dev->addr_len = 6;
+ memcpy(dev->dev_addr, hw_addr, 6);
+
+ dev->open = i2o_lan_open;
+ dev->stop = i2o_lan_close;
+ dev->hard_start_xmit = i2o_lan_packet_send;
+ dev->get_stats = i2o_lan_get_stats;
+ dev->set_multicast_list = i2o_lan_set_multicast_list;
+
+ return dev;
+}
+
+#ifdef MODULE
+
+/*
+ * init_module(): Register the LAN OSM handler and create a network
+ * device for every I2O_CLASS_LAN device on every installed controller.
+ *
+ * "unit" is the index of the last registered device (the final dprintk
+ * reports unit+1 devices), so it must stay below MAX_LAN_CARDS-1 before
+ * the pre-use increment.
+ */
+int init_module(void)
+{
+ struct device *dev;
+ struct i2o_lan_local *priv;
+ int i;
+
+ if (i2o_install_handler(&i2o_lan_handler) < 0)
+ {
+ printk(KERN_ERR "Unable to register I2O LAN OSM.\n");
+ return -EINVAL;
+ }
+
+ lan_context = i2o_lan_handler.context;
+
+ for (i=0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ struct i2o_controller *iop = i2o_find_controller(i);
+ struct i2o_device *i2o_dev;
+
+ if (iop==NULL)
+ continue;
+
+ for (i2o_dev=iop->devices;i2o_dev != NULL;i2o_dev=i2o_dev->next)
+ {
+ int class = i2o_dev->class;
+
+ if (class != 0x020) /* not I2O_CLASS_LAN device*/
+ continue;
+
+ /* Stop while i2o_landevs[unit+1] is still in bounds; the
+ old test (unit == MAX_LAN_CARDS) let the post-increment
+ below write one slot past the end of the array. */
+ if (unit + 1 == MAX_LAN_CARDS)
+ {
+ printk(KERN_WARNING "Too many I2O LAN devices.\n");
+ return -EINVAL;
+ }
+
+ dev = i2o_lan_register_device(i2o_dev);
+ if (dev == NULL)
+ {
+ printk(KERN_WARNING "Unable to register I2O LAN device\n");
+ continue; // try next one
+ }
+ priv = (struct i2o_lan_local *)dev->priv;
+
+ unit++;
+ i2o_landevs[unit] = dev;
+ priv->unit = unit;
+
+ printk(KERN_INFO "%s: I2O LAN device registered, tid = %d,"
+ " subclass = 0x%08X, unit = %d.\n",
+ dev->name, i2o_dev->id, i2o_dev->subclass,
+ priv->unit);
+ }
+ }
+
+ dprintk(KERN_INFO "%d I2O LAN devices found and registered.\n", unit+1);
+
+ return 0;
+}
+
+/*
+ * cleanup_module(): Unregister and free every device created by
+ * init_module(), then remove the LAN OSM handler.
+ *
+ * NOTE(review): Ethernet/TR use unregister_netdev() while FDDI uses
+ * unregister_netdevice() -- confirm the missing rtnl locking for the
+ * FDDI case is intentional.
+ */
+void cleanup_module(void)
+{
+ int i;
+
+ /* "unit" is the index of the last registered device, hence <= */
+ for (i = 0; i <= unit; i++)
+ {
+ struct device *dev = i2o_landevs[i];
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+
+ switch (i2o_dev->subclass)
+ {
+ case I2O_LAN_ETHERNET:
+ unregister_netdev(dev);
+ kfree(dev);
+ break;
+#ifdef CONFIG_FDDI
+ case I2O_LAN_FDDI:
+ unregister_netdevice(dev);
+ kfree(dev);
+ break;
+#endif
+#ifdef CONFIG_TR
+ case I2O_LAN_TR:
+ unregister_netdev(dev);
+ kfree(dev);
+ break;
+#endif
+ default:
+ /* unknown subclass: dev is intentionally leaked rather
+ than freed while possibly still registered */
+ printk(KERN_WARNING "i2o_lan: Spurious I2O LAN subclass 0x%08X.\n",
+ i2o_dev->subclass);
+ }
+
+ dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
+ dev->name);
+ }
+
+ i2o_remove_handler(&i2o_lan_handler);
+}
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Univ of Helsinki, CS Department");
+MODULE_DESCRIPTION("I2O Lan OSM");
+
+#endif
--- /dev/null
+/*
+ * i2o_lan.h LAN Class specific definitions
+ *
+ * I2O LAN CLASS OSM Prototyping, May 7th 1999
+ *
+ * (C) Copyright 1999 University of Helsinki,
+ * Department of Computer Science
+ *
+ * This code is still under development / test.
+ *
+ * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *
+ */
+
+#ifndef I2O_LAN_H
+#define I2O_LAN_H
+
+/* Tunable parameters first */
+
+#define I2O_BUCKET_COUNT 64
+#define I2O_BUCKET_THRESH 5
+
+/* LAN types */
+#define I2O_LAN_ETHERNET 0x0030
+#define I2O_LAN_100VG 0x0040
+#define I2O_LAN_TR 0x0050
+#define I2O_LAN_FDDI 0x0060
+#define I2O_LAN_FIBRE_CHANNEL 0x0070
+#define I2O_LAN_UNKNOWN 0x00000000
+
+/* Connector types
+ *
+ * Each value is (subclass << 4) + ordinal. The expansions are fully
+ * parenthesized so the macros behave as single values in any
+ * expression context (the old "(X << 4) + 1" form would misparse in
+ * e.g. "I2O_LAN_AUI * 2" or a surrounding shift).
+ */
+
+/* Ethernet */
+#define I2O_LAN_AUI ((I2O_LAN_ETHERNET << 4) + 0x00000001)
+#define I2O_LAN_10BASE5 ((I2O_LAN_ETHERNET << 4) + 0x00000002)
+#define I2O_LAN_FIORL ((I2O_LAN_ETHERNET << 4) + 0x00000003)
+#define I2O_LAN_10BASE2 ((I2O_LAN_ETHERNET << 4) + 0x00000004)
+#define I2O_LAN_10BROAD36 ((I2O_LAN_ETHERNET << 4) + 0x00000005)
+#define I2O_LAN_10BASE_T ((I2O_LAN_ETHERNET << 4) + 0x00000006)
+#define I2O_LAN_10BASE_FP ((I2O_LAN_ETHERNET << 4) + 0x00000007)
+#define I2O_LAN_10BASE_FB ((I2O_LAN_ETHERNET << 4) + 0x00000008)
+#define I2O_LAN_10BASE_FL ((I2O_LAN_ETHERNET << 4) + 0x00000009)
+#define I2O_LAN_100BASE_TX ((I2O_LAN_ETHERNET << 4) + 0x0000000A)
+#define I2O_LAN_100BASE_FX ((I2O_LAN_ETHERNET << 4) + 0x0000000B)
+#define I2O_LAN_100BASE_T4 ((I2O_LAN_ETHERNET << 4) + 0x0000000C)
+#define I2O_LAN_1000BASE_SX ((I2O_LAN_ETHERNET << 4) + 0x0000000D)
+#define I2O_LAN_1000BASE_LX ((I2O_LAN_ETHERNET << 4) + 0x0000000E)
+#define I2O_LAN_1000BASE_CX ((I2O_LAN_ETHERNET << 4) + 0x0000000F)
+#define I2O_LAN_1000BASE_T ((I2O_LAN_ETHERNET << 4) + 0x00000010)
+
+/* AnyLAN */
+#define I2O_LAN_100VG_ETHERNET ((I2O_LAN_100VG << 4) + 0x00000001)
+#define I2O_LAN_100VG_TR ((I2O_LAN_100VG << 4) + 0x00000002)
+
+/* Token Ring */
+#define I2O_LAN_4MBIT ((I2O_LAN_TR << 4) + 0x00000001)
+#define I2O_LAN_16MBIT ((I2O_LAN_TR << 4) + 0x00000002)
+
+/* FDDI */
+#define I2O_LAN_125MBAUD ((I2O_LAN_FDDI << 4) + 0x00000001)
+
+/* Fibre Channel */
+#define I2O_LAN_POINT_POINT ((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001)
+#define I2O_LAN_ARB_LOOP ((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002)
+#define I2O_LAN_PUBLIC_LOOP ((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003)
+#define I2O_LAN_FABRIC ((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004)
+
+#define I2O_LAN_EMULATION 0x00000F00
+#define I2O_LAN_OTHER 0x00000F01
+#define I2O_LAN_DEFAULT 0xFFFFFFFF
+
+/* LAN class functions */
+
+#define LAN_PACKET_SEND 0x3B
+#define LAN_SDU_SEND 0x3D
+#define LAN_RECEIVE_POST 0x3E
+#define LAN_RESET 0x35
+#define LAN_SUSPEND 0x37
+
+/* LAN DetailedStatusCode defines */
+#define I2O_LAN_DSC_SUCCESS 0x00
+#define I2O_LAN_DSC_DEVICE_FAILURE 0x01
+#define I2O_LAN_DSC_DESTINATION_NOT_FOUND 0x02
+#define I2O_LAN_DSC_TRANSMIT_ERROR 0x03
+#define I2O_LAN_DSC_TRANSMIT_ABORTED 0x04
+#define I2O_LAN_DSC_RECEIVE_ERROR 0x05
+#define I2O_LAN_DSC_RECEIVE_ABORTED 0x06
+#define I2O_LAN_DSC_DMA_ERROR 0x07
+#define I2O_LAN_DSC_BAD_PACKET_DETECTED 0x08
+#define I2O_LAN_DSC_OUT_OF_MEMORY 0x09
+#define I2O_LAN_DSC_BUCKET_OVERRUN 0x0A
+#define I2O_LAN_DSC_IOP_INTERNAL_ERROR 0x0B
+#define I2O_LAN_DSC_CANCELED 0x0C
+#define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT 0x0D
+#define I2O_LAN_DSC_DEST_ADDRESS_DETECTED 0x0E
+#define I2O_LAN_DSC_DEST_ADDRESS_OMITTED 0x0F
+#define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED 0x10
+#define I2O_LAN_DSC_TEMP_SUSPENDED_STATE 0x11
+
+/* Per-packet descriptor as returned in a receive bucket: 24-bit
+ * offset/length with 8-bit flags/status packed into two 32-bit words. */
+struct i2o_packet_info {
+ u32 offset : 24;
+ u32 flags : 8;
+ u32 len : 24;
+ u32 status : 8;
+};
+
+/* A receive bucket: transaction context followed by a variable number
+ * of packet descriptors (declared as [1]; actual count varies). */
+struct i2o_bucket_descriptor {
+ u32 context; /* FIXME: 64bit support */
+ struct i2o_packet_info packet_info[1];
+};
+
+#endif /* I2O_LAN_H */
--- /dev/null
+/*
+ * Find I2O capable controllers on the PCI bus, and register/install
+ * them with the I2O layer
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+#include <asm/io.h>
+
+/*
+ * Free bus specific resources: mask controller interrupts, release the
+ * IRQ if one was claimed, and unmap the register window.
+ */
+
+static void i2o_pci_dispose(struct i2o_controller *c)
+{
+ I2O_IRQ_WRITE32(c,0xFFFFFFFF); /* mask all interrupts */
+ if(c->bus.pci.irq > 0)
+ free_irq(c->bus.pci.irq, c);
+ /* post_port is mapped base + 0x40 (see i2o_pci_install), so back up
+ to the start of the mapping before unmapping */
+ iounmap(((u8 *)c->post_port)-0x40);
+}
+
+/*
+ * No real bus specific handling yet (note that later we will
+ * need to 'steal' PCI devices on i960 mainboards)
+ */
+
+/* Pin the module while any device on this controller is bound. */
+static int i2o_pci_bind(struct i2o_controller *c, struct i2o_device *dev)
+{
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/* Drop the module reference taken in i2o_pci_bind(). */
+static int i2o_pci_unbind(struct i2o_controller *c, struct i2o_device *dev)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Bus specific interrupt handler: hand the controller's reply queue to
+ * the core for draining. Registered SA_SHIRQ with the controller as
+ * dev_id.
+ */
+
+static void i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
+{
+ struct i2o_controller *c = dev_id;
+ i2o_run_queue(c);
+}
+
+/*
+ * Install a PCI (or in theory AGP) i2o controller
+ *
+ * Locates the first memory BAR, sizes and maps it, wires up the
+ * register pointers, registers the controller with the core and claims
+ * its IRQ. Returns 0 on success or a negative errno; the controller
+ * structure is freed on the early error paths.
+ */
+
+int __init i2o_pci_install(struct pci_dev *dev)
+{
+ struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
+ GFP_KERNEL);
+ u8 *mem;
+ u32 memptr = 0;
+ u32 size;
+
+ int i;
+
+ if(c==NULL)
+ {
+ printk(KERN_ERR "i2o_pci: insufficient memory to add controller.\n");
+ return -ENOMEM;
+ }
+ memset(c, 0, sizeof(*c));
+
+ for(i=0; i<6; i++)
+ {
+ /* Skip I/O spaces */
+ if(!(dev->base_address[i]&PCI_BASE_ADDRESS_SPACE))
+ {
+ memptr=PCI_BASE_ADDRESS_MEM_MASK&dev->base_address[i];
+ break;
+ }
+ }
+
+ if(i==6)
+ {
+ printk(KERN_ERR "i2o_pci: I2O controller has no memory regions defined.\n");
+ kfree(c); /* was leaked on this path */
+ return -ENOMEM;
+ }
+
+ /* Standard PCI BAR sizing: write all-ones, read back the mask;
+ -size is then the region length in bytes. */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, 0xFFFFFFFF);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, &size);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, dev->base_address[i]);
+
+ /* Map the I2O controller */
+
+ printk(KERN_INFO "PCI I2O controller at 0x%08X size=%d\n", memptr, -size);
+ mem = ioremap(memptr, -size);
+ if(mem==NULL) /* was used unchecked */
+ {
+ printk(KERN_ERR "i2o_pci: unable to map controller.\n");
+ kfree(c);
+ return -ENOMEM;
+ }
+
+ c->bus.pci.irq = -1;
+
+ /* I2O shell register layout within the mapped window */
+ c->irq_mask = (volatile u32 *)(mem+0x34);
+ c->post_port = (volatile u32 *)(mem+0x40);
+ c->reply_port = (volatile u32 *)(mem+0x44);
+
+ c->mem_phys = memptr;
+ c->mem_offset = (u32)mem;
+ c->destructor = i2o_pci_dispose;
+
+ c->bind = i2o_pci_bind;
+ c->unbind = i2o_pci_unbind;
+
+ c->type = I2O_TYPE_PCI;
+
+ I2O_IRQ_WRITE32(c,0xFFFFFFFF); /* mask interrupts until activated */
+
+ i = i2o_install_controller(c);
+
+ if(i<0)
+ {
+ /* NOTE(review): c (and the mapping) are not released here --
+ confirm whether i2o_install_controller() owns them on
+ failure. */
+ printk(KERN_ERR "i2o: unable to install controller.\n");
+ return i;
+ }
+
+ c->bus.pci.irq = dev->irq;
+ if(c->bus.pci.irq)
+ {
+ i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
+ c->name, c);
+ if(i<0)
+ {
+ printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
+ c->name, dev->irq);
+ c->bus.pci.irq = -1;
+ i2o_delete_controller(c);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/*
+ * i2o_pci_scan(): Walk the PCI device list, install every I2O-class
+ * controller that reports programming interface <= 1 (I2O 1.5), and
+ * return the number installed, or -ENODEV if none were found.
+ */
+int __init i2o_pci_scan(void)
+{
+ struct pci_dev *dev;
+ int count=0;
+
+ printk(KERN_INFO "Checking for PCI I2O controllers...\n");
+
+ for(dev=pci_devices; dev!=NULL; dev=dev->next)
+ {
+ if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O)
+ continue;
+ /* low byte is the programming interface; >1 means a spec
+ revision this driver does not speak */
+ if((dev->class&0xFF)>1)
+ {
+ printk(KERN_INFO "I2O controller found but does not support I2O 1.5 (skipping).\n");
+ continue;
+ }
+ printk(KERN_INFO "I2O controller on bus %d at %d.\n",
+ dev->bus->number, dev->devfn);
+ if(!dev->master)
+ printk(KERN_WARNING "Controller not master enabled.\n");
+ if(i2o_pci_install(dev)==0)
+ count++;
+ }
+ if(count)
+ printk(KERN_INFO "%d I2O controller%s found and installed.\n", count,
+ count==1?"":"s");
+ return count?count:-ENODEV;
+}
+
+/*
+ * i2o_pci_unload(): Delete every PCI-type controller known to the core.
+ *
+ * NOTE(review): i2o_unlock_controller() is called after
+ * i2o_delete_controller() -- if delete frees the structure this is a
+ * use-after-free; confirm the ordering against the i2o core.
+ */
+static void i2o_pci_unload(void)
+{
+ int i=0;
+ struct i2o_controller *c;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c=i2o_find_controller(i);
+ if(c==NULL)
+ continue;
+ if(c->type == I2O_TYPE_PCI)
+ i2o_delete_controller(c);
+ i2o_unlock_controller(c);
+ }
+}
+
+/*
+ * i2o_pci_activate(): Bring every installed PCI-type controller to the
+ * operational state and unmask its interrupts; controllers that fail
+ * activation are torn down and removed.
+ */
+static void i2o_pci_activate(void)
+{
+ int i=0;
+ struct i2o_controller *c;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c=i2o_find_controller(i);
+ if(c==NULL)
+ continue;
+ if(c->type == I2O_TYPE_PCI)
+ {
+ if(i2o_activate_controller(c))
+ {
+ printk("I2O: Failed to initialize iop%d\n", c->unit);
+ i2o_unlock_controller(c);
+ free_irq(c->bus.pci.irq, c);
+ i2o_delete_controller(c);
+ continue;
+ }
+
+ I2O_IRQ_WRITE32(c,0); /* unmask interrupts */
+ }
+ i2o_unlock_controller(c);
+ }
+}
+
+#ifdef MODULE
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O PCI Interface");
+
+/* Module entry: scan the PCI bus for controllers, then activate them.
+ * Fails with -ENODEV if no controller was installed. */
+int init_module(void)
+{
+ if(i2o_pci_scan()<0)
+ return -ENODEV;
+ i2o_pci_activate();
+ return 0;
+}
+
+/* Module exit: remove all PCI-type controllers from the core. */
+void cleanup_module(void)
+{
+ i2o_pci_unload();
+}
+
+#endif
--- /dev/null
+/*
+ * procfs handler for Linux I2O subsystem
+ *
+ * Copyright (c) 1999 Intel Corporation
+ *
+ * Originally written by Deepak Saxena(deepak.saxena@intel.com)
+ *
+ * This program is free software. You can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is an initial test release. The code is based on the design
+ * of the ide procfs system (drivers/block/ide-proc.c). Some code
+ * taken from i2o-core module by Alan Cox.
+ *
+ * DISCLAIMER: This code is still under development/test and may cause
+ * your system to behave unpredictably. Use at your own discretion.
+ *
+ * LAN entries by Juha Sievänen(Juha.Sievanen@cs.Helsinki.FI),
+ * University of Helsinki, Department of Computer Science
+ *
+ */
+
+/*
+ * set tabstop=3
+ */
+
+/*
+ * TODO List
+ *
+ * - Add support for any version 2.0 spec changes once 2.0 IRTOS is
+ * is available to test with
+ * - Clean up code to use official structure definitions
+ */
+
+// FIXME!
+#define FMT_U64_HEX "0x%08x%08x"
+#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/i2o.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/spinlock.h>
+
+
+#include "i2o_proc.h"
+
+#include "i2o_lan.h"
+
+/*
+ * Structure used to define /proc entries
+ */
+typedef struct _i2o_proc_entry_t
+{
+ char *name; /* entry name */
+ mode_t mode; /* mode */
+ read_proc_t *read_proc; /* read func */
+ write_proc_t *write_proc; /* write func */
+} i2o_proc_entry;
+
+static int proc_context = 0;
+
+
+static int i2o_proc_read_lct(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_hrt(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_stat(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_hw(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_dev(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_dev_name(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_ddm(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_uinfo(char *, char **, off_t, int, int *, void *);
+static int print_serial_number(char *, int, u8 *, int);
+static int i2o_proc_create_entries(void *,
+ i2o_proc_entry *p, struct proc_dir_entry *);
+static void i2o_proc_remove_entries(i2o_proc_entry *p,
+ struct proc_dir_entry *);
+static int i2o_proc_add_controller(struct i2o_controller *,
+ struct proc_dir_entry * );
+static void i2o_proc_remove_controller(struct i2o_controller *,
+ struct proc_dir_entry * );
+static int create_i2o_procfs(void);
+static int destroy_i2o_procfs(void);
+static void i2o_proc_reply(struct i2o_handler *, struct i2o_controller *,
+ struct i2o_message *);
+
+static int i2o_proc_read_lan_dev_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_mac_addr(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_curr_addr(char *, char **, off_t, int, int *,
+ void *);
+#if 0
+static int i2o_proc_read_lan_mcast_addr(char *, char **, off_t, int, int *,
+ void *);
+#endif
+static int i2o_proc_read_lan_batch_control(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_operation(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_media_operation(char *, char **, off_t, int,
+ int *, void *);
+#if 0
+static int i2o_proc_read_lan_alt_addr(char *, char **, off_t, int, int *,
+ void *);
+#endif
+static int i2o_proc_read_lan_tx_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_rx_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_hist_stats(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_opt_tx_hist_stats(char *, char **, off_t, int,
+ int *, void *);
+static int i2o_proc_read_lan_opt_rx_hist_stats(char *, char **, off_t, int,
+ int *, void *);
+static int i2o_proc_read_lan_fddi_stats(char *, char **, off_t, int, int *,
+ void *);
+
+#if 0
+/* Do we really need this??? */
+
+static loff_t i2o_proc_lseek(struct file *file, loff_t off, int whence)
+{
+ return 0;
+}
+#endif
+
+static struct proc_dir_entry *i2o_proc_dir_root;
+
+/*
+ * Message handler
+ */
+static struct i2o_handler i2o_proc_handler =
+{
+ (void *)i2o_proc_reply,
+ "I2O procfs Layer",
+ 0
+};
+
+/*
+ * IOP specific entries...write field just in case someone
+ * ever wants one.
+ */
+static i2o_proc_entry generic_iop_entries[] =
+{
+ {"hrt", S_IFREG|S_IRUGO, i2o_proc_read_hrt, NULL},
+ {"lct", S_IFREG|S_IRUGO, i2o_proc_read_lct, NULL},
+ {"stat", S_IFREG|S_IRUGO, i2o_proc_read_stat, NULL},
+ {"hw", S_IFREG|S_IRUGO, i2o_proc_read_hw, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Device specific entries
+ */
+static i2o_proc_entry generic_dev_entries[] =
+{
+ {"dev_identity", S_IFREG|S_IRUGO, i2o_proc_read_dev, NULL},
+ {"ddm_identity", S_IFREG|S_IRUGO, i2o_proc_read_ddm, NULL},
+ {"user_info", S_IFREG|S_IRUGO, i2o_proc_read_uinfo, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Storage unit specific entries (SCSI Periph, BS) with device names
+ */
+static i2o_proc_entry rbs_dev_entries[] =
+{
+ {"dev_name", S_IFREG|S_IRUGO, i2o_proc_read_dev_name, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+#define SCSI_TABLE_SIZE 13
+ static char *scsi_devices[] =
+ {
+ "Direct-Access Read/Write",
+ "Sequential-Access Storage",
+ "Printer",
+ "Processor",
+ "WORM Device",
+ "CD-ROM Device",
+ "Scanner Device",
+ "Optical Memory Device",
+ "Medium Changer Device",
+ "Communications Device",
+ "Graphics Art Pre-Press Device",
+ "Graphics Art Pre-Press Device",
+ "Array Controller Device"
+ };
+
+/* private */
+
+/*
+ * LAN specific entries
+ *
+ * Should groups with r/w entries have their own subdirectory?
+ *
+ */
+static i2o_proc_entry lan_entries[] =
+{
+ /* LAN param groups 0000h-0008h */
+ {"lan_dev_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_dev_info, NULL},
+ {"lan_mac_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_mac_addr, NULL},
+#if 0
+ {"lan_mcast_addr", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_mcast_addr, NULL},
+#endif
+ {"lan_batch_ctrl", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_batch_control, NULL},
+ {"lan_operation", S_IFREG|S_IRUGO, i2o_proc_read_lan_operation, NULL},
+ {"lan_media_operation", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_media_operation, NULL},
+#if 0
+ {"lan_alt_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_alt_addr, NULL},
+#endif
+ {"lan_tx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_tx_info, NULL},
+ {"lan_rx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_rx_info, NULL},
+ {"lan_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_hist_stats, NULL},
+ {"lan_opt_tx_stats", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_opt_tx_hist_stats, NULL},
+ {"lan_opt_rx_stats", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_opt_rx_hist_stats, NULL},
+ {"lan_fddi_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_fddi_stats, NULL},
+ /* some useful r/w entries, no write yet */
+ {"lan_curr_addr", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_curr_addr, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+static u32 i2o_proc_token = 0;
+
+static char* bus_strings[] =
+{
+ "Local Bus",
+ "ISA",
+ "EISA",
+ "MCA",
+ "PCI",
+ "PCMCIA",
+ "NUBUS",
+ "CARDBUS"
+};
+
+static spinlock_t i2o_proc_lock = SPIN_LOCK_UNLOCKED;
+
+/* Reply callback for the procfs handler: any reply simply marks the
+ * outstanding i2o_post_wait() as completed successfully. */
+void i2o_proc_reply(struct i2o_handler *phdlr, struct i2o_controller *pctrl,
+ struct i2o_message *pmsg)
+{
+ i2o_proc_token = I2O_POST_WAIT_OK;
+}
+
+/*
+ * i2o_proc_read_hrt(): /proc read handler that fetches the controller's
+ * Hardware Resource Table (I2O_CMD_HRT_GET) into a scratch buffer and
+ * formats one entry per adapter, with bus-specific detail.
+ *
+ * Returns the number of bytes written to buf. All exit paths release
+ * i2o_proc_lock and free the workspace (the hrt_version path previously
+ * leaked the buffer and returned with the spinlock held, deadlocking
+ * the next reader).
+ */
+int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller *)data;
+ pi2o_hrt hrt;
+ u32 msg[6];
+ u32 *workspace;
+ u32 bus;
+ int count;
+ int i;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ workspace = kmalloc(2048, GFP_KERNEL);
+ hrt = (pi2o_hrt)workspace;
+ if(workspace==NULL)
+ {
+ len += sprintf(buf, "No free memory for HRT buffer\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ memset(workspace, 0, 2048);
+
+ msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+ msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= (u32)proc_context;
+ msg[3]= 0;
+ msg[4]= (0xD0000000 | 2048); /* simple SGL, 2K reply buffer */
+ msg[5]= virt_to_phys(workspace);
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 6*4, &i2o_proc_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ len += sprintf(buf, "Timeout waiting for HRT\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ if(hrt->hrt_version)
+ {
+ len += sprintf(buf+len,
+ "HRT table for controller is too new a version.\n");
+ kfree(workspace); /* was leaked on this path */
+ spin_unlock(&i2o_proc_lock); /* was left held on this path */
+ return len;
+ }
+
+ count = hrt->num_entries;
+
+ if((count * hrt->entry_len + 8) > 2048) {
+ printk(KERN_WARNING "i2o_proc: HRT does not fit into buffer\n");
+ len += sprintf(buf+len,
+ "HRT table too big to fit in buffer.\n");
+ kfree(workspace); /* was leaked on this path too */
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf+len, "HRT has %d entries of %d bytes each.\n",
+ count, hrt->entry_len);
+
+ for(i = 0; i < count; i++)
+ {
+ len += sprintf(buf+len, "Entry %d:\n", i);
+ len += sprintf(buf+len, " Adapter ID: %0#10x\n",
+ hrt->hrt_entry[i].adapter_id);
+ len += sprintf(buf+len, " Controlled by: %0#6x\n",
+ hrt->hrt_entry[i].parent_tid);
+ len += sprintf(buf+len, " Bus#%d\n",
+ hrt->hrt_entry[i].bus_num);
+
+ if(hrt->hrt_entry[i].bus_type != 0x80)
+ {
+ bus = hrt->hrt_entry[i].bus_type;
+ len += sprintf(buf+len, " %s Information\n", bus_strings[bus]);
+
+ switch(bus)
+ {
+ case I2O_BUS_LOCAL:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.local_bus.LbBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x\n",
+ hrt->hrt_entry[i].bus.local_bus.LbBaseMemoryAddress);
+ break;
+
+ case I2O_BUS_ISA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.isa_bus.IsaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.isa_bus.IsaBaseMemoryAddress);
+ len += sprintf(buf+len, " CSN: %0#4x,",
+ hrt->hrt_entry[i].bus.isa_bus.CSN);
+ break;
+
+ case I2O_BUS_EISA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaBaseMemoryAddress);
+ len += sprintf(buf+len, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaSlotNumber);
+ break;
+
+ case I2O_BUS_MCA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaBaseMemoryAddress);
+ len += sprintf(buf+len, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaSlotNumber);
+ break;
+
+ case I2O_BUS_PCI:
+ len += sprintf(buf+len, " Bus: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciBusNumber);
+ len += sprintf(buf+len, " Dev: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciDeviceNumber);
+ len += sprintf(buf+len, " Func: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciFunctionNumber);
+ len += sprintf(buf+len, " Vendor: %0#6x",
+ hrt->hrt_entry[i].bus.pci_bus.PciVendorID);
+ len += sprintf(buf+len, " Device: %0#6x\n",
+ hrt->hrt_entry[i].bus.pci_bus.PciDeviceID);
+ break;
+
+ default:
+ len += sprintf(buf+len, " Unsupported Bus Type\n");
+ }
+ }
+ else
+ len += sprintf(buf+len, " Unknown Bus Type\n");
+ }
+
+ kfree(workspace);
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+/*
+ * i2o_proc_read_lct(): /proc read handler that fetches the Logical
+ * Configuration Table (I2O_CMD_LCT_NOTIFY, report-now) and formats one
+ * entry per device, decoding subclass names for block storage, LAN,
+ * SCSI peripheral and bus-port classes.
+ *
+ * Returns the number of bytes written to buf; frees the workspace and
+ * releases i2o_proc_lock on every path.
+ */
+int i2o_proc_read_lct(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller*)data;
+ u32 msg[8];
+ u32 *workspace;
+ pi2o_lct lct; /* = (pi2o_lct)c->lct; */
+ int entries;
+ int token;
+ int i;
+
+#define BUS_TABLE_SIZE 3
+ static char *bus_ports[] =
+ {
+ "Generic Bus",
+ "SCSI Bus",
+ "Fibre Channel Bus"
+ };
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ lct = (pi2o_lct)workspace;
+ if(workspace==NULL)
+ {
+ len += sprintf(buf, "No free memory for LCT buffer\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ memset(workspace, 0, 8192);
+
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = (u32)proc_context;
+ msg[3] = 0;
+ msg[4] = 0xFFFFFFFF; /* All devices */
+ msg[5] = 0x00000000; /* Report now */
+ msg[6] = 0xD0000000|8192; /* simple SGL, 8K reply buffer */
+ msg[7] = virt_to_bus(workspace);
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 8*4, &i2o_proc_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ len += sprintf(buf, "Timeout waiting for LCT\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ /* NOTE(review): assumes a 3-word header and 9-word entries --
+ confirm against the I2O spec / lct->entry size field. */
+ entries = (lct->table_size - 3)/9;
+
+ len += sprintf(buf, "LCT contains %d %s\n", entries,
+ entries == 1 ? "entry" : "entries");
+ if(lct->boot_tid)
+ len += sprintf(buf+len, "Boot Device @ ID %d\n", lct->boot_tid);
+
+ for(i = 0; i < entries; i++)
+ {
+ len += sprintf(buf+len, "Entry %d\n", i);
+
+ len += sprintf(buf+len, " %s", i2o_get_class_name(lct->lct_entry[i].class_id));
+
+ /*
+ * Classes which we'll print subclass info for
+ */
+ switch(lct->lct_entry[i].class_id & 0xFFF)
+ {
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ switch(lct->lct_entry[i].sub_class)
+ {
+ case 0x00:
+ len += sprintf(buf+len, ": Direct-Access Read/Write");
+ break;
+
+ case 0x04:
+ len += sprintf(buf+len, ": WORM Drive");
+ break;
+
+ case 0x05:
+ len += sprintf(buf+len, ": CD-ROM Drive");
+ break;
+
+ case 0x07:
+ len += sprintf(buf+len, ": Optical Memory Device");
+ break;
+
+ default:
+ len += sprintf(buf+len, ": Unknown");
+ break;
+ }
+ break;
+
+ case I2O_CLASS_LAN:
+ switch(lct->lct_entry[i].sub_class & 0xFF)
+ {
+ case 0x30:
+ len += sprintf(buf+len, ": Ethernet");
+ break;
+
+ case 0x40:
+ len += sprintf(buf+len, ": 100base VG");
+ break;
+
+ case 0x50:
+ len += sprintf(buf+len, ": IEEE 802.5/Token-Ring");
+ break;
+
+ case 0x60:
+ len += sprintf(buf+len, ": ANSI X3T9.5 FDDI");
+ break;
+
+ case 0x70:
+ len += sprintf(buf+len, ": Fibre Channel");
+ break;
+
+ default:
+ len += sprintf(buf+len, ": Unknown Sub-Class");
+ break;
+ }
+ break;
+
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ if(lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
+ len += sprintf(buf+len, ": %s",
+ scsi_devices[lct->lct_entry[i].sub_class]);
+ else
+ len += sprintf(buf+len, ": Unknown Device Type");
+ break;
+
+ case I2O_CLASS_BUS_ADAPTER_PORT:
+ if(lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
+ len += sprintf(buf+len, ": %s",
+ bus_ports[lct->lct_entry[i].sub_class]);
+ else
+ len += sprintf(buf+len, ": Unknown Bus Type");
+ break;
+ }
+ len += sprintf(buf+len, "\n");
+
+ len += sprintf(buf+len, " Local TID: 0x%03x\n", lct->lct_entry[i].tid);
+ len += sprintf(buf+len, " User TID: 0x%03x\n", lct->lct_entry[i].user_tid);
+ len += sprintf(buf+len, " Parent TID: 0x%03x\n",
+ lct->lct_entry[i].parent_tid);
+ len += sprintf(buf+len, " Identity Tag: 0x%x%x%x%x%x%x%x%x\n",
+ lct->lct_entry[i].identity_tag[0],
+ lct->lct_entry[i].identity_tag[1],
+ lct->lct_entry[i].identity_tag[2],
+ lct->lct_entry[i].identity_tag[3],
+ lct->lct_entry[i].identity_tag[4],
+ lct->lct_entry[i].identity_tag[5],
+ lct->lct_entry[i].identity_tag[6],
+ lct->lct_entry[i].identity_tag[7]);
+ len += sprintf(buf+len, " Change Indicator: %0#10x\n",
+ lct->lct_entry[i].change_ind);
+ len += sprintf(buf+len, " Device Flags: %0#10x\n",
+ lct->lct_entry[i].device_flags);
+ }
+
+ kfree(workspace);
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+/*
+ * /proc read handler: dump the IOP status block.
+ *
+ * Posts an ExecStatusGet (I2O_CMD_STATUS_GET) to the controller, waits up
+ * to 2 seconds for the IOP to DMA the 88-byte status block into
+ * 'workspace' (the IOP writes 0xFF to the final byte when the transfer
+ * completes), then pretty-prints the fields into 'buf'.
+ * Returns the number of bytes written to buf.
+ */
+int i2o_proc_read_stat(char *buf, char **start, off_t offset, int len,
+		       int *eof, void *data)
+{
+	struct i2o_controller *c = (struct i2o_controller*)data;
+	u32 *msg;
+	u32 m;
+	u8 *workspace;
+	u16 *work16;
+	u32 *work32;
+	long time;
+	char prodstr[25];
+	int version;
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	/* DMA target for the 88-byte status block */
+	workspace = (u8*)kmalloc(88, GFP_KERNEL);
+	if(!workspace)
+	{
+		len += sprintf(buf, "No memory for status transfer\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	m = I2O_POST_READ32(c);
+	if(m == 0xFFFFFFFF)
+	{
+		len += sprintf(buf, "Could not get inbound message frame from IOP!\n");
+		kfree(workspace);
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	msg = (u32 *)(m+c->mem_offset);
+
+	memset(workspace, 0, 88);
+	work32 = (u32*)workspace;
+	work16 = (u16*)workspace;
+
+	msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
+	msg[1] = I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
+	msg[2] = msg[3] = msg[4] = msg[5] = 0;
+	msg[6] = virt_to_phys(workspace);
+	msg[7] = 0;	/* FIXME: 64-bit */
+	msg[8] = 88;
+
+	/*
+	 * hmm...i2o_post_message should just take ptr to message, and
+	 * determine offset on it's own...less work for OSM developers
+	 */
+	i2o_post_message(c, m);
+
+	time = jiffies;
+
+	/* IOP sets the last byte to 0xFF when the status block is valid */
+	while(workspace[87] != 0xFF)
+	{
+		if(jiffies-time >= 2*HZ)
+		{
+			len += sprintf(buf, "Timeout waiting for status reply\n");
+			kfree(workspace);
+			spin_unlock(&i2o_proc_lock);
+			return len;
+		}
+		schedule();
+		barrier();
+	}
+
+	len += sprintf(buf+len, "Organization ID: %0#6x\n", work16[0]);
+
+	/*
+	 * I2O version is the HIGH nibble of byte 9.  The old expression
+	 * "workspace[9]&0xF0>>4" parsed as "workspace[9] & (0xF0>>4)"
+	 * (shift binds tighter than &) and extracted the low nibble.
+	 */
+	version = (workspace[9]&0xF0)>>4;
+	if(version == 0x02) {
+		len += sprintf(buf+len, "Lowest I2O version supported: ");
+		switch(workspace[2]) {
+		case 0x00:
+		case 0x01:
+			len += sprintf(buf+len, "1.5\n");
+			break;
+		case 0x02:
+			len += sprintf(buf+len, "2.0\n");
+			break;
+		}
+
+		len += sprintf(buf+len, "Highest I2O version supported: ");
+		switch(workspace[3]) {
+		case 0x00:
+		case 0x01:
+			len += sprintf(buf+len, "1.5\n");
+			break;
+		case 0x02:
+			len += sprintf(buf+len, "2.0\n");
+			break;
+		}
+	}
+
+	len += sprintf(buf+len, "IOP ID: %0#5x\n", work16[2]&0xFFF);
+	len += sprintf(buf+len, "Host Unit ID: %0#6x\n", work16[3]);
+	len += sprintf(buf+len, "Segment Number: %0#5x\n", work16[4]&0XFFF);
+
+	len += sprintf(buf+len, "I2O Version: ");
+	switch(version)
+	{
+	case 0x00:
+	case 0x01:
+		len += sprintf(buf+len, "1.5\n");
+		break;
+	case 0x02:
+		len += sprintf(buf+len, "2.0\n");
+		break;
+	default:
+		len += sprintf(buf+len, "Unknown version\n");
+	}
+
+	len += sprintf(buf+len, "IOP State: ");
+	switch(workspace[10])
+	{
+	case 0x01:
+		len += sprintf(buf+len, "Init\n");
+		break;
+
+	case 0x02:
+		len += sprintf(buf+len, "Reset\n");
+		break;
+
+	case 0x04:
+		len += sprintf(buf+len, "Hold\n");
+		break;
+
+	case 0x05:
+		/* Per the I2O 1.5 spec state 0x05 is READY (was "Hold",
+		 * duplicating case 0x04) */
+		len += sprintf(buf+len, "Ready\n");
+		break;
+
+	case 0x08:
+		len += sprintf(buf+len, "Operational\n");
+		break;
+
+	case 0x10:
+		len += sprintf(buf+len, "FAILED\n");
+		break;
+
+	case 0x11:
+		len += sprintf(buf+len, "FAULTED\n");
+		break;
+
+	default:
+		len += sprintf(buf+len, "Unknown\n");
+		break;
+	}
+
+	/* 0x00 is the only type supported w/spec 1.5 */
+	/* Added 2.0 types */
+	len += sprintf(buf+len, "Messenger Type: ");
+	switch (workspace[11])
+	{
+	case 0x00:
+		len += sprintf(buf+len, "Memory Mapped\n");
+		break;
+	case 0x01:
+		len += sprintf(buf+len, "Memory mapped only\n");
+		break;
+	case 0x02:
+		len += sprintf(buf+len, "Remote only\n");
+		break;
+	case 0x03:
+		len += sprintf(buf+len, "Memory mapped and remote\n");
+		break;
+	default:
+		len += sprintf(buf+len, "Unknown\n");
+		break;
+	}
+	len += sprintf(buf+len, "Inbound Frame Size: %d bytes\n", work16[6]*4);
+	len += sprintf(buf+len, "Max Inbound Frames: %d\n", work32[4]);
+	len += sprintf(buf+len, "Current Inbound Frames: %d\n", work32[5]);
+	len += sprintf(buf+len, "Max Outbound Frames: %d\n", work32[6]);
+
+	/* Spec doesn't say if NULL terminated or not... */
+	memcpy(prodstr, work32+7, 24);
+	prodstr[24] = '\0';
+	len += sprintf(buf+len, "Product ID: %s\n", prodstr);
+
+	len += sprintf(buf+len, "LCT Size: %d\n", work32[13]);
+
+	len += sprintf(buf+len, "Desired Private Memory Space: %d kB\n",
+		       work32[15]>>10);
+	len += sprintf(buf+len, "Allocated Private Memory Space: %d kB\n",
+		       work32[16]>>10);
+	len += sprintf(buf+len, "Private Memory Base Address: %0#10x\n",
+		       work32[17]);
+	len += sprintf(buf+len, "Desired Private I/O Space: %d kB\n",
+		       work32[18]>>10);
+	len += sprintf(buf+len, "Allocated Private I/O Space: %d kB\n",
+		       work32[19]>>10);
+	len += sprintf(buf+len, "Private I/O Base Address: %0#10x\n",
+		       work32[20]);
+
+	/* Was leaked on the success path: every error path freed it */
+	kfree(workspace);
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+/*
+ * /proc read handler: IOP hardware information (Exec ParamGroup 0000h).
+ * Prints vendor/product IDs, memory sizes, capability bits and CPU type.
+ * Returns the number of bytes written to buf.
+ */
+int i2o_proc_read_hw(char *buf, char **start, off_t offset, int len,
+		     int *eof, void *data)
+{
+	struct i2o_controller *c = (struct i2o_controller*)data;
+	static u32 work32[5];		/* shared; serialized by i2o_proc_lock */
+	static u8 *work8 = (u8*)work32;
+	static u16 *work16 = (u16*)work32;
+	int token;
+	u32 hwcap;
+
+	/* CpuType field (work8[16]) indexes this table: 8 entries, 0-7 */
+	static char *cpu_table[] =
+	{
+		"Intel 80960 Series",
+		"AMD2900 Series",
+		"Motorola 68000 Series",
+		"ARM Series",
+		"MIPS Series",
+		"Sparc Series",
+		"PowerPC Series",
+		"Intel x86 Series"
+	};
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(c, ADAPTER_TID, proc_context,
+				 0,		// ParamGroup 0x0000h
+				 -1,		// all fields
+				 &work32,
+				 sizeof(work32),
+				 &i2o_proc_token);
+
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "IOP Hardware Information Table\n");
+
+	len += sprintf(buf+len, "I2O Vendor ID: %0#6x\n", work16[0]);
+	len += sprintf(buf+len, "Product ID: %0#6x\n", work16[1]);
+	len += sprintf(buf+len, "RAM: %dkB\n", work32[1]>>10);
+	len += sprintf(buf+len, "Non-Volatile Storage: %dkB\n", work32[2]>>10);
+
+	hwcap = work32[3];
+	len += sprintf(buf+len, "Capabilities:\n");
+	if(hwcap&0x00000001)
+		len += sprintf(buf+len, " Self-booting\n");
+	if(hwcap&0x00000002)
+		len += sprintf(buf+len, " Upgradable IRTOS\n");
+	if(hwcap&0x00000004)
+		len += sprintf(buf+len, " Supports downloading DDMs\n");
+	if(hwcap&0x00000008)
+		len += sprintf(buf+len, " Supports installing DDMs\n");
+	if(hwcap&0x00000010)
+		len += sprintf(buf+len, " Battery-backed RAM\n");
+
+	len += sprintf(buf+len, "CPU: ");
+	/*
+	 * cpu_table has 8 entries (0-7); the old test "> 8" allowed
+	 * index 8 to read one past the end of the table.
+	 */
+	if(work8[16] >= sizeof(cpu_table)/sizeof(cpu_table[0]))
+		len += sprintf(buf+len, "Unknown\n");
+	else
+		len += sprintf(buf+len, "%s\n", cpu_table[work8[16]]);
+	/* Anyone using ProcessorVersion? */
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+/*
+ * /proc read handler: Device Identity (ParamGroup F100h) for one device.
+ * Prints class, owner/parent TIDs, vendor/product/description strings,
+ * product revision and serial number.  Returns bytes written to buf.
+ * NOTE(review): the static work32 buffer is shared between calls and is
+ * serialized only by i2o_proc_lock.
+ */
+int i2o_proc_read_dev(char *buf, char **start, off_t offset, int len,
+			int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[128];		// allow for "stuff" + up to 256 byte (max) serial number
+					// == (allow) 512d bytes (max)
+	static u16 *work16 = (u16*)work32;
+	char sz[17];			/* 16-byte text field + trailing '\0' */
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0xF100,	// ParamGroup F100h (Device Identity)
+				 -1,		// all fields
+				 &work32,
+				 sizeof(work32),
+				 &i2o_proc_token);
+
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Device Class: %s\n", i2o_get_class_name(work16[0]));
+
+	len += sprintf(buf+len, "Owner TID: %0#5x\n", work16[2]);
+	len += sprintf(buf+len, "Parent TID: %0#5x\n", work16[3]);
+
+	/* Fixed 16-byte text fields at u32 offsets 2, 6 and 10; they may
+	 * not be NUL-terminated, so terminate explicitly. */
+	memcpy(sz, work32+2, 16);
+	sz[16] = '\0';
+	len += sprintf(buf+len, "Vendor Info: %s\n", sz);
+
+	memcpy(sz, work32+6, 16);
+	sz[16] = '\0';
+	len += sprintf(buf+len, "Product Info: %s\n", sz);
+
+	memcpy(sz, work32+10, 16);
+	sz[16] = '\0';
+	len += sprintf(buf+len, "Description: %s\n", sz);
+
+	/* Product revision is only 8 bytes */
+	memcpy(sz, work32+14, 8);
+	sz[8] = '\0';
+	len += sprintf(buf+len, "Product Revision: %s\n", sz);
+
+	len += sprintf(buf+len, "Serial Number: ");
+	/* print_serial_number() returns the new TOTAL length, not a delta */
+	len = print_serial_number(buf, len,
+				  (u8*)(work32+16),
+				  /* allow for SNLen plus
+				   * possible trailing '\0'
+				   */
+				  sizeof(work32)-(16*sizeof(u32))-2
+				  );
+	len += sprintf(buf+len, "\n");
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+
+/*
+ * /proc read handler: emit the device name stored in the i2o_device,
+ * or produce an empty file when no name has been set.
+ */
+int i2o_proc_read_dev_name(char *buf, char **start, off_t offset, int len,
+			  int *eof, void *data)
+{
+	struct i2o_device *dev = (struct i2o_device *)data;
+	const char *name = dev->dev_name;
+
+	/* No name registered yet -> nothing to show */
+	if (*name == '\0')
+		return 0;
+
+	return sprintf(buf, "%s\n", name);
+}
+
+
+
+/*
+ * /proc read handler: DDM Identity (ParamGroup F101h) for one device.
+ * Prints the registering DDM's TID, module name, module revision and
+ * serial number.  Returns bytes written to buf.
+ */
+int i2o_proc_read_ddm(char *buf, char **start, off_t offset, int len,
+		      int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[128];		/* shared; serialized by i2o_proc_lock */
+	static u16 *work16 = (u16*)work32;
+	int token;
+	char mod[25];			/* 24-byte text field + trailing '\0' */
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0xF101,	// ParamGroup F101h (DDM Identity)
+				 -1,		// all fields
+				 &work32,
+				 sizeof(work32),
+				 &i2o_proc_token);
+
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Registering DDM TID: 0x%03x\n", work16[0]&0xFFF);
+
+	/* Module name: 24 bytes at u16 offset 1; terminate explicitly */
+	memcpy(mod, (char*)(work16+1), 24);
+	mod[24] = '\0';
+	len += sprintf(buf+len, "Module Name: %s\n", mod);
+
+	/* Module revision: 8 bytes at u16 offset 13 */
+	memcpy(mod, (char*)(work16+13), 8);
+	mod[8] = '\0';
+	len += sprintf(buf+len, "Module Rev: %s\n", mod);
+
+	len += sprintf(buf+len, "Serial Number: ");
+	/* print_serial_number() returns the new TOTAL length, not a delta */
+	len = print_serial_number(buf, len,
+				  (u8*)(work16+17),
+				  /* allow for SNLen plus
+				   * possible trailing '\0'
+				   */
+				  sizeof(work32)-(17*sizeof(u16))-2
+				  );
+	len += sprintf(buf+len, "\n");
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+/*
+ * /proc read handler: User Information (ParamGroup F102h) for one device.
+ * Prints the device, service and physical names plus the instance number.
+ * Returns bytes written to buf.
+ */
+int i2o_proc_read_uinfo(char *buf, char **start, off_t offset, int len,
+			int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[128];		/* shared; serialized by i2o_proc_lock */
+	int token;
+	int i;
+	char sz[65];			/* longest field (64 bytes) + '\0' */
+	/* F102h layout: label format, u32 offset into work32, byte length */
+	static const struct {
+		char *fmt;
+		int off;
+		int bytes;
+	} field[] = {
+		{ "Device Name: %s\n",      0, 64 },
+		{ "Service Name: %s\n",    16, 64 },
+		{ "Physical Name: %s\n",   32, 64 },
+		{ "Instance Number: %s\n", 48,  4 }
+	};
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0xF102,	// ParamGroup F102h (User Information)
+				 -1,		// all fields
+				 &work32,
+				 sizeof(work32),
+				 &i2o_proc_token);
+
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	/* Each field may lack a NUL terminator, so copy and terminate */
+	for(i = 0; i < 4; i++)
+	{
+		memcpy(sz, (char*)(work32+field[i].off), field[i].bytes);
+		sz[field[i].bytes] = '\0';
+		len += sprintf(buf+len, field[i].fmt, sz);
+	}
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+/*
+ * Format an I2O serial number field into 'buff' starting at 'pos'.
+ * serialno[0] is the SNFormat, serialno[1] the SNLen (format dependent),
+ * data follows from serialno[2].  'max_len' bounds the ASCII copy only.
+ * Returns the new TOTAL length of buff (not a delta).
+ * NOTE(review): may write a '\0' into serialno[] in the ASCII case, i.e.
+ * the input buffer is modified.
+ */
+static int print_serial_number(char *buff, int pos, u8 *serialno, int max_len)
+{
+	int i;
+
+	/* 19990419 -sralston
+	 * The I2O v1.5 (and v2.0 so far) "official specification"
+	 * got serial numbers WRONG!
+	 * Apparently, and despite what Section 3.4.4 says and
+	 * Figure 3-35 shows (pg 3-39 in the pdf doc),
+	 * the convention / consensus seems to be:
+	 *   + First byte is SNFormat
+	 *   + Second byte is SNLen (but only if SNFormat==7 (?))
+	 *   + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
+	 */
+	switch(serialno[0])
+	{
+	case I2O_SNFORMAT_BINARY:		/* Binary */
+		pos += sprintf(buff+pos, "0x");
+		for(i = 0; i < serialno[1]; i++)
+		{
+			pos += sprintf(buff+pos, "%02X", serialno[2+i]);
+		}
+		break;
+
+	case I2O_SNFORMAT_ASCII:		/* ASCII */
+		if ( serialno[1] < ' ' )	/* printable or SNLen? */
+		{
+			/* sanity */
+			max_len = (max_len < serialno[1]) ? max_len : serialno[1];
+			serialno[1+max_len] = '\0';
+
+			/* just print it */
+			pos += sprintf(buff+pos, "%s", &serialno[2]);
+		}
+		else
+		{
+			/* print chars for specified length */
+			for(i = 0; i < serialno[1]; i++)
+			{
+				pos += sprintf(buff+pos, "%c", serialno[2+i]);
+			}
+		}
+		break;
+
+	case I2O_SNFORMAT_UNICODE:		/* UNICODE */
+		pos += sprintf(buff+pos, "UNICODE Format.  Can't Display\n");
+		break;
+
+	case I2O_SNFORMAT_LAN48_MAC:		/* LAN-48 MAC Address */
+		pos += sprintf(buff+pos,
+			       "LAN-48 MAC Address @ %02X:%02X:%02X:%02X:%02X:%02X",
+			       serialno[2], serialno[3],
+			       serialno[4], serialno[5],
+			       serialno[6], serialno[7]);
+		/* Was missing: without it a LAN-48 serial number fell
+		 * through and also printed "WAN Access Address" */
+		break;
+
+	case I2O_SNFORMAT_WAN:			/* WAN MAC Address */
+		/* FIXME: Figure out what a WAN access address looks like?? */
+		pos += sprintf(buff+pos, "WAN Access Address");
+		break;
+
+/* plus new in v2.0 */
+	case I2O_SNFORMAT_LAN64_MAC:		/* LAN-64 MAC Address */
+		/* FIXME: Figure out what a LAN-64 address really looks like?? */
+		pos += sprintf(buff+pos,
+			       "LAN-64 MAC Address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
+			       serialno[8], serialno[9],
+			       serialno[2], serialno[3],
+			       serialno[4], serialno[5],
+			       serialno[6], serialno[7]);
+		break;
+
+	case I2O_SNFORMAT_DDM:			/* I2O DDM */
+		pos += sprintf(buff+pos,
+			       "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
+			       *(u16*)&serialno[2],
+			       *(u16*)&serialno[4],
+			       *(u16*)&serialno[6]);
+		break;
+
+	case I2O_SNFORMAT_IEEE_REG64:		/* IEEE Registered (64-bit) */
+	case I2O_SNFORMAT_IEEE_REG128:		/* IEEE Registered (128-bit) */
+		/* FIXME: Figure if this is even close?? */
+		pos += sprintf(buff+pos,
+			       "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
+			       *(u32*)&serialno[2],
+			       *(u32*)&serialno[6],
+			       *(u32*)&serialno[10],
+			       *(u32*)&serialno[14]);
+		break;
+
+	case I2O_SNFORMAT_UNKNOWN:		/* Unknown 0    */
+	case I2O_SNFORMAT_UNKNOWN2:		/* Unknown 0xff */
+	default:
+		pos += sprintf(buff+pos, "Unknown Data Format");
+		break;
+	}
+
+	return pos;
+}
+
+/* LAN group 0000h - Device info (scalar) */
+/*
+ * /proc read handler: LAN device info.  Decodes LAN type, port flags,
+ * address format, state, error status, packet sizes, HW address and
+ * wire speeds from ParamGroup 0000h.  Returns bytes written to buf.
+ */
+int i2o_proc_read_lan_dev_info(char *buf, char **start, off_t offset, int len,
+			       int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[56];		/* shared; serialized by i2o_proc_lock */
+	static u8 *work8 = (u8*)work32;
+	static u16 *work16 = (u16*)work32;
+	static u64 *work64 = (u64*)work32;
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0000, -1, &work32, 56*4, &i2o_proc_token);
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	/* work16[0]: LAN type, same encoding as the LAN sub-class codes */
+	len += sprintf(buf, "LAN Type ........... ");
+	switch (work16[0])
+	{
+	case 0x0030:
+		len += sprintf(buf+len, "Ethernet, ");
+		break;
+	case 0x0040:
+		len += sprintf(buf+len, "100Base VG, ");
+		break;
+	case 0x0050:
+		len += sprintf(buf+len, "Token Ring, ");
+		break;
+	case 0x0060:
+		len += sprintf(buf+len, "FDDI, ");
+		break;
+	case 0x0070:
+		len += sprintf(buf+len, "Fibre Channel, ");
+		break;
+	default:
+		len += sprintf(buf+len, "Unknown type, ");
+		break;
+	}
+
+	/* work16[1]: port flags (bit 0 emulated, bit 1 full duplex) */
+	if (work16[1]&0x00000001)
+		len += sprintf(buf+len, "emulated LAN, ");
+	else
+		len += sprintf(buf+len, "physical LAN port, ");
+
+	if (work16[1]&0x00000002)
+		len += sprintf(buf+len, "full duplex\n");
+	else
+		len += sprintf(buf+len, "simplex\n");
+
+	len += sprintf(buf+len, "Address format: ");
+	switch(work8[4]) {
+	case 0x00:
+		len += sprintf(buf+len, "IEEE 48bit\n");
+		break;
+	case 0x01:
+		len += sprintf(buf+len, "FC IEEE\n");
+		break;
+	default:
+		len += sprintf(buf+len, "Unknown\n");
+		break;
+	}
+
+	len += sprintf(buf+len, "State: ");
+	switch(work8[5])
+	{
+	case 0x00:
+		len += sprintf(buf+len, "Unknown\n");
+		break;
+	case 0x01:
+		len += sprintf(buf+len, "Unclaimed\n");
+		break;
+	case 0x02:
+		len += sprintf(buf+len, "Operational\n");
+		break;
+	case 0x03:
+		len += sprintf(buf+len, "Suspended\n");
+		break;
+	case 0x04:
+		len += sprintf(buf+len, "Resetting\n");
+		break;
+	case 0x05:
+		len += sprintf(buf+len, "Error\n");
+		break;
+	case 0x06:
+		len += sprintf(buf+len, "Operational no Rx\n");
+		break;
+	case 0x07:
+		len += sprintf(buf+len, "Suspended no Rx\n");
+		break;
+	default:
+		len += sprintf(buf+len, "Unspecified\n");
+		break;
+	}
+
+	/* work16[3]: error status bit mask; several bits may be set.
+	 * NOTE(review): first message ends with a space, the others with
+	 * '\n' -- output runs together when bit 0 is set alone. */
+	len += sprintf(buf+len, "Error status: ");
+	if(work16[3]&0x0001)
+		len += sprintf(buf+len, "Transmit Control Unit Inoperative ");
+	if(work16[3]&0x0002)
+		len += sprintf(buf+len, "Receive Control Unit Inoperative\n");
+	if(work16[3]&0x0004)
+		len += sprintf(buf+len, "Local memory Allocation Error\n");
+	len += sprintf(buf+len, "\n");
+
+	len += sprintf(buf+len, "Min Packet size: %d\n", work32[2]);
+	len += sprintf(buf+len, "Max Packet size: %d\n", work32[3]);
+	/* HW address is printed as 8 bytes; I2O stores addresses in an
+	 * 8-byte field even for 48-bit MACs */
+	len += sprintf(buf+len, "HW Address: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       work8[16],work8[17],work8[18],work8[19],
+		       work8[20],work8[21],work8[22],work8[23]);
+
+	len += sprintf(buf+len, "Max Tx Wire Speed: " FMT_U64_HEX " bps\n", U64_VAL(&work64[3]));
+	len += sprintf(buf+len, "Max Rx Wire Speed: " FMT_U64_HEX " bps\n", U64_VAL(&work64[4]));
+
+	len += sprintf(buf+len, "Min SDU packet size: 0x%08x\n", work32[10]);
+	len += sprintf(buf+len, "Max SDU packet size: 0x%08x\n", work32[11]);
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+/* LAN group 0001h - MAC address table (scalar) */
+/*
+ * /proc read handler: MAC address info and filter/capability bits from
+ * ParamGroup 0001h.  Returns bytes written to buf.
+ */
+int i2o_proc_read_lan_mac_addr(char *buf, char **start, off_t offset, int len,
+			       int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[48];		/* shared; serialized by i2o_proc_lock */
+	static u8 *work8 = (u8*)work32;
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0001, -1, &work32, 48*4, &i2o_proc_token);
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Active address: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       work8[0],work8[1],work8[2],work8[3],
+		       work8[4],work8[5],work8[6],work8[7]);
+	len += sprintf(buf+len, "Current address: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       work8[8],work8[9],work8[10],work8[11],
+		       work8[12],work8[13],work8[14],work8[15]);
+	len += sprintf(buf+len, "Functional address mask: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       work8[16],work8[17],work8[18],work8[19],
+		       work8[20],work8[21],work8[22],work8[23]);
+
+	/* work32[6] = filter mask (current settings),
+	 * work32[7] = HW/DDM capability mask */
+	len += sprintf(buf+len, "Filter mask: 0x%08x\n", work32[6]);
+	len += sprintf(buf+len, "HW/DDM capabilities: 0x%08x\n", work32[7]);
+	len += sprintf(buf+len, " Unicast packets %ssupported (%sabled)\n",
+		       (work32[7]&0x00000001)?"":"not ",
+		       (work32[6]&0x00000001)?"en":"dis");
+	/* Was "not" without the trailing space -> "notsupported" */
+	len += sprintf(buf+len, " Promiscuous mode %ssupported (%sabled)\n",
+		       (work32[7]&0x00000002)?"":"not ",
+		       (work32[6]&0x00000002)?"en":"dis");
+	len += sprintf(buf+len,
+		       " Multicast promiscuous mode %ssupported (%sabled)\n",
+		       (work32[7]&0x00000004)?"":"not ",
+		       (work32[6]&0x00000004)?"en":"dis");
+	len += sprintf(buf+len,
+		       " Broadcast Reception disabling %ssupported (%sabled)\n",
+		       (work32[7]&0x00000100)?"":"not ",
+		       (work32[6]&0x00000100)?"en":"dis");
+	len += sprintf(buf+len,
+		       " Multicast Reception disabling %ssupported (%sabled)\n",
+		       (work32[7]&0x00000200)?"":"not ",
+		       (work32[6]&0x00000200)?"en":"dis");
+	len += sprintf(buf+len,
+		       " Functional address disabling %ssupported (%sabled)\n",
+		       (work32[7]&0x00000400)?"":"not ",
+		       (work32[6]&0x00000400)?"en":"dis");
+	len += sprintf(buf+len, " MAC reporting %ssupported\n",
+		       (work32[7]&0x00000800)?"":"not ");
+
+	len += sprintf(buf+len, " MAC Reporting mode: ");
+	/*
+	 * Bits 11-12 of the filter mask select the mode.  Test the
+	 * combined value first: the old chain checked bit 0x0800 before
+	 * mask 0x1800, which made the "promiscuous" branch unreachable.
+	 */
+	if ((work32[6]&0x00001800) == 0x00001800)
+		len += sprintf(buf+len, "Pass all MAC packets (promiscuous)\n");
+	else if (work32[6]&0x00001000)
+		len += sprintf(buf+len, "Pass all MAC packets\n");
+	else if (work32[6]&0x00000800)
+		len += sprintf(buf+len, "Pass only priority MAC packets\n");
+	else
+		len += sprintf(buf+len, "Do not pass MAC packets\n");
+
+	/* "addesses" -> "addresses" (user-visible typo) */
+	len += sprintf(buf+len, "Number of multicast addresses: %d\n", work32[8]);
+	len += sprintf(buf+len, "Perfect filtering for max %d multicast addresses\n",
+		       work32[9]);
+	len += sprintf(buf+len, "Imperfect filtering for max %d multicast addresses\n",
+		       work32[10]);
+
+	spin_unlock(&i2o_proc_lock);
+
+	return len;
+}
+
+/* LAN group 0001h, field 1 - Current MAC (scalar) */
+/*
+ * /proc read handler: current MAC address only (single field query).
+ * Returns bytes written to buf.
+ */
+int i2o_proc_read_lan_curr_addr(char *buf, char **start, off_t offset, int len,
+				int *eof, void *data)
+{
+	struct i2o_device *dev = (struct i2o_device*)data;
+	static u32 addr[2];		/* shared; serialized by i2o_proc_lock */
+	static u8 *mac = (u8*)addr;
+	int rc;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	/* Query just field 2 of ParamGroup 0001h (8 bytes) */
+	rc = i2o_query_scalar(dev->controller, dev->id, proc_context,
+			      0x0001, 2, &addr, 8, &i2o_proc_token);
+	if(rc < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Current address: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       mac[0],mac[1],mac[2],mac[3],
+		       mac[4],mac[5],mac[6],mac[7]);
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+
+#if 0
+/* LAN group 0002h - Multicast MAC address table (table) */
+/*
+ * /proc read handler: multicast MAC table.  Currently compiled out
+ * (#if 0); kept as a reference for the table-query path.
+ * NOTE(review): only the first returned row is printed, and the
+ * "break" statements after each "return len" are unreachable.
+ */
+int i2o_proc_read_lan_mcast_addr(char *buf, char **start, off_t offset, int len,
+				 int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u8 work8[32];
+	static u32 field32[8];
+	static u8 *field8 = (u8 *)field32;
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_table_polled(d->controller, d->id, &work8, 32,
+				       0x0002, 0, field32, 8);
+
+	switch (token) {
+	case -ETIMEDOUT:
+		len += sprintf(buf, "Timeout reading table.\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+		break;
+	case -ENOMEM:
+		len += sprintf(buf, "No free memory to read the table.\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+		break;
+	case -EBADR:
+		len += sprintf(buf, "Error reading field.\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+		break;
+	default:
+		break;
+	}
+
+	len += sprintf(buf, "Multicast MAC address: "
+		       "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       field8[0],field8[1],field8[2],field8[3],
+		       field8[4],field8[5],field8[6],field8[7]);
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+#endif
+
+/* LAN group 0003h - Batch Control (scalar) */
+/*
+ * /proc read handler: batch-mode settings from ParamGroup 0003h.
+ * Field layout differs between I2O versions: the load delay/threshold
+ * fields only exist when d->i2oversion == 0x00 (reserved in 1.53/2.0).
+ * Returns bytes written to buf.
+ */
+int i2o_proc_read_lan_batch_control(char *buf, char **start, off_t offset,
+				    int len, int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[18];		/* shared; serialized by i2o_proc_lock */
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0003, -1, &work32, 72, &i2o_proc_token);
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	/* work32[0]: bit 0 = batch disabled, bit 1 = current setting,
+	 * bit 2 = forced (vs toggle) */
+	len += sprintf(buf, "Batch mode ");
+	if (work32[0]&0x00000001)
+		len += sprintf(buf+len, "disabled");
+	else
+		len += sprintf(buf+len, "enabled");
+	if (work32[0]&0x00000002)
+		len += sprintf(buf+len, " (current setting)");
+	if (work32[0]&0x00000004)
+		len += sprintf(buf+len, ", forced");
+	else
+		len += sprintf(buf+len, ", toggle");
+	len += sprintf(buf+len, "\n");
+
+	if(d->i2oversion == 0x00) { /* Reserved in 1.53 and 2.0 */
+		/* Raw values are in units of 100us, hence /10 for ms */
+		len += sprintf(buf+len, "Rising Load Delay: %d ms\n",
+			       work32[1]/10);
+		len += sprintf(buf+len, "Rising Load Threshold: %d ms\n",
+			       work32[2]/10);
+		len += sprintf(buf+len, "Falling Load Delay: %d ms\n",
+			       work32[3]/10);
+		len += sprintf(buf+len, "Falling Load Threshold: %d ms\n",
+			       work32[4]/10);
+	}
+
+	len += sprintf(buf+len, "Max Rx Batch Count: %d\n", work32[5]);
+	len += sprintf(buf+len, "Max Rx Batch Delay: %d\n", work32[6]);
+
+	/* work32[7..8] changed meaning between I2O versions */
+	if(d->i2oversion == 0x00) {
+		len += sprintf(buf+len,
+			       "Transmission Completion Reporting Delay: %d ms\n",
+			       work32[7]);
+	} else {
+		len += sprintf(buf+len, "Max Tx Batch Delay: %d\n", work32[7]);
+		len += sprintf(buf+len, "Max Tx Batch Count: %d\n", work32[8]);
+	}
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+/* LAN group 0004h - LAN Operation (scalar) */
+/*
+ * /proc read handler: LAN operation settings (prepadding, error
+ * reporting, orphan limit, Tx/Rx offload mode bits) from ParamGroup
+ * 0004h.  Returns bytes written to buf.
+ */
+int i2o_proc_read_lan_operation(char *buf, char **start, off_t offset, int len,
+				int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[5];		/* shared; serialized by i2o_proc_lock */
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0004, -1, &work32, 20, &i2o_proc_token);
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Packet prepadding (32b words): %d\n", work32[0]);
+	len += sprintf(buf+len, "Transmission error reporting: %s\n",
+		       (work32[1]&1)?"on":"off");
+	len += sprintf(buf+len, "Bad packet handling: %s\n",
+		       (work32[1]&0x2)?"by host":"by DDM");
+	len += sprintf(buf+len, "Packet orphan limit: %d\n", work32[2]);
+
+	/* work32[3]: Tx mode bit mask */
+	len += sprintf(buf+len, "Tx modes:\n");
+	if (work32[3]&0x00000004)
+		/* was "supressed" -- user-visible typo */
+		len += sprintf(buf+len, " HW CRC suppressed\n");
+	else
+		len += sprintf(buf+len, " HW CRC\n");
+	if (work32[3]&0x00000100)
+		len += sprintf(buf+len, " HW IPv4 checksumming\n");
+	if (work32[3]&0x00000200)
+		len += sprintf(buf+len, " HW TCP checksumming\n");
+	if (work32[3]&0x00000400)
+		len += sprintf(buf+len, " HW UDP checksumming\n");
+	if (work32[3]&0x00000800)
+		len += sprintf(buf+len, " HW RSVP checksumming\n");
+	if (work32[3]&0x00001000)
+		len += sprintf(buf+len, " HW ICMP checksumming\n");
+	if (work32[3]&0x00002000)
+		len += sprintf(buf+len, " Loopback packet not delivered\n");
+
+	/* work32[4]: Rx mode bit mask */
+	len += sprintf(buf+len, "Rx modes:\n");
+	if (work32[4]&0x00000004)
+		len += sprintf(buf+len, " FCS in payload\n");
+	if (work32[4]&0x00000100)
+		len += sprintf(buf+len, " HW IPv4 checksum validation\n");
+	if (work32[4]&0x00000200)
+		len += sprintf(buf+len, " HW TCP checksum validation\n");
+	if (work32[4]&0x00000400)
+		len += sprintf(buf+len, " HW UDP checksum validation\n");
+	if (work32[4]&0x00000800)
+		len += sprintf(buf+len, " HW RSVP checksum validation\n");
+	if (work32[4]&0x00001000)
+		len += sprintf(buf+len, " HW ICMP checksum validation\n");
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+/* LAN group 0005h - Media operation (scalar) */
+int i2o_proc_read_lan_media_operation(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[9];
+ static u8 *work8 = (u8*)work32;
+ static u64 *work64 = (u64*)work32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0005, -1, &work32, 36, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Connector type: ");
+ switch(work32[0])
+ {
+ case 0x00000000:
+ len += sprintf(buf+len, "OTHER\n");
+ break;
+ case 0x00000001:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case 0x00000002:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case 0x00000003:
+ len += sprintf(buf+len, "UTP\n");
+ break;
+ case 0x00000004:
+ len += sprintf(buf+len, "BNC\n");
+ break;
+ case 0x00000005:
+ len += sprintf(buf+len, "RJ45\n");
+ break;
+ case 0x00000006:
+ len += sprintf(buf+len, "STP DB9\n");
+ break;
+ case 0x00000007:
+ len += sprintf(buf+len, "FIBER MIC\n");
+ break;
+ case 0x00000008:
+ len += sprintf(buf+len, "APPLE AUI\n");
+ break;
+ case 0x00000009:
+ len += sprintf(buf+len, "MII\n");
+ break;
+ case 0x0000000A:
+ len += sprintf(buf+len, "DB9\n");
+ break;
+ case 0x0000000B:
+ len += sprintf(buf+len, "HSSDC\n");
+ break;
+ case 0x0000000C:
+ len += sprintf(buf+len, "DUPLEX SC FIBER\n");
+ break;
+ case 0x0000000D:
+ len += sprintf(buf+len, "DUPLEX ST FIBER\n");
+ break;
+ case 0x0000000E:
+ len += sprintf(buf+len, "TNC/BNC\n");
+ break;
+ case 0xFFFFFFFF:
+ len += sprintf(buf+len, "HW DEFAULT\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connection type: ");
+ switch(work32[1])
+ {
+ case I2O_LAN_UNKNOWN:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case I2O_LAN_AUI:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case I2O_LAN_10BASE5:
+ len += sprintf(buf+len, "10BASE5\n");
+ break;
+ case I2O_LAN_FIORL:
+ len += sprintf(buf+len, "FIORL\n");
+ break;
+ case I2O_LAN_10BASE2:
+ len += sprintf(buf+len, "10BASE2\n");
+ break;
+ case I2O_LAN_10BROAD36:
+ len += sprintf(buf+len, "10BROAD36\n");
+ break;
+ case I2O_LAN_10BASE_T:
+ len += sprintf(buf+len, "10BASE-T\n");
+ break;
+ case I2O_LAN_10BASE_FP:
+ len += sprintf(buf+len, "10BASE-FP\n");
+ break;
+ case I2O_LAN_10BASE_FB:
+ len += sprintf(buf+len, "10BASE-FB\n");
+ break;
+ case I2O_LAN_10BASE_FL:
+ len += sprintf(buf+len, "10BASE-FL\n");
+ break;
+ case I2O_LAN_100BASE_TX:
+ len += sprintf(buf+len, "100BASE-TX\n");
+ break;
+ case I2O_LAN_100BASE_FX:
+ len += sprintf(buf+len, "100BASE-FX\n");
+ break;
+ case I2O_LAN_100BASE_T4:
+ len += sprintf(buf+len, "100BASE-T4\n");
+ break;
+ case I2O_LAN_1000BASE_SX:
+ len += sprintf(buf+len, "1000BASE-SX\n");
+ break;
+ case I2O_LAN_1000BASE_LX:
+ len += sprintf(buf+len, "1000BASE-LX\n");
+ break;
+ case I2O_LAN_1000BASE_CX:
+ len += sprintf(buf+len, "1000BASE-CX\n");
+ break;
+ case I2O_LAN_1000BASE_T:
+ len += sprintf(buf+len, "1000BASE-T\n");
+ break;
+ case I2O_LAN_100VG_ETHERNET:
+ len += sprintf(buf+len, "100VG-ETHERNET\n");
+ break;
+ case I2O_LAN_100VG_TR:
+ len += sprintf(buf+len, "100VG-TOKEN RING\n");
+ break;
+ case I2O_LAN_4MBIT:
+ len += sprintf(buf+len, "4MBIT TOKEN RING\n");
+ break;
+ case I2O_LAN_16MBIT:
+ len += sprintf(buf+len, "16 Mb Token Ring\n");
+ break;
+ case I2O_LAN_125MBAUD:
+ len += sprintf(buf+len, "125 MBAUD FDDI\n");
+ break;
+ case I2O_LAN_POINT_POINT:
+ len += sprintf(buf+len, "Point-to-point\n");
+ break;
+ case I2O_LAN_ARB_LOOP:
+ len += sprintf(buf+len, "Arbitrated loop\n");
+ break;
+ case I2O_LAN_PUBLIC_LOOP:
+ len += sprintf(buf+len, "Public loop\n");
+ break;
+ case I2O_LAN_FABRIC:
+ len += sprintf(buf+len, "Fabric\n");
+ break;
+ case I2O_LAN_EMULATION:
+ len += sprintf(buf+len, "Emulation\n");
+ break;
+ case I2O_LAN_OTHER:
+ len += sprintf(buf+len, "Other\n");
+ break;
+ case I2O_LAN_DEFAULT:
+ len += sprintf(buf+len, "HW default\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Current Tx Wire Speed: " FMT_U64_HEX " bps\n",
+ U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "Current Rx Wire Speed: " FMT_U64_HEX " bps\n",
+ U64_VAL(&work64[2]));
+
+ len += sprintf(buf+len, "%s duplex\n", (work8[24]&1)?"Full":"Half");
+
+ len += sprintf(buf+len, "Link status: ");
+ if(work8[25] == 0x00)
+ len += sprintf(buf+len, "Unknown\n");
+ else if(work8[25] == 0x01)
+ len += sprintf(buf+len, "Normal\n");
+ else if(work8[25] == 0x02)
+ len += sprintf(buf+len, "Failure\n");
+ else if(work8[25] == 0x03)
+ len += sprintf(buf+len, "Reset\n");
+ else
+ len += sprintf(buf+len, "Unspecified\n");
+
+ if (d->i2oversion == 0x00) { /* Reserved in 1.53 and 2.0 */
+ len += sprintf(buf+len, "Bad packets handled by: %s\n",
+ (work8[26] == 0xFF)?"host":"DDM");
+ }
+ if (d->i2oversion != 0x00) {
+ len += sprintf(buf+len, "Duplex mode target: ");
+ switch (work8[27]) {
+ case 0:
+ len += sprintf(buf+len, "Half Duplex\n");
+ break;
+ case 1:
+ len += sprintf(buf+len, "Full Duplex\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connector type target: ");
+ switch(work32[7])
+ {
+ case 0x00000000:
+ len += sprintf(buf+len, "OTHER\n");
+ break;
+ case 0x00000001:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case 0x00000002:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case 0x00000003:
+ len += sprintf(buf+len, "UTP\n");
+ break;
+ case 0x00000004:
+ len += sprintf(buf+len, "BNC\n");
+ break;
+ case 0x00000005:
+ len += sprintf(buf+len, "RJ45\n");
+ break;
+ case 0x00000006:
+ len += sprintf(buf+len, "STP DB9\n");
+ break;
+ case 0x00000007:
+ len += sprintf(buf+len, "FIBER MIC\n");
+ break;
+ case 0x00000008:
+ len += sprintf(buf+len, "APPLE AUI\n");
+ break;
+ case 0x00000009:
+ len += sprintf(buf+len, "MII\n");
+ break;
+ case 0x0000000A:
+ len += sprintf(buf+len, "DB9\n");
+ break;
+ case 0x0000000B:
+ len += sprintf(buf+len, "HSSDC\n");
+ break;
+ case 0x0000000C:
+ len += sprintf(buf+len, "DUPLEX SC FIBER\n");
+ break;
+ case 0x0000000D:
+ len += sprintf(buf+len, "DUPLEX ST FIBER\n");
+ break;
+ case 0x0000000E:
+ len += sprintf(buf+len, "TNC/BNC\n");
+ break;
+ case 0xFFFFFFFF:
+ len += sprintf(buf+len, "HW DEFAULT\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connection type target: ");
+ switch(work32[8])
+ {
+ case I2O_LAN_UNKNOWN:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case I2O_LAN_AUI:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case I2O_LAN_10BASE5:
+ len += sprintf(buf+len, "10BASE5\n");
+ break;
+ case I2O_LAN_FIORL:
+ len += sprintf(buf+len, "FIORL\n");
+ break;
+ case I2O_LAN_10BASE2:
+ len += sprintf(buf+len, "10BASE2\n");
+ break;
+ case I2O_LAN_10BROAD36:
+ len += sprintf(buf+len, "10BROAD36\n");
+ break;
+ case I2O_LAN_10BASE_T:
+ len += sprintf(buf+len, "10BASE-T\n");
+ break;
+ case I2O_LAN_10BASE_FP:
+ len += sprintf(buf+len, "10BASE-FP\n");
+ break;
+ case I2O_LAN_10BASE_FB:
+ len += sprintf(buf+len, "10BASE-FB\n");
+ break;
+ case I2O_LAN_10BASE_FL:
+ len += sprintf(buf+len, "10BASE-FL\n");
+ break;
+ case I2O_LAN_100BASE_TX:
+ len += sprintf(buf+len, "100BASE-TX\n");
+ break;
+ case I2O_LAN_100BASE_FX:
+ len += sprintf(buf+len, "100BASE-FX\n");
+ break;
+ case I2O_LAN_100BASE_T4:
+ len += sprintf(buf+len, "100BASE-T4\n");
+ break;
+ case I2O_LAN_1000BASE_SX:
+ len += sprintf(buf+len, "1000BASE-SX\n");
+ break;
+ case I2O_LAN_1000BASE_LX:
+ len += sprintf(buf+len, "1000BASE-LX\n");
+ break;
+ case I2O_LAN_1000BASE_CX:
+ len += sprintf(buf+len, "1000BASE-CX\n");
+ break;
+ case I2O_LAN_1000BASE_T:
+ len += sprintf(buf+len, "1000BASE-T\n");
+ break;
+ case I2O_LAN_100VG_ETHERNET:
+ len += sprintf(buf+len, "100VG-ETHERNET\n");
+ break;
+ case I2O_LAN_100VG_TR:
+ len += sprintf(buf+len, "100VG-TOKEN RING\n");
+ break;
+ case I2O_LAN_4MBIT:
+ len += sprintf(buf+len, "4MBIT TOKEN RING\n");
+ break;
+ case I2O_LAN_16MBIT:
+ len += sprintf(buf+len, "16 Mb Token Ring\n");
+ break;
+ case I2O_LAN_125MBAUD:
+ len += sprintf(buf+len, "125 MBAUD FDDI\n");
+ break;
+ case I2O_LAN_POINT_POINT:
+ len += sprintf(buf+len, "Point-to-point\n");
+ break;
+ case I2O_LAN_ARB_LOOP:
+ len += sprintf(buf+len, "Arbitrated loop\n");
+ break;
+ case I2O_LAN_PUBLIC_LOOP:
+ len += sprintf(buf+len, "Public loop\n");
+ break;
+ case I2O_LAN_FABRIC:
+ len += sprintf(buf+len, "Fabric\n");
+ break;
+ case I2O_LAN_EMULATION:
+ len += sprintf(buf+len, "Emulation\n");
+ break;
+ case I2O_LAN_OTHER:
+ len += sprintf(buf+len, "Other\n");
+ break;
+ case I2O_LAN_DEFAULT:
+ len += sprintf(buf+len, "HW default\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+ }
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+#if 0
+/* LAN group 0006h - Alternate address (table) */
+int i2o_proc_read_lan_alt_addr(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u8 work8[32];
+ static u32 field32[2];
+ static u8 *field8 = (u8 *)field32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_table_polled(d->controller, d->id, &work8, 32,
+ 0x0006, 0, field32, 8);
+ switch (token) {
+ case -ETIMEDOUT:
+ len += sprintf(buf, "Timeout reading table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -ENOMEM:
+ len += sprintf(buf, "No free memory to read the table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -EBADR:
+ len += sprintf(buf, "Error reading field.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ default:
+ break;
+ }
+
+ len += sprintf(buf, "Alternate Address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ field8[0],field8[1],field8[2],field8[3],
+ field8[4],field8[5],field8[6],field8[7]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+#endif
+
+/* LAN group 0007h - Transmit info (scalar) */
+int i2o_proc_read_lan_tx_info(char *buf, char **start, off_t offset, int len,
+				int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[10];	/* scalar fields of group 0007h */
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0007, -1, &work32, sizeof(work32), &i2o_proc_token);	/* was 8: too small, work32[0..4] (20 bytes) are consumed below */
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Max SG Elements per packet: %d\n", work32[0]);
+	len += sprintf(buf+len, "Max SG Elements per chain: %d\n", work32[1]);
+	len += sprintf(buf+len, "Max outstanding packets: %d\n", work32[2]);
+	len += sprintf(buf+len, "Max packets per request: %d\n", work32[3]);
+
+	len += sprintf(buf+len, "Tx modes:\n");	/* decode the TxModes bit mask in work32[4] */
+	if(work32[4]&0x00000002)
+		len += sprintf(buf+len, " No DA in SGL\n");
+	if(work32[4]&0x00000004)
+		len += sprintf(buf+len, " CRC suppression\n");
+	if(work32[4]&0x00000008)
+		len += sprintf(buf+len, " Loop suppression\n");
+	if(work32[4]&0x00000010)
+		len += sprintf(buf+len, " MAC insertion\n");
+	if(work32[4]&0x00000020)
+		len += sprintf(buf+len, " RIF insertion\n");
+	if(work32[4]&0x00000100)
+		len += sprintf(buf+len, " IPv4 Checksum\n");
+	if(work32[4]&0x00000200)
+		len += sprintf(buf+len, " TCP Checksum\n");
+	if(work32[4]&0x00000400)
+		len += sprintf(buf+len, " UDP Checksum\n");
+	if(work32[4]&0x00000800)
+		len += sprintf(buf+len, " RSVP Checksum\n");
+	if(work32[4]&0x00001000)
+		len += sprintf(buf+len, " ICMP Checksum\n");
+	if (d->i2oversion == 0x00) {	/* NOTE(review): loopback bits appear to sit at different positions per i2o version — confirm against spec */
+		if(work32[4]&0x00008000)
+			len += sprintf(buf+len, " Loopback Enabled\n");
+		if(work32[4]&0x00010000)
+			len += sprintf(buf+len, " Loopback Suppression Enabled\n");
+	} else {
+		if(work32[4]&0x00010000)
+			len += sprintf(buf+len, " Loopback Enabled\n");
+		if(work32[4]&0x00020000)
+			len += sprintf(buf+len, " Loopback Suppression Enabled\n");
+	}
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+/* LAN group 0008h - Receive info (scalar) */
+int i2o_proc_read_lan_rx_info(char *buf, char **start, off_t offset, int len,
+				int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u32 work32[10];	/* scalar fields of group 0008h */
+	int token;
+
+	spin_lock(&i2o_proc_lock);
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0008, -1, &work32, sizeof(work32), &i2o_proc_token);	/* was 8: too small, work32[0..5] (24 bytes) are consumed below */
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "Max size of chain element: %d\n", work32[0]);
+	len += sprintf(buf+len, "Max number of buckets: %d\n", work32[1]);
+
+	if (d->i2oversion > 0x00) { /* not in 1.5 */
+		len += sprintf(buf+len, "Rx modes: %d\n", work32[2]);
+		len += sprintf(buf+len, "RxMaxBucketsReply: %d\n", work32[3]);
+		len += sprintf(buf+len, "RxMaxPacketsPerBuckets: %d\n", work32[4]);
+		len += sprintf(buf+len, "RxMaxPostBuckets: %d\n", work32[5]);
+	}
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+
+/* LAN group 0100h - LAN Historical statistics (scalar) */
+int i2o_proc_read_lan_hist_stats(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[9];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0100, -1, &work64, 9*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Tx packets: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "Tx bytes: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "Rx packets: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "Rx bytes: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "Tx errors: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "Rx errors: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "Rx dropped: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "Adapter resets: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "Adapter suspends: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+/* LAN group 0182h - Optional Non Media Specific Transmit Historical Statistics
+ * (scalar) */
+int i2o_proc_read_lan_opt_tx_hist_stats(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[9];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0182, -1, &work64, 9*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "TxRetryCount: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "DirectedBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "DirectedPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "MulticastBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "MulticastPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "BroadcastBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "BroadcastPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "TotalGroupAddrTxCount: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "TotalTxPacketsTooShort: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0183h - Optional Non Media Specific Receive Historical Statistics
+ * (scalar) */
+int i2o_proc_read_lan_opt_rx_hist_stats(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[11];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0183, -1, &work64, 11*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "ReceiveCRCErrorCount: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "DirectedBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "DirectedPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "MulticastBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "MulticastPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "BroadcastBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "BroadcastPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "TotalGroupAddrRxCount: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "TotalRxPacketsTooShort: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+ len += sprintf(buf+len, "TotalRxPacketsTooLong: " FMT_U64_HEX "\n", U64_VAL(&work64[9]));
+ len += sprintf(buf+len, "TotalRuntPacketsReceived: " FMT_U64_HEX "\n", U64_VAL(&work64[10]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+/* LAN group 0400h - Required FDDI Statistics (scalar) */
+int i2o_proc_read_lan_fddi_stats(char *buf, char **start, off_t offset,
+				 int len, int *eof, void *data)
+{
+	struct i2o_device *d = (struct i2o_device*)data;
+	static u64 work64[11];	/* scalar fields of group 0400h */
+	int token;
+
+	static char *conf_state[] =
+	{
+		"Isolated",
+		"Local a",
+		"Local b",
+		"Local ab",
+		"Local s",
+		"Wrap a",
+		"Wrap b",
+		"Wrap ab",
+		"Wrap s",
+		"C-Wrap a",
+		"C-Wrap b",
+		"C-Wrap s",
+		"Through",
+	};
+
+	static char *ring_state[] =
+	{
+		"Isolated",
+		"Non-op",
+		"Ring-op",	/* was "Rind-op" */
+		"Detect",
+		"Non-op-Dup",
+		"Ring-op-Dup",
+		"Directed",
+		"Trace"
+	};
+
+	static char *link_state[] =
+	{
+		"Off",
+		"Break",
+		"Trace",
+		"Connect",
+		"Next",
+		"Signal",
+		"Join",
+		"Verify",
+		"Active",
+		"Maintenance"
+	};
+
+	spin_lock(&i2o_proc_lock);
+
+	len = 0;
+
+	token = i2o_query_scalar(d->controller, d->id, proc_context,
+				 0x0400, -1, &work64, 11*8, &i2o_proc_token);
+	if(token < 0)
+	{
+		len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+	len += sprintf(buf, "ConfigurationState: %s\n", (work64[0] < sizeof(conf_state)/sizeof(conf_state[0])) ? conf_state[work64[0]] : "Unknown");	/* guard IOP-supplied index */
+	len += sprintf(buf+len, "UpstreamNode: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+	len += sprintf(buf+len, "DownStreamNode: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+	len += sprintf(buf+len, "FrameErrors: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+	len += sprintf(buf+len, "FramesLost: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+	len += sprintf(buf+len, "RingMgmtState: %s\n", (work64[5] < sizeof(ring_state)/sizeof(ring_state[0])) ? ring_state[work64[5]] : "Unknown");	/* guard IOP-supplied index */
+	len += sprintf(buf+len, "LCTFailures: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+	len += sprintf(buf+len, "LEMRejects: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+	len += sprintf(buf+len, "LEMCount: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+	len += sprintf(buf+len, "LConnectionState: %s\n", (work64[9] < sizeof(link_state)/sizeof(link_state[0])) ? link_state[work64[9]] : "Unknown");	/* guard IOP-supplied index */
+
+	spin_unlock(&i2o_proc_lock);
+	return len;
+}
+
+static int i2o_proc_create_entries(void *data,
+	i2o_proc_entry *pentry, struct proc_dir_entry *parent)	/* register a NULL-terminated table of proc entries under parent; returns 0, or -1 on first failure */
+{
+	struct proc_dir_entry *ent;
+
+	while(pentry->name != NULL)	/* table terminated by a NULL name */
+	{
+		ent = create_proc_entry(pentry->name, pentry->mode, parent);
+		if(!ent) return -1;	/* entries created so far are left in place */
+
+		ent->data = data;	/* handed back to the read/write handlers */
+		ent->read_proc = pentry->read_proc;
+		ent->write_proc = pentry->write_proc;
+		ent->nlink = 1;
+
+		pentry++;
+	}
+
+	return 0;
+}
+
+static void i2o_proc_remove_entries(i2o_proc_entry *pentry,
+	struct proc_dir_entry *parent)	/* unregister every entry of a NULL-terminated table from parent */
+{
+	while(pentry->name != NULL)
+	{
+		remove_proc_entry(pentry->name, parent);
+		pentry++;
+	}
+}
+
+static int i2o_proc_add_controller(struct i2o_controller *pctrl,
+	struct proc_dir_entry *root )	/* create /proc/i2o/iopN plus one subdir per device; -1 if the iop dir itself fails */
+{
+	struct proc_dir_entry *dir, *dir1;
+	struct i2o_device *dev;
+	char buff[10];
+
+	sprintf(buff, "iop%d", pctrl->unit);
+
+	dir = create_proc_entry(buff, S_IFDIR, root);
+	if(!dir)
+		return -1;
+
+	pctrl->proc_entry = dir;
+
+	i2o_proc_create_entries(pctrl, generic_iop_entries, dir);
+
+	for(dev = pctrl->devices; dev; dev = dev->next)
+	{
+		sprintf(buff, "%0#5x", dev->id);
+
+		dir1 = create_proc_entry(buff, S_IFDIR, dir);
+		dev->proc_entry = dir1;
+
+		if(!dir1) {
+			printk(KERN_INFO "i2o_proc: Could not allocate proc dir\n"); continue; }	/* was: fell through and registered entries under a NULL parent */
+
+		i2o_proc_create_entries(dev, generic_dev_entries, dir1);
+
+		switch(dev->class)
+		{
+			case I2O_CLASS_SCSI_PERIPHERAL:
+			case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+				i2o_proc_create_entries(dev, rbs_dev_entries, dir1);
+				break;
+			case I2O_CLASS_LAN:
+				i2o_proc_create_entries(dev, lan_entries, dir1);
+				break;
+			default:
+				break;
+		}
+	}
+
+	return 0;
+}
+
+static void i2o_proc_remove_controller(struct i2o_controller *pctrl,
+	struct proc_dir_entry *parent)	/* tear down /proc/i2o/iopN for this controller */
+{
+	char buff[10];
+
+	sprintf(buff, "iop%d", pctrl->unit);
+
+	i2o_proc_remove_entries(generic_iop_entries, pctrl->proc_entry);	/* NOTE(review): per-device subdirs made in i2o_proc_add_controller are never removed here — confirm */
+
+	remove_proc_entry(buff, parent);
+
+	pctrl->proc_entry = NULL;
+}
+
+static int create_i2o_procfs(void)	/* build /proc/i2o and populate it for every registered controller; -1 if the root dir fails */
+{
+	struct i2o_controller *pctrl = NULL;
+	int i;
+
+	i2o_proc_dir_root = create_proc_entry("i2o", S_IFDIR, 0);	/* /proc/i2o */
+	if(!i2o_proc_dir_root)
+		return -1;
+
+	for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+	{
+		pctrl = i2o_find_controller(i);
+		if(pctrl)
+			i2o_proc_add_controller(pctrl, i2o_proc_dir_root);
+	};	/* (stray ';' — harmless empty statement) */
+
+	return 0;
+}
+
+static int destroy_i2o_procfs(void)	/* remove per-controller trees and then /proc/i2o itself */
+{
+	struct i2o_controller *pctrl = NULL;
+	int i;
+
+	if(!i2o_find_controller(0))	/* NOTE(review): bails out (leaving /proc/i2o in place) unless controller 0 exists — confirm intent */
+		return -1;
+
+	for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+	{
+		pctrl = i2o_find_controller(i);
+		if(pctrl)
+			i2o_proc_remove_controller(pctrl, i2o_proc_dir_root);
+	};	/* (stray ';' — harmless empty statement) */
+
+	remove_proc_entry("i2o", 0);
+	return 0;
+}
+
+#ifdef MODULE
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("I2O procfs Handler");
+
+int init_module(void)	/* module entry: build /proc/i2o, then register the reply handler that services the proc reads */
+{
+	if(create_i2o_procfs())
+		return -EBUSY;
+
+	if (i2o_install_handler(&i2o_proc_handler) < 0)
+	{
+		printk(KERN_ERR "i2o_proc: Unable to install PROC handler.\n");
+		destroy_i2o_procfs(); return -EBUSY;	/* was "return 0": claimed success with no handler installed, leaving dead proc entries */
+	}
+
+	proc_context = i2o_proc_handler.context;
+
+	return 0;
+}
+
+void cleanup_module(void)	/* module exit: tear down /proc/i2o and unregister the reply handler */
+{
+	destroy_i2o_procfs();
+	i2o_remove_handler(&i2o_proc_handler);
+}
+#endif
--- /dev/null
+#ifndef i2oproc_h
+#define i2oproc_h
+
+/*
+ * Fixme: make this dependent on architecture
+ * The official header files already do this... but we can't use them
+ */
+#define I2O_64BIT_CONTEXT 0
+
+typedef struct _i2o_msg {
+ u8 ver_offset;
+ u8 msg_flags;
+ u16 msg_size;
+ u32 target_addr:12;
+ u32 initiator_addr:12;
+ u32 function:8;
+ u32 init_context; /* FIXME: 64-bit support! */
+} i2o_msg, *pi2o_msg;
+
+typedef struct _i2o_reply_message {
+ i2o_msg msg_frame;
+ u32 tctx; /* FIXME: 64-bit */
+ u16 detailed_status_code;
+ u8 reserved;
+ u8 req_status;
+} i2o_reply_msg, *pi2o_reply_msg;
+
+typedef struct _i2o_mult_reply_message {
+ i2o_msg msg_frame;
+ u32 tctx; /* FIXME: 64-bit */
+ u16 detailed_status_code;
+ u8 reserved;
+ u8 req_status;
+} i2o_mult_reply_msg, *pi2o_mult_reply_msg;
+
+/**************************************************************************
+ * HRT related constants and structures
+ **************************************************************************/
+#define I2O_BUS_LOCAL 0
+#define I2O_BUS_ISA 1
+#define I2O_BUS_EISA 2
+#define I2O_BUS_MCA 3
+#define I2O_BUS_PCI 4
+#define I2O_BUS_PCMCIA 5
+#define I2O_BUS_NUBUS 6
+#define I2O_BUS_CARDBUS 7
+#define I2O_BUS_UNKNOWN 0x80
+
+typedef struct _i2o_pci_bus {
+ u8 PciFunctionNumber;
+ u8 PciDeviceNumber;
+ u8 PciBusNumber;
+ u8 reserved;
+ u16 PciVendorID;
+ u16 PciDeviceID;
+} i2o_pci_bus, *pi2o_pci_bus;
+
+typedef struct _i2o_local_bus {
+ u16 LbBaseIOPort;
+ u16 reserved;
+ u32 LbBaseMemoryAddress;
+} i2o_local_bus, *pi2o_local_bus;
+
+typedef struct _i2o_isa_bus {
+ u16 IsaBaseIOPort;
+ u8 CSN;
+ u8 reserved;
+ u32 IsaBaseMemoryAddress;
+} i2o_isa_bus, *pi2o_isa_bus;
+
+/* I2O_EISA_BUS_INFO */
+typedef struct _i2o_eisa_bus_info {
+ u16 EisaBaseIOPort;
+ u8 reserved;
+ u8 EisaSlotNumber;
+ u32 EisaBaseMemoryAddress;
+} i2o_eisa_bus, *pi2o_eisa_bus;
+
+typedef struct _i2o_mca_bus {
+ u16 McaBaseIOPort;
+ u8 reserved;
+ u8 McaSlotNumber;
+ u32 McaBaseMemoryAddress;
+} i2o_mca_bus, *pi2o_mca_bus;
+
+typedef struct _i2o_other_bus {
+ u16 BaseIOPort;
+ u16 reserved;
+ u32 BaseMemoryAddress;
+} i2o_other_bus, *pi2o_other_bus;
+
+
+typedef struct _i2o_hrt_entry {
+ u32 adapter_id;
+ u32 parent_tid:12;
+ u32 state:4;
+ u32 bus_num:8;
+ u32 bus_type:8;
+ union {
+ i2o_pci_bus pci_bus;
+ i2o_local_bus local_bus;
+ i2o_isa_bus isa_bus;
+ i2o_eisa_bus eisa_bus;
+ i2o_mca_bus mca_bus;
+ i2o_other_bus other_bus;
+ } bus;
+} i2o_hrt_entry, *pi2o_hrt_entry;
+
+typedef struct _i2o_hrt {
+ u16 num_entries;
+ u8 entry_len;
+ u8 hrt_version;
+ u32 change_ind;
+ i2o_hrt_entry hrt_entry[1];
+} i2o_hrt, *pi2o_hrt;
+
+typedef struct _i2o_lct_entry {
+ u32 entry_size:16;
+ u32 tid:12;
+ u32 reserved:4;
+ u32 change_ind;
+ u32 device_flags;
+ u32 class_id;
+ u32 sub_class;
+ u32 user_tid:12;
+ u32 parent_tid:12;
+ u32 bios_info:8;
+ u8 identity_tag[8];
+ u32 event_capabilities;
+} i2o_lct_entry, *pi2o_lct_entry;
+
+typedef struct _i2o_lct {
+ u32 table_size:16;
+ u32 boot_tid:12;
+ u32 lct_ver:4;
+ u32 iop_flags;
+ u32 current_change_ind;
+ i2o_lct_entry lct_entry[1];
+} i2o_lct, *pi2o_lct;
+
+#endif /* i2oproc_h */
--- /dev/null
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Complications for I2O scsi
+ *
+ * o Each (bus,lun) is a logical device in I2O. We keep a map
+ * table. We spoof failed selection for unmapped units
+ * o Request sense buffers can come back for free.
+ * o Scatter gather is a bit dynamic. We have to investigate at
+ * setup time.
+ * o Some of our resources are dynamically shared. The i2o core
+ * needs a message reservation protocol to avoid swap v net
+ * deadlocking. We need to back off queue requests.
+ *
+ * In general the firmware wants to help. Where its help isn't performance
+ * useful we just ignore the aid. It's not worth the code in truth.
+ *
+ * Fixes:
+ * Steve Ralston : Scatter gather now works
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <linux/blk.h>
+#include <linux/version.h>
+#include <linux/i2o.h>
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include "../scsi/sd.h"
+#include "i2o_scsi.h"
+
+#define VERSION_STRING "Version 0.0.1"
+
+#define dprintk(x)
+
+#define MAXHOSTS 32
+
+struct proc_dir_entry proc_scsi_i2o_scsi = {
+ PROC_SCSI_I2O, 8, "i2o_scsi", S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+struct i2o_scsi_host
+{
+ struct i2o_controller *controller;
+ s16 task[16][8]; /* Allow 16 devices for now */
+ unsigned long tagclock[16][8]; /* Tag clock for queueing */
+ s16 bus_task; /* The adapter TID */
+};
+
+static int scsi_context;
+static int lun_done;
+static int i2o_scsi_hosts;
+
+static u32 *retry[32];
+static struct i2o_controller *retry_ctrl[32];
+static struct timer_list retry_timer;
+static int retry_ct = 0;
+
+static atomic_t queue_depth;
+
+/*
+ * SG Chain buffer support...
+ */
+#define SG_MAX_FRAGS 64
+
+/*
+ * FIXME: we should allocate one of these per bus we find as we
+ * locate them not in a lump at boot.
+ */
+
+typedef struct _chain_buf
+{
+ u32 sg_flags_cnt[SG_MAX_FRAGS];
+ u32 sg_buf[SG_MAX_FRAGS];
+} chain_buf;
+
+#define SG_CHAIN_BUF_SZ sizeof(chain_buf)
+
+#define SG_MAX_BUFS (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
+#define SG_CHAIN_POOL_SZ (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)
+
+static int max_sg_len = 0;
+static chain_buf *sg_chain_pool = NULL;
+static int sg_chain_tag = 0;
+static int sg_max_frags = SG_MAX_FRAGS;
+
+/*
+ * Retry congested frames. This actually needs pushing down into
+ * i2o core. We should only bother the OSM with this when we can't
+ * queue and retry the frame. Or perhaps we should call the OSM
+ * and its default handler should be this in the core, and this
+ * call a 2nd "I give up" handler in the OSM ?
+ */
+
+static void i2o_retry_run(unsigned long f)	/* timer handler: repost the frames the IOP rejected as congested */
+{
+	int i;
+	unsigned long flags;
+
+	save_flags(flags);	/* the retry list is also filled from the reply path */
+	cli();
+
+	for(i=0;i<retry_ct;i++)
+		i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
+	retry_ct=0;
+
+	restore_flags(flags);
+}
+
+static void flush_pending(void)	/* discard queued retries: rewrite each frame's function code to NOP and post it back */
+{
+	int i;
+	unsigned long flags;
+
+	save_flags(flags);	/* the retry list is also filled from the reply path */
+	cli();
+
+	for(i=0;i<retry_ct;i++)
+	{
+		retry[i][0]&=0x00FFFFFF;	/* was &=~0xFFFFFF: kept the old function code, so the NOP below was ORed into it; match the NOP flush in i2o_scsi_reply */
+		retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
+		i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
+	}
+	retry_ct=0;
+
+	restore_flags(flags);
+}
+
+static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)	/* I2O reply handler for the SCSI OSM: completes, fails, or queues a retry for a command */
+{
+	Scsi_Cmnd *current_command;
+	u32 *m = (u32 *)msg;
+	u8 as,ds,st;
+
+	if(m[0] & (1<<13))	/* NOTE(review): presumably the message-fail flag — confirm against the I2O spec */
+	{
+		printk("IOP fail.\n");
+		printk("From %d To %d Cmd %d.\n",
+			(m[1]>>12)&0xFFF,
+			m[1]&0xFFF,
+			m[1]>>24);
+		printk("Failure Code %d.\n", m[4]>>24);
+		if(m[4]&(1<<16))
+			printk("Format error.\n");
+		if(m[4]&(1<<17))
+			printk("Path error.\n");
+		if(m[4]&(1<<18))
+			printk("Path State.\n");
+		if(m[4]&(1<<18))	/* FIXME: same mask as the "Path State" test above — one of the two bits is likely wrong */
+			printk("Congestion.\n");
+
+		m=(u32 *)bus_to_virt(m[7]);	/* m[7] points at the original (failed) request frame */
+		printk("Failing message is %p.\n", m);
+
+		if((m[4]&(1<<18)) && retry_ct < 32)	/* FIXME: m now points at the request, so this tests the request's word 4, not the reply's failure flags */
+		{
+			retry_ctrl[retry_ct]=c;
+			retry[retry_ct]=m;
+			if(!retry_ct++)	/* first queued entry arms the retry timer */
+			{
+				retry_timer.expires=jiffies+1;
+				add_timer(&retry_timer);
+			}
+		}
+		else
+		{
+			/* Create a scsi error for this */
+			current_command = (Scsi_Cmnd *)m[3];	/* transaction context carries the Scsi_Cmnd pointer (32-bit assumption) */
+			printk("Aborted %ld\n", current_command->serial_number);
+
+			spin_lock_irq(&io_request_lock);
+			current_command->result = DID_ERROR << 16;
+			current_command->scsi_done(current_command);
+			spin_unlock_irq(&io_request_lock);
+
+			/* Now flush the message by making it a NOP */
+			m[0]&=0x00FFFFFF;
+			m[0]|=(I2O_CMD_UTIL_NOP)<<24;
+			i2o_post_message(c,virt_to_bus(m));
+		}
+		return;
+	}
+
+
+	/* Low byte is the adapter status, next is the device */
+	as=(u8)m[4];
+	ds=(u8)(m[4]>>8);
+	st=(u8)(m[4]>>24);
+
+	dprintk(("i2o got a scsi reply %08X: ", m[0]));
+	dprintk(("m[2]=%08X: ", m[2]));
+	dprintk(("m[4]=%08X\n", m[4]));
+
+	if(m[2]&0x80000000)	/* NOTE(review): high context bits appear to mark internal (non-command) replies — confirm */
+	{
+		if(m[2]&0x40000000)
+		{
+			dprintk(("Event.\n"));
+			lun_done=1;	/* wake the synchronous i2o_query_scalar() callers in this file */
+			return;
+		}
+		printk(KERN_ERR "i2o_scsi: bus reset reply.\n");
+		return;
+	}
+
+	current_command = (Scsi_Cmnd *)m[3];	/* transaction context carries the Scsi_Cmnd pointer (32-bit assumption) */
+
+	/*
+	 *	Is this a control request coming back - eg an abort ?
+	 */
+
+	if(current_command==NULL)
+	{
+		if(st)
+			dprintk(("SCSI abort: %08X", m[4]));
+		dprintk(("SCSI abort completed.\n"));
+		return;
+	}
+
+	dprintk(("Completed %ld\n", current_command->serial_number));
+
+	atomic_dec(&queue_depth);
+
+	if(st == 0x06)	/* NOTE(review): handled as a data-underrun status — confirm code against spec */
+	{
+		if(m[5] < current_command->underflow)
+		{
+			int i;
+			printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
+				m[5], current_command->underflow);
+			printk("Cmd: ");
+			for(i=0;i<15;i++)
+				printk("%02X ", current_command->cmnd[i]);
+			printk(".\n");
+		}
+		else st=0;	/* transfer length acceptable: not an error */
+	}
+
+	if(st)
+	{
+		/* An error has occurred */
+
+		dprintk((KERN_DEBUG "SCSI error %08X", m[4]));
+
+		if (ds == 0x0E)
+			/* SCSI Reset */
+			current_command->result = DID_RESET << 16;
+		else if (ds == 0x0F)
+			current_command->result = DID_PARITY << 16;
+		else
+			current_command->result = DID_ERROR << 16;
+	}
+	else
+		/*
+		 *	It worked maybe ?
+		 */
+		current_command->result = DID_OK << 16 | ds;
+	spin_lock(&io_request_lock);
+	current_command->scsi_done(current_command);
+	spin_unlock(&io_request_lock);
+	return;
+}
+
+struct i2o_handler i2o_scsi_handler=
+{
+ i2o_scsi_reply,
+ "I2O SCSI OSM",
+ 0
+};
+
+static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)	/* read the SCSI target id (field 3) and LUN (field 4) of an I2O device; 0 on success, -1 on query failure */
+{
+	u8 reply[8];
+
+	if(i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+		0, 3, reply, 4, &lun_done)<0)	/* NOTE(review): group 0 presumed to be the SCSI device-info group — confirm */
+		return -1;
+
+	*target=reply[0];	/* low byte of field 3 */
+
+	if(i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+		0, 4, reply, 8, &lun_done)<0)
+		return -1;
+
+	*lun=reply[1];	/* NOTE(review): second byte of field 4 — confirm field layout */
+
+	dprintk(("SCSI (%d,%d)\n", *target, *lun));
+	return 0;
+}
+
+static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
+{
+ struct i2o_device *unit;
+ struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
+ int lun;
+ int target;
+
+ h->controller=c;
+ h->bus_task=d->id;
+
+ for(target=0;target<16;target++)
+ for(lun=0;lun<8;lun++)
+ h->task[target][lun] = -1;
+
+ for(unit=c->devices;unit!=NULL;unit=unit->next)
+ {
+ dprintk(("Class %03X, parent %d, want %d.\n",
+ unit->class, unit->parent, d->id));
+
+ /* Only look at scsi and fc devices */
+ if ( (unit->class != I2O_CLASS_SCSI_PERIPHERAL)
+ && (unit->class != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
+ )
+ continue;
+
+ /* On our bus ? */
+ dprintk(("Found a disk.\n"));
+ if ( (unit->parent == d->id)
+ || (unit->parent == d->parent)
+ )
+ {
+ u16 limit;
+ dprintk(("Its ours.\n"));
+ if(i2o_find_lun(c, unit, &target, &lun)==-1)
+ {
+ printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", d->id);
+ continue;
+ }
+ dprintk(("Found disk %d %d.\n", target, lun));
+ h->task[target][lun]=unit->id;
+ h->tagclock[target][lun]=jiffies;
+
+ /* Get the max fragments/request */
+ i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+ 0xF103, 3, &limit, 2, &lun_done);
+
+ /* sanity */
+ if ( limit == 0 )
+ {
+ printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
+ limit = 1;
+ }
+
+ shpnt->sg_tablesize = limit;
+
+ dprintk(("i2o_scsi: set scatter-gather to %d.\n",
+ shpnt->sg_tablesize));
+ }
+ }
+}
+
+int i2o_scsi_detect(Scsi_Host_Template * tpnt)	/* scan all IOPs for bus-adapter/FC ports; registers one Scsi_Host per port, returns the host count */
+{
+	unsigned long flags;
+	struct Scsi_Host *shpnt = NULL;
+	int i;
+	int count;
+
+	printk("i2o_scsi.c: %s\n", VERSION_STRING);
+
+	if(i2o_install_handler(&i2o_scsi_handler)<0)
+	{
+		printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
+		return 0;
+	}
+	scsi_context = i2o_scsi_handler.context;
+
+	if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
+	{
+		printk("i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
+		printk("i2o_scsi: SG chaining DISABLED!\n");
+		sg_max_frags = 11;	/* fall back to the SG elements that fit in a single frame */
+	}
+	else
+	{
+		printk(" chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
+		printk(" (%d byte buffers X %d can_queue X %d i2o controllers)\n",
+			SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
+		sg_max_frags = SG_MAX_FRAGS; // 64
+	}
+
+	init_timer(&retry_timer);
+	retry_timer.data = 0UL;
+	retry_timer.function = i2o_retry_run;
+
+// printk("SCSI OSM at %d.\n", scsi_context);
+
+	for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
+	{
+		struct i2o_controller *c=i2o_find_controller(i);
+		struct i2o_device *d;
+		/*
+		 *	This controller doesn't exist.
+		 */
+
+		if(c==NULL)
+			continue;
+
+		/*
+		 *	Fixme - we need some altered device locking. This
+		 *	is racing with device addition in theory. Easy to fix.
+		 */
+
+		for(d=c->devices;d!=NULL;d=d->next)
+		{
+			/*
+			 *	bus_adapter, SCSI (obsolete), or FibreChannel busses only
+			 */
+			if(    (d->class!=I2O_CLASS_BUS_ADAPTER_PORT)	// bus_adapter
+			    && (d->class!=I2O_CLASS_FIBRE_CHANNEL_PORT)	// FC_PORT
+			  )
+				continue;
+
+//			printk("Found a controller.\n");
+			shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host)); if(shpnt==NULL) continue;	/* was: NULL result dereferenced below */
+			save_flags(flags);
+			cli();
+			shpnt->unique_id = (u32)d;	/* NOTE(review): truncates the pointer on 64-bit — confirm */
+			shpnt->io_port = 0;
+			shpnt->n_io_port = 0;
+			shpnt->irq = 0;
+			shpnt->this_id = /* Good question */ 15;
+			restore_flags(flags);
+//			printk("Scanning I2O port %d.\n", d->id);
+			i2o_scsi_init(c, d, shpnt);
+			count++;
+		}
+	}
+	i2o_scsi_hosts = count;
+
+	if(count==0)	/* nothing registered: undo the shared setup done above */
+	{
+		if(sg_chain_pool!=NULL)
+		{
+			kfree(sg_chain_pool);
+			sg_chain_pool = NULL;
+		}
+		flush_pending();
+		del_timer(&retry_timer);
+		i2o_remove_handler(&i2o_scsi_handler);
+	}
+
+	return count;
+}
+
+int i2o_scsi_release(struct Scsi_Host *host)	/* per-host teardown; frees the shared OSM state when the last host goes away */
+{
+	if(--i2o_scsi_hosts==0)
+	{
+		if(sg_chain_pool!=NULL)
+		{
+			kfree(sg_chain_pool);
+			sg_chain_pool = NULL;
+		}
+		flush_pending();
+		del_timer(&retry_timer);
+		i2o_remove_handler(&i2o_scsi_handler);
+	}
+	return 0;
+}
+
+
+const char *i2o_scsi_info(struct Scsi_Host *SChost)	/* report the owning I2O controller's name for this host */
+{
+	struct i2o_scsi_host *hostdata;
+
+	hostdata = (struct i2o_scsi_host *)SChost->hostdata;
+
+	return(&hostdata->controller->name[0]);
+}
+
+
+/*
+ * From the wd93 driver:
+ * Returns true if there will be a DATA_OUT phase with this command,
+ * false otherwise.
+ * (Thanks to Joerg Dorchain for the research and suggestion.)
+ *
+ */
+static int is_dir_out(Scsi_Cmnd *cmd)	/* 1 if the opcode implies a DATA_OUT phase, else 0 (see header comment above) */
+{
+	switch (cmd->cmnd[0])
+	{
+		case WRITE_6: case WRITE_10: case WRITE_12:
+		case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER:
+		case WRITE_VERIFY: case WRITE_VERIFY_12:
+		case COMPARE: case COPY: case COPY_VERIFY:
+		case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+		case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12:
+		case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+		case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT:
+		case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK:
+		case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+		case 0xea:	/* NOTE(review): opcode 0xea — presumably a vendor write command; confirm */
+			return 1;
+		default:
+			return 0;
+	}
+}
+
+int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+{
+ int i;
+ int tid;
+ struct i2o_controller *c;
+ Scsi_Cmnd *current_command;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 *msg, *mptr;
+ u32 m;
+ u32 *lenptr;
+ int direction;
+ int scsidir;
+ u32 len;
+
+ static int max_qd = 1;
+
+ /*
+ * The scsi layer should be handling this stuff
+ */
+
+ if(is_dir_out(SCpnt))
+ {
+ direction=0x04000000;
+ scsidir=0x80000000;
+ }
+ else
+ {
+ scsidir=0x40000000;
+ direction=0x00000000;
+ }
+
+ /*
+ * Do the incoming paperwork
+ */
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ SCpnt->scsi_done = done;
+
+ if(SCpnt->target > 15)
+ {
+ printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->target);
+ return -1;
+ }
+
+ tid = hostdata->task[SCpnt->target][SCpnt->lun];
+
+ dprintk(("qcmd: Tid = %d\n", tid));
+
+ current_command = SCpnt; /* set current command */
+ current_command->scsi_done = done; /* set ptr to done function */
+
+ /* We don't have such a device. Pretend we did the command
+ and that selection timed out */
+
+ if(tid == -1)
+ {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ dprintk(("Real scsi messages.\n"));
+
+ c = hostdata->controller;
+
+ /*
+ * Obtain an I2O message. Right now we _have_ to obtain one
+ * until the scsi layer stuff is cleaned up.
+ */
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF);
+ msg = bus_to_virt(c->mem_offset + m);
+
+ /*
+ * Put together a scsi execscb message
+ */
+
+ msg[1] = I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context; /* So the I2O layer passes to us */
+ /* Sorry 64bit folks. FIXME */
+ msg[3] = (u32)SCpnt; /* We want the SCSI control block back */
+ /* Direction, disconnect ok, no tagging (yet) */
+ msg[4] = scsidir|(1<<29)|SCpnt->cmd_len;
+
+ /*
+ * Attach tags to the devices
+ */
+ if(SCpnt->device->tagged_supported)
+ {
+ /*
+ * Some drives are too stupid to handle fairness issues
+ * with tagged queueing. We throw in the odd ordered
+ * tag to stop them starving themselves.
+ */
+ if((jiffies - hostdata->tagclock[SCpnt->target][SCpnt->lun]) > (5*HZ))
+ {
+ msg[4]|=(1<<23)|(1<<24);
+ hostdata->tagclock[SCpnt->target][SCpnt->lun]=jiffies;
+ }
+ else switch(SCpnt->tag)
+ {
+ case SIMPLE_QUEUE_TAG:
+ msg[4]|=(1<<23);
+ break;
+ case HEAD_OF_QUEUE_TAG:
+ msg[4]|=(1<<24);
+ break;
+ case ORDERED_QUEUE_TAG:
+ msg[4]|=(1<<23)|(1<<24);
+ break;
+ default:
+ msg[4]|=(1<<23);
+ }
+ }
+
+ mptr=msg+5;
+
+ /*
+ * Write SCSI command into the message - always 16 byte block
+ */
+
+ memcpy(mptr, SCpnt->cmnd, 16);
+ mptr+=4;
+ lenptr=mptr++; /* Remember me - fill in when we know */
+
+
+ /*
+ * Now fill in the SGList and command
+ *
+ * FIXME: we need to set the sglist limits according to the
+ * message size of the I2O controller. We might only have room
+ * for 6 or so worst case
+ */
+
+ if(SCpnt->use_sg)
+ {
+ struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
+
+ if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
+ {
+ /*
+ * Need to chain!
+ */
+ SCpnt->host_scribble = (void*)(sg_chain_pool + sg_chain_tag);
+ *mptr++=direction|0xB0000000|(SCpnt->use_sg*2*4);
+ *mptr=virt_to_bus(SCpnt->host_scribble);
+ mptr = (u32*)SCpnt->host_scribble;
+ if (SCpnt->use_sg > max_sg_len)
+ {
+ max_sg_len = SCpnt->use_sg;
+ printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
+ SCpnt, SCpnt->use_sg, (chain_buf*)SCpnt->host_scribble-sg_chain_pool);
+ }
+ if ( ++sg_chain_tag == SG_MAX_BUFS )
+ sg_chain_tag = 0;
+ }
+
+ len = 0;
+
+ for(i = 0 ; i < SCpnt->use_sg; i++)
+ {
+ *mptr++=direction|0x10000000|sg->length;
+ len+=sg->length;
+ *mptr++=virt_to_bus(sg->address);
+ sg++;
+ }
+ mptr[-2]|=0xC0000000; /* End of List and block */
+ *lenptr=len;
+ if(len != SCpnt->underflow)
+ printk("Cmd len %08X Cmd underflow %08X\n",
+ len, SCpnt->underflow);
+ }
+ else
+ {
+ dprintk(("non sg for %p, %d\n", SCpnt->request_buffer,
+ SCpnt->request_bufflen));
+ *mptr++=0xD0000000|direction|SCpnt->request_bufflen;
+ *mptr++=virt_to_bus(SCpnt->request_buffer);
+ *lenptr = len = SCpnt->request_bufflen;
+ /* No transfer ? - fix up the request */
+ if(len == 0)
+ msg[4]&=~0xC0000000;
+ }
+
+ /*
+ * Stick the headers on
+ */
+
+ msg[0] = (mptr-msg)<<16 | SGL_OFFSET_10;
+
+ /* Queue the message */
+ i2o_post_message(c,m);
+
+ atomic_inc(&queue_depth);
+
+ if(atomic_read(&queue_depth)> max_qd)
+ {
+ max_qd=atomic_read(&queue_depth);
+ printk("Queue depth now %d.\n", max_qd);
+ }
+
+ mb();
+ dprintk(("Issued %ld\n", current_command->serial_number));
+
+ return 0;
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int i2o_scsi_command(Scsi_Cmnd * SCpnt)
+{
+ i2o_scsi_queuecommand(SCpnt, internal_done);
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+int i2o_scsi_abort(Scsi_Cmnd * SCpnt)
+{
+ struct i2o_controller *c;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 *msg;
+ u32 m;
+ int tid;
+
+ printk("i2o_scsi_abort\n");
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ tid = hostdata->task[SCpnt->target][SCpnt->lun];
+ if(tid==-1)
+ {
+ printk(KERN_ERR "impossible command to abort.\n");
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+ c = hostdata->controller;
+
+ /*
+ * Obtain an I2O message. Right now we _have_ to obtain one
+ * until the scsi layer stuff is cleaned up.
+ */
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF);
+ msg = bus_to_virt(c->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE;
+ msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context;
+ msg[3] = 0; /* Not needed for an abort */
+ msg[4] = (u32)SCpnt;
+ wmb();
+ i2o_post_message(c,m);
+ wmb();
+// SCpnt->result = DID_RESET << 16;
+// SCpnt->scsi_done(SCpnt);
+ return SCSI_ABORT_PENDING;
+}
+
+int i2o_scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ int tid;
+ struct i2o_controller *c;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 m;
+ u32 *msg;
+
+ printk("i2o_scsi_reset\n");
+
+ /*
+ * Find the TID for the bus
+ */
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ tid = hostdata->bus_task;
+ c = hostdata->controller;
+
+ /*
+ * Now send a SCSI reset request. Any remaining commands
+ * will be aborted by the IOP. We need to catch the reply
+ * possibly ?
+ */
+
+ m = I2O_POST_READ32(c);
+
+ /*
+ * No free messages, try again next time - no big deal
+ */
+
+ if(m == 0xFFFFFFFF)
+ return SCSI_RESET_PUNT;
+
+ msg = bus_to_virt(c->mem_offset + m);
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context|0x80000000;
+ /* We use the top bit to split controller and unit transactions */
+ /* Now store unit,tid so we can tie the completion back to a specific device */
+ msg[3] = c->unit << 16 | tid;
+ i2o_post_message(c,m);
+ return SCSI_RESET_PENDING;
+}
+
+/*
+ * This is anyones guess quite frankly.
+ */
+
+int i2o_scsi_bios_param(Disk * disk, kdev_t dev, int *ip)
+{
+ int size;
+
+ size = disk->capacity;
+ ip[0] = 64; /* heads */
+ ip[1] = 32; /* sectors */
+ if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
+ ip[0] = 255; /* heads */
+ ip[1] = 63; /* sectors */
+ ip[2] = size / (255 * 63); /* cylinders */
+ }
+ return 0;
+}
+
+/* Loadable module support */
+#ifdef MODULE
+
+MODULE_AUTHOR("Red Hat Software");
+
+Scsi_Host_Template driver_template = I2OSCSI;
+
+#include "../scsi/scsi_module.c"
+#endif
--- /dev/null
+#ifndef _I2O_SCSI_H
+#define _I2O_SCSI_H
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+#define I2O_SCSI_ID 15
+#define I2O_SCSI_CAN_QUEUE 8
+#define I2O_SCSI_CMD_PER_LUN 6
+
+extern struct proc_dir_entry proc_scsi_i2o_scsi;
+
+extern int i2o_scsi_detect(Scsi_Host_Template *);
+extern const char *i2o_scsi_info(struct Scsi_Host *);
+extern int i2o_scsi_command(Scsi_Cmnd *);
+extern int i2o_scsi_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int i2o_scsi_abort(Scsi_Cmnd *);
+extern int i2o_scsi_reset(Scsi_Cmnd *, unsigned int);
+extern int i2o_scsi_bios_param(Disk *, kdev_t, int *);
+extern void i2o_scsi_setup(char *str, int *ints);
+
+#define I2OSCSI { \
+ next: NULL, \
+ proc_dir: &proc_scsi_i2o_scsi, \
+ name: "I2O SCSI Layer", \
+ detect: i2o_scsi_detect, \
+ release: i2o_scsi_release, \
+ info: i2o_scsi_info, \
+ command: i2o_scsi_command, \
+ queuecommand: i2o_scsi_queuecommand, \
+ abort: i2o_scsi_abort, \
+ reset: i2o_scsi_reset, \
+ bios_param: i2o_scsi_bios_param, \
+ can_queue: I2O_SCSI_CAN_QUEUE, \
+ this_id: I2O_SCSI_ID, \
+ sg_tablesize: 8, \
+ cmd_per_lun: I2O_SCSI_CMD_PER_LUN, \
+ unchecked_isa_dma: 0, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif
ifndef CONFIG_MBX
L_OBJS := via-cuda.o macio-adb.o via-pmu.o mediabay.o
+endif
+ifeq ($(CONFIG_MAC_KEYBOARD),y)
LX_OBJS := adb.o
endif
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/wait.h>
#include <asm/prom.h>
#include <asm/adb.h>
#include <asm/cuda.h>
spinlock_t lock;
atomic_t n_pending;
struct adb_request *completed;
- wait_queue_head_t wait_queue;
+ wait_queue_head_t wait_queue;
int inuse;
};
spin_lock_init(&state->lock);
atomic_set(&state->n_pending, 0);
state->completed = NULL;
- state->wait_queue = NULL;
+ init_waitqueue_head(&state->wait_queue);
state->inuse = 1;
return 0;
int ret;
struct adbdev_state *state = file->private_data;
struct adb_request *req;
- DECLARE_WAITQUEUE(wait, current);
+ wait_queue_t wait = __WAITQUEUE_INITIALIZER(wait,current);
unsigned long flags;
if (count < 2)
extern int console_loglevel;
extern struct kbd_struct kbd_table[];
-extern struct wait_queue_head_t keypress_wait;
extern void handle_scancode(unsigned char, int);
* memory if large numbers of serial ports are open.
*/
static unsigned char tmp_buf[4096]; /* This is cheating */
-static struct semaphore tmp_buf_sem = MUTEX;
+DECLARE_MUTEX(tmp_buf_sem);
-__openfirmware
-static inline int serial_paranoia_check(struct mac_serial *info,
+static inline int __pmac
+serial_paranoia_check(struct mac_serial *info,
dev_t device, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
/*
* Reading and writing Z8530 registers.
*/
-static inline unsigned char read_zsreg(struct mac_zschannel *channel,
+static inline unsigned char __pmac read_zsreg(struct mac_zschannel *channel,
unsigned char reg)
{
unsigned char retval;
return retval;
}
-static inline void write_zsreg(struct mac_zschannel *channel,
+static inline void __pmac write_zsreg(struct mac_zschannel *channel,
unsigned char reg, unsigned char value)
{
unsigned long flags;
return;
}
-static inline unsigned char read_zsdata(struct mac_zschannel *channel)
+static inline unsigned char __pmac read_zsdata(struct mac_zschannel *channel)
{
unsigned char retval;
info->tqueue.data = info;
info->callout_termios =callout_driver.init_termios;
info->normal_termios = serial_driver.init_termios;
- info->open_wait = 0;
- info->close_wait = 0;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
info->timeout = HZ;
printk("tty%02d at 0x%08x (irq = %d)", info->line,
info->port, info->irq);
o lp doesn't allow you to read status while printing is in progress.
+See <URL:http://www.cyberelk.demon.co.uk/parport.html>.
#
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
+#
+# Note 3! Parport is the Borg. We have assimilated some other
+# drivers in the `char', `net' and `scsi' directories, but left them
+# there to allay suspicion.
SUB_DIRS :=
MOD_SUB_DIRS := $(SUB_DIRS)
3. Support more hardware (eg m68k, Sun bpp).
4. A better PLIP (make use of bidirectional/ECP/EPP ports).
+
+See <URL:http://www.cyberelk.demon.co.uk/parport.html>.
}
-static void amiga_release_resources(struct parport *p)
-{
-DPRINTK("realease_resources\n");
- if (p->irq != PARPORT_IRQ_NONE)
- free_irq(IRQ_AMIGA_CIAA_FLG, p);
-}
-
-static int amiga_claim_resources(struct parport *p)
-{
-DPRINTK("claim_resources\n");
- return request_irq(IRQ_AMIGA_CIAA_FLG, amiga_interrupt, 0, p->name, p);
-}
-
static void amiga_init_state(struct parport_state *s)
{
s->u.amiga.data = 0;
amiga_change_mode,
- amiga_release_resources,
- amiga_claim_resources,
-
-
NULL, /* epp_write_data */
NULL, /* epp_read_data */
NULL, /* epp_write_addr */
printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
+ if (request_irq(IRQ_AMIGA_CIAA_FLG, amiga_interrupt, 0,
+ p->name, p)) {
+ parport_unregister_port (p);
+ return 0;
+ }
if (parport_probe_hook)
(*parport_probe_hook)(p);
+
+ parport_announce_port (p);
+
return 1;
}
void cleanup_module(void)
{
- if (!(this_port->flags & PARPORT_FLAG_COMA))
- parport_quiesce(this_port);
+	if (this_port->irq != PARPORT_IRQ_NONE)
+		free_irq(IRQ_AMIGA_CIAA_FLG, this_port);
parport_proc_unregister(this_port);
parport_unregister_port(this_port);
}
#include <asm/arch/oldlatches.h>
#include <asm/arch/irqs.h>
-#define DATA_LATCH 0x3350010
+#define DATA_ADDRESS 0x3350010
-/* ARC can't read from the data latch, so we must use a soft copy. */
+/* This is equivalent to the above and only used for request_region. */
+#define PORT_BASE 0x80000000 | ((DATA_ADDRESS - IO_BASE) >> 2)
+
+/* The hardware can't read from the data latch, so we must use a soft
+ copy. */
static unsigned char data_copy;
+/* These are pretty simple. We know the irq is never shared and the
+ kernel does all the magic that's required. */
+static void arc_enable_irq(struct parport *p)
+{
+ enable_irq(p->irq);
+}
+
+static void arc_disable_irq(struct parport *p)
+{
+ disable_irq(p->irq);
+}
+
static void arc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
parport_generic_irq(irq, (struct parport *) dev_id, regs);
static void arc_write_data(struct parport *p, unsigned char data)
{
data_copy = data;
- outb(data, DATA_LATCH);
+	outb_t(data, DATA_ADDRESS);
}
static unsigned char arc_read_data(struct parport *p)
NULL, /* change_mode */
- arc_release_resources,
- arc_claim_resources,
-
NULL, /* epp_write_data */
NULL, /* epp_read_data */
NULL, /* epp_write_addr */
/* Archimedes hardware provides only one port, at a fixed address */
struct parport *p;
- if (check_region(DATA_LATCH, 4))
+ if (check_region(PORT_BASE, 4))
return 0;
- if (!(p = parport_register_port(base, IRQ_PRINTERACK,
- PARPORT_DMA_NONE, &parport_arc_ops)))
+ p = parport_register_port(base, IRQ_PRINTERACK,
+ PARPORT_DMA_NONE, &parport_arc_ops);
+
+ if (!p)
return 0;
p->modes = PARPORT_MODE_ARCSPP;
- p->size = 4;
+ p->size = 1;
printk(KERN_INFO "%s: Archimedes on-board port, using irq %d\n",
p->irq);
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
if (parport_probe_hook)
(*parport_probe_hook)(p);
+ /* Tell the high-level drivers about the port. */
+ parport_announce_port (p);
+
return 1;
}
parport_generic_irq(irq, (struct parport *) dev_id, regs);
}
-static void
-parport_atari_release_resources(struct parport *p)
-{
- if (p->irq != PARPORT_IRQ_NONE)
- free_irq(IRQ_MFP_BUSY, p);
-}
-
-static int
-parport_atari_claim_resources(struct parport *p)
-{
- return request_irq(IRQ_MFP_BUSY, parport_atari_interrupt,
- IRQ_TYPE_SLOW, p->name, p);
-}
-
static void
parport_atari_inc_use_count(void)
{
NULL, /* change_mode */
- parport_atari_release_resources,
- parport_atari_claim_resources,
-
NULL, /* epp_write_data */
NULL, /* epp_read_data */
NULL, /* epp_write_addr */
&parport_atari_ops);
if (!p)
return 0;
+ if (request_irq(IRQ_MFP_BUSY, parport_atari_interrupt,
+ IRQ_TYPE_SLOW, p->name, p)) {
+ parport_unregister_port (p);
+ return 0;
+ }
+
this_port = p;
printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
if (parport_probe_hook)
(*parport_probe_hook)(p);
+
+ parport_announce_port (p);
+
return 1;
}
return 0;
void
cleanup_module(void)
{
- if (!(this_port->flags & PARPORT_FLAG_COMA))
- parport_quiesce(this_port);
+	if (this_port->irq != PARPORT_IRQ_NONE)
+		free_irq(IRQ_MFP_BUSY, this_port);
parport_proc_unregister(this_port);
parport_unregister_port(this_port);
}
writel(dcsr, (unsigned long)&dma->dcsr);
}
-void
-parport_ax_release_resources(struct parport *p)
-{
- if (p->irq != PARPORT_IRQ_NONE) {
- parport_ax_disable_irq(p);
- free_irq(p->irq, p);
- }
- release_region(p->base, p->size);
- if (p->modes & PARPORT_MODE_PCECR)
- release_region(p->base+0x400, 3);
- release_region((unsigned long)p->private_data,
- sizeof(struct linux_ebus_dma));
-}
-
int
parport_ax_claim_resources(struct parport *p)
{
- /* FIXME check that resources are free */
- int err;
-
- if (p->irq != PARPORT_IRQ_NONE) {
- if ((err = request_irq(p->irq, parport_ax_interrupt,
- 0, p->name, p)) != 0)
- return err;
- else
- parport_ax_enable_irq(p);
- }
- request_region(p->base, p->size, p->name);
- if (p->modes & PARPORT_MODE_PCECR)
- request_region(p->base+0x400, 3, p->name);
- request_region((unsigned long)p->private_data,
- sizeof(struct linux_ebus_dma), p->name);
- return 0;
}
void
parport_ax_change_mode,
- parport_ax_release_resources,
- parport_ax_claim_resources,
-
parport_ax_write_epp,
parport_ax_read_epp,
parport_ax_write_epp_addr,
if (p->dma == PARPORT_DMA_AUTO)
p->dma = (p->modes & PARPORT_MODE_PCECP) ? 0 : PARPORT_DMA_NONE;
+ if (p->irq != PARPORT_IRQ_NONE) {
+ int err;
+ if ((err = request_irq(p->irq, parport_ax_interrupt,
+ 0, p->name, p)) != 0)
+ return err;
+ else
+ parport_ax_enable_irq(p);
+ }
+ request_region(p->base, p->size, p->name);
+ if (p->modes & PARPORT_MODE_PCECR)
+ request_region(p->base+0x400, 3, p->name);
+ request_region((unsigned long)p->private_data,
+ sizeof(struct linux_ebus_dma), p->name);
+
printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
if (p->irq != PARPORT_IRQ_NONE)
printk(", irq %s", __irq_itoa(p->irq));
}
printk("]\n");
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
p->ops->write_control(p, 0x0c);
p->ops->write_data(p, 0);
if (parport_probe_hook)
(*parport_probe_hook)(p);
+ parport_announce_port (p);
+
return 1;
}
while (p) {
tmp = p->next;
if (p->modes & PARPORT_MODE_PCSPP) {
- if (!(p->flags & PARPORT_FLAG_COMA))
- parport_quiesce(p);
+ if (p->irq != PARPORT_IRQ_NONE) {
+ parport_ax_disable_irq(p);
+ free_irq(p->irq, p);
+ }
+ release_region(p->base, p->size);
+ if (p->modes & PARPORT_MODE_PCECR)
+ release_region(p->base+0x400, 3);
+ release_region((unsigned long)p->private_data,
+ sizeof(struct linux_ebus_dma));
parport_proc_unregister(p);
parport_unregister_port(p);
}
#ifdef MODULE
int init_module(void)
{
- (void)parport_proc_init(); /* We can go on without it. */
+#ifdef CONFIG_SYSCTL
+ parport_default_proc_register ();
+#endif
return 0;
}
void cleanup_module(void)
{
- parport_proc_cleanup();
+#ifdef CONFIG_SYSCTL
+ parport_default_proc_unregister ();
+#endif
}
#else
#ifdef CONFIG_PNP_PARPORT
parport_probe_hook = &parport_probe_one;
#endif
-#ifdef CONFIG_PROC_FS
- parport_proc_init();
+#ifdef CONFIG_SYSCTL
+ parport_default_proc_register ();
#endif
+
#ifdef CONFIG_PARPORT_PC
parport_pc_init(io, io_hi, irq, dma);
#endif
#endif
#ifdef CONFIG_PARPORT_ATARI
parport_atari_init();
+#endif
+#ifdef CONFIG_PARPORT_ARC
+ parport_arc_init();
#endif
return 0;
}
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_unregister_port);
-EXPORT_SYMBOL(parport_quiesce);
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
}
}
-static void mfc3_release_resources(struct parport *p)
-{
-DPRINTK("realease_resources\n");
- if (p->irq != PARPORT_IRQ_NONE)
- if (--use_cnt == 0)
- free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops);
-}
-
static int mfc3_claim_resources(struct parport *p)
{
DPRINTK("claim_resources\n");
- if (p->irq != PARPORT_IRQ_NONE)
- if (use_cnt++ == 0)
- if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, 0, p->name, &pp_mfc3_ops))
- return use_cnt--;
- return 0;
}
static void mfc3_init_state(struct parport_state *s)
printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
+
+ if (p->irq != PARPORT_IRQ_NONE)
+ if (use_cnt++ == 0)
+ if (request_irq(IRQ_AMIGA_PORTS, mfc3_interrupt, 0, p->name, &pp_mfc3_ops))
+ use_cnt--;
+
if (parport_probe_hook)
(*parport_probe_hook)(p);
zorro_config_board(key, 0);
p->private_data = (void *)key;
+ parport_announce_port (p);
}
}
}
for (i = 0; i < MAX_MFC; i++)
if (this_port[i] != NULL) {
- if (!(this_port[i]->flags & PARPORT_FLAG_COMA))
- parport_quiesce(this_port[i]);
+			if (this_port[i]->irq != PARPORT_IRQ_NONE)
+ if (--use_cnt == 0)
+ free_irq(IRQ_AMIGA_PORTS, &pp_mfc3_ops);
parport_proc_unregister(this_port[i]);
parport_unregister_port(this_port[i]);
zorro_unconfig_board((unsigned int)this_port[i]->private_data, 0);
* based on work by Grant Guenther <grant@torque.net> and Phil Blundell.
*
* Cleaned up include files - Russell King <linux@arm.uk.linux.org>
+ * Better EPP probing - Carlos Henrique Bauer <chbauer@acm.org>
*/
/* This driver should work with any hardware that is broadly compatible
#include <linux/pci.h>
#include <asm/io.h>
+#include <asm/dma.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
static int user_specified __initdata = 0;
+/*
+ * Clear TIMEOUT BIT in EPP MODE
+ */
+int parport_pc_epp_clear_timeout(struct parport *pb)
+{
+ unsigned char r;
+
+ if (!(parport_pc_read_status(pb) & 0x01))
+ return 1;
+
+ /* To clear timeout some chips require double read */
+ parport_pc_read_status(pb);
+ r = parport_pc_read_status(pb);
+ parport_pc_write_status(pb, r | 0x01); /* Some reset by writing 1 */
+ parport_pc_write_status(pb, r & 0xfe); /* Others by writing 0 */
+ r = parport_pc_read_status(pb);
+
+ return !(r & 0x01);
+}
+
static void parport_pc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
parport_generic_irq(irq, (struct parport *) dev_id, regs);
parport_pc_frob_control(p, 0x10, 0x10);
}
-void parport_pc_release_resources(struct parport *p)
-{
- if (p->irq != PARPORT_IRQ_NONE)
- free_irq(p->irq, p);
- release_region(p->base, p->size);
- if (p->modes & PARPORT_MODE_PCECR)
- release_region(p->base_hi, 3);
-}
-
-int parport_pc_claim_resources(struct parport *p)
-{
- int err;
- if (p->irq != PARPORT_IRQ_NONE)
- if ((err = request_irq(p->irq, parport_pc_interrupt,
- 0, p->name, p)) != 0)
- return err;
- request_region(p->base, p->size, p->name);
- if (p->modes & PARPORT_MODE_PCECR)
- request_region(p->base_hi, 3, p->name);
- return 0;
-}
-
void parport_pc_init_state(struct parport_state *s)
{
s->u.pc.ctr = 0xc;
parport_pc_change_mode,
- parport_pc_release_resources,
- parport_pc_claim_resources,
-
parport_pc_write_epp,
parport_pc_read_epp,
parport_pc_write_epp_addr,
/* --- Mode detection ------------------------------------- */
-/*
- * Clear TIMEOUT BIT in EPP MODE
- */
-int parport_pc_epp_clear_timeout(struct parport *pb)
-{
- unsigned char r;
-
- if (!(parport_pc_read_status(pb) & 0x01))
- return 1;
-
- /* To clear timeout some chips require double read */
- parport_pc_read_status(pb);
- r = parport_pc_read_status(pb);
- parport_pc_write_status(pb, r | 0x01); /* Some reset by writing 1 */
- parport_pc_write_status(pb, r & 0xfe); /* Others by writing 0 */
- r = parport_pc_read_status(pb);
-
- return !(r & 0x01);
-}
-
/*
* Checks for port existence, all ports support SPP MODE
* copy. Some ports _do_ allow reads, so bypass the software
* copy here. In addition, some bits aren't writable. */
r = inb (CONTROL (pb));
- if ((r & 0x3f) == w) {
+ if ((r & 0xf) == w) {
w = 0xe;
parport_pc_write_control (pb, w);
r = inb (CONTROL(pb));
parport_pc_write_control (pb, 0xc);
- if ((r & 0x3f) == w)
+ if ((r & 0xf) == w)
return PARPORT_MODE_PCSPP;
}
if (!parport_pc_epp_clear_timeout(pb))
return 0; /* No way to clear timeout */
+ /*
+ * Theory:
+ * Bit 0 of STR is the EPP timeout bit, this bit is 0
+ * when EPP is possible and is set high when an EPP timeout
+ * occurs (EPP uses the HALT line to stop the CPU while it does
+ * the byte transfer, an EPP timeout occurs if the attached
+ * device fails to respond after 10 micro seconds).
+ *
+ * This bit is cleared by either reading it (National Semi)
+ * or writing a 1 to the bit (SMC, UMC, WinBond), others ???
+ * This bit is always high in non EPP modes.
+ */
+
parport_pc_write_control(pb, parport_pc_read_control(pb) | 0x20);
parport_pc_write_control(pb, parport_pc_read_control(pb) | 0x10);
parport_pc_epp_clear_timeout(pb);
return PARPORT_MODE_PCEPP;
}
+ /*
+ * Theory:
+ * Write two values to the EPP address register and
+ * read them back. When the transfer times out, the state of
+ * the EPP register is undefined in some cases (EPP 1.9?) but
+ * in others (EPP 1.7, ECPEPP?) it is possible to read back
+ * its value.
+ */
+ parport_pc_epp_clear_timeout(pb);
+ udelay(30); /* Wait for possible EPP timeout */
+
+ /* Previous test left outputs disabled. */
+ outb (0x55, EPPADDR (pb));
+
+ parport_pc_epp_clear_timeout(pb);
+ udelay(30); /* Wait for possible EPP timeout */
+
+ /* We must enable the outputs to be able to read the address
+ register. */
+ parport_pc_frob_control (pb, 0x20, 0x00);
+
+ if (inb (EPPADDR (pb)) == 0x55) {
+
+ /* wash ... */
+ parport_pc_frob_control (pb, 0x20, 0x20);
+ outb (0xaa, EPPADDR (pb));
+
+ parport_pc_epp_clear_timeout(pb);
+ udelay(30); /* Wait for possible EPP timeout */
+
+ /* ... and repeat */
+ parport_pc_frob_control (pb, 0x20, 0x00);
+
+ if (inb (EPPADDR (pb)) == 0xaa) {
+ parport_pc_epp_clear_timeout (pb);
+ return PARPORT_MODE_PCEPP;
+ }
+ }
+
return 0;
}
*/
static int __init irq_probe_EPP(struct parport *pb)
{
+#ifndef ADVANCED_DETECT
+ return PARPORT_IRQ_NONE;
+#else
int irqs;
unsigned char octr = parport_pc_read_control(pb);
unsigned char oecr;
-#ifndef ADVANCED_DETECT
- return PARPORT_IRQ_NONE;
-#endif
-
if (pb->modes & PARPORT_MODE_PCECR)
oecr = parport_pc_read_econtrol(pb);
pb->irq = PARPORT_IRQ_NONE;
return pb->irq;
+#endif /* Advanced detection. */
}
static int __init irq_probe_SPP(struct parport *pb)
{
+#ifndef ADVANCED_DETECT
+ /* Don't even try to do this. */
+ return PARPORT_IRQ_NONE;
+#else
int irqs;
unsigned char octr = parport_pc_read_control(pb);
unsigned char oecr;
-#ifndef ADVANCED_DETECT
- return PARPORT_IRQ_NONE;
-#endif
-
if (pb->modes & PARPORT_MODE_PCECR)
oecr = parport_pc_read_econtrol(pb);
probe_irq_off(probe_irq_on()); /* Clear any interrupts */
parport_pc_write_econtrol(pb, oecr);
parport_pc_write_control(pb, octr);
return pb->irq;
+#endif /* Advanced detection. */
}
/* We will attempt to share interrupt requests since other devices
unsigned long int base_hi,
int irq, int dma)
{
- struct parport *p;
+ struct parport_pc_private *priv;
+ struct parport tmp;
+ struct parport *p = &tmp;
int probedirq = PARPORT_IRQ_NONE;
if (check_region(base, 3)) return 0;
- if (!(p = parport_register_port(base, irq, dma, &parport_pc_ops)))
- return 0;
- p->private_data = kmalloc (sizeof (struct parport_pc_private),
- GFP_KERNEL);
- if (!p->private_data) {
- /* Not enough memory. */
+ priv = kmalloc (sizeof (struct parport_pc_private), GFP_KERNEL);
+ if (!priv) {
printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base);
- parport_unregister_port (p);
return 0;
}
- ((struct parport_pc_private *) (p->private_data))->ctr = 0xc;
+ priv->ctr = 0xc;
+ p->base = base;
p->base_hi = base_hi;
+ p->irq = irq;
+ p->dma = dma;
+ p->modes = PARPORT_MODE_PCSPP;
+ p->ops = &parport_pc_ops;
+ p->private_data = priv;
+ if (base_hi && !check_region (base_hi, 3)) {
+ p->modes |= parport_ECR_present (p);
+ p->modes |= parport_ECP_supported (p);
+ p->modes |= parport_ECPPS2_supported (p);
+ }
if (p->base != 0x3bc) {
- if (base_hi && !check_region(base_hi,3)) {
- p->modes |= parport_ECR_present(p);
- p->modes |= parport_ECP_supported(p);
- p->modes |= parport_ECPPS2_supported(p);
- }
if (!check_region(base+0x3, 5)) {
- p->modes |= parport_EPP_supported(p);
- p->modes |= parport_ECPEPP_supported(p);
+ p->modes |= parport_EPP_supported (p);
+ p->modes |= parport_ECPEPP_supported (p);
}
}
if (!parport_SPP_supported(p)) {
/* No port. */
- kfree (p->private_data);
- parport_unregister_port (p);
+ kfree (priv);
return 0;
}
- p->modes |= PARPORT_MODE_PCSPP | parport_PS2_supported(p);
- p->size = (p->modes & (PARPORT_MODE_PCEPP
- | PARPORT_MODE_PCECPEPP))?8:3;
+
+ p->modes |= parport_PS2_supported(p);
+
+ if (!(p = parport_register_port (base, PARPORT_IRQ_NONE,
+ PARPORT_DMA_NONE, &parport_pc_ops))) {
+ kfree (priv);
+ return 0;
+ }
+
+ p->base_hi = base_hi;
+ p->modes = tmp.modes;
+ p->size = (p->modes & PARPORT_MODE_PCEPP) ? 8 : 3;
+ p->private_data = priv;
+
printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && (p->modes & PARPORT_MODE_PCECR))
printk (" (0x%lx)", p->base_hi);
+ p->irq = irq;
+ p->dma = dma;
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
printk("%s: detected irq %d; use procfs to enable interrupt-driven operation.\n", p->name, probedirq);
#endif
parport_proc_register(p);
- p->flags |= PARPORT_FLAG_COMA;
+
+ request_region (p->base, p->size, p->name);
+ if (p->modes & PARPORT_MODE_PCECR)
+ request_region (p->base_hi, 3, p->name);
+
+ if (p->irq != PARPORT_IRQ_NONE) {
+ if (request_irq (p->irq, parport_pc_interrupt,
+ 0, p->name, p)) {
+ printk (KERN_WARNING "%s: irq %d in use, "
+ "resorting to polled operation\n",
+ p->name, p->irq);
+ p->irq = PARPORT_IRQ_NONE;
+ p->dma = PARPORT_DMA_NONE;
+ }
+
+ if (p->dma != PARPORT_DMA_NONE) {
+ if (request_dma (p->dma, p->name)) {
+ printk (KERN_WARNING "%s: dma %d in use, "
+ "resorting to PIO operation\n",
+ p->name, p->dma);
+ p->dma = PARPORT_DMA_NONE;
+ }
+ }
+ }
/* Done probing. Now put the port into a sensible start-up state. */
if (p->modes & PARPORT_MODE_PCECR)
/* Look for PCI parallel port cards. */
static int __init parport_pc_init_pci (int irq, int dma)
{
+/* These need to go in pci.h: */
+#ifndef PCI_VENDOR_ID_SIIG
+#define PCI_VENDOR_ID_SIIG 0x131f
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012
+#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020
+#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036
+#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020
+#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
+#define PCI_VENDOR_ID_LAVA 0x1407
+#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8001 /* The Lava Dual Parallel is */
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8002 /* two PCI devices on a card */
+#endif /* IDs not defined */
+
int count = 0;
#ifdef CONFIG_PCI
int i;
unsigned int hi; /* -ve if not there */
} addr[4];
} cards[] = {
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_550, 1,
+ { { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_650, 1,
+ { { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_10x_850, 1,
+ { { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_10x, 1,
+ { { 2, 3 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_10x, 2,
+ { { 2, 3 }, { 4, 5 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_550, 1,
+ { { 4, 5 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_650, 1,
+ { { 4, 5 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_10x_850, 1,
+ { { 4, 5 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1P_20x, 1,
+ { { 0, 1 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P_20x, 2,
+ { { 0, 1 }, { 2, 3 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_550, 2,
+ { { 1, 2 }, { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_650, 2,
+ { { 1, 2 }, { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2P1S_20x_850, 2,
+ { { 1, 2 }, { 3, 4 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_550, 1,
+ { { 1, 2 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_650, 1,
+ { { 1, 2 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S1P_20x_850, 1,
+ { { 1, 2 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_550, 1,
+ { { 2, 3 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_650, 1,
+ { { 2, 3 }, } },
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850, 1,
+ { { 2, 3 }, } },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_PARALLEL, 1,
+ { { 0, -1 }, } },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_A, 1,
+ { { 0, -1 }, } },
+ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_DUAL_PAR_B, 1,
+ { { 0, -1 }, } },
{ 0, }
};
struct parport *p = parport_enumerate(), *tmp;
while (p) {
tmp = p->next;
- if (p->modes & PARPORT_MODE_PCSPP) {
- if (!(p->flags & PARPORT_FLAG_COMA))
- parport_quiesce(p);
+ if (p->modes & PARPORT_MODE_PCSPP) {
+ if (p->dma != PARPORT_DMA_NONE)
+ free_dma (p->dma);
+ if (p->irq != PARPORT_IRQ_NONE)
+ free_irq (p->irq, p);
+ release_region (p->base, p->size);
+ if (p->modes & PARPORT_MODE_PCECP)
+ release_region (p->base_hi, 3);
parport_proc_unregister(p);
kfree (p->private_data);
parport_unregister_port(p);
-/* Parallel port /proc interface code.
+/* Sysctl interface for parport devices.
*
* Authors: David Campbell <campbell@torque.net>
* Tim Waugh <tim@cyberelk.demon.co.uk>
#include <linux/string.h>
#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
-#include <linux/proc_fs.h>
#include <linux/parport.h>
#include <linux/ctype.h>
+#include <linux/sysctl.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
+#include <asm/uaccess.h>
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_SYSCTL
-struct proc_dir_entry *base = NULL;
-static int irq_write_proc(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int do_active_device(ctl_table *table, int write, struct file *filp,
+ void *result, size_t *lenp)
{
- int retval = -EINVAL;
- int newirq = PARPORT_IRQ_NONE;
- struct parport *pp = (struct parport *)data;
- int oldirq = pp->irq;
-
-/*
- * We can have these valid cases:
- * "none" (count == 4 || count == 5)
- * decimal number (count == 2 || count == 3)
- * octal number (count == 3 || count == 4)
- * hex number (count == 4 || count == 5)
- * all other cases are -EINVAL
- *
- * Note: newirq is alredy set up to NONE.
- *
- * -RF
- */
- if (count > 5 || count < 1)
- goto out;
+ struct parport *port = (struct parport *)table->extra1;
+ char buffer[256];
+ struct pardevice *dev;
+ int len = 0;
- if (isdigit(buffer[0]))
- newirq = simple_strtoul(buffer, NULL, 0);
- else if (strncmp(buffer, "none", 4) != 0) {
- if (buffer[0] < 32)
- /* Things like '\n' are harmless */
- retval = count;
+ if (write) /* can't happen anyway */
+ return -EACCES;
- goto out;
+ if (filp->f_pos) {
+ *lenp = 0;
+ return 0;
+ }
+
+ for (dev = port->devices; dev ; dev = dev->next) {
+ if(dev == port->cad) {
+ len += sprintf(buffer + len, "%s\n", dev->name);
+ }
}
- retval = count;
+ if(!len) {
+ len += sprintf(buffer, "%s\n", "none");
+ }
- if (oldirq == newirq)
- goto out;
+ if (len > *lenp)
+ len = *lenp;
+ else
+ *lenp = len;
- if (pp->flags & PARPORT_FLAG_COMA)
- goto out_ok;
+ filp->f_pos += len;
- retval = -EBUSY;
+ return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+}
- /*
- * Here we don' t need the irq version of spinlocks because
- * the parport_lowlevel irq handler must not change the cad,
- * and so has no one reason to write_lock() the cad_lock spinlock.
- * -arca
- */
- read_lock(&pp->cad_lock);
+#if 0 && defined (CONFIG_PARPORT_1284)
+static int do_autoprobe(ctl_table *table, int write, struct file *filp,
+ void *result, size_t *lenp)
+{
+ struct parport_device_info *info = table->extra2;
+ const char *str;
+ char buffer[256];
+ int len = 0;
- if (pp->cad)
- {
- read_unlock(&pp->cad_lock);
- return retval;
- }
+ if (write) /* permissions stop this */
+ return -EACCES;
- if (newirq != PARPORT_IRQ_NONE) {
- retval = request_irq(newirq, pp->ops->interrupt,
- 0, pp->name, pp);
- if (retval)
- {
- read_unlock(&pp->cad_lock);
- return retval;
- }
+ if (filp->f_pos) {
+ *lenp = 0;
+ return 0;
}
+
+ if ((str = info->class_name) != NULL)
+ len += sprintf (buffer + len, "CLASS:%s;\n", str);
- if (oldirq != PARPORT_IRQ_NONE)
- free_irq(oldirq, pp);
+ if ((str = info->model) != NULL)
+ len += sprintf (buffer + len, "MODEL:%s;\n", str);
- retval = count;
+ if ((str = info->mfr) != NULL)
+ len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str);
- read_unlock(&pp->cad_lock);
+ if ((str = info->description) != NULL)
+ len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str);
-out_ok:
- pp->irq = newirq;
+ if ((str = info->cmdset) != NULL)
+ len += sprintf (buffer + len, "COMMAND SET:%s;\n", str);
-out:
- return retval;
-}
+ if (len > *lenp)
+ len = *lenp;
+ else
+ *lenp = len;
-static int irq_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct parport *pp = (struct parport *)data;
- int len;
-
- if (pp->irq == PARPORT_IRQ_NONE) {
- len = sprintf(page, "none\n");
- } else {
-#ifdef __sparc__
- len = sprintf(page, "%s\n", __irq_itoa(pp->irq));
-#else
- len = sprintf(page, "%d\n", pp->irq);
-#endif
- }
-
- *start = 0;
- *eof = 1;
- return len;
+ filp->f_pos += len;
+
+ return copy_to_user (result, buffer, len) ? -EFAULT : 0;
}
+#endif /* IEEE1284.3 support. */
-static int devices_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int do_hardware(ctl_table *table, int write, struct file *filp,
+ void *result, size_t *lenp)
{
- struct parport *pp = (struct parport *)data;
- struct pardevice *pd1;
- int len=0;
-
- for (pd1 = pp->devices; pd1 ; pd1 = pd1->next) {
- if (pd1 == pp->cad)
- page[len++] = '+';
- else
- page[len++] = ' ';
-
- len += sprintf(page+len, "%s", pd1->name);
+ struct parport *port = (struct parport *)table->extra1;
+ char buffer[256];
+ int len = 0;
- page[len++] = '\n';
+ if (filp->f_pos) {
+ *lenp = 0;
+ return 0;
}
-
- *start = 0;
- *eof = 1;
- return len;
-}
-
-static int hardware_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct parport *pp = (struct parport *)data;
- int len=0;
- len += sprintf(page+len, "base:\t0x%lx\n",pp->base);
+ if (write) /* can't happen anyway */
+ return -EACCES;
+
+ len += sprintf(buffer+len, "base:\t0x%lx", port->base);
+ if (port->base_hi)
+ len += sprintf(buffer+len, " (0x%lx)", port->base_hi);
+ buffer[len++] = '\n';
- if (pp->irq == PARPORT_IRQ_NONE) {
- len += sprintf(page+len, "irq:\tnone\n");
+ if (port->irq == PARPORT_IRQ_NONE) {
+ len += sprintf(buffer+len, "irq:\tnone\n");
} else {
#ifdef __sparc__
- len += sprintf(page+len, "irq:\t%s\n",__irq_itoa(pp->irq));
+ len += sprintf(buffer+len, "irq:\t%s\n",
+ __irq_itoa(port->irq));
#else
- len += sprintf(page+len, "irq:\t%d\n",pp->irq);
+ len += sprintf(buffer+len, "irq:\t%d\n", port->irq);
#endif
}
- if (pp->dma == PARPORT_DMA_NONE)
- len += sprintf(page+len, "dma:\tnone\n");
+ if (port->dma == PARPORT_DMA_NONE)
+ len += sprintf(buffer+len, "dma:\tnone\n");
else
- len += sprintf(page+len, "dma:\t%d\n",pp->dma);
+ len += sprintf(buffer+len, "dma:\t%d\n", port->dma);
- len += sprintf(page+len, "modes:\t");
+ len += sprintf(buffer+len, "modes:\t");
{
-#define printmode(x) {if(pp->modes&PARPORT_MODE_PC##x){len+=sprintf(page+len,"%s%s",f?",":"",#x);f++;}}
+#define printmode(x) {if(port->modes&PARPORT_MODE_##x){len+=sprintf(buffer+len,"%s%s",f?",":"",#x);f++;}}
int f = 0;
- printmode(SPP);
- printmode(PS2);
- printmode(EPP);
- printmode(ECP);
- printmode(ECPEPP);
- printmode(ECPPS2);
+ printmode(PCSPP);
+ printmode(PCPS2);
+ printmode(PCEPP);
+ printmode(PCECP);
#undef printmode
}
- page[len++] = '\n';
+ buffer[len++] = '\n';
+
+ if (len > *lenp)
+ len = *lenp;
+ else
+ *lenp = len;
- *start = 0;
- *eof = 1;
- return len;
+ filp->f_pos += len;
+
+ return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
-static int autoprobe_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
+#define PARPORT_PORT_DIR(child) { 0, NULL, NULL, 0, 0555, child }
+#define PARPORT_PARPORT_DIR(child) { DEV_PARPORT, "parport", \
+ NULL, 0, 0555, child }
+#define PARPORT_DEV_DIR(child) { CTL_DEV, "dev", NULL, 0, 0555, child }
+#define PARPORT_DEVICES_ROOT_DIR { DEV_PARPORT_DEVICES, "devices", \
+ NULL, 0, 0555, NULL }
+
+
+struct parport_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ ctl_table vars[9];
+ ctl_table device_dir[2];
+ ctl_table port_dir[2];
+ ctl_table parport_dir[2];
+ ctl_table dev_dir[2];
+};
+
+static const struct parport_sysctl_table parport_sysctl_template = {
+ NULL,
+ {
+ { DEV_PARPORT_SPINTIME, "spintime",
+ NULL, sizeof(int), 0644, NULL,
+ &proc_dointvec },
+ { DEV_PARPORT_HARDWARE, "hardware",
+ NULL, 0, 0444, NULL,
+ &do_hardware },
+ PARPORT_DEVICES_ROOT_DIR,
+#if 0 && defined(CONFIG_PARPORT_1284)
+ { DEV_PARPORT_AUTOPROBE, "autoprobe",
+ NULL, 0, 0444, NULL,
+ &do_autoprobe },
+ { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0",
+ NULL, 0, 0444, NULL,
+ &do_autoprobe },
+ { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1",
+ NULL, 0, 0444, NULL,
+ &do_autoprobe },
+ { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2",
+ NULL, 0, 0444, NULL,
+ &do_autoprobe },
+ { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3",
+ NULL, 0, 0444, NULL,
+ &do_autoprobe },
+#endif /* IEEE 1284 support */
+ {0}
+ },
+ { {DEV_PARPORT_DEVICES_ACTIVE, "active", NULL, 0, 0444, NULL,
+ &do_active_device }, {0}},
+ { PARPORT_PORT_DIR(NULL), {0}},
+ { PARPORT_PARPORT_DIR(NULL), {0}},
+ { PARPORT_DEV_DIR(NULL), {0}}
+};
+
+struct parport_device_sysctl_table
{
- struct parport *pp = (struct parport *) data;
- int len = 0;
- const char *str;
+ struct ctl_table_header *sysctl_header;
+ ctl_table vars[2];
+ ctl_table device_dir[2];
+ ctl_table devices_root_dir[2];
+ ctl_table port_dir[2];
+ ctl_table parport_dir[2];
+ ctl_table dev_dir[2];
+};
+
+static const struct parport_device_sysctl_table
+parport_device_sysctl_template = {
+ NULL,
+ {
+ { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice",
+ NULL, sizeof(int), 0644, NULL,
+ &proc_dointvec },
+ },
+ { {0, NULL, NULL, 0, 0555, NULL}, {0}},
+ { PARPORT_DEVICES_ROOT_DIR, {0}},
+ { PARPORT_PORT_DIR(NULL), {0}},
+ { PARPORT_PARPORT_DIR(NULL), {0}},
+ { PARPORT_DEV_DIR(NULL), {0}}
+};
+
+struct parport_default_sysctl_table
+{
+ struct ctl_table_header *sysctl_header;
+ ctl_table vars[3];
+ ctl_table default_dir[2];
+ ctl_table parport_dir[2];
+ ctl_table dev_dir[2];
+};
+
+extern unsigned long parport_default_timeslice;
+extern int parport_default_spintime;
+
+static struct parport_default_sysctl_table
+parport_default_sysctl_table = {
+ NULL,
+ {
+ { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice",
+ &parport_default_timeslice,
+ sizeof(parport_default_timeslice), 0644, NULL,
+ &proc_dointvec },
+ { DEV_PARPORT_DEFAULT_SPINTIME, "spintime",
+ &parport_default_spintime,
+ sizeof(parport_default_spintime), 0644, NULL,
+ &proc_dointvec },
+ {0}
+ },
+ { { DEV_PARPORT_DEFAULT, "default", NULL, 0, 0555,
+ parport_default_sysctl_table.vars },{0}},
+ {
+ PARPORT_PARPORT_DIR(parport_default_sysctl_table.default_dir),
+ {0}},
+ { PARPORT_DEV_DIR(parport_default_sysctl_table.parport_dir), {0}}
+};
- page[0] = '\0';
- if ((str = pp->probe_info.class_name) != NULL)
- len += sprintf (page+len, "CLASS:%s;\n", str);
+int parport_proc_register(struct parport *port)
+{
+ struct parport_sysctl_table *t;
+ int i;
- if ((str = pp->probe_info.model) != NULL)
- len += sprintf (page+len, "MODEL:%s;\n", str);
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL)
+ return -ENOMEM;
+ memcpy(t, &parport_sysctl_template, sizeof(*t));
- if ((str = pp->probe_info.mfr) != NULL)
- len += sprintf (page+len, "MANUFACTURER:%s;\n", str);
+ t->device_dir[0].extra1 = port;
- if ((str = pp->probe_info.description) != NULL)
- len += sprintf (page+len, "DESCRIPTION:%s;\n", str);
+ for (i = 0; i < 8; i++)
+ t->vars[i].extra1 = port;
- if ((str = pp->probe_info.cmdset) != NULL)
- len += sprintf (page+len, "COMMAND SET:%s;\n", str);
+#if 0 /* Wait for IEEE 1284 support */
+ t->vars[0].data = &port->spintime;
+#endif
+ t->vars[2].child = t->device_dir;
+
+ for (i = 0; i < 5; i++)
+#if 0
+ t->vars[3 + i].extra2 = &port->probe_info[i];
+#else
+ t->vars[3 + i].extra2 = &port->probe_info;
+#endif
- *start = 0;
- *eof = 1;
- return len;
-}
+ t->port_dir[0].procname = port->name;
+ t->port_dir[0].ctl_name = port->number + 1; /* nb 0 isn't legal here */
-static inline void destroy_proc_entry(struct proc_dir_entry *root,
- struct proc_dir_entry **d)
-{
- proc_unregister(root, (*d)->low_ino);
- kfree(*d);
- *d = NULL;
+ t->port_dir[0].child = t->vars;
+ t->parport_dir[0].child = t->port_dir;
+ t->dev_dir[0].child = t->parport_dir;
+
+ t->sysctl_header = register_sysctl_table(t->dev_dir, 0);
+ if (t->sysctl_header == NULL) {
+ kfree(t);
+ t = NULL;
+ }
+ port->sysctl_table = t;
+ return 0;
}
-static void destroy_proc_tree(struct parport *pp) {
- if (pp->pdir.entry) {
- if (pp->pdir.irq)
- destroy_proc_entry(pp->pdir.entry, &pp->pdir.irq);
- if (pp->pdir.devices)
- destroy_proc_entry(pp->pdir.entry, &pp->pdir.devices);
- if (pp->pdir.hardware)
- destroy_proc_entry(pp->pdir.entry, &pp->pdir.hardware);
- if (pp->pdir.probe)
- destroy_proc_entry(pp->pdir.entry, &pp->pdir.probe);
- destroy_proc_entry(base, &pp->pdir.entry);
+int parport_proc_unregister(struct parport *port)
+{
+ if (port->sysctl_table) {
+ struct parport_sysctl_table *t = port->sysctl_table;
+ port->sysctl_table = NULL;
+ unregister_sysctl_table(t->sysctl_header);
+ kfree(t);
}
+ return 0;
}
-static struct proc_dir_entry *new_proc_entry(const char *name, mode_t mode,
- struct proc_dir_entry *parent,
- unsigned short ino,
- struct parport *p)
+int parport_device_proc_register(struct pardevice *device)
{
- struct proc_dir_entry *ent;
+ struct parport_device_sysctl_table *t;
+ struct parport * port = device->port;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL)
+ return -ENOMEM;
+ memcpy(t, &parport_device_sysctl_template, sizeof(*t));
- ent = kmalloc(sizeof(struct proc_dir_entry), GFP_KERNEL);
- if (!ent)
- return NULL;
+ t->dev_dir[0].child = t->parport_dir;
+ t->parport_dir[0].child = t->port_dir;
+ t->port_dir[0].procname = port->name;
+ t->port_dir[0].ctl_name = port->number + 1; /* nb 0 isn't legal here */
+ t->port_dir[0].child = t->devices_root_dir;
+ t->devices_root_dir[0].child = t->device_dir;
- memset(ent, 0, sizeof(struct proc_dir_entry));
-
- if (mode == S_IFDIR)
- mode |= S_IRUGO | S_IXUGO;
- else if (mode == 0)
- mode = S_IFREG | S_IRUGO;
+#if 0 && defined(CONFIG_PARPORT_1284)
- ent->low_ino = ino;
- ent->name = name;
- ent->namelen = strlen(name);
- ent->mode = mode;
+ t->device_dir[0].ctl_name =
+ parport_device_num(port->number, port->muxport,
+ device->daisy)
+ + 1; /* nb 0 isn't legal here */
- if (S_ISDIR(mode))
- {
- if (p && p->ops)
- ent->fill_inode = p->ops->fill_inode;
- ent->nlink = 2;
- } else
- ent->nlink = 1;
+#else /* No IEEE 1284 support */
- proc_register(parent, ent);
+ /* parport_device_num isn't available. */
+ t->device_dir[0].ctl_name = 1;
- return ent;
-}
+#endif /* IEEE 1284 support or not */
-/*
- * This is called as the fill_inode function when an inode
- * is going into (fill = 1) or out of service (fill = 0).
- * We use it here to manage the module use counts.
- *
- * Note: only the top-level directory needs to do this; if
- * a lower level is referenced, the parent will be as well.
- */
-static void parport_modcount(struct inode *inode, int fill)
-{
-#ifdef MODULE
- if (fill)
- inc_parport_count();
- else
- dec_parport_count();
-#endif
-}
+ t->device_dir[0].procname = device->name;
+ t->device_dir[0].extra1 = device;
+ t->device_dir[0].child = t->vars;
+ t->vars[0].data = &device->timeslice;
-int parport_proc_init(void)
-{
- base = new_proc_entry("parport", S_IFDIR, &proc_root,PROC_PARPORT,
- NULL);
- if (base == NULL) {
- printk(KERN_ERR "Unable to initialise /proc/parport.\n");
- return 0;
+ t->sysctl_header = register_sysctl_table(t->dev_dir, 0);
+ if (t->sysctl_header == NULL) {
+ kfree(t);
+ t = NULL;
}
- base->fill_inode = &parport_modcount;
-
- return 1;
+ device->sysctl_table = t;
+ return 0;
}
-void parport_proc_cleanup(void)
+int parport_device_proc_unregister(struct pardevice *device)
{
- if (base) {
- proc_unregister(&proc_root,base->low_ino);
- kfree(base);
- base = NULL;
+ if (device->sysctl_table) {
+ struct parport_device_sysctl_table *t = device->sysctl_table;
+ device->sysctl_table = NULL;
+ unregister_sysctl_table(t->sysctl_header);
+ kfree(t);
}
+ return 0;
}
-int parport_proc_register(struct parport *pp)
+int parport_default_proc_register(void)
{
- memset(&pp->pdir, 0, sizeof(struct parport_dir));
+ parport_default_sysctl_table.sysctl_header =
+ register_sysctl_table(parport_default_sysctl_table.dev_dir, 0);
+ return 0;
+}
- if (base == NULL) {
- printk(KERN_ERR "parport_proc not initialised yet.\n");
- return 1;
+int parport_default_proc_unregister(void)
+{
+ if (parport_default_sysctl_table.sysctl_header) {
+ unregister_sysctl_table(parport_default_sysctl_table.
+ sysctl_header);
+ parport_default_sysctl_table.sysctl_header = NULL;
}
-
- strncpy(pp->pdir.name, pp->name + strlen("parport"),
- sizeof(pp->pdir.name));
-
- pp->pdir.entry = new_proc_entry(pp->pdir.name, S_IFDIR, base, 0, pp);
- if (pp->pdir.entry == NULL)
- goto out_fail;
-
- pp->pdir.irq = new_proc_entry("irq", S_IFREG | S_IRUGO | S_IWUSR,
- pp->pdir.entry, 0, pp);
- if (pp->pdir.irq == NULL)
- goto out_fail;
-
- pp->pdir.irq->read_proc = irq_read_proc;
- pp->pdir.irq->write_proc = irq_write_proc;
- pp->pdir.irq->data = pp;
-
- pp->pdir.devices = new_proc_entry("devices", 0, pp->pdir.entry, 0, pp);
- if (pp->pdir.devices == NULL)
- goto out_fail;
-
- pp->pdir.devices->read_proc = devices_read_proc;
- pp->pdir.devices->data = pp;
-
- pp->pdir.hardware = new_proc_entry("hardware", 0, pp->pdir.entry, 0,
- pp);
- if (pp->pdir.hardware == NULL)
- goto out_fail;
-
- pp->pdir.hardware->read_proc = hardware_read_proc;
- pp->pdir.hardware->data = pp;
-
- pp->pdir.probe = new_proc_entry("autoprobe", 0, pp->pdir.entry, 0, pp);
- if (pp->pdir.probe == NULL)
- goto out_fail;
-
- pp->pdir.probe->read_proc = autoprobe_read_proc;
- pp->pdir.probe->data = pp;
-
return 0;
+}
-out_fail:
+#else /* no sysctl */
- printk(KERN_ERR "%s: failure registering /proc/ entry.\n", pp->name);
- destroy_proc_tree(pp);
- return 1;
+int parport_proc_register(struct parport *pp)
+{
+ return 0;
}
int parport_proc_unregister(struct parport *pp)
{
- destroy_proc_tree(pp);
return 0;
}
-#else
-
-int parport_proc_register(struct parport *p)
+int parport_device_proc_register(struct pardevice *device)
{
return 0;
}
-int parport_proc_unregister(struct parport *p)
+int parport_device_proc_unregister(struct pardevice *device)
{
return 0;
}
-int parport_proc_init(void)
+int parport_default_proc_register (void)
{
return 0;
}
-void parport_proc_cleanup(void)
+int parport_default_proc_unregister (void)
{
+ return 0;
}
-
#endif
#define PARPORT_DEFAULT_TIMESLICE (HZ/5)
+unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
+
+/* This doesn't do anything yet. */
+int parport_default_spintime;
+
static struct parport *portlist = NULL, *portlist_tail = NULL;
spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;
kfree(port);
}
-void parport_quiesce(struct parport *port)
-{
- if (port->devices) {
- printk(KERN_WARNING "%s: attempt to quiesce active port.\n",
- port->name);
- return;
- }
-
- if (port->flags & PARPORT_FLAG_COMA) {
- printk(KERN_WARNING "%s: attempt to quiesce comatose port.\n",
- port->name);
- return;
- }
-
- port->ops->release_resources(port);
-
- port->flags |= PARPORT_FLAG_COMA;
-}
-
struct pardevice *parport_register_device(struct parport *port, const char *name,
int (*pf)(void *), void (*kf)(void *),
void (*irq_func)(int, void *, struct pt_regs *),
return NULL;
}
- /* We may need to claw back the port hardware. */
- if (port->flags & PARPORT_FLAG_COMA) {
- if (port->ops->claim_resources(port)) {
- printk(KERN_WARNING
- "%s: unable to get hardware to register %s.\n",
- port->name, name);
- kfree (tmp->state);
- kfree (tmp);
- return NULL;
- }
- port->flags &= ~PARPORT_FLAG_COMA;
- }
-
tmp->name = name;
tmp->port = port;
tmp->preempt = pf;
port->ops->inc_use_count();
init_waitqueue_head(&tmp->wait_q);
- tmp->timeslice = PARPORT_DEFAULT_TIMESLICE;
+ tmp->timeslice = parport_default_timeslice;
tmp->waitnext = tmp->waitprev = NULL;
return tmp;
port = dev->port;
if (port->cad == dev) {
- printk(KERN_WARNING "%s: refused to unregister "
- "currently active device %s.\n", port->name, dev->name);
- return;
+ printk(KERN_DEBUG "%s: %s forgot to release port\n",
+ port->name, dev->name);
+ parport_release (dev);
}
spin_lock(&port->pardevice_lock);
dec_parport_count();
port->ops->dec_use_count();
- /* If there are no more devices, put the port to sleep. */
- if (!port->devices)
- parport_quiesce(port);
-
return;
}
fi
if [ "$CONFIG_MCA" = "y" ]; then
tristate 'NE/2 (ne2000 MCA version) support' CONFIG_NE2_MCA
+ tristate 'SKnet MCA support' CONFIG_SKMC
fi
bool 'EISA, VLB, PCI and on board controllers' CONFIG_NET_EISA
if [ "$CONFIG_NET_EISA" = "y" ]; then
bool ' WANPIPE Frame Relay support' CONFIG_WANPIPE_FR
bool ' WANPIPE PPP support' CONFIG_WANPIPE_PPP
fi
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ dep_tristate 'Cyclom 2X(tm) multiprotocol cards (EXPERIMENTAL)' CONFIG_CYCLADES_SYNC $CONFIG_WAN_DRIVERS
+ if [ "$CONFIG_CYCLADES_SYNC" != "n" ]; then
+ bool ' Cyclom 2X X.25 support (EXPERIMENTAL)' CONFIG_CYCLOMX_X25
+ fi
+ fi
fi
fi
#
endif
endif
+ifeq ($(CONFIG_SKMC),y)
+L_OBJS += sk_mca.o
+else
+ ifeq ($(CONFIG_SKMC),m)
+ M_OBJS += sk_mca.o
+ endif
+endif
+
ifeq ($(CONFIG_ELMC_II),y)
L_OBJS += 3c527.o
else
endif
endif
+ifeq ($(CONFIG_CYCLADES_SYNC),y)
+ LX_OBJS += cycx_drv.o
+ L_OBJS += cycx_main.o
+ ifeq ($(CONFIG_CYCLOMX_X25),y)
+ L_OBJS += cycx_x25.o
+ endif
+endif
+
+ifeq ($(CONFIG_CYCLADES_SYNC),m)
+ MX_OBJS += cycx_drv.o
+ M_OBJS += cyclomx.o
+ CYCLOMX_OBJS = cycx_main.o
+ ifeq ($(CONFIG_CYCLOMX_X25),y)
+ CYCLOMX_OBJS += cycx_x25.o
+ endif
+endif
+
ifeq ($(CONFIG_X25_ASY),y)
L_OBJS += x25_asy.o
else
wanpipe.o: $(WANPIPE_OBJS)
ld -r -o $@ $(WANPIPE_OBJS)
+cyclomx.o: $(CYCLOMX_OBJS)
+ ld -r -o $@ $(CYCLOMX_OBJS)
+
rcpci.o: rcpci45.o rclanmtl.o
$(LD) -r -o rcpci.o rcpci45.o rclanmtl.o
extern int wavelan_probe(struct device *);
extern int el16_probe(struct device *);
extern int elmc_probe(struct device *);
+extern int skmca_probe(struct device *);
extern int elplus_probe(struct device *);
extern int ac3200_probe(struct device *);
extern int es_probe(struct device *);
#endif
#ifdef CONFIG_ELMC /* 3c523 */
{elmc_probe, 0},
+#endif
+#ifdef CONFIG_SKMC /* SKnet Microchannel */
+ {skmca_probe, 0},
#endif
{NULL, 0},
};
--- /dev/null
+/*
+* cycx_drv.c cycx Support Module.
+*
+* This module is a library of common hardware-specific
+* functions used by all Cyclades sync and some async (8x & 16x)
+* drivers.
+*
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/05/28 acme cycx_intack & cycx_intde gone for good
+* 1999/05/18 acme lots of unlogged work, submitting to Linus...
+* 1999/01/03 acme more judicious use of data types
+* 1999/01/03 acme judicious use of data types :>
+* cycx_inten trying to reset pending interrupts
+* from cyclom 2x - I think this isn't the way to
+* go, but for now...
+* 1999/01/02 acme cycx_intack ok, I think there's nothing to do
+* to ack an int in cycx_drv.c, only handle it in
+* cyx_isr (or in the other protocols: cyp_isr,
+* cyf_isr, when they get implemented.
+* Dec 31, 1998 Arnaldo cycx_data_boot & cycx_code_boot fixed, crossing
+* fingers to see x25_configure in cycx_x25.c
+* work... :)
+* Dec 26, 1998 Arnaldo load implementation fixed, seems to work! :)
+* cycx_2x_dpmbase_options with all the possible
+* DPM addresses (20).
+* cycx_intr implemented (test this!)
+* general code cleanup
+* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation.
+* Aug 8, 1998 Arnaldo Initial version.
+*/
+
+#include <linux/config.h>
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#else
+#define EXPORT_SYMBOL(function)
+#endif
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/sched.h> /* for jiffies, HZ, etc. */
+#include <linux/cycx_drv.h> /* API definitions */
+#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
+#include <linux/delay.h> /* udelay */
+#include <asm/io.h> /* for inb(), outb(), etc. */
+
+#define MOD_VERSION 0
+#define MOD_RELEASE 1
+
+#ifdef MODULE
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclades Sync Cards Driver.");
+#endif
+
+/* Function Prototypes */
+/* Module entry points. These are called by the OS and must be public. */
+int init_module (void);
+void cleanup_module (void);
+
+/* Hardware-specific functions */
+static int cycx_detect (cycxhw_t *hw);
+static int cycx_load (cycxhw_t *hw, cfm_t *cfm, u32 len);
+static int cycx_init (cycxhw_t *hw);
+static int cycx_reset (cycxhw_t *hw);
+static void cycx_bootcfg (cycxhw_t *hw);
+
+static int init_cycx_2x (cycxhw_t *hw);
+static int reset_cycx_2x (u32 addr);
+static int detect_cycx_2x (u32 addr);
+
+/* Miscellaneous functions */
+static void delay_cycx (int sec);
+static int get_option_index (u32 *optlist, u32 optval);
+static u16 checksum (u8 *buf, u32 len);
+
+#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
+
+/* Global Data
+ * Note: All data must be explicitly initialized!!! */
+
+/* private data */
+static char modname[] = "cycx_drv";
+static char fullname[] = "Cyclom X Support Module";
+static char copyright[] = "(c) 1998, 1999 Arnaldo Carvalho de Melo";
+
+/* Hardware configuration options.
+ * These are arrays of configuration options used by verification routines.
+ * The first element of each array is its size (i.e. number of options).
+ */
+static u32 cycx_2x_dpmbase_options[] =
+{
+ 20,
+ 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
+ 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
+ 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
+};
+
+static u32 cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
+
+/* Kernel Loadable Module Entry Points */
+/* Module 'insert' entry point.
+ * o print announcement
+ * o initialize static data
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process */
+#ifdef MODULE
+int init_module (void)
+{
+ printk(KERN_INFO "%s v%u.%u %s\n",
+ fullname, MOD_VERSION, MOD_RELEASE, copyright);
+ printk(KERN_INFO "version=0x%X\n", LINUX_VERSION_CODE);
+ return 0;
+}
+/* Module 'remove' entry point.
+ * o release all remaining system resources */
+void cleanup_module (void)
+{
+}
+#endif
+/* Kernel APIs */
+/* Set up adapter.
+ * o detect adapter type
+ * o verify hardware configuration options
+ * o check for hardware conflicts
+ * o set up adapter shared memory
+ * o test adapter memory
+ * o load firmware
+ * Return: 0 ok.
+ * < 0 error */
+EXPORT_SYMBOL(cycx_setup);
+int cycx_setup (cycxhw_t *hw, void *cfm, u32 len)
+{
+ u32 *irq_opt = NULL; /* IRQ options */
+ u32 *dpmbase_opt = NULL;/* DPM window base options */
+ int err = 0;
+
+ if (cycx_detect(hw)) {
+ printk(KERN_ERR "%s: adapter Cyclom %uX not found at "
+ "address 0x%lX!\n",
+ modname, hw->type, (unsigned long) hw->dpmbase);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s: found Cyclom %uX card at address 0x%lx.\n",
+ modname, hw->type, (unsigned long) hw->dpmbase);
+
+ switch (hw->type) {
+ case CYCX_2X:
+ irq_opt = cycx_2x_irq_options;
+ dpmbase_opt = cycx_2x_dpmbase_options;
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown card.\n", modname);
+ return -EINVAL;
+ }
+
+ /* Verify IRQ configuration options */
+ if (!get_option_index(irq_opt, hw->irq)) {
+ printk (KERN_ERR "%s: IRQ %d is illegal!\n", modname, hw->irq);
+ return -EINVAL;
+ }
+
+ /* Setup adapter dual-port memory window and test memory */
+ if (!hw->dpmbase) {
+ printk(KERN_ERR "%s: you must specify the dpm address!\n",
+ modname);
+ return -EINVAL;
+ }
+ else if (!get_option_index(dpmbase_opt, hw->dpmbase)) {
+ printk(KERN_ERR "%s: memory address 0x%lX is illegal!\n",
+ modname, (unsigned long) hw->dpmbase);
+ return -EINVAL;
+ }
+
+ hw->dpmsize = CYCX_WINDOWSIZE;
+ /* FIXME! Is this the only amount ever available? */
+ hw->memory = 0x40000;
+
+ cycx_init(hw);
+
+ printk(KERN_INFO "%s: dual-port memory window is set at 0x%lX.\n",
+ modname, (unsigned long) hw->dpmbase);
+ printk(KERN_INFO "%s: found %luK bytes of on-board memory.\n",
+ modname, (unsigned long) hw->memory / 1024);
+
+ /* Load firmware. If loader fails then shut down adapter */
+ err = cycx_load(hw, cfm, len);
+ if (err) cycx_down(hw); /* shutdown adapter */
+ return err;
+}
+
+/* Shut down CYCX: disable shared memory access and interrupts, stop CPU,etc.*/
+EXPORT_SYMBOL(cycx_down);
+int cycx_down (cycxhw_t *hw)
+{
+ return 0; /* FIXME: anything needed here? */
+}
+
+/* Enable interrupt generation. */
+EXPORT_SYMBOL(cycx_inten);
+int cycx_inten (cycxhw_t *hw)
+{
+ switch (hw->type) {
+ case CYCX_2X: writeb (0, hw->dpmbase); break;
+ default: return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Generate an interrupt to adapter's CPU. */
+EXPORT_SYMBOL(cycx_intr);
+int cycx_intr (cycxhw_t *hw)
+{
+ switch (hw->type) {
+ case CYCX_2X:
+ writew(0, hw->dpmbase + GEN_CYCX_INTR);
+ return 0;
+ default: return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Execute Adapter Command.
+ * o Set exec flag.
+ * o Busy-wait until flag is reset. */
+EXPORT_SYMBOL(cycx_exec);
+int cycx_exec (u32 addr)
+{
+ u16 i = 0;
+ /* wait till addr content is zeroed */
+
+ while (readw(addr) != 0) {
+ udelay(1000);
+ if (++i > 50) return -1;
+ }
+
+ return 0;
+}
+
+/* Read absolute adapter memory.
+ * Transfer data from adapter's memory to data buffer. */
+EXPORT_SYMBOL(cycx_peek);
+int cycx_peek (cycxhw_t *hw, u32 addr, void *buf, u32 len)
+{
+ if (len == 1) *(u8*)buf = readb (hw->dpmbase + addr);
+ else memcpy_fromio(buf, hw->dpmbase + addr, len);
+
+ return 0;
+}
+
+/* Write Absolute Adapter Memory.
+ * Transfer data from data buffer to adapter's memory.
+ * A single byte is written with writeb(); larger transfers go through
+ * memcpy_toio(). Always returns 0. */
+EXPORT_SYMBOL(cycx_poke);
+int cycx_poke (cycxhw_t *hw, u32 addr, void *buf, u32 len)
+{
+ if (len == 1) writeb (*(u8*)buf, hw->dpmbase + addr);
+ else memcpy_toio(hw->dpmbase + addr, buf, len);
+
+ return 0;
+}
+
+/* Hardware-Specific Functions */
+/* Detect adapter type.
+ * o if adapter type is specified then call detection routine for that adapter
+ * type. Otherwise call detection routines for every adapter type until
+ * the adapter is detected.
+ *
+ * Notes:
+ * 1) Detection tests are destructive! Adapter will be left in shutdown state
+ * after the test.
+ * Returns 0 on success, -EFAULT when no dual-port memory base is set,
+ * -ENODEV when no supported board answers. */
+static int cycx_detect (cycxhw_t *hw)
+{
+ int err = 0;
+
+ if (!hw->dpmbase) return -EFAULT;
+
+ switch (hw->type) {
+ case CYCX_2X:
+ if (!detect_cycx_2x(hw->dpmbase)) err = -ENODEV;
+ break;
+ default:
+ /* no type given: probe every supported board (only 2X today) */
+ if (detect_cycx_2x(hw->dpmbase)) hw->type = CYCX_2X;
+ else err = -ENODEV;
+ }
+
+ return err;
+}
+
+/* Load Aux Routines */
+/* Probe the board's dual-port memory.
+ * Writes a test pattern and reads it back twice (double read guards
+ * against transient bus noise); retries up to 3 times with a 1s pause
+ * between attempts.
+ * Return 1 if memory exists at addr and 0 if not. */
+static int memory_exists(u32 addr)
+{
+ int timeout = 0;
+
+ for (; timeout < 3 ; timeout++) {
+ writew (TEST_PATTERN, addr + 0x10);
+
+ if (readw (addr + 0x10) == TEST_PATTERN)
+ if (readw (addr + 0x10) == TEST_PATTERN) return 1;
+
+ delay_cycx(1);
+ }
+
+ return 0;
+}
+
+/* Reset board hardware.
+ * Dispatches to the type-specific reset routine.
+ * Returns 0 on success, nonzero (reset_cycx_2x failure) or -EINVAL for
+ * an unknown adapter type. */
+static int cycx_reset(cycxhw_t *hw)
+{
+ /* Reset board */
+ switch (hw->type) {
+ case CYCX_2X: return reset_cycx_2x(hw->dpmbase);
+ }
+
+ return -EINVAL;
+}
+
+/* Load reset code.
+ * Copies 'cnt' bytes of reset code byte-by-byte to RESET_OFFSET in the
+ * board's memory window, with a short spin between writes. */
+static void reset_load(u32 addr, u8 *buffer, u32 cnt)
+{
+ u32 pt_code = addr + RESET_OFFSET;
+ u16 i, j;
+
+ for ( i = 0 ; i < cnt ; i++) {
+ for (j = 0 ; j < 50 ; j++); /* Delay - FIXME busy waiting... */
+ writeb(*buffer++, pt_code++);
+ }
+}
+
+/* Load buffer using boot interface.
+ * o copy data from buffer to Cyclom-X memory
+ * o wait for reset code to copy it to right portion of memory
+ * Returns the result of wait_cyc() (0 ok, negative on timeout/error). */
+static int buffer_load(u32 addr, u8 *buffer, u32 cnt)
+{
+ memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
+ writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
+ return wait_cyc(addr);
+}
+
+/* Set up entry point and kick start Cyclom-X CPU. */
+static void cycx_start (u32 addr)
+{
+ /* put in 0x30 offset the jump instruction to the code entry point */
+ /* 0xea is the x86 far-jmp opcode; the operand bytes appear to encode
+ the segment set up by cycx_code_boot() (0xc400) -- TODO confirm */
+ writeb(0xea, addr + 0x30);
+ writeb(0x00, addr + 0x31);
+ writeb(0xc4, addr + 0x32);
+ writeb(0x00, addr + 0x33);
+ writeb(0x00, addr + 0x34);
+
+ /* cmd to start executing code */
+ writew(GEN_START, addr + CMD_OFFSET);
+}
+
+/* Load and boot reset code.
+ * Plants a far jump at START_OFFSET, loads the reset image and releases
+ * the on-board 80186 from hold. */
+static void cycx_reset_boot(u32 addr, u8 *code, u32 len)
+{
+ u32 pt_start = addr + START_OFFSET;
+
+ /* NOTE(review): the operand bytes below encode f000:fc00, not
+ f000:3f00 as the original comment claimed -- verify */
+ writeb(0xea, pt_start++); /* far jmp */
+ writeb(0x00, pt_start++);
+ writeb(0xfc, pt_start++);
+ writeb(0x00, pt_start++);
+ writeb(0xf0, pt_start);
+ reset_load(addr, code, len);
+
+ /* 80186 was in hold, go */
+ writeb(0, addr + START_CPU);
+ delay_cycx(1);
+}
+
+/* Load data.bin file through boot (reset) interface.
+ * Sets the load-buffer size, points the adapter's segment register at
+ * 0x4000 and transfers the image in CFM_LOAD_BUFSZ chunks.
+ * Returns 0 on success, 2 if the adapter does not acknowledge a command,
+ * 4 if a buffer transfer fails. */
+static int cycx_data_boot(u32 addr, u8 *code, u32 len)
+{
+ u32 pt_boot_cmd = addr + CMD_OFFSET;
+ u32 i;
+
+ /* boot buffer length */
+ writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+ writew(GEN_DEFPAR, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0) return 2;
+
+ writew(0, pt_boot_cmd + sizeof(u16));
+ writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
+ writew(GEN_SET_SEG, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0) return 2;
+
+ for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+ if (buffer_load(addr, code + i,
+ MIN(CFM_LOAD_BUFSZ, (len - i))) < 0) {
+ printk(KERN_ERR "%s: Error !!\n", modname);
+ return 4;
+ }
+
+ return 0;
+}
+
+
+/* Load code.bin file through boot (reset) interface.
+ * Mirrors cycx_data_boot(): sets the load-buffer size, points the
+ * adapter's segment register at 0xc400 and transfers the image in
+ * CFM_LOAD_BUFSZ chunks.
+ * Returns 0 on success, 2 if the adapter does not acknowledge a command,
+ * 4 if a buffer transfer fails.
+ * (Error checks and codes made consistent with cycx_data_boot(): use
+ * "< 0" instead of "== -1" and return 2 for both handshake failures.) */
+static int cycx_code_boot(u32 addr, u8 *code, u32 len)
+{
+ u32 pt_boot_cmd = addr + CMD_OFFSET;
+ u32 i;
+
+ /* boot buffer length */
+ writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
+ writew(GEN_DEFPAR, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0) return 2;
+
+ writew(0x0000, pt_boot_cmd + sizeof(u16));
+ writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
+ writew(GEN_SET_SEG, pt_boot_cmd);
+
+ if (wait_cyc(addr) < 0) return 2;
+
+ for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
+ if (buffer_load(addr, code + i,
+ MIN(CFM_LOAD_BUFSZ, (len - i))) < 0) {
+ printk(KERN_ERR "%s: Error !!\n", modname);
+ return 4;
+ }
+
+ return 0;
+}
+
+/* Initialize CYCX hardware: setup memory window, IRQ, etc.
+ * Dispatches to the type-specific init routine; -EINVAL for unknown
+ * adapter types. */
+static int cycx_init (cycxhw_t *hw)
+{
+ switch (hw->type) {
+ case CYCX_2X: return init_cycx_2x(hw);
+ }
+
+ return -EINVAL;
+}
+
+/* Load adapter from the memory image of the CYCX firmware module.
+ * o verify firmware integrity and compatibility
+ * o start adapter up
+ * The image layout is: cfm_t header, then a cycx_header_t giving the
+ * sizes of three concatenated sub-images (reset, data, code).
+ * Returns 0 on success, -EINVAL on a bad image or load failure, -EIO if
+ * interrupts cannot be enabled afterwards. */
+static int cycx_load (cycxhw_t *hw, cfm_t *cfm, u32 len)
+{
+ int i, j, status;
+ cycx_header_t *img_hdr;
+ u8 *reset_image,
+ *data_image,
+ *code_image;
+ u32 pt_cycld = hw->dpmbase + 0x400;
+ u16 cksum;
+
+ /* Announce */
+ printk(KERN_INFO "%s: firmware signature=\"%s\"\n",
+ modname, cfm->signature);
+
+ /* Verify firmware signature */
+ if (strcmp(cfm->signature, CFM_SIGNATURE)) {
+ printk(KERN_ERR "%s:cycx_load: not Cyclom-2X firmware!\n",
+ modname);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s: firmware version=%u\n", modname, cfm->version);
+
+ /* Verify firmware module format version */
+ if (cfm->version != CFM_VERSION) {
+ printk(KERN_ERR "%s:cycx_load: firmware format %u rejected! "
+ "Expecting %u.\n",
+ modname, cfm->version, CFM_VERSION);
+ return -EINVAL;
+ }
+
+ /* Verify firmware module length and checksum */
+ cksum = checksum((u8*)&cfm->info, sizeof(cfm_info_t) +
+ cfm->info.codesize);
+/*
+ FIXME cfm->info.codesize is off by 2
+ if (((len - sizeof(cfm_t) - 1) != cfm->info.codesize) ||
+*/
+ if (cksum != cfm->checksum) {
+ printk(KERN_ERR "%s:cycx_load: firmware corrupted!\n", modname);
+ printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
+ len - sizeof(cfm_t) - 1, cfm->info.codesize);
+ printk(KERN_ERR " chksum = 0x%x (expected 0x%x)\n",
+ cksum, cfm->checksum);
+ return -EINVAL;
+ }
+
+ /* If everything is ok, set reset, data and code pointers */
+
+ img_hdr = (cycx_header_t*)(((u8*) cfm) + sizeof(cfm_t) - 1);
+#ifdef FIRMWARE_DEBUG
+ printk(KERN_INFO "%s:cycx_load: image sizes\n", modname);
+ printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
+ printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
+ printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
+#endif
+ reset_image = ((u8 *) img_hdr) + sizeof(cycx_header_t);
+ data_image = reset_image + img_hdr->reset_size;
+ code_image = data_image + img_hdr->data_size;
+
+ /*---- Start load ----*/
+ /* Announce */
+ printk(KERN_INFO "%s: loading firmware %s (ID=%u)...\n", modname,
+ (cfm->descr[0] != '\0') ? cfm->descr : "unknown firmware",
+ cfm->info.codeid);
+
+ /* up to 5 attempts to get the reset code running */
+ for (i = 0 ; i < 5 ; i++) {
+ /* Reset Cyclom hardware */
+ if ((status = cycx_reset(hw)) != 0) {
+ printk(KERN_ERR "%s: dpm problem or board not "
+ "found (%d).\n", modname, status);
+ return -EINVAL;
+ }
+
+ /* Load reset.bin */
+ cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
+ /* reset is waiting for boot */
+ writew(GEN_POWER_ON, pt_cycld);
+ delay_cycx(1);
+
+ /* reset code clears the word when it is up and running */
+ for (j = 0 ; j < 3 ; j++)
+ if (!readw(pt_cycld)) goto reset_loaded;
+ else delay_cycx(1);
+ }
+
+ printk(KERN_ERR "%s: reset not started.\n", modname);
+ return -EINVAL;
+reset_loaded:
+ /* Load data.bin */
+ if((status = cycx_data_boot(hw->dpmbase, data_image,
+ img_hdr->data_size)) != 0) {
+ printk(KERN_ERR "%s: cannot load data file (%d).\n",
+ modname, status);
+ return -EINVAL;
+ }
+
+ /* Load code.bin */
+ if((status = cycx_code_boot(hw->dpmbase, code_image,
+ img_hdr->code_size)) != 0) {
+ printk(KERN_ERR "%s: cannot load code file (%d).\n",
+ modname, status);
+ return -EINVAL;
+ }
+
+ /* Prepare boot-time configuration data */
+ cycx_bootcfg(hw);
+
+ /* kick-off CPU */
+ cycx_start(hw->dpmbase);
+
+ /* Arthur Ganzert's tip: wait a while after the firmware loading...
+ seg abr 26 17:17:12 EST 1999 - acme */
+ delay_cycx(7);
+ printk(KERN_INFO "%s: firmware loaded!\n", modname);
+
+ /* enable interrupts */
+ if (cycx_inten(hw)) {
+ printk(KERN_ERR "%s: adapter hardware failure!\n", modname);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Prepare boot-time firmware configuration data.
+ * o initialize configuration data area
+ * From async.doc - V_3.4.0 - 07/18/1994
+ * - As of now, only static buffers are available to the user.
+ * So, the bit VD_RXDIRC must be set in 'valid'. That means that user
+ * wants to use the static transmission and reception buffers. */
+static void cycx_bootcfg (cycxhw_t *hw)
+{
+ /* use fixed buffers */
+ writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
+}
+
+/* Initialize CYCX_2X adapter.
+ * Just (destructively) re-detects the board; returns 0 if found,
+ * -ENODEV otherwise. */
+static int init_cycx_2x (cycxhw_t *hw)
+{
+ if (!detect_cycx_2x(hw->dpmbase)) return -ENODEV;
+ return 0;
+}
+
+/* Detect Cyclom 2x adapter.
+ * Resets the board and probes its dual-port memory.
+ * Return 1 if detected o.k. or 0 if failed.
+ * Note: This test is destructive! Adapter will be left in shutdown
+ * state after the test. */
+static int detect_cycx_2x (u32 addr)
+{
+ printk(KERN_INFO "%s: looking for a cyclom 2x at 0x%lX...\n",
+ modname, (unsigned long) addr);
+
+ reset_cycx_2x(addr);
+ return memory_exists(addr);
+}
+
+/* Miscellaneous */
+/* Get option's index into the options list.
+ * optlist[0] holds the number of entries; valid options live in
+ * optlist[1..optlist[0]].
+ * Return option's index (1 .. N) or zero if option is invalid. */
+static int get_option_index (u32 *optlist, u32 optval)
+{
+ int i = 1;
+ for (; i <= optlist[0]; ++i) if (optlist[i] == optval) return i;
+ return 0;
+}
+
+/* Reset adapter's CPU.
+ * Pulses the reset line (enable, wait 2s, disable, wait 2s) and then
+ * probes memory. NOTE: inverted convention w.r.t. memory_exists():
+ * returns 0 on success, 1 on failure (callers test for nonzero). */
+static int reset_cycx_2x (u32 addr)
+{
+ writeb (0, addr + RST_ENABLE); delay_cycx (2);
+ writeb (0, addr + RST_DISABLE); delay_cycx (2);
+ return memory_exists(addr) ? 0 : 1;
+}
+
+/* Delay for 'sec' seconds by sleeping (not spinning); must therefore
+ * only be called from process context. */
+static void delay_cycx (int sec)
+{
+/* acme
+ Thu dez 31 21:45:16 EDT 1998
+ FIXME I'll keep this comment here just in case, as of now I don't
+ know it all the contexts where this routine is used are interruptible... */
+
+ current->state = TASK_INTERRUPTIBLE;
+ current->counter = 0; /* make us low-priority */
+ schedule_timeout(sec*HZ);
+}
+
+/* Calculate 16-bit CRC using CCITT polynomial (0x1021).
+ * Bitwise implementation, MSB-first per byte, initial value 0; data
+ * bits are shifted into the low end of the register as it advances. */
+static u16 checksum (u8 *buf, u32 len)
+{
+ u16 crc = 0;
+ u16 mask, flag;
+
+ for (; len; --len, ++buf)
+ for (mask = 0x80; mask; mask >>= 1) {
+ flag = (crc & 0x8000);
+ crc <<= 1;
+ crc |= ((*buf & mask) ? 1 : 0);
+ if (flag) crc ^= 0x1021;
+ }
+
+ return crc;
+}
+/* End */
--- /dev/null
+/*
+* cycx_main.c Cyclades Cyclom X Multiprotocol WAN Link Driver. Main module.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
+* Jaspreet Singh <jaspreet@sangoma.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/05/19 acme works directly linked into the kernel
+* init_waitqueue_head for 2.3.* kernel
+* 1999/05/18 acme major cleanup (polling not needed), etc
+* Aug 28, 1998 Arnaldo minor cleanup (ioctls for firmware deleted)
+* queue_task activated
+* Aug 08, 1998 Arnaldo Initial version.
+*/
+
+#include <linux/config.h> /* OS configuration options */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/malloc.h> /* kmalloc(), kfree() */
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/module.h> /* support for loadable modules */
+#include <linux/ioport.h> /* request_region(), release_region() */
+#include <linux/tqueue.h> /* for kernel task queues */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <linux/cyclomx.h> /* cyclomx common user API definitions */
+#include <asm/uaccess.h> /* kernel <-> user copy */
+#include <linux/init.h> /* __initfunc (when not using as a module) */
+
+#ifdef MODULE
+MODULE_AUTHOR("Arnaldo Carvalho de Melo");
+MODULE_DESCRIPTION("Cyclades Sync Cards Driver.");
+#endif
+
+/* Defines & Macros */
+
+#define DRV_VERSION 0 /* version number */
+#define DRV_RELEASE 3 /* release (minor version) number */
+#define MAX_CARDS 1 /* max number of adapters */
+
+#ifndef CONFIG_CYCLOMX_CARDS /* configurable option */
+#define CONFIG_CYCLOMX_CARDS 1
+#endif
+
+/* Function Prototypes */
+
+/* Module entry points */
+int init_module (void);
+void cleanup_module (void);
+
+/* WAN link driver entry points */
+static int setup (wan_device_t *wandev, wandev_conf_t *conf);
+static int shutdown (wan_device_t *wandev);
+static int ioctl (wan_device_t *wandev, unsigned cmd, unsigned long arg);
+
+/* Miscellaneous functions */
+static void cycx_isr (int irq, void *dev_id, struct pt_regs *regs);
+
+/* Global Data
+ * Note: All data must be explicitly initialized!!!
+ */
+
+/* private data */
+static char drvname[] = "cyclomx";
+static char fullname[] = "CYCLOM X(tm) Multiprotocol Driver";
+static char copyright[] = "(c) 1998, 1999 Arnaldo Carvalho de Melo";
+static int ncards = CONFIG_CYCLOMX_CARDS;
+static cycx_t *card_array = NULL; /* adapter data space */
+
+/* Kernel Loadable Module Entry Points */
+
+/*
+ * Module 'insert' entry point.
+ * o print announcement
+ * o allocate adapter data space
+ * o initialize static data
+ * o register all cards with WAN router
+ * o calibrate CYCX shared memory access delay.
+ *
+ * Return: 0 Ok
+ * < 0 error.
+ * Context: process
+ */
+#ifdef MODULE
+int init_module (void)
+#else
+__initfunc(int cyclomx_init (void))
+#endif
+{
+ int cnt, err = 0;
+
+ printk(KERN_INFO "%s v%u.%u %s\n",
+ fullname, DRV_VERSION, DRV_RELEASE, copyright);
+
+ /* Verify number of cards and allocate adapter data space */
+ ncards = min(ncards, MAX_CARDS);
+ ncards = max(ncards, 1);
+ card_array = kmalloc(sizeof(cycx_t) * ncards, GFP_KERNEL);
+
+ if (card_array == NULL) return -ENOMEM;
+
+ memset(card_array, 0, sizeof(cycx_t) * ncards);
+
+ /* Register adapters with WAN router */
+ for (cnt = 0; cnt < ncards; ++cnt) {
+ cycx_t *card = &card_array[cnt];
+ wan_device_t *wandev = &card->wandev;
+
+ sprintf(card->devname, "%s%d", drvname, cnt + 1);
+ wandev->magic = ROUTER_MAGIC;
+ wandev->name = card->devname;
+ wandev->private = card;
+ wandev->enable_tx_int = 0;
+ wandev->setup = &setup;
+ wandev->shutdown = &shutdown;
+ wandev->ioctl = &ioctl;
+ err = register_wan_device(wandev);
+
+ if (err) {
+ printk(KERN_ERR
+ "%s: %s registration failed with error %d!\n",
+ drvname, card->devname, err);
+ break;
+ }
+ }
+
+ if (cnt) {
+ ncards = cnt; /* adjust actual number of cards */
+ err = 0; /* at least one card registered: succeed, so
+ * the devices registered above are not left
+ * dangling by a failed module load */
+ } else {
+ kfree(card_array);
+ err = -ENODEV;
+ }
+
+ return err;
+}
+
+/*
+ * Module 'remove' entry point.
+ * o unregister all adapters from the WAN router
+ * o release all remaining system resources
+ */
+#ifdef MODULE
+void cleanup_module (void)
+{
+ int i = 0;
+
+ /* unregister every adapter, then release the adapter array */
+ for (; i < ncards; ++i) {
+ cycx_t *card = &card_array[i];
+ unregister_wan_device(card->devname);
+ }
+
+ kfree(card_array);
+}
+#endif
+/* WAN Device Driver Entry Points */
+/*
+ * Setup/configure WAN link driver.
+ * o check adapter state
+ * o make sure firmware is present in configuration
+ * o allocate interrupt vector
+ * o setup CYCLOM X hardware
+ * o call appropriate routine to perform protocol-specific initialization
+ * o mark I/O region as used
+ *
+ * This function is called when router handles ROUTER_SETUP IOCTL. The
+ * configuration structure is in kernel memory (including extended data, if
+ * any).
+ */
+static int setup (wan_device_t *wandev, wandev_conf_t *conf)
+{
+ cycx_t *card;
+ int err = 0;
+ int irq;
+
+ /* Sanity checks */
+ if (!wandev || !wandev->private || !conf) return -EFAULT;
+
+ card = wandev->private;
+
+ if (wandev->state != WAN_UNCONFIGURED) return -EBUSY;
+
+ /* firmware image must arrive embedded in the configuration data */
+ if (!conf->data_size || (conf->data == NULL)) {
+ printk(KERN_ERR "%s: firmware not found in configuration "
+ "data!\n", wandev->name);
+ return -EINVAL;
+ }
+
+ if (conf->irq <= 0) {
+ printk(KERN_ERR "%s: can't configure without IRQ!\n",
+ wandev->name);
+ return -EINVAL;
+ }
+
+ /* Allocate IRQ */
+ irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
+
+ if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
+ printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
+ wandev->name, irq);
+ return -EINVAL;
+ }
+
+ /* Configure hardware, load firmware, etc. */
+ memset(&card->hw, 0, sizeof(cycxhw_t));
+ card->hw.irq = (conf->irq == 9) ? 2 : conf->irq;
+ card->hw.dpmbase = conf->maddr;
+ card->hw.dpmsize = CYCX_WINDOWSIZE;
+ card->hw.type = conf->hw_opt[0];
+ card->hw.fwid = CFID_X25_2X;
+ card->lock = SPIN_LOCK_UNLOCKED;
+#if LINUX_VERSION_CODE >= 0x020300
+ init_waitqueue_head(&card->wait_stats);
+#else
+ card->wait_stats = NULL;
+#endif
+ err = cycx_setup(&card->hw, conf->data, conf->data_size);
+
+ if (err) {
+ free_irq(irq, card);
+ return err;
+ }
+
+ /* Initialize WAN device data space */
+ wandev->irq = irq;
+ wandev->dma = wandev->ioport = 0;
+ wandev->maddr = (unsigned long*)card->hw.dpmbase;
+ wandev->msize = card->hw.dpmsize;
+ wandev->hw_opt[0] = card->hw.type;
+ wandev->hw_opt[1] = card->hw.pclk;
+ wandev->hw_opt[2] = card->hw.memory;
+ wandev->hw_opt[3] = card->hw.fwid;
+
+ /* Protocol-specific initialization */
+ switch (card->hw.fwid) {
+#ifdef CONFIG_CYCLOMX_X25
+ case CFID_X25_2X: err = cyx_init(card, conf); break;
+#endif
+ default:
+ printk(KERN_ERR "%s: this firmware is not supported!\n",
+ wandev->name);
+ err = -EINVAL;
+ }
+
+ /* protocol init failed: shut the board down and release the IRQ */
+ if (err) {
+ cycx_down(&card->hw);
+ free_irq(irq, card);
+ return err;
+ }
+
+ wandev->critical = 0;
+ return 0;
+}
+
+/*
+ * Shut down WAN link driver.
+ * o shut down adapter hardware
+ * o release system resources.
+ *
+ * This function is called by the router when device is being unregistered or
+ * when it handles ROUTER_DOWN IOCTL.
+ */
+static int shutdown (wan_device_t *wandev)
+{
+ cycx_t *card;
+
+ /* sanity checks */
+ if (!wandev || !wandev->private) return -EFAULT;
+
+ /* nothing to do if the device was never configured */
+ if (wandev->state == WAN_UNCONFIGURED) return 0;
+
+ card = wandev->private;
+ wandev->state = WAN_UNCONFIGURED;
+ cycx_down(&card->hw);
+ printk(KERN_INFO "%s: irq %d being freed!\n", wandev->name,wandev->irq);
+ free_irq(wandev->irq, card);
+ wandev->critical = 0;
+ return 0;
+}
+
+/*
+ * Driver I/O control.
+ * o verify arguments
+ * o perform requested action
+ *
+ * This function is called when router handles one of the reserved user
+ * IOCTLs. Note that 'arg' still points to user address space.
+ */
+static int ioctl (wan_device_t *wandev, unsigned cmd, unsigned long arg)
+{
+ return -EINVAL; /* no driver-specific IOCTLs are supported */
+}
+
+/* Miscellaneous */
+/*
+ * CYCX Interrupt Service Routine.
+ * o acknowledge CYCX hardware interrupt.
+ * o call protocol-specific interrupt service routine, if any.
+ */
+static void cycx_isr (int irq, void *dev_id, struct pt_regs *regs)
+{
+/* local alias for the typed dev_id; #undef'ed at the end of the function */
+#define card ((cycx_t*)dev_id)
+ if (!card || card->wandev.state == WAN_UNCONFIGURED) return;
+
+ /* guard against re-entering the protocol ISR */
+ if (card->in_isr) {
+ printk(KERN_WARNING "%s: interrupt re-entrancy on IRQ %d!\n",
+ card->devname, card->wandev.irq);
+ return;
+ }
+
+ /* delegate to the protocol-specific handler, if one is installed */
+ if (card->isr) card->isr(card);
+#undef card
+}
+
+/*
+ * This routine is called by the protocol-specific modules when network
+ * interface is being open. The only reason we need this, is because we
+ * have to call MOD_INC_USE_COUNT, but cannot include 'module.h' where it's
+ * defined more than once into the same kernel module.
+ */
+/* Bump the card's open count and the module use count. */
+void cyclomx_open (cycx_t *card)
+{
+ ++card->open_cnt;
+ MOD_INC_USE_COUNT;
+}
+
+/*
+ * This routine is called by the protocol-specific modules when network
+ * interface is being closed. The only reason we need this, is because we
+ * have to call MOD_DEC_USE_COUNT, but cannot include 'module.h' where it's
+ * defined more than once into the same kernel module.
+ */
+/* Drop the card's open count and the module use count. */
+void cyclomx_close (cycx_t *card)
+{
+ --card->open_cnt;
+ MOD_DEC_USE_COUNT;
+}
+
+/* Set WAN device state. */
+/* Set WAN device state.
+ * Logs the transition (protected from interrupts with cli/sti) and
+ * stamps state_tick with the current jiffies. */
+void cyclomx_set_state (cycx_t *card, int state)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+
+ if (card->wandev.state != state) {
+ switch (state) {
+ case WAN_CONNECTED:
+ printk (KERN_INFO "%s: link connected!\n",
+ card->devname);
+ break;
+
+ case WAN_CONNECTING:
+ printk (KERN_INFO "%s: link connecting...\n",
+ card->devname);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO "%s: link disconnected!\n",
+ card->devname);
+ break;
+ }
+
+ card->wandev.state = state;
+ }
+
+ card->state_tick = jiffies;
+ restore_flags(flags);
+}
+
+/* End */
--- /dev/null
+/*
+* cycx_x25.c CYCLOM X Multiprotocol WAN Link Driver. X.25 module.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated
+* if_send simplified
+* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration
+* use spinlocks instead of cli/sti in some points
+* 1999/05/24 acme finished the x25_get_stat function
+* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works,
+* AFAIT, with ARPHRD_ETHER). This seems to be
+* needed to use socket(AF_X25)...
+* Now the config file must specify a peer media
+* address for svc channels over a crossover cable.
+* Removed hold_timeout from x25_channel_t,
+* not used.
+* A little enhancement in the DEBUG processing
+* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr,
+* instead of chan_disc.
+* 1999/05/16 marcelo fixed timer initialization in SVCs
+* 1999/01/05 acme x25_configure now get (most of) all
+* parameters...
+* 1999/01/05 acme pktlen now (correctly) uses log2 (value
+* configured)
+* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc)
+* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge
+* indication (interrupt from cyclom 2x)
+* 1999/01/02 acme cyx_isr: first hackings...
+* 1999/01/0203 acme when initializing an array don't give less
+* elements than declared...
+* example: char send_cmd[6] = "?\xFF\x10";
+* you'll gonna lose a couple hours, 'cause your
+* brain won't admit that there's an error in the
+* above declaration... the side effect is that
+* memset is put into the unresolved symbols
+* instead of using the inline memset functions...
+* 1999/01/02 acme began chan_connect, chan_send, x25_send
+* Dec 31, 1998 Arnaldo x25_configure
+* this code can be compiled as non module
+* Dec 27, 1998 Arnaldo code cleanup
+* IPX code wiped out! let's decrease code
+* complexity for now, remember: I'm learning! :)
+* bps_to_speed_code OK
+* Dec 26, 1998 Arnaldo Minimal debug code cleanup
+* Aug 08, 1998 Arnaldo Initial version.
+*/
+#define CYCLOMX_X25_DEBUG 1
+
+#include <linux/version.h>
+#include <linux/kernel.h> /* printk(), and other useful stuff */
+#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/errno.h> /* return codes */
+#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/malloc.h> /* kmalloc(), kfree() */
+#include <linux/wanrouter.h> /* WAN router definitions */
+#include <asm/byteorder.h> /* htons(), etc. */
+#include <linux/if_arp.h> /* ARPHRD_X25 */
+#include <linux/cyclomx.h> /* CYCLOM X common user API definitions */
+#include <linux/cycx_x25.h> /* X.25 firmware API definitions */
+
+/* Defines & Macros */
+#define MAX_CMD_RETRY 5
+#define X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */
+#define OUT_INTR 1
+#define IN_INTR 0
+
+/* Data Structures */
+/* This is an extension of the 'struct device' we create for each network
+ interface to keep the rest of X.25 channel-specific data. */
+typedef struct x25_channel {
+ char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
+ char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
+ char *local_addr; /* local media address, ASCIIZ -
+ svc thru crossover cable;
+ NULL unless configured */
+ s16 lcn; /* logical channel number/conn.req.key*/
+ u8 link;
+ struct timer_list timer; /* timer used for svc channel disc. */
+ spinlock_t lock;
+ u16 protocol; /* ethertype, 0 - multiplexed */
+ u8 svc; /* 0 - permanent, 1 - switched */
+ u8 state; /* channel state */
+ u8 drop_sequence; /* mark sequence for dropping */
+ u32 idle_tmout; /* sec, before disconnecting */
+ struct sk_buff *rx_skb; /* receive socket buffer */
+ cycx_t *card; /* -> owner */
+ struct enet_statistics ifstats; /* interface statistics */
+} x25_channel_t;
+
+/* Function Prototypes */
+/* WAN link driver entry points. These are called by the WAN router module. */
+static int update (wan_device_t *wandev),
+ new_if (wan_device_t *wandev, struct device *dev,wanif_conf_t *conf),
+ del_if (wan_device_t *wandev, struct device *dev);
+
+/* Network device interface */
+static int if_init (struct device *dev),
+ if_open (struct device *dev),
+ if_close (struct device *dev),
+ if_header (struct sk_buff *skb, struct device *dev,
+ u16 type, void *daddr, void *saddr, unsigned len),
+ if_rebuild_hdr (struct sk_buff *skb),
+ if_send (struct sk_buff *skb, struct device *dev);
+
+static struct net_device_stats * if_stats (struct device *dev);
+
+/* Interrupt handlers */
+static void cyx_isr (cycx_t *card),
+ tx_intr (cycx_t *card, TX25Cmd *cmd),
+ rx_intr (cycx_t *card, TX25Cmd *cmd),
+ log_intr (cycx_t *card, TX25Cmd *cmd),
+ stat_intr (cycx_t *card, TX25Cmd *cmd),
+ connect_confirm_intr (cycx_t *card, TX25Cmd *cmd),
+ disconnect_confirm_intr (cycx_t *card, TX25Cmd *cmd),
+ connect_intr (cycx_t *card, TX25Cmd *cmd),
+ disconnect_intr (cycx_t *card, TX25Cmd *cmd),
+ spur_intr (cycx_t *card, TX25Cmd *cmd);
+
+/* X.25 firmware interface functions */
+static int x25_configure (cycx_t *card, TX25Config *conf),
+ x25_get_stats (cycx_t *card),
+ x25_send (cycx_t *card, u8 link, u8 lcn, u8 bitm, int len,void *buf),
+ x25_connect_response (cycx_t *card, x25_channel_t *chan),
+ x25_disconnect_response (cycx_t *card, u8 link, u8 lcn);
+
+/* Miscellaneous functions */
+static int chan_connect (struct device *dev),
+ chan_send (struct device *dev, struct sk_buff *skb);
+
+static void set_chan_state (struct device *dev, u8 state, u8 outside_intr),
+ nibble_to_byte (u8 *s, u8 *d, u8 len, u8 nibble),
+ reset_timer (struct device *dev),
+ chan_disc (struct device *dev),
+ chan_timer (unsigned long data);
+
+static u8 bps_to_speed_code (u32 bps);
+static u8 log2 (u32 n);
+
+static unsigned dec_to_uint (u8 *str, int len);
+
+static struct device *get_dev_by_lcn (wan_device_t *wandev, s16 lcn);
+static struct device *get_dev_by_dte_addr (wan_device_t *wandev, char *dte);
+
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len);
+static void x25_dump_config(TX25Config *conf);
+static void x25_dump_stats(TX25Stats *stats);
+static void x25_dump_devs(wan_device_t *wandev);
+#define dprintk(format, a...) printk(format, ##a)
+#else
+#define hex_dump(msg, p, len)
+#define x25_dump_config(conf)
+#define x25_dump_stats(stats)
+#define x25_dump_devs(wandev)
+#define dprintk(format, a...)
+#endif
+/* Public Functions */
+
+/* X.25 Protocol Initialization routine.
+ *
+ * This routine is called by the main CYCLOM X module during setup. At this
+ * point adapter is completely initialized and X.25 firmware is running.
+ * o read firmware version (to make sure it's alive)
+ * o configure adapter
+ * o initialize protocol-specific fields of the adapter data space.
+ *
+ * Return: 0 o.k.
+ * < 0 failure. */
+int cyx_init (cycx_t *card, wandev_conf_t *conf)
+{
+ TX25Config cfg;
+
+ /* Verify configuration ID */
+ if (conf->config_id != WANCONFIG_X25) {
+ printk(KERN_INFO "%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
+ return -EINVAL;
+ }
+
+ /* Initialize protocol-specific fields */
+ card->mbox = card->hw.dpmbase + X25_MBOX_OFFS;
+ card->u.x.critical = 0; /* critical section flag */
+ card->u.x.connection_keys = 0;
+
+ /* Configure adapter. Here we set reasonable defaults, then parse
+ * device configuration structure and set configuration options.
+ * Most configuration options are verified and corrected (if
+ * necessary) since we can't rely on the adapter to do so and don't
+ * want it to fail either. */
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.link = 0;
+ cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
+ cfg.speed = bps_to_speed_code(conf->bps);
+ cfg.n3win = 7;
+ cfg.n2win = 2;
+ cfg.n2 = 5;
+ cfg.nvc = 1;
+ cfg.npvc = 1;
+ cfg.flags = 0x02; /* default = V35 */
+ cfg.t1 = 10; /* line carrier timeout */
+ cfg.t2 = 29; /* tx timeout */
+ cfg.t21 = 180; /* CALL timeout */
+ cfg.t23 = 180; /* CLEAR timeout */
+
+ /* adjust MTU: round down to the nearest supported packet size */
+ if (!conf->mtu || conf->mtu >= 512)
+ card->wandev.mtu = 512;
+ else if (conf->mtu >= 256)
+ card->wandev.mtu = 256;
+ else if (conf->mtu >= 128)
+ card->wandev.mtu = 128;
+ else
+ card->wandev.mtu = 64;
+
+ /* firmware wants the packet size as a power-of-two exponent */
+ cfg.pktlen = log2(card->wandev.mtu);
+
+ if (conf->station == WANOPT_DTE) {
+ cfg.locaddr = 3; /* DTE */
+ cfg.remaddr = 1; /* DCE */
+ } else {
+ cfg.locaddr = 1; /* DCE */
+ cfg.remaddr = 3; /* DTE */
+ }
+
+ if (conf->interface == WANOPT_RS232)
+ cfg.flags = 0; /* FIXME just reset the 2nd bit */
+
+ /* clamp PVC/SVC ranges to the 12-bit LCN space (0..4095) */
+ if (conf->u.x25.hi_pvc) {
+ card->u.x.hi_pvc = min(conf->u.x25.hi_pvc, 4095);
+ card->u.x.lo_pvc = min(conf->u.x25.lo_pvc, card->u.x.hi_pvc);
+ }
+
+ if (conf->u.x25.hi_svc) {
+ card->u.x.hi_svc = min(conf->u.x25.hi_svc, 4095);
+ card->u.x.lo_svc = min(conf->u.x25.lo_svc, card->u.x.hi_svc);
+ }
+
+ if (card->u.x.lo_pvc == 255)
+ cfg.npvc = 0;
+ else
+ cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
+
+ cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
+
+ if (conf->u.x25.hdlc_window)
+ cfg.n2win = min(conf->u.x25.hdlc_window, 7);
+
+ if (conf->u.x25.pkt_window)
+ cfg.n3win = min(conf->u.x25.pkt_window, 7);
+
+ if (conf->u.x25.t1)
+ cfg.t1 = min(conf->u.x25.t1, 30);
+
+ if (conf->u.x25.t2)
+ cfg.t2 = min(conf->u.x25.t2, 30);
+
+ if (conf->u.x25.t11_t21)
+ cfg.t21 = min(conf->u.x25.t11_t21, 30);
+
+ if (conf->u.x25.t13_t23)
+ cfg.t23 = min(conf->u.x25.t13_t23, 30);
+
+ if (conf->u.x25.n2)
+ cfg.n2 = min(conf->u.x25.n2, 30);
+
+ /* initialize adapter */
+ if (x25_configure(card, &cfg))
+ return -EIO;
+
+ /* Initialize protocol-specific fields of adapter data space */
+ card->wandev.bps = conf->bps;
+ card->wandev.interface = conf->interface;
+ card->wandev.clocking = conf->clocking;
+ card->wandev.station = conf->station;
+ card->isr = &cyx_isr;
+ card->exec = NULL;
+ card->wandev.update = &update;
+ card->wandev.new_if = &new_if;
+ card->wandev.del_if = &del_if;
+ card->wandev.state = WAN_DISCONNECTED;
+ card->wandev.enable_tx_int = card->irq_dis_if_send_count = 0;
+ return 0;
+}
+
+/* WAN Device Driver Entry Points */
+/* Update device status & statistics. */
+static int update (wan_device_t *wandev)
+{
+ /* sanity checks */
+ if (!wandev || !wandev->private)
+ return -EFAULT;
+
+ if (wandev->state == WAN_UNCONFIGURED)
+ return -ENODEV;
+
+ /* refresh statistics from the adapter */
+ x25_get_stats(wandev->private);
+ return 0;
+}
+
+/* Create new logical channel.
+ * This routine is called by the router when ROUTER_IFNEW IOCTL is being
+ * handled.
+ * o parse media- and hardware-specific configuration
+ * o make sure that a new channel can be created
+ * o allocate resources, if necessary
+ * o prepare network device structure for registration.
+ *
+ * Return: 0 o.k.
+ * < 0 failure (channel will not be created) */
+static int new_if (wan_device_t *wandev, struct device *dev, wanif_conf_t *conf)
+{
+ cycx_t *card = wandev->private;
+ x25_channel_t *chan;
+ int err = 0;
+
+ if (conf->name[0] == '\0' || strlen(conf->name) > WAN_IFNAME_SZ) {
+ printk(KERN_INFO "%s: invalid interface name!\n",card->devname);
+ return -EINVAL;
+ }
+
+ /* allocate and initialize private data */
+ if ((chan = kmalloc(sizeof(x25_channel_t), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ memset(chan, 0, sizeof(x25_channel_t));
+ strcpy(chan->name, conf->name);
+ chan->card = card;
+ chan->link = conf->port;
+ chan->protocol = ETH_P_IP;
+ chan->rx_skb = NULL;
+ /* only used in svc connected thru crossover cable */
+ chan->local_addr = NULL;
+ chan->lock = SPIN_LOCK_UNLOCKED;
+
+ if (conf->addr[0] == '@') { /* SVC */
+ int local_len = strlen(conf->local_addr);
+
+ if (local_len) {
+ if (local_len > WAN_ADDRESS_SZ) {
+ printk(KERN_ERR "%s: %s local addr too long!\n",
+ wandev->name, chan->name);
+ kfree(chan);
+ return -EINVAL;
+ } else if ((chan->local_addr = kmalloc(local_len + 1,
+ GFP_KERNEL)) == NULL) {
+ kfree(chan);
+ /* was "return ENOMEM": error codes must
+ be negative */
+ return -ENOMEM;
+ }
+
+ /* copy exactly local_len + 1 bytes (string plus
+ terminator). The old strncpy(..., WAN_ADDRESS_SZ)
+ zero-padded up to WAN_ADDRESS_SZ bytes, overflowing
+ this local_len + 1 byte buffer. */
+ memcpy(chan->local_addr, conf->local_addr,
+ local_len + 1);
+ }
+
+ chan->svc = 1;
+ strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
+ init_timer(&chan->timer);
+ chan->timer.function = chan_timer;
+ chan->timer.data = (unsigned long) dev;
+
+ /* Set channel timeouts (default if not specified) */
+ chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
+ } else if (is_digit(conf->addr[0])) { /* PVC */
+ s16 lcn = dec_to_uint(conf->addr, 0);
+
+ if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
+ chan->lcn = lcn;
+ else {
+ printk(KERN_ERR
+ "%s: PVC %u is out of range on interface %s!\n",
+ wandev->name, lcn, chan->name);
+ err = -EINVAL;
+ }
+ } else {
+ printk(KERN_ERR "%s: invalid media address on interface %s!\n",
+ wandev->name, chan->name);
+ err = -EINVAL;
+ }
+
+ if (err) {
+ if (chan->local_addr)
+ kfree(chan->local_addr);
+ kfree(chan);
+ return err;
+ }
+
+ /* prepare network device data space for registration */
+ dev->name = chan->name;
+ dev->init = &if_init;
+ dev->priv = chan;
+ return 0;
+}
+
+/* Delete logical channel.
+ * Frees the per-channel state hanging off dev->priv: the optional SVC
+ * local address and, if the channel is still connected, its idle timer
+ * (the timer is presumably only armed while connected — see reset_timer
+ * and set_chan_state). */
+static int del_if (wan_device_t *wandev, struct device *dev)
+{
+	if (!dev) {
+		printk(KERN_ERR "cycx_x25:del_if:dev == NULL!\n");
+		return 0;	/* nothing to free */
+	}
+
+	if (dev->priv) {
+		x25_channel_t *chan = dev->priv;
+		if (chan->svc) {
+			if (chan->local_addr)
+				kfree(chan->local_addr);
+
+			if (chan->state == WAN_CONNECTED)
+				del_timer(&chan->timer);
+		}
+		kfree(chan);
+		dev->priv = NULL;
+	}
+
+	return 0;
+}
+
+/* Network Device Interface */
+/* Initialize Linux network interface.
+ *
+ * This routine is called only once for each interface, during Linux network
+ * interface registration. Returning anything but zero will fail interface
+ * registration. */
+static int if_init (struct device *dev)
+{
+ x25_channel_t *chan = dev->priv;
+ cycx_t *card = chan->card;
+ wan_device_t *wandev = &card->wandev;
+
+ /* Initialize device driver entry points */
+ dev->open = &if_open;
+ dev->stop = &if_close;
+ dev->hard_header = &if_header;
+ dev->rebuild_header = &if_rebuild_hdr;
+ dev->hard_start_xmit = &if_send;
+ dev->get_stats = &if_stats;
+
+ /* Initialize media-specific parameters */
+ dev->mtu = X25_CHAN_MTU;
+ dev->type = ARPHRD_X25; /* ARP h/w type */
+ dev->hard_header_len = 0; /* media header length */
+ dev->addr_len = 0; /* hardware address length */
+
+ if (!chan->svc)
+ *(u16*)dev->dev_addr = htons(chan->lcn);
+
+ /* Initialize hardware parameters (just for reference) */
+ dev->irq = wandev->irq;
+ dev->dma = wandev->dma;
+ dev->base_addr = wandev->ioport;
+ dev->mem_start = (unsigned long)wandev->maddr;
+ dev->mem_end = (unsigned long)(wandev->maddr + wandev->msize - 1);
+ dev->flags |= IFF_NOARP;
+
+ /* Set transmit buffer queue length */
+ dev->tx_queue_len = 10;
+
+ /* Initialize socket buffers */
+ dev_init_buffers(dev);
+ set_chan_state(dev, WAN_DISCONNECTED, OUT_INTR);
+ return 0;
+}
+
+/* Open network interface.
+ * o prevent module from unloading by incrementing use count
+ * o if link is disconnected then initiate connection
+ *
+ * Return 0 if O.k. or errno. */
+static int if_open (struct device *dev)
+{
+ x25_channel_t *chan = dev->priv;
+ cycx_t *card = chan->card;
+
+ if (dev->start)
+ return -EBUSY; /* only one open is allowed */
+
+ if (test_and_set_bit(0, (void*)&card->wandev.critical))
+ return -EAGAIN;
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+ cyclomx_open(card);
+
+ card->wandev.critical = 0;
+ return 0;
+}
+
+/* Close network interface.
+ * o reset flags.
+ * o if there's no more open channels then disconnect physical link. */
+static int if_close (struct device *dev)
+{
+ x25_channel_t *chan = dev->priv;
+ cycx_t *card = chan->card;
+
+ if (test_and_set_bit(0, (void*)&card->wandev.critical))
+ return -EAGAIN;
+
+ dev->start = 0;
+
+ if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
+ chan_disc(dev);
+
+ cyclomx_close(card);
+
+ card->wandev.critical = 0;
+ return 0;
+}
+
+/* Build media header.
+ * X.25 carries no link-level header here: we only stash the Ethertype
+ * into skb->protocol so that if_send can check it later, and report a
+ * header length of dev->hard_header_len (set to 0 in if_init).
+ *
+ * Return: media header length. */
+static int if_header (struct sk_buff *skb, struct device *dev,
+		      u16 type, void *daddr, void *saddr, unsigned len)
+{
+	skb->protocol = type;
+	return dev->hard_header_len;
+}
+
+/* Re-build media header — nothing to resolve for X.25, so always
+ * report success.
+ * Return:	1	physical address resolved.
+ *		0	physical address not resolved */
+static int if_rebuild_hdr (struct sk_buff *skb)
+{
+	return 1;
+}
+
+/* Send a packet on a network interface.
+ * o set tbusy flag (marks start of the transmission).
+ * o check link state. If link is not up, then drop the packet.
+ * o check channel status. If it's down then initiate a call.
+ * o pass a packet to corresponding WAN device.
+ * o free socket buffer
+ *
+ * Return:	0	complete (socket buffer must be freed)
+ *		non-0	packet may be re-transmitted (tbusy must be set)
+ *
+ * Notes:
+ * 1. This routine is called either by the protocol stack or by the "net
+ *    bottom half" (with interrupts enabled).
+ * 2. Setting tbusy flag will inhibit further transmit requests from the
+ *    protocol stack and can be used for flow control with protocol layer. */
+static int if_send (struct sk_buff *skb, struct device *dev)
+{
+	x25_channel_t *chan = dev->priv;
+	cycx_t *card = chan->card;
+
+	if (dev->tbusy) {
+		/* This is the transmit path: account the drop against
+		   tx_dropped (the old code bumped rx_dropped here). */
+		++chan->ifstats.tx_dropped;
+		return -EBUSY;
+	}
+
+	dev->tbusy = 1;
+
+	reset_timer(dev);
+
+	if (!chan->svc)
+		chan->protocol = skb->protocol;
+
+	if (card->wandev.state != WAN_CONNECTED)
+		++chan->ifstats.tx_dropped;
+	else if (chan->svc && chan->protocol &&
+		 chan->protocol != skb->protocol) {
+		printk(KERN_INFO
+			"%s: unsupported Ethertype 0x%04X on interface %s!\n",
+			card->devname, skb->protocol, dev->name);
+		++chan->ifstats.tx_errors;
+	} else switch (chan->state) {
+		case WAN_DISCONNECTED:
+			/* place the call; keep the skb queued (tbusy set)
+			   so the stack retransmits it once connected */
+			if (chan_connect(dev))
+				return -EBUSY;
+			/* fall thru */
+		case WAN_CONNECTED:
+			dev->trans_start = jiffies;
+			if (chan_send(dev, skb)) {
+				/* firmware busy or more fragments pending */
+				dev->tbusy = 1;
+				return -EBUSY;
+			}
+			break;
+		default:
+			++chan->ifstats.tx_dropped;
+			++card->wandev.stats.tx_dropped;
+	}
+
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+/* Get Ethernet-style interface statistics.
+ * Return a pointer to struct net_device_stats, or NULL if the channel
+ * has not been attached to dev->priv yet. */
+static struct net_device_stats *if_stats (struct device *dev)
+{
+	x25_channel_t *chan = dev->priv;
+
+	return chan ? &chan->ifstats : NULL;
+}
+
+/* Interrupt Handlers */
+/* X.25 Interrupt Service Routine. */
+static void cyx_isr (cycx_t *card)
+{
+ unsigned long host_cpu_flags;
+ TX25Cmd cmd;
+ u16 z = 0;
+
+ card->in_isr = 1;
+ card->buff_int_mode_unbusy = 0;
+
+ if (test_and_set_bit(0, (void*)&card->wandev.critical)) {
+ printk(KERN_INFO "cyx_isr: %s, wandev.critical set to 0x%02X\n",
+ card->devname, card->wandev.critical);
+ card->in_isr = 0;
+ return;
+ }
+
+ /* For all interrupts set the critical flag to CRITICAL_RX_INTR.
+ * If the if_send routine is called with this flag set it will set
+ * the enable transmit flag to 1. (for a delayed interrupt) */
+ card->wandev.critical = CRITICAL_IN_ISR;
+ cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
+ switch (cmd.command) {
+ case X25_DATA_INDICATION:
+ rx_intr(card, &cmd);
+ break;
+ case X25_ACK_FROM_VC:
+ tx_intr(card, &cmd);
+ break;
+ case X25_LOG:
+ log_intr(card, &cmd);
+ break;
+ case X25_STATISTIC:
+ stat_intr(card, &cmd);
+ break;
+ case X25_CONNECT_CONFIRM:
+ connect_confirm_intr(card, &cmd);
+ break;
+ case X25_CONNECT_INDICATION:
+ connect_intr(card, &cmd);
+ break;
+ case X25_DISCONNECT_INDICATION:
+ disconnect_intr(card, &cmd);
+ break;
+ case X25_DISCONNECT_CONFIRM:
+ disconnect_confirm_intr(card, &cmd);
+ break;
+ case X25_LINE_ON:
+ cyclomx_set_state(card, WAN_CONNECTED);
+ break;
+ case X25_LINE_OFF:
+ cyclomx_set_state(card, WAN_DISCONNECTED);
+ break;
+ default:
+ spur_intr(card, &cmd); /* unwanted interrupt */
+ }
+
+ cycx_poke(&card->hw, 0, &z, sizeof(z));
+ cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
+
+ card->wandev.critical = CRITICAL_INTR_HANDLED;
+
+ if (card->wandev.enable_tx_int)
+ card->wandev.enable_tx_int = 0;
+
+ spin_lock_irqsave(&card->lock, host_cpu_flags);
+ card->in_isr = 0;
+ card->wandev.critical = 0;
+ spin_unlock_irqrestore(&card->lock, host_cpu_flags);
+
+ if (card->buff_int_mode_unbusy)
+ mark_bh(NET_BH);
+}
+
+/* Transmit interrupt handler.
+ * o Release socket buffer
+ * o Clear 'tbusy' flag */
+static void tx_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ struct device *dev;
+ wan_device_t *wandev = &card->wandev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+
+ /* unbusy device and then dev_tint(); */
+ if ((dev = get_dev_by_lcn (wandev, lcn)) != NULL) {
+ card->buff_int_mode_unbusy = 1;
+ dev->tbusy = 0;
+ } else
+ printk(KERN_ERR "%s:ackvc for inexistent lcn %d\n",
+ card->devname, lcn);
+}
+
+/* Receive interrupt handler.
+ * This routine handles fragmented IP packets using M-bit according to the
+ * RFC1356.
+ * o map logical channel number to network interface.
+ * o allocate socket buffer or append received packet to the existing one.
+ * o if M-bit is reset (i.e. it's the last packet in a sequence) then
+ *   decapsulate packet and pass socket buffer to the protocol stack.
+ *
+ * Notes:
+ * 1. When allocating a socket buffer, if M-bit is set then more data is
+ *    coming and we have to allocate buffer for the maximum IP packet size
+ *    expected on this channel.
+ * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
+ *    socket buffers available) the whole packet sequence must be discarded. */
+static void rx_intr (cycx_t *card, TX25Cmd *cmd)
+{
+	wan_device_t *wandev = &card->wandev;
+	struct device *dev;
+	x25_channel_t *chan;
+	struct sk_buff *skb;
+	u8 bitm, lcn;
+	int pktlen = cmd->len - 5;	/* payload follows a 5-byte header */
+
+	cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+	cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
+	bitm &= 0x10;	/* M-bit: more fragments follow in this sequence */
+
+	if ((dev = get_dev_by_lcn(wandev, lcn)) == NULL) {
+		/* Invalid channel, discard packet */
+		printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
+				 card->devname, lcn);
+		return;
+	}
+
+	chan = dev->priv;
+	reset_timer(dev);
+
+	/* While discarding a broken sequence, skip fragments until the
+	 * final (M-bit clear) one arrives.  NOTE(review): that final
+	 * fragment is then processed below rather than dropped with the
+	 * rest of the sequence — confirm this is intended. */
+	if (chan->drop_sequence)
+		if (!bitm)
+			chan->drop_sequence = 0;
+		else
+			return;
+
+	if ((skb = chan->rx_skb) == NULL) {
+		/* Allocate new socket buffer */
+		int bufsize = bitm ? dev->mtu : pktlen;
+
+		if ((skb = dev_alloc_skb(bufsize +
+					 dev->hard_header_len)) == NULL) {
+			printk(KERN_INFO "%s: no socket buffers available!\n",
+					 card->devname);
+			chan->drop_sequence = 1;
+			++chan->ifstats.rx_dropped;
+			return;
+		}
+
+		skb->dev = dev;
+		skb->protocol = htons(chan->protocol);
+		chan->rx_skb = skb;
+	}
+
+	if (skb_tailroom(skb) < pktlen) {
+		/* No room for the packet. Call off the whole thing! */
+		dev_kfree_skb(skb);
+		chan->rx_skb = NULL;
+
+		if (bitm)
+			chan->drop_sequence = 1;
+
+		printk(KERN_INFO "%s: unexpectedly long packet sequence "
+			"on interface %s!\n", card->devname, dev->name);
+		++chan->ifstats.rx_length_errors;
+		return;
+	}
+
+	/* Append packet to the socket buffer */
+	cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
+
+	if (bitm)
+		return;	/* more data is coming */
+
+	dev->last_rx = jiffies;		/* timestamp */
+	chan->rx_skb = NULL;		/* dequeue packet */
+
+	skb->protocol = htons(ETH_P_IP);
+	skb->dev = dev;
+	skb->mac.raw = skb->data;
+	netif_rx(skb);
+	++chan->ifstats.rx_packets;
+	chan->ifstats.rx_bytes += skb->len;
+}
+
+/* Connect interrupt handler. */
+static void connect_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ wan_device_t *wandev = &card->wandev;
+ struct device *dev = NULL;
+ x25_channel_t *chan;
+ u8 data[32],
+ local[24],
+ rem[24];
+ u8 lcn, sizelocal, sizerem;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ cycx_peek(&card->hw, cmd->buf + 5, &sizelocal, sizeof(sizelocal));
+ cycx_peek(&card->hw, cmd->buf + 6, data, cmd->len - 6);
+
+ sizerem = sizelocal >> 4;
+ sizelocal &= 0x0F;
+
+ local[0] = rem[0] = '\0';
+
+ if (sizelocal)
+ nibble_to_byte(data, local, sizelocal, 0);
+
+ if (sizerem)
+ nibble_to_byte(data + (sizelocal >> 1), rem, sizerem, sizelocal & 1);
+ dprintk(KERN_INFO "connect_intr:lcn=%d, local=%s, remote=%s\n",
+ lcn, local, rem);
+ if ((dev = get_dev_by_dte_addr(wandev, rem)) == NULL) {
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s: connect not expected: remote %s!\n",
+ card->devname, rem);
+ return;
+ }
+
+ chan = dev->priv;
+ chan->lcn = lcn;
+ x25_connect_response(card, chan);
+ set_chan_state(dev, WAN_CONNECTED, IN_INTR);
+}
+
+/* Connect confirm interrupt handler. */
+static void connect_confirm_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ wan_device_t *wandev = &card->wandev;
+ struct device *dev;
+ x25_channel_t *chan;
+ u8 lcn, key;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
+ dprintk(KERN_INFO "%s: connect_confirm_intr:lcn=%d, key=%d\n",
+ card->devname, lcn, key);
+ if ((dev = get_dev_by_lcn(wandev, -key)) == NULL) {
+ /* Invalid channel, discard packet */
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ printk(KERN_INFO "%s: connect confirm not expected: lcn %d, "
+ "key=%d!\n", card->devname, lcn, key);
+ return;
+ }
+
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ chan = dev->priv;
+ chan->lcn = lcn;
+ set_chan_state(dev, WAN_CONNECTED, IN_INTR);
+}
+
+/* Disconnect confirm interrupt handler. */
+static void disconnect_confirm_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ wan_device_t *wandev = &card->wandev;
+ struct device *dev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ dprintk(KERN_INFO "%s: disconnect_confirm_intr:lcn=%d\n",
+ card->devname, lcn);
+ if ((dev = get_dev_by_lcn(wandev, lcn)) == NULL) {
+ /* Invalid channel, discard packet */
+ printk(KERN_INFO "%s:disconnect confirm not expected!:lcn %d\n",
+ card->devname, lcn);
+ return;
+ }
+
+ set_chan_state(dev, WAN_DISCONNECTED, IN_INTR);
+}
+
+/* disconnect interrupt handler. */
+static void disconnect_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ wan_device_t *wandev = &card->wandev;
+ struct device *dev;
+ u8 lcn;
+
+ cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
+ dprintk(KERN_INFO "disconnect_intr:lcn=%d\n", lcn);
+ x25_disconnect_response(card, 0, lcn);
+
+ if ((dev = get_dev_by_lcn(wandev, lcn)) != NULL)
+ set_chan_state(dev, WAN_DISCONNECTED, IN_INTR);
+}
+
+/* LOG interrupt handler. */
+static void log_intr (cycx_t *card, TX25Cmd *cmd)
+{
+#if CYCLOMX_X25_DEBUG
+ char bf[20];
+ u16 size, toread, link, msg_code;
+ u8 code, routine;
+
+ cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
+ cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
+ cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
+ /* at most 20 bytes are available... thanx to Daniela :) */
+ toread = size < 20 ? size : 20;
+ cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
+ cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
+ cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
+
+ printk(KERN_INFO "cyx_isr: X25_LOG (0x4500) indic.:\n");
+ printk(KERN_INFO "cmd->buf=0x%X\n", cmd->buf);
+ printk(KERN_INFO "Log message code=0x%X\n", msg_code);
+ printk(KERN_INFO "Link=%d\n", link);
+ printk(KERN_INFO "log code=0x%X\n", code);
+ printk(KERN_INFO "log routine=0x%X\n", routine);
+ printk(KERN_INFO "Message size=%d\n", size);
+ hex_dump("Message", bf, toread);
+#endif
+}
+
+/* STATISTIC interrupt handler. */
+static void stat_intr (cycx_t *card, TX25Cmd *cmd)
+{
+ cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
+ sizeof(card->u.x.stats));
+ hex_dump("stat_intr", (unsigned char*)&card->u.x.stats,
+ sizeof(card->u.x.stats));
+ x25_dump_stats(&card->u.x.stats);
+ wake_up_interruptible(&card->wait_stats);
+}
+
+/* Spurious interrupt handler.
+ * o print a warning (the command code came from the firmware mailbox
+ *   but matched no case in cyx_isr)
+ * If number of spurious interrupts exceeded some limit, then ??? */
+static void spur_intr (cycx_t *card, TX25Cmd *cmd)
+{
+	printk(KERN_INFO "%s: spurious interrupt (0x%X)!\n",
+			 card->devname, cmd->command);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void hex_dump(char *msg, unsigned char *p, int len)
+{
+ unsigned char hex[1024],
+ * phex = hex;
+
+ if (len >= (sizeof(hex) / 2))
+ len = (sizeof(hex) / 2) - 1;
+
+ while (len--) {
+ sprintf(phex, "%02x", *p++);
+ phex += 2;
+ }
+
+ printk(KERN_INFO "%s: %s\n", msg, hex);
+}
+#endif
+/* CYCLOM X Firmware-Specific Functions
+ *
+ * Almost all X.25 commands can unexpectedly fail due to so-called 'X.25
+ * asynchronous events' such as restart, interrupt, incoming call request,
+ * call clear request, etc. They can't be ignored and have to be dealt with
+ * immediately. To tackle this problem we execute each interface command
+ * in a loop until a good return code is received or the maximum number of
+ * retries is reached. If an interface command returns a non-zero return
+ * code, the asynchronous event/error handler x25_error() is called.
+ */
+/* Exec x25 command. */
+static int x25_exec (cycx_t *card, int command, int link,
+ void *data1, int len1, void *data2, int len2)
+{
+ TX25Cmd c;
+ u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
+ int err = 0;
+
+ c.command = command;
+ c.link = link;
+ c.len = len1 + len2;
+
+ if (test_and_set_bit(0, (void*)&card->u.x.critical))
+ return -EAGAIN;
+
+ /* write command */
+ cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
+
+ /* write x25 data */
+ if (data1) {
+ cycx_poke(&card->hw, addr, data1, len1);
+
+ if (data2)
+ if (len2 > 254) {
+ u32 addr1 = 0xA00 + 0x400 * link;
+
+ cycx_poke(&card->hw, addr + len1, data2, 249);
+ cycx_poke(&card->hw, addr1, ((u8*) data2) + 249,
+ len2 - 249);
+ } else
+ cycx_poke(&card->hw, addr + len1, data2, len2);
+ }
+
+ /* generate interruption, executing command */
+ cycx_intr(&card->hw);
+
+ /* wait till card->mbox == 0 */
+ err = cycx_exec(card->mbox);
+ card->u.x.critical = 0;
+
+ return err;
+}
+
+/* Configure adapter. */
+static int x25_configure (cycx_t *card, TX25Config *conf)
+{
+ struct {
+ u16 nlinks;
+ TX25Config conf[2];
+ } x25_cmd_conf;
+
+ memset (&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
+ x25_cmd_conf.nlinks = 2;
+ x25_cmd_conf.conf[0] = *conf;
+ /* FIXME: we need to find a way in the wanrouter framework
+ to configure the second link, for now lets use it
+ with the same config from the first link, fixing
+ the interface type to RS232, the speed in 38400 and
+ the clock to external */
+ x25_cmd_conf.conf[1] = *conf;
+ x25_cmd_conf.conf[1].link = 1;
+ x25_cmd_conf.conf[1].speed = 5; /* 38400 */
+ x25_cmd_conf.conf[1].clock = 8;
+ x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
+
+ x25_dump_config(&x25_cmd_conf.conf[0]);
+ x25_dump_config(&x25_cmd_conf.conf[1]);
+
+ return x25_exec(card, X25_CONFIG, 0,
+ &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
+}
+
+/* Get protocol statistics. */
+static int x25_get_stats (cycx_t *card)
+{
+ /* the firmware expects 20 in the size field!!!
+ thanx to Daniela */
+ int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
+
+ if (err)
+ return err;
+
+ interruptible_sleep_on(&card->wait_stats);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
+ card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
+ card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
+ card->wandev.stats.rx_length_errors = 0; /* not available from fw */
+ card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
+ card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
+ card->wandev.stats.rx_dropped = 0; /* not available from fw */
+ card->wandev.stats.rx_errors = 0; /* not available from fw */
+ card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
+ card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
+ card->wandev.stats.tx_dropped = 0; /* not available from fw */
+ card->wandev.stats.collisions = 0; /* not available from fw */
+ card->wandev.stats.tx_errors = 0; /* not available from fw */
+
+ x25_dump_devs(&card->wandev);
+ return 0;
+}
+
+/* Pack an ASCII decimal string (an X.121 address) into BCD nibbles.
+ * s: NUL-terminated string of '0'-'9' digits;
+ * d: destination buffer — assumes it is pre-zeroed by the caller
+ *    (x25_place_call memsets its data area) since odd nibbles are OR-ed in;
+ * nibble: in/out half-byte cursor.  On entry 1 means a previous call left
+ *    an odd nibble, so the first digit fills the low nibble of d[0];
+ *    on exit set to 1 if this string ended on an odd digit.
+ * Returns the number of whole bytes filled (despite the old comment,
+ * this is bytes, not nibbles — callers use it as a byte offset). */
+static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
+{
+	int i = 0;
+
+	if (*nibble && *s) {
+		/* complete the half-filled byte left by a previous call */
+		d[i] |= *s++ - '0';
+		*nibble = 0;
+		++i;
+	}
+
+	while (*s) {
+		d[i] = (*s - '0') << 4;	/* high nibble first */
+		if (*(s + 1))
+			d[i] |= *(s + 1) - '0';	/* then low nibble */
+		else {
+			*nibble = 1;	/* odd number of digits */
+			break;
+		}
+		++i;
+		s += 2;
+	}
+
+	return i;
+}
+
+/* Unpack BCD nibbles back into an ASCII decimal string.
+ * s: packed nibble buffer; d: destination (NUL-terminated on return);
+ * len: number of digits to extract;
+ * nibble: non-zero if the first digit sits in the low nibble of *s
+ *         (odd alignment left over from a preceding field). */
+static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
+{
+	if (nibble) {
+		*d++ = '0' + (*s++ & 0x0F);	/* leading odd nibble */
+		--len;
+	}
+
+	while (len) {
+		*d++ = '0' + (*s >> 4);	/* high nibble first */
+		if (--len) {
+			*d++ = '0' + (*s & 0x0F);
+			--len;
+		} else break;
+
+		++s;
+	}
+
+	*d = '\0';
+}
+
+/* Place X.25 call. */
+static int x25_place_call (cycx_t *card, x25_channel_t *chan)
+{
+ int err = 0,
+ retry = MAX_CMD_RETRY,
+ len;
+ char data[64],
+ nibble = 0,
+ mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
+ remotelen = strlen(chan->addr);
+ u8 key;
+
+ if (card->u.x.connection_keys == ~0UL) {
+ printk(KERN_INFO "%s: too many simultaneous connection "
+ "requests!\n", card->devname);
+ return -EAGAIN;
+ }
+
+ key = ffz(card->u.x.connection_keys);
+ set_bit(key, (void*)&card->u.x.connection_keys);
+ ++key;
+ dprintk(KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
+ memset(data, 0, sizeof(data));
+ data[1] = key; /* user key */
+ data[2] = 0x10;
+ data[4] = 0x0B;
+
+ len = byte_to_nibble(chan->addr, data + 6, &nibble);
+ len += chan->local_addr ? byte_to_nibble(chan->local_addr,
+ data + 6 + len, &nibble) : 0;
+ if (nibble)
+ ++len;
+ data[5] = mylen << 4 | remotelen;
+ data[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanx to Daniela :) */
+
+ do err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
+ &data, 7 + len + 1, NULL, 0);
+ while (err && retry--);
+
+ if (err)
+ clear_bit(--key, (void*)&card->u.x.connection_keys);
+ else {
+ chan->lcn = -key;
+ chan->protocol = ETH_P_IP;
+ }
+
+ return err;
+}
+
+/* Place X.25 CONNECT RESPONSE. */
+static int x25_connect_response (cycx_t *card, x25_channel_t *chan)
+{
+ int err = 0,
+ retry = MAX_CMD_RETRY;
+ char data[32];
+
+ memset(data, 0, sizeof(data));
+ data[0] = data[3] = chan->lcn;
+ data[2] = 0x10;
+ data[4] = 0x0F;
+ data[7] = 0xCC; /* TCP/IP over X.25, thanx Daniela */
+
+ do err = x25_exec(card, X25_CONNECT_RESPONSE, chan->link,
+ &data, 8, NULL, 0);
+ while (err && retry--);
+
+ return err;
+}
+
+/* Place X.25 DISCONNECT RESPONSE. */
+static int x25_disconnect_response (cycx_t *card, u8 link, u8 lcn)
+{
+ int err = 0,
+ retry = MAX_CMD_RETRY;
+ char data[5];
+
+ memset(data, 0, sizeof(data));
+ data[0] = data[3] = lcn;
+ data[2] = 0x10;
+ data[4] = 0x17;
+ do err = x25_exec(card, X25_DISCONNECT_RESPONSE, link,
+ &data, 5, NULL, 0);
+ while (err && retry--);
+
+ return err;
+}
+
+/* Clear X.25 call. */
+static int x25_clear_call (cycx_t *card, u8 link, u8 lcn, u8 cause, u8 diagn)
+{
+ int retry = MAX_CMD_RETRY,
+ err;
+ u8 data[7];
+
+ memset(data, 0, sizeof(data));
+ data[0] = data[3] = lcn;
+ data[2] = 0x10;
+ data[4] = 0x13;
+ data[5] = cause;
+ data[6] = diagn;
+
+ do err = x25_exec(card, X25_DISCONNECT_REQUEST, link, data, 7, NULL, 0);
+ while (err && retry--);
+
+ return err;
+}
+
+/* Send X.25 data packet.
+ * The 5-byte template "?\xFF\x10??" is the DATA_REQUEST header:
+ * bytes 0 and 3 carry the lcn, byte 4 the M-bit (0x10 when more
+ * fragments of the same packet follow — see chan_send).  Retries up
+ * to MAX_CMD_RETRY times on asynchronous-event failures of x25_exec. */
+static int x25_send (cycx_t *card, u8 link, u8 lcn, u8 bitm, int len, void *buf)
+{
+	int err = 0,
+	    retry = MAX_CMD_RETRY;
+	u8 data[] = "?\xFF\x10??";
+
+	data[0] = data[3] = lcn;
+	data[4] = bitm;
+
+	do err = x25_exec(card, X25_DATA_REQUEST, link, &data, 5, buf, len);
+	while (err && retry--);
+
+	return err;
+}
+
+/* Miscellaneous */
+/* Find network device by its channel number.
+ * Linear walk of the wandev->dev slave chain.  Note that lcn is signed:
+ * while an SVC call is pending the channel stores the negated connection
+ * key as its lcn (see x25_place_call and connect_confirm_intr). */
+static struct device *get_dev_by_lcn (wan_device_t *wandev, s16 lcn)
+{
+	struct device *dev = wandev->dev;
+
+	for (; dev; dev = dev->slave)
+		if (((x25_channel_t*)dev->priv)->lcn == lcn)
+			break;
+	return dev;
+}
+
+/* Find network device by its remote dte address.
+ * Linear walk of the slave chain; used by connect_intr to match an
+ * incoming call to the interface configured for that remote address. */
+static struct device *get_dev_by_dte_addr (wan_device_t *wandev, char *dte)
+{
+	struct device *dev = wandev->dev;
+
+	for (; dev; dev = dev->slave)
+		if (!strcmp (((x25_channel_t*)dev->priv)->addr, dte))
+			break;
+	return dev;
+}
+
+/* Initiate connection on the logical channel.
+ * o for PVC we just get channel configuration
+ * o for SVCs place an X.25 call
+ *
+ * Return:	0	connected
+ *		>0	connection in progress
+ *		<0	failure */
+static int chan_connect (struct device *dev)
+{
+	x25_channel_t *chan = dev->priv;
+	cycx_t *card = chan->card;
+
+	if (chan->svc) {
+		if (!chan->addr[0])
+			return -EINVAL;	/* no destination address */
+		dprintk(KERN_INFO "%s: placing X.25 call to %s...\n",
+				  card->devname, chan->addr);
+		if (x25_place_call(card, chan))
+			return -EIO;
+		/* completion is signalled later by connect_confirm_intr */
+		set_chan_state(dev, WAN_CONNECTING, OUT_INTR);
+		return 1;
+	} else
+		set_chan_state(dev, WAN_CONNECTED, OUT_INTR);
+
+	return 0;
+}
+
+/* Disconnect logical channel.
+ * o if SVC then clear X.25 call; the transition to WAN_DISCONNECTED is
+ *   completed by disconnect_confirm_intr.
+ * o PVCs are marked down immediately. */
+static void chan_disc (struct device *dev)
+{
+	x25_channel_t *chan = dev->priv;
+
+	if (chan->svc) {
+		x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
+		set_chan_state(dev, WAN_DISCONNECTING, OUT_INTR);
+	} else
+		set_chan_state(dev, WAN_DISCONNECTED, OUT_INTR);
+}
+
+/* Called by kernel timer when an SVC has been idle for idle_tmout
+ * seconds (armed by reset_timer on every send/receive): clears the
+ * call to free the virtual circuit. */
+static void chan_timer (unsigned long data)
+{
+	struct device *dev = (struct device*) data;
+	x25_channel_t *chan = dev->priv;
+
+	switch (chan->state) {
+		case WAN_CONNECTED:
+			chan_disc(dev);
+			break;
+		default:
+			printk (KERN_ERR "%s: chan_timer for svc (%s) not "
+					 "connected!\n",
+					 chan->card->devname, dev->name);
+	}
+}
+
+/* Set logical channel state. */
+static void set_chan_state (struct device *dev, u8 state, u8 outside_intr)
+{
+ x25_channel_t *chan = dev->priv;
+ cycx_t *card = chan->card;
+ u32 flags = 0;
+
+ if (outside_intr)
+ spin_lock(&card->lock);
+ else
+ spin_lock_irqsave(&card->lock, flags);
+
+ if (chan->state != state) {
+ if (chan->svc && chan->state == WAN_CONNECTED)
+ del_timer(&chan->timer);
+
+ switch (state) {
+ case WAN_CONNECTED:
+ printk (KERN_INFO "%s: interface %s "
+ "connected!\n",
+ card->devname, dev->name);
+ *(u16*)dev->dev_addr = htons(chan->lcn);
+ dev->tbusy = 0;
+ reset_timer(dev);
+ break;
+
+ case WAN_CONNECTING:
+ printk (KERN_INFO "%s: interface %s "
+ "connecting...\n",
+ card->devname, dev->name);
+ break;
+
+ case WAN_DISCONNECTING:
+ printk (KERN_INFO "%s: interface %s "
+ "disconnecting...\n",
+ card->devname, dev->name);
+ break;
+
+ case WAN_DISCONNECTED:
+ printk (KERN_INFO "%s: interface %s "
+ "disconnected!\n",
+ card->devname, dev->name);
+ if (chan->svc) {
+ *(unsigned short*)dev->dev_addr = 0;
+ chan->lcn = 0;
+ }
+ break;
+ }
+
+ chan->state = state;
+ }
+
+ if (outside_intr)
+ spin_unlock(&card->lock);
+ else
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/* Send packet on a logical channel.
+ * When this function is called, tx_skb field of the channel data space
+ * points to the transmit socket buffer. When transmission is complete,
+ * release socket buffer and reset 'tbusy' flag.
+ *
+ * Return:	0	- transmission complete
+ *		1	- busy
+ *
+ * Notes:
+ * 1. If packet length is greater than MTU for this channel, we'll fragment
+ *    the packet into 'complete sequence' using M-bit.
+ * 2. When transmission is complete, an event notification should be issued
+ *    to the router. */
+static int chan_send (struct device *dev, struct sk_buff *skb)
+{
+	x25_channel_t *chan = dev->priv;
+	cycx_t *card = chan->card;
+	int bitm = 0;		/* final packet */
+	unsigned len = skb->len;
+
+	if (skb->len > card->wandev.mtu) {
+		len = card->wandev.mtu;
+		bitm = 0x10;	/* set M-bit (more data) */
+	}
+
+	if (x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
+		return 1;	/* firmware busy - caller keeps the skb */
+
+	if (bitm) {
+		/* strip the sent fragment; returning busy makes if_send
+		 * hand us the same (now shorter) skb again */
+		skb_pull(skb, len);
+		return 1;
+	}
+
+	++chan->ifstats.tx_packets;
+	chan->ifstats.tx_bytes += len;
+	return 0;
+}
+
+/* Convert line speed in bps to a number used by cyclom 2x code.
+ * Rounds DOWN to the nearest supported rate; anything below 2400 bps
+ * maps to code 0.  Code 5 (38400) matches the hard-wired value used
+ * for the second link in x25_configure(). */
+static u8 bps_to_speed_code (u32 bps)
+{
+	u8 number = 0;	/* defaults to the lowest (1200) speed ;> */
+
+	if (bps >= 512000) number = 8;
+	else if (bps >= 256000) number = 7;
+	else if (bps >= 64000) number = 6;
+	else if (bps >= 38400) number = 5;
+	else if (bps >= 19200) number = 4;
+	else if (bps >= 9600) number = 3;
+	else if (bps >= 4800) number = 2;
+	else if (bps >= 2400) number = 1;
+
+	return number;
+}
+
+/* log base 2 — floor(log2(n)) for n > 0; returns 0 for the degenerate
+ * n == 0 case as well. */
+static u8 log2 (u32 n)
+{
+	u8 log = 0;
+
+	if (!n)
+		return 0;
+
+	while (n > 1) {
+		n >>= 1;
+		++log;
+	}
+
+	return log;
+}
+
+/* Convert decimal string to unsigned integer.
+ * If len != 0 then only 'len' characters of the string are converted.
+ * Conversion stops at the first non-digit; no overflow checking. */
+static unsigned dec_to_uint (u8 *str, int len)
+{
+	unsigned val = 0;
+
+	if (!len)
+		len = strlen(str);
+
+	for (; len && is_digit(*str); ++str, --len)
+		val = (val * 10) + (*str - (unsigned)'0');
+
+	return val;
+}
+
+/* Re-arm the SVC idle-disconnect timer: chan_timer fires after
+ * idle_tmout seconds unless another send/receive resets it first.
+ * No-op for PVCs, which have no timer. */
+static void reset_timer(struct device *dev)
+{
+	x25_channel_t *chan = dev->priv;
+
+	if (!chan->svc)
+		return;
+
+	del_timer(&chan->timer);
+	chan->timer.expires = jiffies + chan->idle_tmout * HZ;
+	add_timer(&chan->timer);
+}
+#ifdef CYCLOMX_X25_DEBUG
+static void x25_dump_config(TX25Config *conf)
+{
+ printk (KERN_INFO "x25 configuration\n");
+ printk (KERN_INFO "-----------------\n");
+ printk (KERN_INFO "link number=%d\n", conf->link);
+ printk (KERN_INFO "line speed=%d\n", conf->speed);
+ printk (KERN_INFO "clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
+ printk (KERN_INFO "# level 2 retransm.=%d\n", conf->n2);
+ printk (KERN_INFO "level 2 window=%d\n", conf->n2win);
+ printk (KERN_INFO "level 3 window=%d\n", conf->n3win);
+ printk (KERN_INFO "# logical channels=%d\n", conf->nvc);
+ printk (KERN_INFO "level 3 pkt len=%d\n", conf->pktlen);
+ printk (KERN_INFO "my address=%d\n", conf->locaddr);
+ printk (KERN_INFO "remote address=%d\n", conf->remaddr);
+ printk (KERN_INFO "t1=%d seconds\n", conf->t1);
+ printk (KERN_INFO "t2=%d seconds\n", conf->t2);
+ printk (KERN_INFO "t21=%d seconds\n", conf->t21);
+ printk (KERN_INFO "# PVCs=%d\n", conf->npvc);
+ printk (KERN_INFO "t23=%d seconds\n", conf->t23);
+ printk (KERN_INFO "flags=0x%x\n", conf->flags);
+}
+
+static void x25_dump_stats(TX25Stats *stats)
+{
+ printk (KERN_INFO "x25 statistics\n");
+ printk (KERN_INFO "--------------\n");
+ printk (KERN_INFO "rx_crc_errors=%d\n", stats->rx_crc_errors);
+ printk (KERN_INFO "rx_over_errors=%d\n", stats->rx_over_errors);
+ printk (KERN_INFO "n2_tx_frames=%d\n", stats->n2_tx_frames);
+ printk (KERN_INFO "n2_rx_frames=%d\n", stats->n2_rx_frames);
+ printk (KERN_INFO "tx_timeouts=%d\n", stats->tx_timeouts);
+ printk (KERN_INFO "rx_timeouts=%d\n", stats->rx_timeouts);
+ printk (KERN_INFO "n3_tx_packets=%d\n", stats->n3_tx_packets);
+ printk (KERN_INFO "n3_rx_packets=%d\n", stats->n3_rx_packets);
+ printk (KERN_INFO "tx_aborts=%d\n", stats->tx_aborts);
+ printk (KERN_INFO "rx_aborts=%d\n", stats->rx_aborts);
+}
+
+static void x25_dump_devs(wan_device_t *wandev)
+{
+ struct device *dev = wandev->dev;
+
+ printk (KERN_INFO "x25 dev states\n");
+ printk (KERN_INFO "name: addr: tbusy:\n");
+ printk (KERN_INFO "----------------------------\n");
+
+ for (; dev; dev = dev->slave) {
+ x25_channel_t *chan = dev->priv;
+
+ printk (KERN_INFO "%-5.5s %-15.15s %ld\n",
+ chan->name, chan->addr, dev->tbusy);
+ }
+}
+
+#endif /* CYCLOMX_X25_DEBUG */
+/* End */
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* May 19, 1999 Arnaldo Melo wanpipe_init belongs to sdlamain.c
* Dec 20, 1996 Gene Kozin Version 3.0.0. Complete overhaul.
* Jul 12, 1996 Gene Kozin Changes for Linux 2.0 compatibility.
* Jun 12, 1996 Gene Kozin Added support for S503 card.
#include <linux/sched.h> /* for jiffies, HZ, etc. */
#include <linux/sdladrv.h> /* API definitions */
#include <linux/sdlasfm.h> /* SDLA firmware module definitions */
-#include <linux/init.h>
#include <asm/io.h> /* for inb(), outb(), etc. */
#define _INB(port) (inb(port))
#define _OUTB(port, byte) (outb((byte),(port)))
#ifdef MODULE
int init_module (void)
-#else
-__initfunc(int wanpipe_init(void))
-#endif
{
printk(KERN_INFO "%s v%u.%u %s\n",
fullname, MOD_VERSION, MOD_RELEASE, copyright);
return 0;
}
-#ifdef MODULE
/*============================================================================
* Module 'remove' entry point.
* o release all remaining system resources
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* May 19, 1999 Arnaldo Melo __initfunc for wanpipe_init
* Nov 28, 1997 Jaspreet Singh Changed DRV_RELEASE to 1
* Nov 10, 1997 Jaspreet Singh Changed sti() to restore_flags();
* Nov 06, 1997 Jaspreet Singh Changed DRV_VERSION to 4 and DRV_RELEASE to 0
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
#include <asm/uaccess.h> /* kernel <-> user copy */
#include <asm/io.h> /* phys_to_virt() */
+#include <linux/init.h> /* __initfunc (when not using as a module) */
/****** Defines & Macros ****************************************************/
#ifdef MODULE
int init_module (void)
#else
-int wanpipe_init(void)
+__initfunc(int wanpipe_init(void))
#endif
{
int cnt, err = 0;
--- /dev/null
+/*
+net-3-driver for the SKNET MCA-based cards
+
+This is an extension to the Linux operating system, and is covered by the
+same Gnu Public License that covers that work.
+
+Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de, aarnold@elsa.de)
+
+This driver is based both on the 3C523 driver and the SK_G16 driver.
+
+paper sources:
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ Hans-Peter Messmer for the basic Microchannel stuff
+
+ 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
+ for help on Ethernet driver programming
+
+ 'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
+ for documentation on the AM7990 LANCE
+
+ 'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
+ for documentation on the Junior board
+
+ 'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
+ documentation on the MC2 board
+
+ A big thank you to the S&K support for providing me so quickly with
+ documentation!
+
+ Also see http://www.syskonnect.com/
+
+ Missing things:
+
+ -> set debug level via ioctl instead of compile-time switches
+ -> I didn't follow the development of the 2.1.x kernels, so my
+ assumptions about which things changed with which kernel version
+ are probably nonsense
+
+History:
+ May 16th, 1999
+ startup
+ May 22nd, 1999
+ added private structure, methods
+ begun building data structures in RAM
+ May 23rd, 1999
+ can receive frames, send frames
+ May 24th, 1999
+ modularized initialization of LANCE
+ loadable as module
+ still Tx problem :-(
+ May 26th, 1999
+ MC2 works
+ support for multiple devices
+ display media type for MC2+
+ May 28th, 1999
+ fixed problem in GetLANCE leaving interrupts turned off
+ increase TX queue to 4 packets to improve send performance
+ May 29th, 1999
+ a few corrections in statistics, caught rcvr overruns
+ reinitialization of LANCE/board in critical situations
+ MCA info implemented
+ implemented LANCE multicast filter
+ Jun 6th, 1999
+ additions for Linux 2.2
+
+ *************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/mca.h>
+#include <asm/processor.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#define _SK_MCA_DRIVER_
+#include "sk_mca.h"
+
+/* ------------------------------------------------------------------------
+ * global static data - not more since we can handle multiple boards and
+ * have to pack all state info into the device struct!
+ * ------------------------------------------------------------------------ */
+
+static char *MediaNames[Media_Count]=
+ {"10Base2", "10BaseT", "10Base5", "Unknown"};
+
+static unsigned char poly[] =
+ {1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0};
+
+/* ------------------------------------------------------------------------
+ * private subfunctions
+ * ------------------------------------------------------------------------ */
+
+/* dump parts of shared memory - only needed during debugging */
+
+#ifdef DEBUG
+/* Hex-dump 'len' bytes of the board's shared memory window, starting at
+   offset 'start' from dev->mem_start, 16 bytes per output line.
+   Uses readb() because the region is I/O-mapped RAM. */
+static void dumpmem(struct device *dev, u32 start, u32 len)
+{
+  int z;
+
+  for (z = 0; z < len; z++)
+  {
+    if ((z & 15) == 0)
+      printk("%04x:", z);
+    printk(" %02x", readb(dev->mem_start + start + z));
+    if ((z & 15) == 15)
+      printk("\n");
+  }
+}
+
+/* print exact time - ditto */
+
+/* Print the current time of day (seconds:microseconds) as a prefix for
+   debug messages.  tv_sec and tv_usec are longs, so use %ld - with %d
+   the output is wrong on platforms where long and int differ. */
+static void PrTime(void)
+{
+  struct timeval tv;
+
+  do_gettimeofday(&tv);
+  printk("%9ld:%06ld: ", tv.tv_sec, tv.tv_usec);
+}
+#endif
+
+/* deduce resources out of POS registers */
+
+/* Deduce shared-memory base address, IRQ and (for the MC2+) the medium
+   type from the card's MCA POS registers.
+   junior != 0 -> SKnet Junior MC2: base/irq encoded in POS 2, medium
+   not readable.  Otherwise MC2+: the board is first forced into the
+   640K-1M shared memory window and enabled, then base comes from POS 3
+   and irq/medium from POS 4. */
+static void getaddrs(int slot, int junior, int *base, int *irq,
+                     skmca_medium *medium)
+{
+  u_char pos0, pos1, pos2;
+
+  if (junior)
+  {
+    pos0 = mca_read_stored_pos(slot, 2);
+    *base = ((pos0 & 0x0e) << 13) + 0xc0000;
+    *irq = ((pos0 & 0x10) >> 4) + 10;
+    *medium = Media_Unknown;
+  }
+  else
+  {
+    /* reset POS 104 Bits 0+1 so the shared memory region goes to the
+       configured area between 640K and 1M. Afterwards, enable the MC2.
+       I really don't know what rode SK to do this... */
+
+    mca_write_pos(slot, 4, mca_read_stored_pos(slot, 4) & 0xfc);
+    mca_write_pos(slot, 2, mca_read_stored_pos(slot, 2) | 0x01);
+
+    pos1 = mca_read_stored_pos(slot, 3);
+    pos2 = mca_read_stored_pos(slot, 4);
+    *base = ((pos1 & 0x07) << 14) + 0xc0000;
+    /* POS 4 bits 2+3 select one of four fixed IRQ lines */
+    switch (pos2 & 0x0c)
+    {
+      case 0: *irq = 3; break;
+      case 4: *irq = 5; break;
+      case 8: *irq = 10; break;
+      case 12: *irq = 11; break;
+    }
+    /* bits 6+7 map directly onto the skmca_medium enumeration */
+    *medium = (pos2 >> 6) & 3;
+  }
+}
+
+/* check for both cards:
+ When the MC2 is turned off, it was configured for more than 15MB RAM,
+ is disabled and won't get detected using the standard probe. We
+ therefore have to scan the slots manually :-( */
+
+/* Scan MCA slots starting at 'firstslot' for an SKnet board by reading
+   the 16-bit adapter ID out of POS 0/1.  On success returns the slot
+   number and sets *junior (1 = Junior MC2, 0 = MC2/MC2+); returns
+   MCA_NOTFOUND when no further board exists.  Manual scan is needed
+   because a disabled MC2 escapes the standard MCA probe (see above). */
+static int dofind(int *junior, int firstslot)
+{
+  int slot;
+  unsigned int id;
+
+  for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++)
+  {
+    id = mca_read_stored_pos(slot, 0)
+       + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
+
+    *junior = 0;
+    if (id == SKNET_MCA_ID)
+      return slot;
+    *junior = 1;
+    if (id == SKNET_JUNIOR_MCA_ID)
+      return slot;
+  }
+  return MCA_NOTFOUND;
+}
+
+/* reset the whole board */
+
+/* Hardware-reset the whole board by pulsing the reset bit in the
+   control register low for 10us (CTRL_RESET_ON is active-low). */
+static void ResetBoard(struct device *dev)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+
+  writeb(CTRL_RESET_ON, priv->ctrladdr);
+  udelay(10);
+  writeb(CTRL_RESET_OFF, priv->ctrladdr);
+}
+
+/* set LANCE register - must be atomic */
+
+/* Write 'value' into LANCE register 'addr' via the board's indirect
+   register interface: first latch the register number into the RAP,
+   then write the data word.  Each transfer is kicked off with IOCMD_GO
+   and completion is detected by polling STAT_IO_BUSY.  Runs with
+   interrupts disabled because RAP + data form one non-interruptible
+   two-step sequence shared with the interrupt handler. */
+static void SetLANCE(struct device *dev, u16 addr, u16 value)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+  unsigned long flags;
+
+  /* disable interrupts */
+
+  save_flags(flags);
+  cli();
+
+  /* wait until no transfer is pending */
+
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+
+  /* transfer register address to RAP */
+
+  writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
+  writew(addr, priv->ioregaddr);
+  writeb(IOCMD_GO, priv->cmdaddr);
+  udelay(1);
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+
+  /* transfer data to register */
+
+  writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
+  writew(value, priv->ioregaddr);
+  writeb(IOCMD_GO, priv->cmdaddr);
+  udelay(1);
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+
+  /* reenable interrupts */
+
+  restore_flags(flags);
+}
+
+/* get LANCE register */
+
+/* Read and return the contents of LANCE register 'addr'.  Mirror image
+   of SetLANCE(): latch the register number into the RAP, then issue a
+   read transfer and fetch the result from the I/O data register.
+   Interrupts are disabled for the whole RAP+data sequence. */
+static u16 GetLANCE(struct device *dev, u16 addr)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+  unsigned long flags;
+  unsigned int res;
+
+  /* disable interrupts */
+
+  save_flags(flags);
+  cli();
+
+  /* wait until no transfer is pending */
+
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+
+  /* transfer register address to RAP */
+
+  writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
+  writew(addr, priv->ioregaddr);
+  writeb(IOCMD_GO, priv->cmdaddr);
+  udelay(1);
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+
+  /* transfer data from register */
+
+  writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
+  writeb(IOCMD_GO, priv->cmdaddr);
+  udelay(1);
+  while ((readb(priv->ctrladdr) & STAT_IO_BUSY) == STAT_IO_BUSY);
+  res = readw(priv->ioregaddr);
+
+  /* reenable interrupts */
+
+  restore_flags(flags);
+
+  return res;
+}
+
+/* build up descriptors in shared RAM */
+
+/* Build the TX and RX descriptor rings plus their data buffers in the
+   board's shared RAM.  TX descriptors start empty (not owned by the
+   LANCE); RX descriptors are handed to the LANCE immediately
+   (RXDSCR_FLAGS_OWN) with MaxLen as the 2s-complement buffer size, as
+   the LANCE expects.  Buffers are carved sequentially from
+   RAM_DATABASE in RAM_BUFSIZE steps. */
+static void InitDscrs(struct device *dev)
+{
+  u32 bufaddr;
+
+  /* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
+     are always 0. */
+
+  bufaddr = RAM_DATABASE;
+  {
+    LANCE_TxDescr descr;
+    int z;
+
+    for (z = 0; z < TXCOUNT; z++)
+    {
+      descr.LowAddr = bufaddr;
+      descr.Flags = 0;
+      descr.Len = 0xf000;
+      descr.Status = 0;
+      memcpy_toio(dev->mem_start + RAM_TXBASE + (z * sizeof(LANCE_TxDescr)),
+                  &descr, sizeof(LANCE_TxDescr));
+      memset_io(dev->mem_start + bufaddr, 0, RAM_BUFSIZE);
+      bufaddr += RAM_BUFSIZE;
+    }
+  }
+
+  /* do the same for the Rx descriptors */
+
+  {
+    LANCE_RxDescr descr;
+    int z;
+
+    for (z = 0; z < RXCOUNT; z++)
+    {
+      descr.LowAddr = bufaddr;
+      descr.Flags = RXDSCR_FLAGS_OWN;
+      descr.MaxLen = -RAM_BUFSIZE;
+      descr.Len = 0;
+      memcpy_toio(dev->mem_start + RAM_RXBASE + (z * sizeof(LANCE_RxDescr)),
+                  &descr, sizeof(LANCE_RxDescr));
+      memset_io(dev->mem_start + bufaddr, 0, RAM_BUFSIZE);
+      bufaddr += RAM_BUFSIZE;
+    }
+  }
+}
+
+/* calculate the hash bit position for a given multicast address
+ taken more or less directly from the AMD datasheet... */
+
+/* Feed one address bit into the bitwise CRC32 register (CRC[0..31],
+   one bit per array element; CRC[32] receives the bit shifted out).
+   Taken more or less directly from the AMD datasheet. */
+static void UpdateCRC(unsigned char *CRC, int bit)
+{
+  int j;
+
+  /* shift CRC one bit: CRC[0..31] moves to CRC[1..32] */
+
+  memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
+  CRC[0] = 0;
+
+  /* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
+
+  if (bit ^ CRC[32])
+    for (j = 0; j < 32; j++)
+      CRC[j] ^= poly[j];
+}
+
+/* Compute the LANCE multicast filter hash (0..63) for a 6-byte MAC
+   address: CRC32 over all 48 address bits, then the 6 most recently
+   shifted-in bits form the hash code.  Returns -1 (as unsigned) when
+   the address is not a multicast address. */
+static unsigned int GetHash(char *address)
+{
+  unsigned char CRC[33];
+  int i, byte, hashcode;
+
+  /* a multicast address has bit 0 in the first byte set */
+
+  if ((address[0] & 1) == 0)
+    return -1;
+
+  /* initialize CRC (all-ones preset, one bit per array element) */
+
+  memset(CRC, 1, sizeof(CRC));
+
+  /* loop through address bits, LSB of each byte first */
+
+  for (byte = 0; byte < 6; byte++)
+    for (i = 0; i < 8; i++)
+      UpdateCRC(CRC, (address[byte] >> i) & 1);
+
+  /* hashcode is the 6 least significant bits of the CRC */
+
+  hashcode = 0;
+  for (i = 0; i < 6; i++)
+    hashcode = (hashcode << 1) + CRC[i];
+  return hashcode;
+}
+
+/* feed ready-built initialization block into LANCE */
+
+/* (Re)start the LANCE from the initialization block already present at
+   RAM_INITBASE: rebuild the descriptor rings, reset the driver's ring
+   indices, program bus mode and init-block address, then tell the chip
+   to read the init block.  The device stays tbusy until the IDON
+   interrupt arrives (handled in irqstart_handler). */
+static void InitLANCE(struct device *dev)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+
+  /* build up descriptors. */
+
+  InitDscrs(dev);
+
+  /* next RX descriptor to be read is the first one. Since the LANCE
+     will start from the beginning after initialization, we have to
+     reset out pointers too. */
+
+  priv->nextrx = 0;
+
+  /* no TX descriptors active */
+
+  priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;
+
+  /* set up the LANCE bus control register - constant for SKnet boards */
+
+  SetLANCE(dev, LANCE_CSR3, CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);
+
+  /* write address of initialization block into LANCE (CSR1 = low 16
+     bits, CSR2 = bits 16..23) */
+
+  SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
+  SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);
+
+  /* we don't get ready until the LANCE has read the init block */
+
+  dev->tbusy = 1;
+
+  /* let LANCE read the initialization block. LANCE is ready
+     when we receive the corresponding interrupt. */
+
+  SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
+}
+
+/* stop the LANCE so we can reinitialize it */
+
+/* Halt the LANCE (CSR0_STOP also masks its interrupts) and mark the
+   device busy so no further frames are queued until reinitialization. */
+static void StopLANCE(struct device *dev)
+{
+  /* can't take frames any more */
+
+  dev->tbusy = 1;
+
+  /* disable interrupts, stop it */
+
+  SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
+}
+
+/* initialize card and LANCE for proper operation */
+
+/* Lay out the shared RAM and start the LANCE.  Writes the LANCE
+   initialization block (mode flags, MAC address, empty multicast
+   filter, ring base addresses with ring-length codes in bits 29..31)
+   to RAM_INITBASE, then delegates to InitLANCE(). */
+static void InitBoard(struct device *dev)
+{
+  LANCE_InitBlock block;
+
+  /* Lay out the shared RAM - first we create the init block for the LANCE.
+     We do not overwrite it later because we need it again when we switch
+     promiscuous mode on/off. */
+
+  block.Mode = 0;
+  if (dev->flags & IFF_PROMISC)
+    block.Mode |= LANCE_INIT_PROM;
+  memcpy(block.PAdr, dev->dev_addr, 6);
+  memset(block.LAdrF, 0, sizeof(block.LAdrF));
+  block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
+  block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);
+
+  memcpy_toio(dev->mem_start + RAM_INITBASE, &block, sizeof(block));
+
+  /* initialize LANCE. Implicitly sets up other structures in RAM. */
+
+  InitLANCE(dev);
+}
+
+/* deinitialize card and LANCE */
+
+/* Shut the hardware down completely: stop the LANCE, then hard-reset
+   the board. */
+static void DeinitBoard(struct device *dev)
+{
+  /* stop LANCE */
+
+  StopLANCE(dev);
+
+  /* reset board */
+
+  ResetBoard(dev);
+}
+
+/* ------------------------------------------------------------------------
+ * interrupt handler(s)
+ * ------------------------------------------------------------------------ */
+
+/* LANCE has read initializazion block -> start it */
+
+/* IDON interrupt: the LANCE has finished reading the initialization
+   block.  Clear the busy flag, acknowledge IDON and start the chip.
+   Returns the fresh CSR0 value for the caller's interrupt loop. */
+static u16 irqstart_handler(struct device *dev, u16 oldcsr0)
+{
+  /* now we're ready to transmit */
+
+  dev->tbusy = 0;
+
+  /* reset IDON bit, start LANCE */
+
+  SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
+  return GetLANCE(dev, LANCE_CSR0);
+}
+
+/* receive interrupt */
+
+/* RINT interrupt: drain the RX descriptor ring.  For every descriptor
+   the driver owns, either account an error or copy the frame out of
+   shared RAM into a fresh skb and hand it to the network stack, then
+   return the descriptor to the LANCE.  Returns the fresh CSR0 value
+   after acknowledging RINT. */
+static u16 irqrx_handler(struct device *dev, u16 oldcsr0)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+  LANCE_RxDescr descr;
+  unsigned int descraddr;
+
+  /* did we lose blocks due to a FIFO overrun ? */
+
+  if (oldcsr0 & CSR0_MISS)
+    priv->stat.rx_fifo_errors++;
+
+  /* run through queue until we reach a descriptor we do not own */
+
+  descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
+  while (1)
+  {
+    /* read descriptor */
+    memcpy_fromio(&descr, dev->mem_start + descraddr, sizeof(LANCE_RxDescr));
+
+    /* if we reach a descriptor we do not own, we're done */
+    if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
+      break;
+
+#ifdef DEBUG
+    PrTime(); printk("Receive packet on descr %d len %d\n", priv->nextrx, descr.Len);
+#endif
+
+    /* erroneous packet ? */
+    if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0)
+    {
+      priv->stat.rx_errors++;
+      if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
+        priv->stat.rx_crc_errors++;
+      /* BUGFIX: this branch previously retested RXDSCR_FLAGS_CRC, so
+         framing errors were never counted.  Test the framing-error
+         flag instead (defined alongside the other flags in sk_mca.h). */
+      else if ((descr.Flags & RXDSCR_FLAGS_FRAM) != 0)
+        priv->stat.rx_frame_errors++;
+      else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
+        priv->stat.rx_fifo_errors++;
+    }
+
+    /* good packet ? */
+    else
+    {
+      struct sk_buff *skb;
+
+      skb = dev_alloc_skb(descr.Len + 2);
+      if (skb == NULL)
+        priv->stat.rx_dropped++;
+      else
+      {
+        memcpy_fromio(skb_put(skb, descr.Len),
+                      dev->mem_start + descr.LowAddr, descr.Len);
+        skb->dev = dev;
+        skb->protocol = eth_type_trans(skb, dev);
+        skb->ip_summed = CHECKSUM_NONE;
+        priv->stat.rx_packets++;
+#if LINUX_VERSION_CODE >= 0x020119 /* byte counters for >= 2.1.25 */
+        priv->stat.rx_bytes += descr.Len;
+#endif
+        netif_rx(skb);
+      }
+    }
+
+    /* give descriptor back to LANCE */
+    descr.Len = 0;
+    descr.Flags |= RXDSCR_FLAGS_OWN;
+
+    /* update descriptor in shared RAM */
+    memcpy_toio(dev->mem_start + descraddr, &descr, sizeof(LANCE_RxDescr));
+
+    /* go to next descriptor */
+    priv->nextrx++; descraddr += sizeof(LANCE_RxDescr);
+    if (priv->nextrx >= RXCOUNT)
+    {
+      priv->nextrx = 0;
+      descraddr = RAM_RXBASE;
+    }
+  }
+
+  /* reset RINT bit */
+
+  SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
+  return GetLANCE(dev, LANCE_CSR0);
+}
+
+/* transmit interrupt */
+
+/* TINT interrupt: reap completed TX descriptors, update statistics and
+   wake the transmit path.  On FIFO underflow the LANCE has stopped the
+   transmitter, so the chip is fully reinitialized.  Returns the fresh
+   CSR0 value after acknowledging TINT. */
+static u16 irqtx_handler(struct device *dev, u16 oldcsr0)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+  LANCE_TxDescr descr;
+  unsigned int descraddr;
+
+  /* check descriptors at most until no busy one is left */
+
+  descraddr = RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
+  while (priv->txbusy > 0)
+  {
+    /* read descriptor */
+    memcpy_fromio(&descr, dev->mem_start + descraddr, sizeof(LANCE_TxDescr));
+
+    /* if the LANCE still owns this one, we've worked out all sent packets */
+    if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
+      break;
+
+#ifdef DEBUG
+    PrTime(); printk("Send packet done on descr %d\n", priv->nexttxdone);
+#endif
+
+    /* update statistics */
+    if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0)
+    {
+      priv->stat.tx_packets++;
+#if LINUX_VERSION_CODE >= 0x020119 /* byte counters for >= 2.1.25 */
+      /* BUGFIX: this previously did tx_bytes++, counting one byte per
+         packet.  Len holds the 2s complement of the frame length (set
+         in skmca_tx and left untouched by the LANCE), so recover it. */
+      priv->stat.tx_bytes += (u16) -descr.Len;
+#endif
+    }
+    else
+    {
+      priv->stat.tx_errors++;
+      if ((descr.Status & TXDSCR_STATUS_UFLO) != 0)
+      {
+        /* FIFO underflow stops the transmitter - restart the chip */
+        priv->stat.tx_fifo_errors++;
+        InitLANCE(dev);
+      }
+      else if ((descr.Status & TXDSCR_STATUS_LCOL) != 0)
+        priv->stat.tx_window_errors++;
+      else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
+        priv->stat.tx_carrier_errors++;
+      else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
+        priv->stat.tx_aborted_errors++;
+    }
+
+    /* go to next descriptor */
+    priv->nexttxdone++;
+    descraddr += sizeof(LANCE_TxDescr);
+    if (priv->nexttxdone >= TXCOUNT)
+    {
+      priv->nexttxdone = 0;
+      descraddr = RAM_TXBASE;
+    }
+    priv->txbusy--;
+  }
+
+  /* reset TX interrupt bit */
+
+  SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
+  oldcsr0 = GetLANCE(dev, LANCE_CSR0);
+
+  /* at least one descriptor is freed. Therefore we can accept
+     a new one */
+
+  dev->tbusy = 0;
+
+  /* inform upper layers we're in business again */
+
+  mark_bh(NET_BH);
+
+  return oldcsr0;
+}
+
+/* general interrupt entry */
+
+/* Shared interrupt entry point.  Reads CSR0, bails out if the LANCE
+   did not raise the interrupt (the line is shared), then dispatches to
+   the IDON/RINT/TINT sub-handlers until CSR0_INTR is clear - the
+   sub-handlers each return the updated CSR0 value. */
+static void irq_handler(int irq, void *device, struct pt_regs *regs)
+{
+  struct device *dev = (struct device*) device;
+  u16 csr0val;
+
+  /* read CSR0 to get interrupt cause */
+
+  csr0val = GetLANCE(dev, LANCE_CSR0);
+
+  /* in case we're not meant... */
+
+  if ((csr0val & CSR0_INTR) == 0)
+    return;
+
+  dev->interrupt = 1;
+
+  /* loop through the interrupt bits until everything is clear */
+
+  do
+  {
+    if ((csr0val & CSR0_IDON) != 0)
+      csr0val = irqstart_handler(dev, csr0val);
+    if ((csr0val & CSR0_RINT) != 0)
+      csr0val = irqrx_handler(dev, csr0val);
+    if ((csr0val & CSR0_TINT) != 0)
+      csr0val = irqtx_handler(dev, csr0val);
+  }
+  while ((csr0val & CSR0_INTR) != 0);
+
+  dev->interrupt = 0;
+}
+
+/* ------------------------------------------------------------------------
+ * driver methods
+ * ------------------------------------------------------------------------ */
+
+/* MCA info */
+
+/* MCA /proc callback: format IRQ, shared-memory range, medium, device
+   name and MAC address into 'buf'.  Returns the number of bytes
+   written; 0 while the device is not (fully) initialized. */
+static int skmca_getinfo(char *buf, int slot, void *d)
+{
+  int len = 0, i;
+  struct device *dev = (struct device*) d;
+  skmca_priv *priv;
+
+  /* can't say anything about an uninitialized device... */
+
+  if (dev == NULL)
+    return len;
+  if (dev->priv == NULL)
+    return len;
+  priv = (skmca_priv*) dev->priv;
+
+  /* print info */
+
+  len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
+  len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
+                 dev->mem_end - 1);
+  len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
+  len += sprintf(buf + len, "Device: %s\n", dev->name);
+  len += sprintf(buf + len, "MAC address:");
+  for (i = 0; i < 6; i ++ )
+    len += sprintf( buf+len, " %02x", dev->dev_addr[i] );
+  buf[len++] = '\n';
+  buf[len] = 0;
+
+  return len;
+}
+
+/* open driver. Means also initialization and start of LANCE */
+
+/* Open the interface: register the interrupt handler on the IRQ found
+   at probe time, publish it in dev->irq, then initialize board and
+   LANCE.  Returns 0 on success or the request_irq() error. */
+static int skmca_open(struct device *dev)
+{
+  int result;
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+
+  /* register resources - only necessary for IRQ */
+  result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_SAMPLE_RANDOM,
+                       "sk_mca", dev);
+  if (result != 0)
+  {
+    /* BUGFIX: report the IRQ we actually tried to claim; dev->irq is
+       still 0 at this point and was printed before. */
+    printk("%s: failed to register irq %d\n", dev->name, priv->realirq);
+    return result;
+  }
+  dev->irq = priv->realirq;
+
+  /* set up the card and LANCE */
+  InitBoard(dev);
+
+#ifdef MODULE
+  MOD_INC_USE_COUNT;
+#endif
+
+  return 0;
+}
+
+/* close driver. Shut down board and free allocated resources */
+
+/* Close the interface: stop/reset the hardware and release the IRQ
+   (dev->irq is reset to 0 so a failed open leaves nothing to free).
+   Always returns 0. */
+static int skmca_close(struct device *dev)
+{
+  /* turn off board */
+  DeinitBoard(dev);
+
+  /* release resources */
+  if (dev->irq != 0)
+    free_irq(dev->irq, dev);
+  dev->irq = 0;
+
+#ifdef MODULE
+  MOD_DEC_USE_COUNT;
+#endif
+
+  return 0;
+}
+
+/* transmit a block. */
+
+/* Transmit one frame: copy skb data into the next free TX buffer in
+   shared RAM (padding short frames to the 60-byte Ethernet minimum),
+   hand the descriptor to the LANCE and kick the chip if the ring was
+   previously empty.  A NULL skb is the stack's "stuck card" signal and
+   triggers a full reset.  Returns 0 or -EIO when the ring is full. */
+static int skmca_tx(struct sk_buff *skb, struct device *dev)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+  LANCE_TxDescr descr;
+  unsigned int address;
+  int tmplen, retval = 0;
+  unsigned long flags;
+
+  /* if we get called with a NULL descriptor, the Ethernet layer thinks
+     our card is stuck and we should reset it. We'll do this completely: */
+
+  if (skb == NULL)
+  {
+    DeinitBoard(dev);
+    InitBoard(dev);
+    return 0; /* don't try to free the block here ;-) */
+  }
+
+  /* is there space in the Tx queue ? If no, the upper layer gave us a
+     packet in spite of us not being ready and is really in trouble.
+     We'll do the dropping for him: */
+  if (priv->txbusy >= TXCOUNT)
+  {
+    priv->stat.tx_dropped++;
+    retval = -EIO;
+    goto tx_done;
+  }
+
+  /* get TX descriptor */
+  address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
+  memcpy_fromio(&descr, dev->mem_start + address, sizeof(LANCE_TxDescr));
+
+  /* enter packet length as 2s complement - assure minimum length */
+  tmplen = skb->len;
+  if (tmplen < 60)
+    tmplen = 60;
+  descr.Len = 65536 - tmplen;
+
+  /* copy filler into RAM - in case we're filling up...
+     we're filling a bit more than necessary, but that doesn't harm
+     since the buffer is far larger... */
+  if (tmplen > skb->len)
+  {
+    char *fill = "NetBSD is a nice OS too! ";
+    unsigned int destoffs = 0, l = strlen(fill);
+
+    while (destoffs < tmplen)
+    {
+      memcpy_toio(dev->mem_start + descr.LowAddr + destoffs, fill, l);
+      destoffs += l;
+    }
+  }
+
+  /* do the real data copying */
+  memcpy_toio(dev->mem_start + descr.LowAddr, skb->data, skb->len);
+
+  /* hand descriptor over to LANCE - this is the first and last chunk */
+  descr.Flags = TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
+
+#ifdef DEBUG
+  PrTime(); printk("Send packet on descr %d len %d\n", priv->nexttxput, skb->len);
+#endif
+
+  /* one more descriptor busy */
+  save_flags(flags);
+  cli();
+  priv->nexttxput++;
+  if (priv->nexttxput >= TXCOUNT)
+    priv->nexttxput = 0;
+  priv->txbusy++;
+  dev->tbusy = (priv->txbusy >= TXCOUNT);
+
+  /* write descriptor back to RAM */
+  memcpy_toio(dev->mem_start + address, &descr, sizeof(LANCE_TxDescr));
+
+  /* if no descriptors were active before this one, give the LANCE a
+     hint to read it immediately.
+     BUGFIX: txbusy was just incremented, so it can never be 0 here -
+     the old '== 0' test made this a no-op and the frame waited for the
+     next poll.  Exactly one busy descriptor means the ring was empty. */
+
+  if (priv->txbusy == 1)
+    SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
+
+  restore_flags(flags);
+
+tx_done:
+
+  /* When did that change exactly ? */
+
+#if LINUX_VERSION_CODE >= 0x020200
+  dev_kfree_skb(skb);
+#else
+  dev_kfree_skb(skb, FREE_WRITE);
+#endif
+  return retval;
+}
+
+/* return pointer to Ethernet statistics */
+
+/* get_stats method: return a pointer to the statistics block kept in
+   the private structure (updated by the interrupt handlers). */
+static struct enet_statistics *skmca_stats(struct device *dev)
+{
+  skmca_priv *priv = (skmca_priv*) dev->priv;
+
+  return &(priv->stat);
+}
+
+/* we don't support runtime reconfiguration, since an MCA card can
+   be unambiguously identified by its POS registers. */
+
+/* set_config method: intentionally a no-op that reports success -
+   resources are fixed by the POS registers (see comment above). */
+static int skmca_config(struct device *dev, struct ifmap *map)
+{
+  return 0;
+}
+
+/* switch receiver mode. We use the LANCE's multicast filter to prefilter
+ multicast addresses. */
+
+/* Switch receiver mode.  Stops the LANCE, rewrites the mode and
+   multicast filter fields of the init block in shared RAM (IFF_PROMISC
+   -> promiscuous bit; IFF_ALLMULTI -> all-ones filter; otherwise the
+   64-bit hash filter built from the mc_list), then restarts the chip
+   via InitLANCE() so it rereads the block. */
+static void skmca_set_multicast_list(struct device *dev)
+{
+  LANCE_InitBlock block;
+
+  /* first stop the LANCE... */
+  StopLANCE(dev);
+
+  /* ...then modify the initialization block... */
+  memcpy_fromio(&block, dev->mem_start + RAM_INITBASE, sizeof(block));
+  if (dev->flags & IFF_PROMISC)
+    block.Mode |= LANCE_INIT_PROM;
+  else
+    block.Mode &= ~LANCE_INIT_PROM;
+
+  /* BUGFIX: both memset calls below had value and size swapped -
+     memset(LAdrF, 8, 0xff) wrote 255 bytes of 0x08 past the 8-byte
+     array, and memset(LAdrF, 8, 0x00) wrote nothing at all, leaving
+     the filter stale. */
+
+  if (dev->flags & IFF_ALLMULTI)  /* get all multicasts */
+  {
+    memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
+  }
+  else                            /* get selected/no multicasts */
+  {
+    struct dev_mc_list *mptr;
+    int code;
+
+    memset(block.LAdrF, 0x00, sizeof(block.LAdrF));
+    for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next)
+    {
+      code = GetHash(mptr->dmi_addr);
+      /* GetHash returns -1 for a non-multicast address - skip those
+         instead of setting a bogus filter bit */
+      if (code >= 0)
+        block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
+    }
+  }
+
+  memcpy_toio(dev->mem_start + RAM_INITBASE, &block, sizeof(block));
+
+  /* ...then reinit LANCE with the correct flags */
+  InitLANCE(dev);
+}
+
+/* ------------------------------------------------------------------------
+ * hardware check
+ * ------------------------------------------------------------------------ */
+
+#ifdef MODULE
+static int startslot; /* counts through slots when probing multiple devices */
+#else
+#define startslot 0 /* otherwise a dummy, since there is only eth0 in-kern*/
+#endif
+
+/* Probe entry point: scan MCA slots (from 'startslot' when modular)
+   for an SKnet board matching any irq/mem_start constraints already in
+   'dev', claim the slot, fill in the private structure and the device
+   methods, read the MAC address from the on-board PROM and reset the
+   board.  Returns 0 on success, ENODEV/ENXIO otherwise. */
+int skmca_probe(struct device *dev)
+{
+  int force_detect = 0;
+  int junior, slot, i;
+  int base = 0, irq = 0;
+  skmca_priv *priv;
+  skmca_medium medium;
+
+  /* can't work without an MCA bus ;-) */
+
+  if (MCA_bus == 0)
+    return ENODEV;
+
+  /* start address of 1 --> forced detection */
+  /* NOTE(review): dev is dereferenced here although it is tested for
+     NULL just below - confirm callers never pass NULL.  Also
+     force_detect is set but never used afterwards. */
+
+  if (dev->mem_start == 1)
+    force_detect = 1;
+
+  /* search through slots */
+
+  if (dev != NULL)
+  {
+    base = dev->mem_start;
+    irq = dev->irq;
+  }
+  slot = dofind(&junior, startslot);
+
+  while (slot != -1)
+  {
+    /* deduce card addresses */
+
+    getaddrs(slot, junior, &base, &irq, &medium);
+
+#if 0
+    /* this should work, but it doesn't with 2.2.9 :-(
+       somehow 'mca_is_adapter_used()' is missing in kernel syms... */
+#if LINUX_VERSION_CODE >= 0x020200
+    /* slot already in use ? */
+
+    if (mca_is_adapter_used(slot))
+    {
+      slot = dofind(&junior, slot + 1);
+      continue;
+    }
+#endif
+#endif
+
+    /* were we looking for something different ? */
+
+    if ((dev->irq != 0) || (dev->mem_start != 0))
+    {
+      if ((dev->irq != 0) && (dev->irq != irq))
+      {
+        slot = dofind(&junior, slot + 1);
+        continue;
+      }
+      if ((dev->mem_start != 0) && (dev->mem_start != base))
+      {
+        slot = dofind(&junior, slot + 1);
+        continue;
+      }
+    }
+
+    /* found something that matches */
+
+    break;
+  }
+
+  /* nothing found ? */
+
+  if (slot == -1)
+    return ((base != 0) || (irq != 0)) ? ENXIO : ENODEV;
+
+  /* make procfs entries */
+
+  if (junior)
+    mca_set_adapter_name(slot, "SKNET junior MC2 Ethernet Adapter");
+  else
+    mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
+  mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
+
+#if LINUX_VERSION_CODE >= 0x020200
+  mca_mark_as_used(slot);
+#endif
+
+  /* announce success */
+  printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
+         junior ? "Junior MC2" : "MC2+", slot + 1);
+
+  /* allocate structure */
+  /* NOTE(review): kmalloc result is not checked - an OOM here would
+     oops on the following assignments. */
+  priv = dev->priv = (skmca_priv*) kmalloc(sizeof(skmca_priv), GFP_KERNEL);
+  priv->slot = slot;
+  priv->macbase = base + 0x3fc0;
+  priv->ioregaddr = base + 0x3ff0;
+  priv->ctrladdr = base + 0x3ff2;
+  priv->cmdaddr = base + 0x3ff3;
+  priv->realirq = irq;
+  priv->medium = medium;
+  memset(&(priv->stat), 0, sizeof(struct enet_statistics));
+
+  /* set base + irq for this device (irq not allocated so far) */
+  dev->irq = 0;
+  dev->mem_start = base;
+  dev->mem_end = base + 0x4000;
+
+  /* set methods */
+  dev->open = skmca_open;
+  dev->stop = skmca_close;
+  dev->set_config = skmca_config;
+  dev->hard_start_xmit = skmca_tx;
+  dev->do_ioctl = NULL;
+  dev->get_stats = skmca_stats;
+  dev->set_multicast_list = skmca_set_multicast_list;
+  dev->flags |= IFF_MULTICAST;
+
+  /* generic setup */
+  ether_setup(dev);
+  dev->interrupt = 0;
+  dev->tbusy = 0;
+  dev->start = 0;
+
+  /* copy out MAC address (PROM stores it on even addresses only) */
+  for (i = 0; i < 6; i++)
+    dev->dev_addr[i] = readb(priv->macbase + (i << 1));
+
+  /* print config */
+  printk("%s: IRQ %d, memory %#lx-%#lx, "
+         "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
+         dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
+         dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+         dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+  printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
+
+  /* reset board */
+
+  ResetBoard(dev);
+
+#ifdef MODULE
+  startslot = slot + 1;
+#endif
+
+  return 0;
+}
+
+/* ------------------------------------------------------------------------
+ * modularization support
+ * ------------------------------------------------------------------------ */
+
+#ifdef MODULE
+
+#define DEVMAX 5
+
+static char NameSpace[8 * DEVMAX];
+static struct device moddevs[DEVMAX] =
+ {{NameSpace + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, skmca_probe},
+ {NameSpace + 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, skmca_probe},
+ {NameSpace + 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, skmca_probe},
+ {NameSpace + 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, skmca_probe},
+ {NameSpace + 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, skmca_probe}};
+
+int irq=0;
+int io=0;
+
+/* Module entry point: register up to DEVMAX devices, each probe
+   continuing the slot scan where the previous one stopped (startslot).
+   Succeeds if at least one board was found; -EIO if none. */
+int init_module(void)
+{
+  int z, res;
+
+  startslot = 0;
+  for (z = 0; z < DEVMAX; z++)
+  {
+    /* blank name lets register_netdev pick the next free ethN */
+    strcpy(moddevs[z].name, "     ");
+    res = register_netdev(moddevs + z);
+    if (res != 0)
+      return (z > 0) ? 0 : -EIO;
+  }
+
+  return 0;
+}
+
+/* Module exit point: for every device that was successfully probed
+   (dev->priv set), shut down the hardware, free IRQ, unregister the
+   netdevice, release the MCA slot and free the private structure. */
+void cleanup_module(void)
+{
+  struct device *dev;
+  skmca_priv *priv;
+  int z;
+
+  if (MOD_IN_USE)
+  {
+    printk("cannot unload, module in use\n");
+    return;
+  }
+
+  for (z = 0; z < DEVMAX; z++)
+  {
+    dev = moddevs + z;
+    if (dev->priv != NULL)
+    {
+      priv = (skmca_priv*) dev->priv;
+      DeinitBoard(dev);
+      if (dev->irq != 0)
+        free_irq(dev->irq, dev);
+      dev->irq = 0;
+      unregister_netdev(dev);
+#if LINUX_VERSION_CODE >= 0x020200
+      mca_mark_as_unused(priv->slot);
+#endif
+      mca_set_adapter_procfn(priv->slot, NULL, NULL);
+      kfree_s(dev->priv, sizeof(skmca_priv));
+      dev->priv = NULL;
+    }
+  }
+}
+#endif /* MODULE */
--- /dev/null
+#ifndef _SK_MCA_INCLUDE_
+#define _SK_MCA_INCLUDE_
+
+#ifdef _SK_MCA_DRIVER_
+
+/* Adapter ID's */
+#define SKNET_MCA_ID 0x6afd
+#define SKNET_JUNIOR_MCA_ID 0x6be9
+
+/* media enumeration - defined in a way that it fits onto the MC2+'s
+ POS registers... */
+
+typedef enum {Media_10Base2, Media_10BaseT,
+ Media_10Base5, Media_Unknown, Media_Count} skmca_medium;
+
+/* private structure */
+typedef struct
+ {
+ unsigned int slot; /* MCA-Slot-# */
+ unsigned int macbase; /* base address of MAC address PROM */
+ unsigned int ioregaddr; /* address of I/O-register (Lo) */
+ unsigned int ctrladdr; /* address of control/stat register */
+ unsigned int cmdaddr; /* address of I/O-command register */
+ int nextrx; /* index of next RX descriptor to
+ be read */
+ int nexttxput; /* index of next free TX descriptor */
+ int nexttxdone; /* index of next TX descriptor to
+ be finished */
+ int txbusy; /* # of busy TX descriptors */
+ struct enet_statistics stat; /* packet statistics */
+ int realirq; /* memorizes actual IRQ, even when
+ currently not allocated */
+ skmca_medium medium; /* physical connector */
+ } skmca_priv;
+
+/* card registers: control/status register bits */
+
+#define CTRL_ADR_DATA 0 /* Bit 0 = 0 ->access data register */
+#define CTRL_ADR_RAP 1 /* Bit 0 = 1 ->access RAP register */
+#define CTRL_RW_WRITE 0 /* Bit 1 = 0 ->write register */
+#define CTRL_RW_READ 2 /* Bit 1 = 1 ->read register */
+#define CTRL_RESET_ON 0 /* Bit 3 = 0 ->reset board */
+#define CTRL_RESET_OFF 8 /* Bit 3 = 1 ->no reset of board */
+
+#define STAT_ADR_DATA 0 /* Bit 0 of ctrl register read back */
+#define STAT_ADR_RAP 1
+#define STAT_RW_WRITE 0 /* Bit 1 of ctrl register read back */
+#define STAT_RW_READ 2
+#define STAT_RESET_ON 0 /* Bit 3 of ctrl register read back */
+#define STAT_RESET_OFF 8
+#define STAT_IRQ_ACT 0 /* interrupt pending */
+#define STAT_IRQ_NOACT 16 /* no interrupt pending */
+#define STAT_IO_NOBUSY 0 /* no transfer busy */
+#define STAT_IO_BUSY 32 /* transfer busy */
+
+/* I/O command register bits */
+
+#define IOCMD_GO 128 /* Bit 7 = 1 -> start register xfer */
+
+/* LANCE registers */
+
+#define LANCE_CSR0 0 /* Status/Control */
+
+#define CSR0_ERR 0x8000 /* general error flag */
+#define CSR0_BABL 0x4000 /* transmitter timeout */
+#define CSR0_CERR 0x2000 /* collision error */
+#define CSR0_MISS 0x1000 /* lost Rx block */
+#define CSR0_MERR 0x0800 /* memory access error */
+#define CSR0_RINT 0x0400 /* receiver interrupt */
+#define CSR0_TINT 0x0200 /* transmitter interrupt */
+#define CSR0_IDON 0x0100 /* initialization done */
+#define CSR0_INTR 0x0080 /* general interrupt flag */
+#define CSR0_INEA 0x0040 /* interrupt enable */
+#define CSR0_RXON 0x0020 /* receiver enabled */
+#define CSR0_TXON 0x0010 /* transmitter enabled */
+#define CSR0_TDMD 0x0008 /* force transmission now */
+#define CSR0_STOP 0x0004 /* stop LANCE */
+#define CSR0_STRT 0x0002 /* start LANCE */
+#define CSR0_INIT 0x0001 /* read initialization block */
+
+#define LANCE_CSR1 1 /* addr bit 0..15 of initialization */
+#define LANCE_CSR2 2 /* 16..23 block */
+
+#define LANCE_CSR3 3 /* Bus control */
+#define CSR3_BCON_HOLD 0 /* Bit 0 = 0 -> BM1,BM0,HOLD */
+#define CSR3_BCON_BUSRQ 1 /* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ */
+#define CSR3_ALE_HIGH 0 /* Bit 1 = 0 -> ALE asserted high */
+#define CSR3_ALE_LOW 2 /* Bit 1 = 1 -> ALE asserted low */
+#define CSR3_BSWAP_OFF 0 /* Bit 2 = 0 -> no byte swap */
+#define CSR3_BSWAP_ON   4       /* Bit 2 = 1 -> byte swap */
+
+/* LANCE structures */
+
+typedef struct /* LANCE initialization block */
+ {
+ u16 Mode; /* mode flags */
+ u8 PAdr[6]; /* MAC address */
+ u8 LAdrF[8]; /* Multicast filter */
+ u32 RdrP; /* Receive descriptor */
+ u32 TdrP; /* Transmit descriptor */
+ } LANCE_InitBlock;
+
+/* Mode flags init block */
+
+#define LANCE_INIT_PROM 0x8000  /* enable promiscuous mode */
+#define LANCE_INIT_INTL 0x0040 /* internal loopback */
+#define LANCE_INIT_DRTY 0x0020 /* disable retry */
+#define LANCE_INIT_COLL 0x0010 /* force collision */
+#define LANCE_INIT_DTCR 0x0008 /* disable transmit CRC */
+#define LANCE_INIT_LOOP 0x0004 /* loopback */
+#define LANCE_INIT_DTX 0x0002 /* disable transmitter */
+#define LANCE_INIT_DRX 0x0001 /* disable receiver */
+
+typedef struct /* LANCE Tx descriptor */
+ {
+ u16 LowAddr; /* bit 0..15 of address */
+ u16 Flags; /* bit 16..23 of address + Flags */
+ u16 Len; /* 2s complement of packet length */
+ u16 Status; /* Result of transmission */
+ } LANCE_TxDescr;
+
+#define TXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
+#define TXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
+#define TXDSCR_FLAGS_MORE 0x1000 /* more than one retry needed? */
+#define TXDSCR_FLAGS_ONE 0x0800 /* one retry? */
+#define TXDSCR_FLAGS_DEF 0x0400 /* transmission deferred? */
+#define TXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
+#define TXDSCR_FLAGS_ENP 0x0100 /* last packet in chain? */
+
+#define TXDSCR_STATUS_BUFF 0x8000 /* buffer error? */
+#define TXDSCR_STATUS_UFLO 0x4000 /* silo underflow during transmit? */
+#define TXDSCR_STATUS_LCOL 0x1000 /* late collision? */
+#define TXDSCR_STATUS_LCAR 0x0800 /* loss of carrier? */
+#define TXDSCR_STATUS_RTRY 0x0400 /* retry error? */
+
+typedef struct /* LANCE Rx descriptor */
+ {
+ u16 LowAddr; /* bit 0..15 of address */
+ u16 Flags; /* bit 16..23 of address + Flags */
+ u16 MaxLen; /* 2s complement of buffer length */
+ u16 Len; /* packet length */
+ } LANCE_RxDescr;
+
+#define RXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
+#define RXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
+#define RXDSCR_FLAGS_FRAM 0x2000 /* framing error flag */
+#define RXDSCR_FLAGS_OFLO 0x1000 /* FIFO overflow? */
+#define RXDSCR_FLAGS_CRC 0x0800 /* CRC error? */
+#define RXDSCR_FLAGS_BUFF 0x0400 /* buffer error? */
+#define RXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
+#define RXDCSR_FLAGS_ENP 0x0100 /* last packet in chain? */
+
+/* RAM layout */
+
+#define TXCOUNT 4 /* length of TX descriptor queue */
+#define LTXCOUNT 2 /* log2 of it */
+#define RXCOUNT 4 /* length of RX descriptor queue */
+#define LRXCOUNT 2 /* log2 of it */
+
+#define RAM_INITBASE 0 /* LANCE init block */
+#define RAM_TXBASE 24 /* Start of TX descriptor queue */
+#define RAM_RXBASE \
+(RAM_TXBASE + (TXCOUNT * 8)) /* Start of RX descriptor queue */
+#define RAM_DATABASE \
+(RAM_RXBASE + (RXCOUNT * 8)) /* Start of data area for frames */
+#define RAM_BUFSIZE 1580 /* max. frame size - should never be
+ reached */
+
+#endif /* _SK_MCA_DRIVER_ */
+
+extern int skmca_probe(struct device *);
+
+
+#endif /* _SK_MCA_INCLUDE_ */
\ No newline at end of file
DEVICE( DEC, DEC_21150, "DC21150"),
DEVICE( DEC, DEC_21152, "DC21152"),
DEVICE( DEC, DEC_21153, "DC21153"),
+ DEVICE( DEC, DEC_21154, "DC21154"),
DEVICE( CIRRUS, CIRRUS_7548, "GD 7548"),
DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"),
DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"),
DEVICE( MOTOROLA, MOTOROLA_MPC105,"MPC105 Eagle"),
DEVICE( MOTOROLA, MOTOROLA_MPC106,"MPC106 Grackle"),
DEVICE( MOTOROLA, MOTOROLA_RAVEN, "Raven"),
+ DEVICE( MOTOROLA, MOTOROLA_FALCON,"Falcon"),
+ DEVICE( MOTOROLA, MOTOROLA_CPX8216,"CPX8216"),
DEVICE( PROMISE, PROMISE_20246, "IDE UltraDMA/33"),
DEVICE( PROMISE, PROMISE_20262, "IDE UltraDMA/66"),
DEVICE( PROMISE, PROMISE_5300, "DC5030"),
DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"),
DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"),
DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"),
+ DEVICE( MOTOROLA_OOPS, MOTOROLA_FALCON,"Falcon"),
DEVICE( SYMPHONY, SYMPHONY_101, "82C101"),
DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"),
DEVICE( 3DLABS, 3DLABS_300SX, "GLINT 300SX"),
case PCI_CLASS_SERIAL_USB: return "USB Controller";
case PCI_CLASS_SERIAL_FIBER: return "Fiber Channel";
+ case PCI_CLASS_HOT_SWAP_CONTROLLER: return "Hot Swap Controller";
+
default: return "Unknown class";
}
}
case PCI_VENDOR_ID_OAK: return "OAK";
case PCI_VENDOR_ID_WINBOND2: return "Winbond";
case PCI_VENDOR_ID_MOTOROLA: return "Motorola";
+ case PCI_VENDOR_ID_MOTOROLA_OOPS: return "Motorola";
case PCI_VENDOR_ID_PROMISE: return "Promise Technology";
case PCI_VENDOR_ID_N9: return "Number Nine";
case PCI_VENDOR_ID_UMC: return "UMC";
{"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
{"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* extra reset */
+{"RELISYS", "Scorpio", "*", BLIST_NOLUN}, /* responds to all LUN */
/*
* Other types of devices that have special flags.
struct sound_mixer {
int busy;
+ int modify_counter;
};
static struct sound_mixer mixer;
u_long arg)
{
int data;
+ if (_SIOC_DIR(cmd) & _SIOC_WRITE)
+ mixer.modify_counter++;
+ if (cmd == OSS_GETVERSION)
+ return IOCTL_OUT(arg, SOUND_VERSION);
switch (sound.mach.type) {
#ifdef CONFIG_ATARI
case DMASND_FALCON:
switch (cmd) {
+ case SOUND_MIXER_INFO: {
+ mixer_info info;
+ strncpy(info.id, "FALCON", sizeof(info.id));
+ strncpy(info.name, "FALCON", sizeof(info.name));
+ info.name[sizeof(info.name)-1] = 0;
+ info.modify_counter = mixer.modify_counter;
+ copy_to_user_ret((int *)arg, &info, sizeof(info), -EFAULT);
+ return 0;
+ }
case SOUND_MIXER_READ_DEVMASK:
return IOCTL_OUT(arg, SOUND_MASK_VOLUME | SOUND_MASK_MIC | SOUND_MASK_SPEAKER);
case SOUND_MIXER_READ_RECMASK:
case DMASND_TT:
switch (cmd) {
+ case SOUND_MIXER_INFO: {
+ mixer_info info;
+ strncpy(info.id, "TT", sizeof(info.id));
+ strncpy(info.name, "TT", sizeof(info.name));
+ info.name[sizeof(info.name)-1] = 0;
+ info.modify_counter = mixer.modify_counter;
+ copy_to_user_ret((int *)arg, &info, sizeof(info), -EFAULT);
+ return 0;
+ }
case SOUND_MIXER_READ_DEVMASK:
return IOCTL_OUT(arg,
SOUND_MASK_VOLUME | SOUND_MASK_TREBLE | SOUND_MASK_BASS |
#ifdef CONFIG_AMIGA
case DMASND_AMIGA:
switch (cmd) {
+ case SOUND_MIXER_INFO: {
+ mixer_info info;
+ strncpy(info.id, "AMIGA", sizeof(info.id));
+ strncpy(info.name, "AMIGA", sizeof(info.name));
+ info.name[sizeof(info.name)-1] = 0;
+ info.modify_counter = mixer.modify_counter;
+ copy_to_user_ret((int *)arg, &info, sizeof(info), -EFAULT);
+ return 0;
+ }
case SOUND_MIXER_READ_DEVMASK:
return IOCTL_OUT(arg, SOUND_MASK_VOLUME | SOUND_MASK_TREBLE);
case SOUND_MIXER_READ_RECMASK:
case DMASND_AWACS:
if (awacs_revision<AWACS_BURGUNDY) { /* Different IOCTLS for burgundy*/
switch (cmd) {
+ case SOUND_MIXER_INFO: {
+ mixer_info info;
+ strncpy(info.id, "AWACS", sizeof(info.id));
+ strncpy(info.name, "AWACS", sizeof(info.name));
+ info.name[sizeof(info.name)-1] = 0;
+ info.modify_counter = mixer.modify_counter;
+ copy_to_user_ret((int *)arg, &info,
+ sizeof(info), -EFAULT);
+ return 0;
+ }
case SOUND_MIXER_READ_DEVMASK:
data = SOUND_MASK_VOLUME | SOUND_MASK_SPEAKER
| SOUND_MASK_LINE | SOUND_MASK_MIC
} else {
/* We are, we are, we are... Burgundy or better */
switch(cmd) {
+ case SOUND_MIXER_INFO: {
+ mixer_info info;
+ strncpy(info.id, "AWACS", sizeof(info.id));
+ strncpy(info.name, "AWACS", sizeof(info.name));
+ info.name[sizeof(info.name)-1] = 0;
+ info.modify_counter = mixer.modify_counter;
+ copy_to_user_ret((int *)arg, &info,
+ sizeof(info), -EFAULT);
+ return 0;
+ }
case SOUND_MIXER_READ_DEVMASK:
data = SOUND_MASK_VOLUME | SOUND_MASK_CD |
SOUND_MASK_LINE | SOUND_MASK_MIC |
struct sound_unit *next;
};
+#ifdef CONFIG_SOUND_SONICVIBES
+extern int init_sonicvibes(void);
+#endif
+#ifdef CONFIG_SOUND_ES1370
+extern int init_es1370(void);
+#endif
+#ifdef CONFIG_SOUND_ES1371
+extern int init_es1371(void);
+#endif
+#ifdef CONFIG_SOUND_MSNDCLAS
+extern int msnd_classic_init(void);
+#endif
+#ifdef CONFIG_SOUND_MSNDPIN
+extern int msnd_pinnacle_init(void);
+#endif
+
/*
* Low level list operator. Scan the ordered list, find a hole and
* join into it. Called with the lock asserted
* This lock guards the sound loader list.
*/
-static spinlock_t sound_loader_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t sound_loader_lock = SPIN_LOCK_UNLOCKED;
/*
* Allocate the controlling structure and add it to the sound driver
/*Now scan all configs for a ACM configuration*/
for (cfgnum=0;cfgnum<dev->descriptor.bNumConfigurations;cfgnum++) {
/* The first one should be Communications interface? */
- interface = &dev->config[cfgnum].interface[0];
+ interface = &dev->config[cfgnum].altsetting[0].interface[0];
if (interface->bInterfaceClass != 2 ||
interface->bInterfaceSubClass != 2 ||
interface->bInterfaceProtocol != 1 ||
continue;
/* The second one should be a Data interface? */
- interface = &dev->config[cfgnum].interface[1];
+ interface = &dev->config[cfgnum].altsetting[0].interface[1];
if (interface->bInterfaceClass != 10 ||
interface->bInterfaceSubClass != 0 ||
interface->bInterfaceProtocol != 0 ||
printk("USB ACM found\n");
usb_set_configuration(dev, dev->config[cfgnum].bConfigurationValue);
acm->dev=dev;
- acm->readendp=dev->config[cfgnum].interface[1].endpoint[0].bEndpointAddress;
- acm->writeendp=dev->config[cfgnum].interface[1].endpoint[1].bEndpointAddress;
- acm->ctrlendp=dev->config[cfgnum].interface[0].endpoint[0].bEndpointAddress;
+ acm->readendp=dev->config[cfgnum].altsetting[0].interface[1].endpoint[0].bEndpointAddress;
+ acm->writeendp=dev->config[cfgnum].altsetting[0].interface[1].endpoint[1].bEndpointAddress;
+ acm->ctrlendp=dev->config[cfgnum].altsetting[0].interface[0].endpoint[0].bEndpointAddress;
acm->readpipe=usb_rcvbulkpipe(dev,acm->readendp);
acm->writepipe=usb_sndbulkpipe(dev,acm->writeendp);
- usb_request_irq(dev,acm->ctrlpipe=usb_rcvctrlpipe(dev,acm->ctrlendp), acm_irq, dev->config[cfgnum].interface[0].endpoint[0].bInterval, &acm->ctrlbuffer);
+ usb_request_irq(dev,acm->ctrlpipe=usb_rcvctrlpipe(dev,acm->ctrlendp), acm_irq, dev->config[cfgnum].altsetting[0].interface[0].endpoint[0].bInterval, &acm->ctrlbuffer);
acm->present = 1;
acm->buffer=0;
return 0;
int i;
int na=0;
- interface = &dev->config[0].interface[0];
+ interface = &dev->config[0].altsetting[0].interface[0];
for(i=0;i<dev->config[0].bNumInterfaces;i++)
{
return usb_audio_init();
}
-void module_cleanup(void)
+void cleanup_module(void)
{
usb_deregister(&usb_audio_driver);
}
return -1;
#endif
- interface = &dev->config[0].interface[0];
+ interface = &dev->config[0].altsetting[0].interface[0];
/* Is it a CPiA? */
/*
{
return usb_cpia_init();
}
-void module_cleanup(void)
+void cleanup_module(void)
{
}
#endif
if (dev->config[0].bNumInterfaces != 1)
return -1;
- interface = &dev->config[0].interface[0];
+ interface = &dev->config[0].altsetting[0].interface[0];
/* Is it a hub? */
if (interface->bInterfaceClass != 9)
return usb_hub_init();
}
-void module_cleanup(void){
+void cleanup_module(void){
usb_hub_cleanup();
}
#endif
struct usb_endpoint_descriptor *endpoint;
struct usb_keyboard *kbd;
- interface = &dev->config[0].interface[0];
+ interface = &dev->config[0].altsetting[0].interface[0];
endpoint = &interface->endpoint[0];
if(interface->bInterfaceClass != 3
return usb_kbd_init();
}
-void module_cleanup(void)
+void cleanup_module(void)
{
usb_deregister(&usb_kbd_driver);
}
mouse->buttons = data[0] & 0x07;
mouse->dx += data[1]; /* data[] is signed, so this works */
mouse->dy -= data[2]; /* y-axis is reversed */
- mouse->dz += data[3];
+ mouse->dz -= data[3];
mouse->ready = 1;
add_mouse_randomness((mouse->buttons << 24) + (mouse->dz << 16 ) +
return -1;
/* Is it a mouse interface? */
- interface = &dev->config[0].interface[0];
+ interface = &dev->config[0].altsetting[0].interface[0];
if (interface->bInterfaceClass != 3)
return -1;
if (interface->bInterfaceSubClass != 1)
/*
* Clean up when unloading the module
*/
-void module_cleanup(void){
+void cleanup_module(void){
# ifdef CONFIG_APM
apm_unregister_callback(&handle_apm_event);
# endif
return -1;
}
- interface = dev->config->interface;
+ interface = dev->config->altsetting->interface;
/* Lets be paranoid (for the moment)*/
if (interface->bInterfaceClass != 7 ||
#!/bin/sh
-killall ohci-control
+killall uhci-control
sleep 2
-rmmod usb-ohci
+rmmod usb-uhci
{
return usb_init();
}
-void module_cleanup(void)
+void cleanup_module(void)
{
cleanup_drivers();
}
static void usb_show_config(struct usb_config_descriptor *config)
{
- int i;
-
- usb_show_config_descriptor(config);
- for (i = 0 ; i < config->bNumInterfaces; i++)
- usb_show_interface(config->interface + i);
+ int i, j;
+ struct usb_alternate_setting *as;
+
+ usb_show_config_descriptor(config);
+ for (i = 0; i < config->num_altsetting; i++) {
+ as = config->altsetting + i;
+ if ((as) == NULL)
+ break;
+ printk("\n Alternate Setting: %d\n", i);
+ for (j = 0 ; j < config->bNumInterfaces; j++)
+ usb_show_interface(as->interface + j);
+ }
}
void usb_show_device(struct usb_device *dev)
static int usb_parse_config(struct usb_device *dev, struct usb_config_descriptor *config, unsigned char *ptr, int len)
{
- int i;
+ int i, j;
+ int retval;
+ struct usb_alternate_setting *as;
int parsed = usb_expect_descriptor(ptr, len, USB_DT_CONFIG, 9);
if (parsed < 0)
}
- config->interface = (struct usb_interface_descriptor *)
+ config->altsetting = (struct usb_alternate_setting *)
+ kmalloc(USB_MAXALTSETTING * sizeof(struct usb_alternate_setting), GFP_KERNEL);
+ if (config->altsetting == NULL) {
+ printk(KERN_WARNING "usb: out of memory.\n");
+ return -1;
+ }
+ config->act_altsetting = 0;
+ config->num_altsetting = 1;
+
+ config->altsetting->interface = (struct usb_interface_descriptor *)
kmalloc(config->bNumInterfaces * sizeof(struct usb_interface_descriptor), GFP_KERNEL);
- if(config->interface==NULL)
+ if(config->altsetting->interface==NULL)
{
printk(KERN_WARNING "usb: out of memory.\n");
return -1;
}
- memset(config->interface, 0, config->bNumInterfaces*sizeof(struct usb_interface_descriptor));
+ memset(config->altsetting->interface,
+ 0, config->bNumInterfaces*sizeof(struct usb_interface_descriptor));
for (i = 0; i < config->bNumInterfaces; i++) {
- int retval = usb_parse_interface(dev, config->interface + i, ptr + parsed, len);
+ retval = usb_parse_interface(dev, config->altsetting->interface + i, ptr + parsed, len);
if (retval < 0)
return parsed; // HACK
// return retval;
parsed += retval;
len -= retval;
}
+
+ printk("parsed = %d len = %d\n", parsed, len);
+
+ // now parse for additional alternate settings
+ for (j = 1; j < USB_MAXALTSETTING; j++) {
+ retval = usb_expect_descriptor(ptr + parsed, len, USB_DT_INTERFACE, 9);
+ if (retval)
+ break;
+ config->num_altsetting++;
+ as = config->altsetting + j;
+ as->interface = (struct usb_interface_descriptor *)
+ kmalloc(config->bNumInterfaces * sizeof(struct usb_interface_descriptor), GFP_KERNEL);
+ if (as->interface == NULL) {
+ printk(KERN_WARNING "usb: out of memory.\n");
+ return -1;
+ }
+ memset(as->interface, 0, config->bNumInterfaces * sizeof(struct usb_interface_descriptor));
+ for (i = 0; i < config->bNumInterfaces; i++) {
+ retval = usb_parse_interface(dev, as->interface + i,
+ ptr + parsed, len);
+ if (retval < 0)
+ return parsed;
+ parsed += retval;
+ len -= retval;
+ }
+ }
return parsed;
}
void usb_destroy_configuration(struct usb_device *dev)
{
- int c, i;
+ int c, a, i;
struct usb_config_descriptor *cf;
+ struct usb_alternate_setting *as;
struct usb_interface_descriptor *ifp;
if(dev->config==NULL)
return;
- for(c=0;c<dev->descriptor.bNumConfigurations;c++)
+ for(c = 0; c < dev->descriptor.bNumConfigurations; c++)
{
- cf=&dev->config[c];
- if(cf->interface==NULL)
- break;
- for(i=0;i<cf->bNumInterfaces;i++)
+ cf = &dev->config[c];
+ if (cf->altsetting == NULL)
+ break;
+ for (a = 0; a < cf->num_altsetting; a++)
{
- ifp=&cf->interface[i];
- if(ifp->endpoint==NULL)
- break;
- kfree(ifp->endpoint);
+ as = &cf->altsetting[a];
+ if (as->interface == NULL)
+ break;
+ for(i=0;i<cf->bNumInterfaces;i++)
+ {
+ ifp = &as->interface[i];
+ if(ifp->endpoint==NULL)
+ break;
+ kfree(ifp->endpoint);
+ }
+ kfree(as->interface);
}
- kfree(cf->interface);
+ kfree(cf->altsetting);
}
kfree(dev->config);
static void usb_set_maxpacket(struct usb_device *dev)
{
- struct usb_endpoint_descriptor *ep;
- struct usb_interface_descriptor *ip = dev->actconfig->interface;
int i;
+ struct usb_endpoint_descriptor *ep;
+ int act_as = dev->actconfig->act_altsetting;
+ struct usb_alternate_setting *as = dev->actconfig->altsetting + act_as;
+ struct usb_interface_descriptor *ip = as->interface;
for (i=0; i<dev->actconfig->bNumInterfaces; i++) {
- if (dev->actconfig->interface[i].bInterfaceNumber == dev->ifnum) {
- ip = &dev->actconfig->interface[i];
+ if (as->interface[i].bInterfaceNumber == dev->ifnum) {
+ ip = &as->interface[i];
break;
}
}
return -1;
dev->ifnum = interface;
+ dev->actconfig->act_altsetting = alternate;
usb_set_maxpacket(dev);
return 0;
}
-
int usb_set_configuration(struct usb_device *dev, int configuration)
{
devrequest dr;
{
return dev->bus->op->request_irq(dev, pipe, handler, period, dev_id);
}
-
*/
#define USB_MAXCONFIG 8
+#define USB_MAXALTSETTING 5
#define USB_MAXINTERFACES 32
#define USB_MAXENDPOINTS 32
#define USB_MAXSTRINGS 16
void *audio;
};
+/* hack for alternate settings */
+struct usb_alternate_setting {
+ struct usb_interface_descriptor *interface;
+};
+
/* Configuration descriptor information.. */
struct usb_config_descriptor {
__u8 bLength;
__u8 iConfiguration;
__u8 bmAttributes;
__u8 MaxPower;
-
- struct usb_interface_descriptor *interface;
+ int act_altsetting; /* active alternate setting */
+ int num_altsetting; /* number of alternate settings */
+ struct usb_alternate_setting *altsetting;
};
/* String descriptor */
#define USB_MAXCHILDREN (8)
struct usb_device {
- int devnum; /* Device number on USB bus */
- int slow; /* Slow device? */
- int maxpacketsize; /* Maximum packet size */
- __u16 toggle; /* one bit for each endpoint */
- struct usb_config_descriptor *actconfig; /* the active configuration */
- int epmaxpacket[16]; /* endpoint specific maximums */
- int ifnum; /* active interface number */
- struct usb_bus *bus; /* Bus we're apart of */
- struct usb_driver *driver; /* Driver */
- struct usb_device_descriptor descriptor; /* Descriptor */
- struct usb_config_descriptor *config; /* All of the configs */
+ int devnum; /* Device number on USB bus */
+ int slow; /* Slow device? */
+ int maxpacketsize; /* Maximum packet size */
+ __u16 toggle; /* one bit for each endpoint */
+ struct usb_config_descriptor *actconfig;/* the active configuration */
+ int epmaxpacket[16]; /* endpoint specific maximums */
+ int ifnum; /* active interface number */
+ struct usb_bus *bus; /* Bus we're apart of */
+ struct usb_driver *driver; /* Driver */
+ struct usb_device_descriptor descriptor;/* Descriptor */
+ struct usb_config_descriptor *config; /* All of the configs */
struct usb_device *parent;
- char *stringtable; /* Strings (multiple, null term) */
- char **stringindex; /* pointers to strings */
- int maxstring; /* max valid index */
-
+ char *stringtable; /* Strings (multiple, null term) */
+ char **stringindex; /* pointers to strings */
+ int maxstring; /* max valid index */
+
/*
* Child devices - these can be either new devices
* (if this is a hub device), or different instances
end = PAGE_ALIGN(end);
if (end <= start)
return;
- do_mmap(NULL, start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
+ do_brk(start, end - start);
}
/*
#ifdef __sparc__
if (N_MAGIC(ex) == NMAGIC) {
/* Fuck me plenty... */
- error = do_mmap(NULL, N_TXTADDR(ex), ex.a_text,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ error = do_brk(N_TXTADDR(ex), ex.a_text);
read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
ex.a_text, 0);
- error = do_mmap(NULL, N_DATADDR(ex), ex.a_data,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ error = do_brk(N_DATADDR(ex), ex.a_data);
read_exec(bprm->dentry, fd_offset + ex.a_text, (char *) N_DATADDR(ex),
ex.a_data, 0);
goto beyond_if;
if (N_MAGIC(ex) == OMAGIC) {
#if defined(__alpha__) || defined(__sparc__)
- do_mmap(NULL, N_TXTADDR(ex) & PAGE_MASK,
- ex.a_text+ex.a_data + PAGE_SIZE - 1,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+		do_brk(N_TXTADDR(ex) & PAGE_MASK,
+			ex.a_text+ex.a_data + PAGE_SIZE - 1);
read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
ex.a_text+ex.a_data, 0);
#else
- do_mmap(NULL, 0, ex.a_text+ex.a_data,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(0, ex.a_text+ex.a_data);
read_exec(bprm->dentry, 32, (char *) 0, ex.a_text+ex.a_data, 0);
#endif
flush_icache_range((unsigned long) 0,
if (!file->f_op || !file->f_op->mmap || ((fd_offset & ~PAGE_MASK) != 0)) {
sys_close(fd);
- do_mmap(NULL, 0, ex.a_text+ex.a_data,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(0, ex.a_text+ex.a_data);
read_exec(bprm->dentry, fd_offset,
(char *) N_TXTADDR(ex), ex.a_text+ex.a_data, 0);
flush_icache_range((unsigned long) N_TXTADDR(ex),
len = PAGE_ALIGN(ex.a_text + ex.a_data);
bss = ex.a_text + ex.a_data + ex.a_bss;
if (bss > len) {
- error = do_mmap(NULL, start_addr + len, bss - len,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_FIXED, 0);
+ error = do_brk(start_addr + len, bss - len);
retval = error;
if (error != start_addr + len)
goto out_putf;
end = ELF_PAGEALIGN(end);
if (end <= start)
return;
- do_mmap(NULL, start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
+ do_brk(start, end - start);
}
/* Map the last of the bss segment */
if (last_bss > elf_bss)
- do_mmap(NULL, elf_bss, last_bss - elf_bss,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(elf_bss, last_bss - elf_bss);
*interp_load_addr = load_addr;
error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
goto out;
}
- do_mmap(NULL, 0, text_data,
- PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(0, text_data);
retval = read_exec(interpreter_dentry, offset, addr, text_data, 0);
if (retval < 0)
goto out;
flush_icache_range((unsigned long)addr,
(unsigned long)addr + text_data);
- do_mmap(NULL, ELF_PAGESTART(text_data + ELF_EXEC_PAGESIZE - 1),
- interp_ex->a_bss,
- PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(ELF_PAGESTART(text_data + ELF_EXEC_PAGESIZE - 1),
+ interp_ex->a_bss);
elf_entry = interp_ex->a_entry;
out:
ELF_EXEC_PAGESIZE - 1);
bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
if (bss > len)
- do_mmap(NULL, len, bss - len,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0);
+ do_brk(len, bss - len);
error = 0;
out_free_ph:
return err;
}
-static void send_sigio(struct fown_struct *fown, struct fasync_struct *fa)
+static void send_sigio_to_task(struct task_struct *p,
+ struct fown_struct *fown, struct fasync_struct *fa)
{
- struct task_struct * p;
- int pid = fown->pid;
- uid_t uid = fown->uid;
- uid_t euid = fown->euid;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- int match = p->pid;
- if (pid < 0)
- match = -p->pgrp;
- if (pid != match)
- continue;
- if ((euid != 0) &&
- (euid ^ p->suid) && (euid ^ p->uid) &&
- (uid ^ p->suid) && (uid ^ p->uid))
- continue;
- switch (fown->signum) {
- siginfo_t si;
+ if ((fown->euid != 0) &&
+ (fown->euid ^ p->suid) && (fown->euid ^ p->uid) &&
+ (fown->uid ^ p->suid) && (fown->uid ^ p->uid))
+ return;
+ switch (fown->signum) {
+ siginfo_t si;
default:
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
si.si_signo = fown->signum;
si.si_errno = 0;
si.si_code = SI_SIGIO;
- si.si_pid = pid;
- si.si_uid = uid;
+ si.si_pid = fown->pid;
+ si.si_uid = fown->uid;
si.si_fd = fa->fa_fd;
if (!send_sig_info(fown->signum, &si, p))
break;
/* fall-through: fall back on the old plain SIGIO signal */
case 0:
send_sig(SIGIO, p, 1);
- }
}
+}
+
+static void send_sigio(struct fown_struct *fown, struct fasync_struct *fa)
+{
+ struct task_struct * p;
+ int pid = fown->pid;
+
+ read_lock(&tasklist_lock);
+ if ( (pid > 0) && (p = find_task_by_pid(pid)) ) {
+ send_sigio_to_task(p, fown, fa);
+ goto out;
+ }
+ for_each_task(p) {
+ int match = p->pid;
+ if (pid < 0)
+ match = -p->pgrp;
+ if (pid != match)
+ continue;
+ send_sigio_to_task(p, fown, fa);
+ }
+out:
read_unlock(&tasklist_lock);
}
bnode->tree = tree;
bnode->node = node;
bnode->sticky = sticky;
+ hfs_init_waitqueue(&bnode->rqueue);
+ hfs_init_waitqueue(&bnode->wqueue);
if (sticky == HFS_NOT_STICKY) {
/* Insert it in the cache if appropriate */
bt->sys_mdb = mdb->sys_mdb;
bt->reserved = 0;
bt->lock = 0;
- init_waitqueue_head(&bt->wait);
+ hfs_init_waitqueue(&bt->wait);
bt->dirt = 0;
memset(bt->cache, 0, sizeof(bt->cache));
entry->modify_date = hfs_get_nl(cat->u.dir.MdDat);
entry->backup_date = hfs_get_nl(cat->u.dir.BkDat);
dir->dirs = dir->files = 0;
+ hfs_init_waitqueue(&dir->read_wait);
+ hfs_init_waitqueue(&dir->write_wait);
} else if (cat->cdrType == HFS_CDR_FIL) {
struct hfs_file *fil = &entry->u.file;
memset(mdb, 0, sizeof(*mdb));
mdb->magic = HFS_MDB_MAGIC;
mdb->sys_mdb = sys_mdb;
- INIT_LIST_HEAD(&mdb->entry_dirty);
+ INIT_LIST_HEAD(&mdb->entry_dirty);
+ hfs_init_waitqueue(&mdb->rename_wait);
+ hfs_init_waitqueue(&mdb->bitmap_wait);
/* See if this is an HFS filesystem */
buf = hfs_buffer_get(sys_mdb, part_start + HFS_MDB_BLK, 1);
if (near && near < s->s_hpfs_fs_size)
if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
if (b != -1) {
- if (b < 0x10000000) if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) goto ret;
- else if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
+ if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) {
+ b &= 0x0fffffff;
+ goto ret;
+ }
+ if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret;
}
n_bmps = (s->s_hpfs_fs_size + 0x4000 - 1) >> 14;
for (i = 0; i < n_bmps / 2; i++) {
#define ANODE_ALLOC_FWD 512
#define FNODE_ALLOC_FWD 0
#define ALLOC_FWD_MIN 16
-#define ALLOC_FWD_MAX 512
+#define ALLOC_FWD_MAX 128
#define ALLOC_M 1
#define FNODE_RD_AHEAD 16
#define ANODE_RD_AHEAD 16
de.hidden = new_name[0] == '.';
if (new_inode) {
- hpfs_brelse4(&qbh);
- if ((nde = map_dirent(new_dir, new_dir->i_hpfs_dno, (char *)new_name, new_len, NULL, &qbh1))) {
- int r;
- if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
- if (!(nde = map_dirent(new_dir, new_dir->i_hpfs_dno, (char *)new_name, new_len, NULL, &qbh1))) {
- hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent #2");
- goto end1;
- }
+ int r;
+ if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
+ if ((nde = map_dirent(new_dir, new_dir->i_hpfs_dno, (char *)new_name, new_len, NULL, &qbh1))) {
new_inode->i_nlink = 0;
copy_de(nde, &de);
memcpy(nde->name, new_name, new_len);
hpfs_brelse4(&qbh1);
goto end;
}
- err = r == 2 ? -ENOSPC : r == 1 ? -EFSERROR : 0;
+ hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent");
+ err = -EFSERROR;
goto end1;
}
- hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent");
- err = -EFSERROR;
+ err = r == 2 ? -ENOSPC : r == 1 ? -EFSERROR : 0;
goto end1;
}
out_error:
clear_bit(PG_locked, &page->flags);
+ wake_up(&page->wait);
out_free:
free_page(page_address(page));
out:
O_TARGET := proc.o
O_OBJS := inode.o root.o base.o generic.o mem.o link.o fd.o array.o \
- kmsg.o scsi.o proc_tty.o
+ kmsg.o scsi.o proc_tty.o sysvipc.o
ifdef CONFIG_OMIRR
O_OBJS := $(O_OBJS) omirr.o
endif
&proc_root, NULL
};
-struct proc_dir_entry *proc_net, *proc_scsi, *proc_bus;
+struct proc_dir_entry *proc_net, *proc_scsi, *proc_bus, *proc_sysvipc;
#ifdef CONFIG_MCA
struct proc_dir_entry proc_mca = {
proc_register(&proc_root, &proc_root_self);
proc_net = create_proc_entry("net", S_IFDIR, 0);
proc_scsi = create_proc_entry("scsi", S_IFDIR, 0);
+#ifdef CONFIG_SYSVIPC
+ proc_sysvipc = create_proc_entry("sysvipc", S_IFDIR, 0);
+#endif
#ifdef CONFIG_SYSCTL
proc_register(&proc_root, &proc_sys_root);
#endif
--- /dev/null
+/*
+ * linux/fs/proc/sysvipc.c
+ *
+ * Copyright (c) 1999 Dragos Acostachioaie
+ *
+ * This code is derived from linux/fs/proc/generic.c,
+ * which is Copyright (C) 1991, 1992 Linus Torvalds.
+ *
+ * /proc/sysvipc directory handling functions
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+/* 4K page size but our output routines use some slack for overruns */
+#define PROC_BLOCK_SIZE (3*1024)
+
+static ssize_t
+proc_sysvipc_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos)
+{
+ struct inode * inode = file->f_dentry->d_inode;
+ char *page;
+ ssize_t retval=0;
+ int eof=0;
+ ssize_t n, count;
+ char *start;
+ struct proc_dir_entry * dp;
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+ if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ while ((nbytes > 0) && !eof)
+ {
+ count = MIN(PROC_BLOCK_SIZE, nbytes);
+
+ start = NULL;
+ if (dp->get_info) {
+ /*
+ * Handle backwards compatibility with the old net
+ * routines.
+ *
+ * XXX What gives with the file->f_flags & O_ACCMODE
+ * test? Seems stupid to me....
+ */
+ n = dp->get_info(page, &start, *ppos, count,
+ (file->f_flags & O_ACCMODE) == O_RDWR);
+ if (n < count)
+ eof = 1;
+ } else if (dp->read_proc) {
+ n = dp->read_proc(page, &start, *ppos,
+ count, &eof, dp->data);
+ } else
+ break;
+
+ if (!start) {
+ /*
+ * For proc files that are less than 4k
+ */
+ start = page + *ppos;
+ n -= *ppos;
+ if (n <= 0)
+ break;
+ if (n > count)
+ n = count;
+ }
+ if (n == 0)
+ break; /* End of file */
+ if (n < 0) {
+ if (retval == 0)
+ retval = n;
+ break;
+ }
+
+ /* This is a hack to allow mangling of file pos independent
+ * of actual bytes read. Simply place the data at page,
+ * return the bytes, and set `start' to the desired offset
+ * as an unsigned int. - Paul.Russell@rustcorp.com.au
+ */
+ n -= copy_to_user(buf, start < page ? page : start, n);
+ if (n == 0) {
+ if (retval == 0)
+ retval = -EFAULT;
+ break;
+ }
+
+ *ppos += start < page ? (long)start : n; /* Move down the file */
+ nbytes -= n;
+ buf += n;
+ retval += n;
+ }
+ free_page((unsigned long) page);
+ return retval;
+}
+
+static struct file_operations proc_sysvipc_operations = {
+ NULL, /* lseek */
+ proc_sysvipc_read, /* read */
+ NULL, /* write */
+ NULL, /* readdir */
+ NULL, /* poll */
+ NULL, /* ioctl */
+ NULL, /* mmap */
+ NULL, /* no special open code */
+ NULL, /* no special release code */
+ NULL /* can't fsync */
+};
+
+/*
+ * proc directories can do almost nothing..
+ */
+struct inode_operations proc_sysvipc_inode_operations = {
+ &proc_sysvipc_operations, /* default net file-ops */
+ NULL, /* create */
+ NULL, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* bmap */
+ NULL, /* truncate */
+ NULL /* permission */
+};
#include <asm/atomic.h>
#include <asm/hardirq.h>
+/*
+ * This works but is wrong - on SMP it should disable only on the
+ * current CPU and shouldn't synchronize like the heavy global
+ * disable does. Oh, well.
+ *
+ * See the x86 version for an example.
+ */
+#define local_bh_enable()	end_bh_atomic()
+#define local_bh_disable()	start_bh_atomic()
+
extern unsigned int local_bh_count[NR_CPUS];
#define get_active_bhs() (bh_mask & bh_active)
#include <asm/system.h>
+/*
+ * These are the generic versions of the spinlocks
+ * and read-write locks.. We should actually do a
+ * <linux/spinlock.h> with all of this. Oh, well.
+ */
+#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
+#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
+#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
+
+#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
+#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
+#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
+
+#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
+#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
+#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
+
+#define read_unlock_irqrestore(lock, flags) do { read_unlock(lock); local_irq_restore(flags); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
+
+#define write_unlock_irqrestore(lock, flags) do { write_unlock(lock); local_irq_restore(flags); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
+
#ifndef __SMP__
/*
#define spin_trylock(lock) ((void) 0)
#define spin_unlock_wait(lock) ((void) 0)
#define spin_unlock(lock) ((void) 0)
-#define spin_lock_irq(lock) cli()
-#define spin_unlock_irq(lock) sti()
-
-#define spin_lock_irqsave(lock, flags) save_and_cli(flags)
-#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
/*
* Read-write spinlocks, allowing multiple readers
#define read_unlock(lock) ((void) 0)
#define write_lock(lock) ((void) 0)
#define write_unlock(lock) ((void) 0)
-#define read_lock_irq(lock) cli()
-#define read_unlock_irq(lock) sti()
-#define write_lock_irq(lock) cli()
-#define write_unlock_irq(lock) sti()
-
-#define read_lock_irqsave(lock, flags) save_and_cli(flags)
-#define read_unlock_irqrestore(lock, flags) restore_flags(flags)
-#define write_lock_irqsave(lock, flags) save_and_cli(flags)
-#define write_unlock_irqrestore(lock, flags) restore_flags(flags)
#else /* __SMP__ */
#define spin_lock_own(LOCK, LOCATION) ((void)0)
#endif /* DEBUG_SPINLOCK */
-#define spin_lock_irq(lock) \
- (__cli(), spin_lock(lock))
-#define spin_unlock_irq(lock) \
- (spin_unlock(lock), __sti())
-#define spin_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), spin_lock(lock))
-#define spin_unlock_irqrestore(lock, flags) \
- (spin_unlock(lock), __restore_flags(flags))
-
/***********************************************************/
typedef struct { volatile int write_lock:1, read_counter:31; } rwlock_t;
: "m" (__dummy_lock(lock)));
}
-#define read_lock_irq(lock) (__cli(), read_lock(lock))
-#define read_unlock_irq(lock) (read_unlock(lock), __sti())
-#define write_lock_irq(lock) (__cli(), write_lock(lock))
-#define write_unlock_irq(lock) (write_unlock(lock), __sti())
-
-#define read_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), read_lock(lock))
-#define read_unlock_irqrestore(lock, flags) \
- (read_unlock(lock), __restore_flags(flags))
-#define write_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), write_lock(lock))
-#define write_unlock_irqrestore(lock, flags) \
- (write_unlock(lock), __restore_flags(flags))
-
#endif /* SMP */
#endif /* _ALPHA_SPINLOCK_H */
*/
#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
/* For backward compatibility with modules. Unused otherwise. */
extern void * __memcpy(void *, const void *, size_t);
#define __save_and_cli(flags) ((flags) = swpipl(7))
#define __restore_flags(flags) setipl(flags)
+#define local_irq_save(flags) __save_and_cli(flags)
+#define local_irq_restore(flags) __restore_flags(flags)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
#ifdef __SMP__
extern int global_irq_holder;
extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+extern void chrp_progress(char *);
+void chrp_event_scan(void);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
struct thread_struct *next,
unsigned long context);
+extern unsigned int rtas_data;
+
struct pt_regs;
extern void dump_regs(struct pt_regs *);
--- /dev/null
+/*
+* cyclomx.h CYCLOM X Multiprotocol WAN Link Driver.
+* User-level API definitions.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on wanpipe.h by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/05/19 acme wait_queue_head_t wait_stats(support for 2.3.*)
+* 1999/01/03 acme judicious use of data types
+* Dec 27, 1998 Arnaldo cleanup: PACKED not needed
+* Aug 08, 1998 Arnaldo Version 0.0.1
+*/
+#ifndef _CYCLOMX_H
+#define _CYCLOMX_H
+
+#include <linux/wanrouter.h>
+#include <asm/spinlock.h>
+
+#ifdef __KERNEL__
+/* Kernel Interface */
+
+#include <linux/cycx_drv.h> /* CYCLOM X support module API definitions */
+#include <linux/cycx_cfm.h> /* CYCLOM X firmware module definitions */
+#ifdef CONFIG_CYCLOMX_X25
+#include <linux/cycx_x25.h>
+#endif
+
+#ifndef min
+#define min(a,b) (((a)<(b))?(a):(b))
+#endif
+#ifndef max
+#define max(a,b) (((a)>(b))?(a):(b))
+#endif
+
+#define is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0)
+
+/* Adapter Data Space.
+ * This structure is needed because we handle multiple cards, otherwise
+ * static data would do it.
+ */
+typedef struct cycx {
+ char devname[WAN_DRVNAME_SZ+1]; /* card name */
+ cycxhw_t hw; /* hardware configuration */
+ wan_device_t wandev; /* WAN device data space */
+ u32 open_cnt; /* number of open interfaces */
+ u32 state_tick; /* link state timestamp */
+ spinlock_t lock;
+ char in_isr; /* interrupt-in-service flag */
+ char buff_int_mode_unbusy; /* flag for carrying out dev_tint */
+ u16 irq_dis_if_send_count; /* Disabling irqs in if_send*/
+#if LINUX_VERSION_CODE >= 0x020300
+ wait_queue_head_t wait_stats; /* to wait for the STATS indication */
+#else
+ struct wait_queue* wait_stats; /* to wait for the STATS indication */
+#endif
+ u32 mbox; /* -> mailbox */
+ void (*isr)(struct cycx* card); /* interrupt service routine */
+ int (*exec)(struct cycx* card, void* u_cmd, void* u_data);
+ union {
+#ifdef CONFIG_CYCLOMX_X25
+ struct { /* X.25 specific data */
+ u32 lo_pvc;
+ u32 hi_pvc;
+ u32 lo_svc;
+ u32 hi_svc;
+ TX25Stats stats;
+ unsigned critical; /* critical section flag */
+ u32 connection_keys;
+ } x;
+#endif
+ } u;
+} cycx_t;
+
+/* Public Functions */
+void cyclomx_open (cycx_t* card); /* cycx_main.c */
+void cyclomx_close (cycx_t* card); /* cycx_main.c */
+void cyclomx_set_state (cycx_t* card, int state); /* cycx_main.c */
+
+#ifdef CONFIG_CYCLOMX_X25
+int cyx_init (cycx_t* card, wandev_conf_t* conf); /* cycx_x25.c */
+#endif
+#endif /* __KERNEL__ */
+#endif /* _CYCLOMX_H */
--- /dev/null
+/*
+* cycx_cfm.h CYCLOM X Multiprotocol WAN Link Driver.
+* Definitions for the CYCLOM X Firmware Module (CFM).
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on sdlasfm.h by Gene Kozin <74604.152@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* Aug 08, 1998 Arnaldo Initial version.
+*/
+#ifndef _CYCX_CFM_H
+#define _CYCX_CFM_H
+
+/* Defines */
+
+#define CFM_VERSION 2
+#define CFM_SIGNATURE "CFM - Cyclades CYCX Firmware Module"
+
+/* min/max */
+#define CFM_IMAGE_SIZE 0x20000 /* max size of CYCX code image file */
+#define CFM_DESCR_LEN 256 /* max length of description string */
+#define CFM_MAX_CYCX 1 /* max number of compatible adapters */
+#define CFM_LOAD_BUFSZ 0x400 /* buffer size for reset code (buffer_load) */
+
+/* Firmware Commands */
+#define GEN_POWER_ON 0x1280
+
+#define GEN_SET_SEG 0x1401 /* boot segment setting. */
+#define GEN_BOOT_DAT 0x1402 /* boot data. */
+#define GEN_START 0x1403 /* board start. */
+#define GEN_DEFPAR 0x1404 /* buffer length for boot. */
+
+/* Adapter types */
+#define CYCX_2X 2
+#define CYCX_8X 8
+#define CYCX_16X 16
+
+#define CFID_X25_2X 5200
+
+
+/* Data Types */
+
+typedef struct cfm_info /* firmware module information */
+{
+ unsigned short codeid; /* firmware ID */
+	unsigned short	version;	/* firmware version number */
+ unsigned short adapter[CFM_MAX_CYCX]; /* compatible adapter types */
+ unsigned long memsize; /* minimum memory size */
+ unsigned short reserved[2]; /* reserved */
+ unsigned short startoffs; /* entry point offset */
+ unsigned short winoffs; /* dual-port memory window offset */
+ unsigned short codeoffs; /* code load offset */
+ unsigned long codesize; /* code size */
+ unsigned short dataoffs; /* configuration data load offset */
+ unsigned long datasize; /* configuration data size */
+} cfm_info_t;
+
+typedef struct cfm		/* CYCX firmware file structure */
+{
+ char signature[80]; /* CFM file signature */
+ unsigned short version; /* file format version */
+ unsigned short checksum; /* info + image */
+ unsigned short reserved[6]; /* reserved */
+ char descr[CFM_DESCR_LEN]; /* description string */
+ cfm_info_t info; /* firmware module info */
+ unsigned char image[1]; /* code image (variable size) */
+} cfm_t;
+
+typedef struct cycx_header_s {
+ unsigned long reset_size;
+ unsigned long data_size;
+ unsigned long code_size;
+} cycx_header_t;
+
+#endif /* _CYCX_CFM_H */
--- /dev/null
+/*
+* cycx_drv.h CYCX Support Module. Kernel API Definitions.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on sdladrv.h by Gene Kozin <genek@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/01/03 acme more judicious use of data types...
+* uclong, ucchar, etc deleted, the u8, u16, u32
+* types are the portable way to go.
+* 1999/01/03 acme judicious use of data types... u16, u32, etc
+* Dec 26, 1998 Arnaldo FIXED_BUFFERS, CONF_OFFSET,
+* removal of cy_read{bwl}
+* Aug 08, 1998 Arnaldo Initial version.
+*/
+#ifndef _CYCX_DRV_H
+#define _CYCX_DRV_H
+
+#define CYCX_WINDOWSIZE 0x4000 /* default dual-port memory window size */
+#define GEN_CYCX_INTR 0x02
+#define RST_ENABLE 0x04
+#define START_CPU 0x06
+#define RST_DISABLE 0x08
+#define FIXED_BUFFERS 0x08
+#define TEST_PATTERN 0xaa55
+#define CMD_OFFSET 0x20
+#define CONF_OFFSET 0x0380
+#define RESET_OFFSET 0x3c00 /* For reset file load */
+#define DATA_OFFSET 0x0100 /* For code and data files load */
+#define START_OFFSET 0x3ff0 /* 80186 starts here */
+
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* Data Structures */
+/* Adapter hardware configuration. Pointer to this structure is passed to all
+ * APIs. */
+typedef struct cycxhw {
+ u32 type; /* adapter type */
+ u32 fwid; /* firmware ID */
+ int irq; /* interrupt request level */
+ u32 dpmbase; /* dual-port memory base */
+ u32 dpmsize; /* dual-port memory size */
+ u32 pclk; /* CPU clock rate, kHz */
+ u32 memory; /* memory size */
+ u32 reserved[5];
+} cycxhw_t;
+
+/* Function Prototypes */
+extern int cycx_setup (cycxhw_t* hw, void* sfm, u32 len);
+extern int cycx_down (cycxhw_t* hw);
+extern int cycx_inten (cycxhw_t* hw);
+extern int cycx_intde (cycxhw_t* hw);
+extern int cycx_intack (cycxhw_t* hw);
+extern int cycx_intr (cycxhw_t* hw);
+extern int cycx_peek (cycxhw_t* hw, u32 addr, void* buf, u32 len);
+extern int cycx_poke (cycxhw_t* hw, u32 addr, void* buf, u32 len);
+extern int cycx_exec (u32 addr);
+#endif /* _CYCX_DRV_H */
--- /dev/null
+/*
+* cycx_x25.h Cyclom X.25 firmware API definitions.
+*
+* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+*
+* Copyright: (c) 1998, 1999 Arnaldo Carvalho de Melo
+*
+* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+* ============================================================================
+* 1999/01/03 acme judicious use of data types
+*
+* 1999/01/02 acme #define X25_ACK_N3 0x4411
+* Dec 28, 1998 Arnaldo cleanup: lot'o'things removed
+* commands listed,
+* TX25Cmd & TX25Config structs
+* typedef'ed
+*/
+#ifndef _CYCX_X25_H
+#define _CYCX_X25_H
+
+#ifndef PACKED
+#define PACKED __attribute__((packed))
+#endif
+
+/* X.25 shared memory layout. */
+#define X25_MBOX_OFFS 0x300 /* general mailbox block */
+#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
+
+/* DATA STRUCTURES */
+/* X.25 Command Block. */
+typedef struct X25Cmd
+{
+ u16 command PACKED;
+ u16 link PACKED; /* values: 0 or 1 */
+ u16 len PACKED; /* values: 0 thru 0x205 (517) */
+ u32 buf PACKED;
+} TX25Cmd;
+
+/* Defines for the 'command' field. */
+#define X25_CONNECT_REQUEST 0x4401
+#define X25_CONNECT_RESPONSE 0x4402
+#define X25_DISCONNECT_REQUEST 0x4403
+#define X25_DISCONNECT_RESPONSE 0x4404
+#define X25_DATA_REQUEST 0x4405
+#define X25_ACK_TO_VC 0x4406
+#define X25_INTERRUPT_RESPONSE 0x4407
+#define X25_CONFIG 0x4408
+#define X25_CONNECT_INDICATION 0x4409
+#define X25_CONNECT_CONFIRM 0x440A
+#define X25_DISCONNECT_INDICATION 0x440B
+#define X25_DISCONNECT_CONFIRM 0x440C
+#define X25_DATA_INDICATION 0x440E
+#define X25_INTERRUPT_INDICATION 0x440F
+#define X25_ACK_FROM_VC 0x4410
+#define X25_ACK_N3 0x4411
+#define X25_CONNECT_COLLISION 0x4413
+#define X25_N3WIN 0x4414
+#define X25_LINE_ON 0x4415
+#define X25_LINE_OFF 0x4416
+#define X25_RESET_REQUEST 0x4417
+#define X25_LOG 0x4500
+#define X25_STATISTIC 0x4600
+#define X25_TRACE 0x4700
+#define X25_N2TRACEXC 0x4702
+#define X25_N3TRACEXC 0x4703
+
+typedef struct X25Config {
+ u8 link PACKED; /* link number */
+ u8 speed PACKED; /* line speed */
+ u8 clock PACKED; /* internal/external */
+ u8 n2 PACKED; /* # of level 2 retransm.(values: 1 thru FF) */
+ u8 n2win PACKED; /* level 2 window (values: 1 thru 7) */
+ u8 n3win PACKED; /* level 3 window (values: 1 thru 7) */
+ u8 nvc PACKED; /* # of logical channels (values: 1 thru 64) */
+	u8	pktlen	PACKED;	/* level 3 packet length - log base 2 of size */
+ u8 locaddr PACKED; /* my address */
+ u8 remaddr PACKED; /* remote address */
+ u16 t1 PACKED; /* time, in seconds */
+ u16 t2 PACKED; /* time, in seconds */
+ u8 t21 PACKED; /* time, in seconds */
+ u8 npvc PACKED; /* # of permanent virt. circuits (1 thru nvc) */
+ u8 t23 PACKED; /* time, in seconds */
+ u8 flags PACKED; /* see dosx25.doc, in portuguese, for details */
+} TX25Config;
+
+typedef struct X25Stats {
+ u16 rx_crc_errors PACKED;
+ u16 rx_over_errors PACKED;
+ u16 n2_tx_frames PACKED;
+ u16 n2_rx_frames PACKED;
+ u16 tx_timeouts PACKED;
+ u16 rx_timeouts PACKED;
+ u16 n3_tx_packets PACKED;
+ u16 n3_rx_packets PACKED;
+ u16 tx_aborts PACKED;
+ u16 rx_aborts PACKED;
+} TX25Stats;
+#endif /* _CYCX_X25_H */
--- /dev/null
+#ifndef _I2O_H
+#define _I2O_H
+
+/*
+ * Tunable parameters first
+ */
+
+/* How many different OSM's are we allowing */
+#define MAX_I2O_MODULES 64
+/* How many controllers are we allowing */
+#define MAX_I2O_CONTROLLERS 32
+
+
+#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
+
+/*
+ * I2O Interface Objects
+ */
+
+#include <linux/notifier.h>
+#include <asm/atomic.h>
+
+/*
+ * message structures
+ */
+
+#define TID_SZ 12
+#define FUNCTION_SZ 8
+
+struct i2o_message
+{
+ u32 version_size;
+ u32 function_addr;
+ u32 initiator_context;
+ /* List follows */
+};
+
+
+/*
+ * Each I2O device entity has one or more of these. There is one
+ * per device. *FIXME* how to handle multiple types on one unit.
+ */
+
+struct i2o_device
+{
+ int class; /* Block, Net, SCSI etc (from spec) */
+ int subclass; /* eth, fddi, tr etc (from spec) */
+ int id; /* I2O ID assigned by the controller */
+ int parent; /* Parent device */
+ int flags; /* Control flags */
+ int i2oversion; /* I2O version supported. Actually there
+ * should be high and low version */
+ struct proc_dir_entry* proc_entry; /* /proc dir */
+ struct i2o_driver *owner; /* Owning device */
+ struct i2o_controller *controller; /* Controlling IOP */
+ struct i2o_device *next; /* Chain */
+ char dev_name[8]; /* linux /dev name if available */
+};
+
+/*
+ * Resource data for each PCI I2O controller
+ */
+
+struct i2o_pci
+{
+ int irq;
+};
+
+/*
+ * Each I2O controller has one of these objects
+ */
+
+struct i2o_controller
+{
+ char name[16];
+ int unit;
+ int status; /* I2O status */
+ int i2oversion;
+ int type;
+#define I2O_TYPE_PCI 0x01 /* PCI I2O controller */
+ struct notifier_block *event_notifer; /* Events */
+ atomic_t users;
+ struct i2o_device *devices; /* I2O device chain */
+ struct i2o_controller *next; /* Controller chain */
+ volatile u32 *post_port; /* Messaging ports */
+ volatile u32 *reply_port;
+ volatile u32 *irq_mask; /* Interrupt port */
+ u32 mem_offset; /* MFA offset */
+ u32 mem_phys; /* MFA physical */
+ u32 priv_mem;
+ u32 priv_mem_size;
+ u32 priv_io;
+ u32 priv_io_size;
+
+ struct proc_dir_entry* proc_entry; /* /proc dir */
+
+ union
+ { /* Bus information */
+ struct i2o_pci pci;
+ } bus;
+ void (*destructor)(struct i2o_controller *); /* Bus specific destructor */
+ int (*bind)(struct i2o_controller *, struct i2o_device *); /* Bus specific attach/detach */
+ int (*unbind)(struct i2o_controller *, struct i2o_device *);
+ void *page_frame; /* Message buffers */
+ int inbound_size; /* Inbound queue size */
+};
+
+struct i2o_handler
+{
+ void (*reply)(struct i2o_handler *, struct i2o_controller *, struct i2o_message *);
+ char *name;
+ int context; /* Low 8 bits of the transaction info */
+ /* User data follows */
+};
+
+/*
+ * Messenger inlines
+ */
+
+extern inline u32 I2O_POST_READ32(struct i2o_controller *c)
+{
+ return *c->post_port;
+}
+
+extern inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 Val)
+{
+ *c->post_port = Val;
+}
+
+
+extern inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
+{
+ return *c->reply_port;
+}
+
+extern inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 Val)
+{
+ *c->reply_port= Val;
+}
+
+
+extern inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
+{
+ return *c->irq_mask;
+}
+
+extern inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 Val)
+{
+ *c->irq_mask = Val;
+}
+
+
+extern inline void i2o_post_message(struct i2o_controller *c, u32 m)
+{
+	/* The second line isn't spurious - that's forcing PCI posting */
+ I2O_POST_WRITE32(c,m);
+ (void) I2O_IRQ_READ32(c);
+}
+
+extern inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
+{
+ I2O_REPLY_WRITE32(c,m);
+}
+
+
+struct i2o_controller *i2o_controller_chain;
+
+extern int i2o_quiesce_controller(struct i2o_controller *);
+extern int i2o_clear_controller(struct i2o_controller *);
+extern int i2o_install_controller(struct i2o_controller *);
+extern int i2o_delete_controller(struct i2o_controller *);
+extern int i2o_activate_controller(struct i2o_controller *);
+extern void i2o_unlock_controller(struct i2o_controller *);
+extern struct i2o_controller *i2o_find_controller(int);
+extern int i2o_num_controllers;
+
+extern int i2o_install_handler(struct i2o_handler *);
+extern int i2o_remove_handler(struct i2o_handler *);
+
+extern int i2o_install_device(struct i2o_controller *, struct i2o_device *);
+extern int i2o_delete_device(struct i2o_device *);
+extern int i2o_claim_device(struct i2o_device *, struct i2o_driver *);
+extern int i2o_release_device(struct i2o_device *);
+
+extern int i2o_post_this(struct i2o_controller *, int, u32 *, int);
+extern int i2o_post_wait(struct i2o_controller *, int, u32 *, int, int *, int);
+extern int i2o_issue_claim(struct i2o_controller *, int, int, int, int *);
+extern int i2o_query_scalar(struct i2o_controller *, int, int, int, int, void *,
+ int, int *);
+extern int i2o_params_set(struct i2o_controller *c, int, int, int, int, void *,
+ int, int *);
+
+extern void i2o_run_queue(struct i2o_controller *);
+
+extern void i2o_report_status(const char *, const char *, u8, u8, u16);
+extern void report_common_status(u8);
+extern void report_lan_dsc(u16);
+
+extern u32 i2o_wait_message(struct i2o_controller *, char *);
+
+extern const char *i2o_get_class_name(int);
+
+
+/*
+ * I2O classes / subclasses
+ */
+
+/* Class ID and Code Assignments
+ * (LCT.ClassID.Version field)
+ */
+#define I2O_CLASS_VERSION_10 0x00
+#define I2O_CLASS_VERSION_11 0x01
+
+/* Class code names
+ * (from v1.5 Table 6-1 Class Code Assignments.)
+ */
+
+#define I2O_CLASS_EXECUTIVE 0x000
+#define I2O_CLASS_DDM 0x001
+#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
+#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
+#define I2O_CLASS_LAN 0x020
+#define I2O_CLASS_WAN 0x030
+#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
+#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
+#define I2O_CLASS_SCSI_PERIPHERAL 0x051
+#define I2O_CLASS_ATE_PORT 0x060
+#define I2O_CLASS_ATE_PERIPHERAL 0x061
+#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
+#define I2O_CLASS_FLOPPY_DEVICE 0x071
+#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
+#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
+#define I2O_CLASS_PEER_TRANSPORT 0x091
+
+/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
+ */
+
+#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
+
+/* Subclasses
+ */
+
+#define I2O_SUBCLASS_i960 0x001
+#define I2O_SUBCLASS_HDM 0x020
+#define I2O_SUBCLASS_ISM 0x021
+
+/* Operation functions */
+
+#define I2O_PARAMS_FIELD_GET 0x0001
+#define I2O_PARAMS_LIST_GET 0x0002
+#define I2O_PARAMS_MORE_GET 0x0003
+#define I2O_PARAMS_SIZE_GET 0x0004
+#define I2O_PARAMS_TABLE_GET 0x0005
+#define I2O_PARAMS_FIELD_SET 0x0006
+#define I2O_PARAMS_LIST_SET 0x0007
+#define I2O_PARAMS_ROW_ADD 0x0008
+#define I2O_PARAMS_ROW_DELETE 0x0009
+#define I2O_PARAMS_TABLE_CLEAR 0x000A
+
+/*
+ * I2O serial number conventions / formats
+ * (circa v1.5)
+ */
+
+#define I2O_SNFORMAT_UNKNOWN 0
+#define I2O_SNFORMAT_BINARY 1
+#define I2O_SNFORMAT_ASCII 2
+#define I2O_SNFORMAT_UNICODE 3
+#define I2O_SNFORMAT_LAN48_MAC 4
+#define I2O_SNFORMAT_WAN 5
+
+/* Plus new in v2.0 (Yellowstone pdf doc)
+ */
+
+#define I2O_SNFORMAT_LAN64_MAC 6
+#define I2O_SNFORMAT_DDM 7
+#define I2O_SNFORMAT_IEEE_REG64 8
+#define I2O_SNFORMAT_IEEE_REG128 9
+#define I2O_SNFORMAT_UNKNOWN2 0xff
+
+
+/*
+ * "Special" TID assignments
+ */
+#define I2O_IOP_TID 0
+#define I2O_HOST_TID 1
+
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+
+#define TRL_SINGLE_FIXED_LENGTH 0x00
+#define TRL_SINGLE_VARIABLE_LENGTH 0x40
+#define TRL_MULTIPLE_FIXED_LENGTH 0x80
+
+/* LAN Class specific functions */
+
+#define LAN_PACKET_SEND 0x3B
+#define LAN_SDU_SEND 0x3D
+#define LAN_RECEIVE_POST 0x3E
+#define LAN_RESET 0x35
+#define LAN_SUSPEND 0x37
+
+/*
+ * Messaging API values
+ */
+
+#define I2O_CMD_ADAPTER_ASSIGN 0xB3
+#define I2O_CMD_ADAPTER_READ 0xB2
+#define I2O_CMD_ADAPTER_RELEASE 0xB5
+#define I2O_CMD_BIOS_INFO_SET 0xA5
+#define I2O_CMD_BOOT_DEVICE_SET 0xA7
+#define I2O_CMD_CONFIG_VALIDATE 0xBB
+#define I2O_CMD_CONN_SETUP 0xCA
+#define I2O_CMD_DDM_DESTROY 0xB1
+#define I2O_CMD_DDM_ENABLE 0xD5
+#define I2O_CMD_DDM_QUIESCE 0xC7
+#define I2O_CMD_DDM_RESET 0xD9
+#define I2O_CMD_DDM_SUSPEND 0xAF
+#define I2O_CMD_DEVICE_ASSIGN 0xB7
+#define I2O_CMD_DEVICE_RELEASE 0xB9
+#define I2O_CMD_HRT_GET 0xA8
+#define I2O_CMD_ADAPTER_CLEAR 0xBE
+#define I2O_CMD_ADAPTER_CONNECT 0xC9
+#define I2O_CMD_ADAPTER_RESET 0xBD
+#define I2O_CMD_LCT_NOTIFY 0xA2
+#define I2O_CMD_OUTBOUND_INIT 0xA1
+#define I2O_CMD_PATH_ENABLE 0xD3
+#define I2O_CMD_PATH_QUIESCE 0xC5
+#define I2O_CMD_PATH_RESET 0xD7
+#define I2O_CMD_STATIC_MF_CREATE 0xDD
+#define I2O_CMD_STATIC_MF_RELEASE 0xDF
+#define I2O_CMD_STATUS_GET 0xA0
+#define I2O_CMD_SW_DOWNLOAD 0xA9
+#define I2O_CMD_SW_UPLOAD 0xAB
+#define I2O_CMD_SW_REMOVE 0xAD
+#define I2O_CMD_SYS_ENABLE 0xD1
+#define I2O_CMD_SYS_MODIFY 0xC1
+#define I2O_CMD_SYS_QUIESCE 0xC3
+#define I2O_CMD_SYS_TAB_SET 0xA3
+
+#define I2O_CMD_UTIL_NOP 0x00
+#define I2O_CMD_UTIL_ABORT 0x01
+#define I2O_CMD_UTIL_CLAIM 0x09
+#define I2O_CMD_UTIL_RELEASE 0x0B
+#define I2O_CMD_UTIL_PARAMS_GET 0x06
+#define I2O_CMD_UTIL_PARAMS_SET 0x05
+#define I2O_CMD_UTIL_EVT_REGISTER 0x13
+#define I2O_CMD_UTIL_ACK 0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
+#define I2O_CMD_UTIL_LOCK 0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
+
+#define I2O_CMD_SCSI_EXEC 0x81
+#define I2O_CMD_SCSI_ABORT 0x83
+#define I2O_CMD_SCSI_BUSRESET 0x27
+
+#define I2O_CMD_BLOCK_READ 0x30
+#define I2O_CMD_BLOCK_WRITE 0x31
+#define I2O_CMD_BLOCK_CFLUSH 0x37
+#define I2O_CMD_BLOCK_MLOCK 0x49
+#define I2O_CMD_BLOCK_MUNLOCK 0x4B
+#define I2O_CMD_BLOCK_MMOUNT 0x41
+#define I2O_CMD_BLOCK_MEJECT 0x43
+
+#define I2O_PRIVATE_MSG 0xFF
+
+/*
+ * Init Outbound Q status
+ */
+
+#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
+#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
+#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
+#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
+
+/*
+ * I2O Get Status State values
+ */
+
+#define ADAPTER_STATE_INITIALIZING 0x01
+#define ADAPTER_STATE_RESET 0x02
+#define ADAPTER_STATE_HOLD 0x04
+#define ADAPTER_STATE_READY 0x05
+#define ADAPTER_STATE_OPERATIONAL 0x08
+#define ADAPTER_STATE_FAILED 0x10
+#define ADAPTER_STATE_FAULTED 0x11
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR 0
+#define I2O_RTN_NOT_INIT 1
+#define I2O_RTN_FREE_Q_EMPTY 2
+#define I2O_RTN_TCB_ERROR 3
+#define I2O_RTN_TRANSACTION_ERROR 4
+#define I2O_RTN_ADAPTER_ALREADY_INIT 5
+#define I2O_RTN_MALLOC_ERROR 6
+#define I2O_RTN_ADPTR_NOT_REGISTERED 7
+#define I2O_RTN_MSG_REPLY_TIMEOUT 8
+#define I2O_RTN_NO_STATUS 9
+#define I2O_RTN_NO_FIRM_VER 10
+#define I2O_RTN_NO_LINK_SPEED 11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS 0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS 0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS 0x0000
+#define I2O_DSC_BAD_KEY 0x0002
+#define I2O_DSC_TCL_ERROR 0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
+#define I2O_DSC_NO_SUCH_PAGE 0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
+#define I2O_DSC_DEVICE_LOCKED 0x000B
+#define I2O_DSC_DEVICE_RESET 0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
+#define I2O_DSC_INVALID_OFFSET 0x0010
+#define I2O_DSC_INVALID_PARAMETER 0x0011
+#define I2O_DSC_INVALID_REQUEST 0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
+#define I2O_DSC_MISSING_PARAMETER 0x0016
+#define I2O_DSC_TIMEOUT 0x0017
+#define I2O_DSC_UNKNOWN_ERROR 0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
+#define I2O_DSC_DEVICE_BUSY 0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
+
+/* Message header defines for VersionOffset */
+#define I2OVER15 0x0001
+#define I2OVER20 0x0002
+/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
+#define I2OVERSION I2OVER15
+#define SGL_OFFSET_0 I2OVERSION
+#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
+#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
+#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
+#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
+#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
+
+#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
+#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
+
+ /* msg header defines for MsgFlags */
+#define MSG_STATIC 0x0100
+#define MSG_64BIT_CNTXT 0x0200
+#define MSG_MULTI_TRANS 0x1000
+#define MSG_FAIL 0x2000
+#define MSG_LAST 0x4000
+#define MSG_REPLY 0x8000
+
+ /* normal LAN request message MsgFlags and VersionOffset (0x1041) */
+#define LAN_MSG_REQST (MSG_MULTI_TRANS | SGL_OFFSET_4)
+
+ /* minimum size msg */
+#define THREE_WORD_MSG_SIZE 0x00030000
+#define FOUR_WORD_MSG_SIZE 0x00040000
+#define FIVE_WORD_MSG_SIZE 0x00050000
+#define SIX_WORD_MSG_SIZE 0x00060000
+#define SEVEN_WORD_MSG_SIZE 0x00070000
+#define EIGHT_WORD_MSG_SIZE 0x00080000
+#define NINE_WORD_MSG_SIZE 0x00090000
+#define TEN_WORD_MSG_SIZE 0x000A0000
+#define I2O_MESSAGE_SIZE(x) ((x)<<16)
+
+
+/* Special TID Assignments */
+
+#define ADAPTER_TID 0
+#define HOST_TID 1
+
+#define MSG_FRAME_SIZE 128
+#define NMBR_MSG_FRAMES 128
+
+#define MSG_POOL_SIZE 16384
+
+#define I2O_POST_WAIT_OK 1
+#define I2O_POST_WAIT_TIMEOUT -1
+
+#endif /* __KERNEL__ */
+
+#include <asm/ioctl.h>
+
+/*
+ * I2O Control IOCTLs and structures
+ */
+#define I2O_MAGIC_NUMBER 'i'
+#define I2OGETIOPS _IO(I2O_MAGIC_NUMBER,0)
+#define I2OHRTGET _IO(I2O_MAGIC_NUMBER,1)
+#define I2OLCTGET _IO(I2O_MAGIC_NUMBER,2)
+#define I2OPARMSET _IO(I2O_MAGIC_NUMBER,3)
+#define I2OPARMGET _IO(I2O_MAGIC_NUMBER,4)
+#define I2OSWDL _IO(I2O_MAGIC_NUMBER,5)
+#define I2OSWUL _IO(I2O_MAGIC_NUMBER,6)
+#define I2OSWDEL _IO(I2O_MAGIC_NUMBER,7)
+#define I2OHTML _IO(I2O_MAGIC_NUMBER,8)
+
+/* On hold until we figure this out
+#define I2OEVTREG _IO(I2O_MAGIC_NUMBER,9)
+#define I2OEVTCLR _IO(I2O_MAGIC_NUMBER,10)
+#define I2OEVTGET _IO(I2O_MAGIC_NUMBER,11)
+ */
+
+struct i2o_cmd_hrtlct
+{
+ unsigned int iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ unsigned int *reslen; /* Buffer length in bytes */
+};
+
+
+struct i2o_cmd_psetget
+{
+ unsigned int iop; /* IOP unit number */
+ unsigned int tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ unsigned int oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ unsigned int *reslen; /* Result List buffer length in bytes */
+};
+
+struct i2o_sw_xfer
+{
+ unsigned int iop; /* IOP unit number */
+ unsigned char dl_flags; /* DownLoadFlags field */
+ unsigned char sw_type; /* Software type */
+ unsigned int sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ unsigned int *swlen; /* Length of software data */
+ unsigned int *maxfrag; /* Maximum fragment count */
+ unsigned int *curfrag; /* Current fragment count */
+};
+
+struct i2o_html
+{
+ unsigned int iop; /* IOP unit number */
+ unsigned int tid; /* Target device ID */
+ unsigned int page; /* HTML page */
+ void *resbuf; /* Buffer for reply HTML page */
+ unsigned int *reslen; /* Length in bytes of reply buffer */
+ void *qbuf; /* Pointer to HTTP query string */
+ unsigned int qlen; /* Length in bytes of query string buffer */
+};
+
+#endif
extern unsigned long do_mmap(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long);
extern int do_munmap(unsigned long, size_t);
+extern unsigned long do_brk(unsigned long, unsigned long);
/* filemap.c */
extern void remove_inode_page(struct page *);
return vma;
}
+extern struct vm_area_struct *find_extend_vma(struct task_struct *tsk, unsigned long addr);
+
#define buffer_under_min() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.min_percent * num_physpages)
#define pgcache_under_min() (page_cache_size * 100 < \
void (*change_mode)(struct parport *, int);
- void (*release_resources)(struct parport *);
- int (*claim_resources)(struct parport *);
-
void (*epp_write_data)(struct parport *, unsigned char);
unsigned char (*epp_read_data)(struct parport *);
void (*epp_write_addr)(struct parport *, unsigned char);
unsigned int waiting;
struct pardevice *waitprev;
struct pardevice *waitnext;
+ void * sysctl_table;
};
/* Directory information for the /proc interface */
spinlock_t pardevice_lock;
spinlock_t waitlist_lock;
rwlock_t cad_lock;
+ void * sysctl_table;
};
struct parport_driver {
/* parport_in_use returns nonzero if there are devices attached to a port. */
#define parport_in_use(x) ((x)->devices != NULL)
-/* Put a parallel port to sleep; release its hardware resources. Only possible
- * if no devices are registered. */
-extern void parport_quiesce(struct parport *);
-
/* parport_enumerate returns a pointer to the linked list of all the ports
* in this machine.
*/
#define PARPORT_DEV_LURK (1<<0) /* WARNING !! DEPRECATED !! */
#define PARPORT_DEV_EXCL (1<<1) /* Need exclusive access. */
-#define PARPORT_FLAG_COMA (1<<0)
+#define PARPORT_FLAG_COMA_ (1<<0) /* No longer used. */
#define PARPORT_FLAG_EXCL (1<<1) /* EXCL driver registered. */
extern int parport_parse_irqs(int, const char *[], int irqval[]);
char);
/* Prototypes from parport_procfs */
-extern int parport_proc_init(void);
-extern void parport_proc_cleanup(void);
extern int parport_proc_register(struct parport *pp);
extern int parport_proc_unregister(struct parport *pp);
+extern int parport_device_proc_register(struct pardevice *device);
+extern int parport_device_proc_unregister(struct pardevice *device);
+extern int parport_default_proc_register(void);
+extern int parport_default_proc_unregister(void);
extern void dec_parport_count(void);
extern void inc_parport_count(void);
#define PCI_CLASS_SERIAL_USB 0x0c03
#define PCI_CLASS_SERIAL_FIBER 0x0c04
+#define PCI_CLASS_HOT_SWAP_CONTROLLER 0xff00
+
#define PCI_CLASS_OTHERS 0xff
/*
#define PCI_DEVICE_ID_DEC_21150 0x0022
#define PCI_DEVICE_ID_DEC_21152 0x0024
#define PCI_DEVICE_ID_DEC_21153 0x0025
+#define PCI_DEVICE_ID_DEC_21154 0x0026
#define PCI_VENDOR_ID_CIRRUS 0x1013
#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_VENDOR_ID_MOTOROLA_OOPS 0x1507
#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802
+#define PCI_DEVICE_ID_MOTOROLA_CPX8216 0x4806
#define PCI_VENDOR_ID_PROMISE 0x105a
#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
PROC_STRAM,
PROC_SOUND,
PROC_MTRR, /* whether enabled or not */
- PROC_FS
+ PROC_FS,
+ PROC_SYSVIPC
};
enum pid_directory_inos {
PROC_CODA_FS_LAST
};
+enum sysvipc_directory_inos {
+ PROC_SYSVIPC_SHM = PROC_CODA_FS_LAST,
+ PROC_SYSVIPC_SEM,
+ PROC_SYSVIPC_MSG
+};
+
/* Finally, the dynamically allocatable proc entries are reserved: */
#define PROC_DYNAMIC_FIRST 4096
extern struct proc_dir_entry proc_pid_fd;
extern struct proc_dir_entry proc_mca;
extern struct proc_dir_entry *proc_bus;
+extern struct proc_dir_entry *proc_sysvipc;
extern struct inode_operations proc_scsi_inode_operations;
#endif
extern struct inode_operations proc_omirr_inode_operations;
extern struct inode_operations proc_ppc_htab_inode_operations;
+extern struct inode_operations proc_sysvipc_inode_operations;
/*
* generic.c
/* CTL_DEV names: */
enum {
DEV_CDROM=1,
- DEV_HWMON=2
+ DEV_HWMON=2,
+ DEV_PARPORT=3
};
/* /proc/sys/dev/cdrom */
DEV_CDROM_INFO=1
};
+/* /proc/sys/dev/parport */
+enum {
+ DEV_PARPORT_DEFAULT=-3
+};
+
+/* /proc/sys/dev/parport/default */
+enum {
+ DEV_PARPORT_DEFAULT_TIMESLICE=1,
+ DEV_PARPORT_DEFAULT_SPINTIME=2
+};
+
+/* /proc/sys/dev/parport/parport n */
+enum {
+ DEV_PARPORT_SPINTIME=1,
+ DEV_PARPORT_HARDWARE=2,
+ DEV_PARPORT_DEVICES=3,
+ DEV_PARPORT_AUTOPROBE=16
+};
+
+/* /proc/sys/dev/parport/parport n/devices/ */
+enum {
+ DEV_PARPORT_DEVICES_ACTIVE=-3,
+};
+
+/* /proc/sys/dev/parport/parport n/devices/device n */
+enum {
+ DEV_PARPORT_DEVICE_TIMESLICE=1,
+};
+
#ifdef __KERNEL__
extern asmlinkage int sys_sysctl(struct __sysctl_args *);
*
* Author: Gene Kozin <genek@compuserve.com>
* Jaspreet Singh <jaspreet@sangoma.com>
+* Additions: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* Copyright: (c) 1995-1997 Sangoma Technologies Inc.
*
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
+* May 23, 1999 Arnaldo Melo Added local_addr to wanif_conf_t
+* WAN_DISCONNECTING state added
* Nov 06, 1997 Jaspreet Singh Changed Router Driver version to 1.1 from 1.0
* Oct 20, 1997 Jaspreet Singh Added 'cir','bc','be' and 'mc' to 'wanif_conf_t'
* Added 'enable_IPX' and 'network_number' to
* 'wan_device_t'. Also added defines for
-* UDP PACKET TYPE, Interrupt test, critical values* for RACE conditions.
+* UDP PACKET TYPE, Interrupt test, critical values
+* for RACE conditions.
* Oct 05, 1997 Jaspreet Singh Added 'dlci_num' and 'dlci[100]' to
* 'wan_fr_conf_t' to configure a list of dlci(s)
* for a NODE
unsigned n392; /* error threshold counter */
unsigned n393; /* monitored events counter */
unsigned dlci_num; /* number of DLCs (access node) */
- unsigned dlci[100]; /* List of all DLCIs */
+ unsigned dlci[100]; /* List of all DLCIs */
} wan_fr_conf_t;
/*----------------------------------------------------------------------------
WAN_DISCONNECTED, /* link/channel is disconnected */
WAN_CONNECTING, /* connection is in progress */
WAN_CONNECTED, /* link/channel is operational */
+ WAN_DISCONNECTING, /* disconnection is in progress */
WAN_LIMIT /* for verification only */
};
unsigned bc; /* Committed Burst Size fwd, bwd */
unsigned be; /* Excess Burst Size fwd, bwd */
char mc; /* Multicast on or off */
+ char local_addr[WAN_ADDRESS_SZ+1];/* local media address, ASCIIZ */
+ unsigned char port; /* board port */
int reserved[8]; /* reserved for future extensions */
} wanif_conf_t;
#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
* I've got something to write and
* there is no window */
-#define TCP_KEEPALIVE_TIME (180*60*HZ) /* two hours */
+#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_PERIOD ((75*HZ)>>2) /* period of keepalive check */
* Fixed up the unchecked user space derefs
* Copyright (C) 1998 Alan Cox & Andi Kleen
*
+ * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*/
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
+#include <linux/proc_fs.h>
#include <asm/uaccess.h>
static void freeque (int id);
static int newque (key_t key, int msgflg);
static int findkey (key_t key);
+#ifdef CONFIG_PROC_FS
+static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+#endif
static struct msqid_ds *msgque[MSGMNI];
static int msgbytes = 0;
void __init msg_init (void)
{
	int id;
-
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
+
 for (id = 0; id < MSGMNI; id++)
 msgque[id] = (struct msqid_ds *) IPC_UNUSED;
 msgbytes = msghdrs = msg_seq = max_msqid = used_queues = 0;
 init_waitqueue_head(&msg_lock);
+#ifdef CONFIG_PROC_FS
+ /* create_proc_entry() can fail (e.g. out of memory); don't oops at boot */
+ ent = create_proc_entry("sysvipc/msg", 0, 0);
+ if (ent)
+ ent->read_proc = sysvipc_msg_read_proc;
+#endif
 return;
}
return err;
}
+#ifdef CONFIG_PROC_FS
+/*
+ * /proc/sysvipc/msg: one line per active message queue.
+ * read_proc contract: fill `buffer', honour `offset'/`length',
+ * set *start and *eof, return the byte count for this call.
+ */
+static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+{
+ off_t pos = 0; /* file offset just past the data generated so far */
+ off_t begin = 0; /* file offset corresponding to buffer[0] */
+ int i, len = 0; /* bytes currently held in the buffer */
+
+ len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");
+
+ for(i = 0; i < MSGMNI; i++)
+ if(msgque[i] != IPC_UNUSED) {
+ len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
+ msgque[i]->msg_perm.key,
+ msgque[i]->msg_perm.seq * MSGMNI + i,
+ msgque[i]->msg_perm.mode,
+ msgque[i]->msg_cbytes,
+ msgque[i]->msg_qnum,
+ msgque[i]->msg_lspid,
+ msgque[i]->msg_lrpid,
+ msgque[i]->msg_perm.uid,
+ msgque[i]->msg_perm.gid,
+ msgque[i]->msg_perm.cuid,
+ msgque[i]->msg_perm.cgid,
+ msgque[i]->msg_stime,
+ msgque[i]->msg_rtime,
+ msgque[i]->msg_ctime);
+
+ /* was `pos += len': len is cumulative, so the position was
+ * over-counted and partial reads terminated too early */
+ pos = begin + len;
+ if(pos < offset) { /* reader already consumed this chunk */
+ len = 0;
+ begin = pos;
+ }
+ if(pos > offset + length) /* enough data for this read() */
+ goto done;
+ }
+ *eof = 1; /* whole table emitted */
+done:
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+ if(len < 0)
+ len = 0;
+ return len;
+}
+#endif
* better but only get the semops right which only wait for zero or
* increase. If there are decrement operations in the operations
* array we do the same as before.
+ *
+ * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*/
#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
+#include <linux/proc_fs.h>
#include <asm/uaccess.h>
static int newary (key_t, int, int);
static int findkey (key_t key);
static void freeary (int id);
+#ifdef CONFIG_PROC_FS
+static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+#endif
static struct semid_ds *semary[SEMMNI];
static int used_sems = 0, used_semids = 0;
void __init sem_init (void)
{
	int i;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
 init_waitqueue_head(&sem_lock);
 used_sems = used_semids = max_semid = sem_seq = 0;
 for (i = 0; i < SEMMNI; i++)
 semary[i] = (struct semid_ds *) IPC_UNUSED;
+#ifdef CONFIG_PROC_FS
+ /* create_proc_entry() can fail (e.g. out of memory); don't oops at boot */
+ ent = create_proc_entry("sysvipc/sem", 0, 0);
+ if (ent)
+ ent->read_proc = sysvipc_sem_read_proc;
+#endif
 return;
}
}
current->semundo = NULL;
}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * /proc/sysvipc/sem: one line per active semaphore set.
+ * read_proc contract: fill `buffer', honour `offset'/`length',
+ * set *start and *eof, return the byte count for this call.
+ */
+static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+{
+ off_t pos = 0; /* file offset just past the data generated so far */
+ off_t begin = 0; /* file offset corresponding to buffer[0] */
+ int i, len = 0; /* bytes currently held in the buffer */
+
+ len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n");
+
+ for(i = 0; i < SEMMNI; i++)
+ if(semary[i] != IPC_UNUSED) {
+ len += sprintf(buffer + len, "%10d %10d %4o %5u %5u %5u %5u %5u %10lu %10lu\n",
+ semary[i]->sem_perm.key,
+ semary[i]->sem_perm.seq * SEMMNI + i,
+ semary[i]->sem_perm.mode,
+ semary[i]->sem_nsems,
+ semary[i]->sem_perm.uid,
+ semary[i]->sem_perm.gid,
+ semary[i]->sem_perm.cuid,
+ semary[i]->sem_perm.cgid,
+ semary[i]->sem_otime,
+ semary[i]->sem_ctime);
+
+ /* was `pos += len': len is cumulative, so the position was
+ * over-counted and partial reads terminated too early */
+ pos = begin + len;
+ if(pos < offset) { /* reader already consumed this chunk */
+ len = 0;
+ begin = pos;
+ }
+ if(pos > offset + length) /* enough data for this read() */
+ goto done;
+ }
+ *eof = 1; /* whole table emitted */
+done:
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+ if(len < 0)
+ len = 0;
+ return len;
+}
+#endif
* Many improvements/fixes by Bruno Haible.
* Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
* Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
+ *
+ * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*/
#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static void shm_close (struct vm_area_struct *shmd);
static unsigned long shm_nopage(struct vm_area_struct *, unsigned long, int);
static int shm_swapout(struct vm_area_struct *, struct page *);
+#ifdef CONFIG_PROC_FS
+static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+#endif
static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
void __init shm_init (void)
{
	int id;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
 for (id = 0; id < SHMMNI; id++)
 shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
 shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
 init_waitqueue_head(&shm_lock);
+#ifdef CONFIG_PROC_FS
+ /* create_proc_entry() can fail (e.g. out of memory); don't oops at boot */
+ ent = create_proc_entry("sysvipc/shm", 0, 0);
+ if (ent)
+ ent->read_proc = sysvipc_shm_read_proc;
+#endif
 return;
}
return;
}
}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * /proc/sysvipc/shm: one line per active shared memory segment.
+ * read_proc contract: fill `buffer', honour `offset'/`length',
+ * set *start and *eof, return the byte count for this call.
+ */
+static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+{
+ off_t pos = 0; /* file offset just past the data generated so far */
+ off_t begin = 0; /* file offset corresponding to buffer[0] */
+ int i, len = 0; /* bytes currently held in the buffer */
+
+ len += sprintf(buffer, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");
+
+ for(i = 0; i < SHMMNI; i++)
+ if(shm_segs[i] != IPC_UNUSED) {
+ len += sprintf(buffer + len, "%10d %10d %4o %10d %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n",
+ shm_segs[i]->u.shm_perm.key,
+ shm_segs[i]->u.shm_perm.seq * SHMMNI + i,
+ shm_segs[i]->u.shm_perm.mode,
+ shm_segs[i]->u.shm_segsz,
+ shm_segs[i]->u.shm_cpid,
+ shm_segs[i]->u.shm_lpid,
+ shm_segs[i]->u.shm_nattch,
+ shm_segs[i]->u.shm_perm.uid,
+ shm_segs[i]->u.shm_perm.gid,
+ shm_segs[i]->u.shm_perm.cuid,
+ shm_segs[i]->u.shm_perm.cgid,
+ shm_segs[i]->u.shm_atime,
+ shm_segs[i]->u.shm_dtime,
+ shm_segs[i]->u.shm_ctime);
+
+ /* was `pos += len': len is cumulative, so the position was
+ * over-counted and partial reads terminated too early */
+ pos = begin + len;
+ if(pos < offset) { /* reader already consumed this chunk */
+ len = 0;
+ begin = pos;
+ }
+ if(pos > offset + length) /* enough data for this read() */
+ goto done;
+ }
+ *eof = 1; /* whole table emitted */
+done:
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+ if(len < 0)
+ len = 0;
+ return len;
+}
+#endif
/* process memory management */
EXPORT_SYMBOL(do_mmap);
EXPORT_SYMBOL(do_munmap);
+EXPORT_SYMBOL(do_brk);
EXPORT_SYMBOL(exit_mm);
EXPORT_SYMBOL(exit_files);
EXPORT_SYMBOL(exit_fs);
}
}
+/*
+ * sys_brk() for the most part doesn't need the global kernel
+ * lock, except when an application is doing something nasty
+ * like trying to un-brk an area that has already been mapped
+ * to a regular file. In this case, the unmapping will need
+ * to invoke file system routines that need the global lock.
+ */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
unsigned long rlim, retval;
down(&mm->mmap_sem);
- /*
- * This lock-kernel is one of the main contention points for
- * certain normal loads. And it really should not be here: almost
- * everything in brk()/mmap()/munmap() is protected sufficiently by
- * the mmap semaphore that we got above.
- *
- * We should move this into the few things that really want the
- * lock, namely anything that actually touches a file descriptor
- * etc. We can do all the normal anonymous mapping cases without
- * ever getting the lock at all - the actual memory management
- * code is already completely thread-safe.
- */
- lock_kernel();
-
if (brk < mm->end_code)
goto out;
newbrk = PAGE_ALIGN(brk);
goto out;
/* Ok, looks good - let it rip. */
- if (do_mmap(NULL, oldbrk, newbrk-oldbrk,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0) != oldbrk)
+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
goto out;
set_brk:
mm->brk = brk;
out:
retval = mm->brk;
- unlock_kernel();
up(&mm->mmap_sem);
return retval;
}
return NULL;
}
+struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr) /* find vma covering addr, growing a stack vma downwards if needed */
+{
+ struct vm_area_struct * vma;
+ unsigned long start;
+
+ addr &= PAGE_MASK; /* operate on whole pages */
+ vma = find_vma(tsk->mm,addr); /* lowest vma with vm_end > addr */
+ if (!vma)
+ return NULL; /* addr is above every mapping */
+ if (vma->vm_start <= addr)
+ return vma; /* addr already inside this vma */
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL; /* gap below a non-stack vma: cannot extend */
+ start = vma->vm_start; /* old low end, for the mlock fixup below */
+ if (expand_stack(vma, addr))
+ return NULL; /* expansion failed */
+ if (vma->vm_flags & VM_LOCKED) {
+ make_pages_present(addr, start); /* mlocked vma: fault in the newly added [addr, old start) range now */
+ }
+ return vma;
+}
+
/* Normal function to fix up a mapping
* This function is the default for when an area has no specific
* function. This may be used as part of a more specific routine.
end = end > mpnt->vm_end ? mpnt->vm_end : end;
size = end - st;
+ lock_kernel();
+
if (mpnt->vm_ops && mpnt->vm_ops->unmap)
mpnt->vm_ops->unmap(mpnt, st, size);
* Fix the mapping, and free the old area if it wasn't reused.
*/
extra = unmap_fixup(mpnt, st, size, extra);
+
+ unlock_kernel();
}
/* Release the extra vma struct if it wasn't used */
int ret;
down(¤t->mm->mmap_sem);
- lock_kernel();
ret = do_munmap(addr, len);
- unlock_kernel();
up(¤t->mm->mmap_sem);
return ret;
}
+/*
+ * This is really a simplified "do_mmap": it only handles fixed,
+ * anonymous, private mappings (the sys_brk() heap). Eventually we
+ * may be able to do some brk-specific accounting here.
+ */
+unsigned long do_brk(unsigned long addr, unsigned long len)
+{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma;
+ unsigned long flags, retval;
+
+ /*
+ * mlock MCL_FUTURE? Then the enlarged locked size must stay within RLIMIT_MEMLOCK.
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT; /* bytes currently mlocked */
+ locked += len;
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+
+ /*
+ * Clear old maps. This also does some error checking for us
+ */
+ retval = do_munmap(addr, len);
+ if (retval != 0)
+ return retval;
+
+ /* Check against address space limits *after* clearing old maps... */
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+
+ if (mm->map_count > MAX_MAP_COUNT)
+ return -ENOMEM; /* too many distinct mappings */
+
+ if (!vm_enough_memory(len >> PAGE_SHIFT))
+ return -ENOMEM; /* overcommit check refused the allocation */
+
+ /*
+ * create a vma struct for an anonymous mapping
+ */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma)
+ return -ENOMEM;
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags(PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE) | mm->def_flags;
+
+ vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
+ vma->vm_ops = NULL; /* anonymous: no backing file, no special operations */
+ vma->vm_offset = 0;
+ vma->vm_file = NULL;
+ vma->vm_pte = 0;
+
+ /*
+ * merge_segments may merge our vma, so we can't refer to it
+ * after the call. Save the values we need now ...
+ */
+ flags = vma->vm_flags;
+ addr = vma->vm_start;
+ insert_vm_struct(mm, vma);
+ merge_segments(mm, vma->vm_start, vma->vm_end);
+
+ mm->total_vm += len >> PAGE_SHIFT;
+ if (flags & VM_LOCKED) {
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len); /* pre-fault mlocked pages */
+ }
+ return addr;
+}
+
/* Build the AVL tree corresponding to the VMA list. */
void build_mmap_avl(struct mm_struct * mm)
{
new_vma->vm_start = new_addr;
new_vma->vm_end = new_addr+new_len;
new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
+ lock_kernel();
if (new_vma->vm_file)
new_vma->vm_file->f_count++;
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
insert_vm_struct(current->mm, new_vma);
merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
+ unlock_kernel();
do_munmap(addr, old_len);
current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) {
unsigned long ret = -EINVAL;
down(¤t->mm->mmap_sem);
- lock_kernel();
if (addr & ~PAGE_MASK)
goto out;
old_len = PAGE_ALIGN(old_len);
else
ret = -ENOMEM;
out:
- unlock_kernel();
up(¤t->mm->mmap_sem);
return ret;
}
}
#ifdef MODULE
-void module_cleanup(void)
+void cleanup_module(void)
{
#ifdef CONFIG_NETLINK
netlink_detach(NETLINK_IP6_FW);
* Dec 22, 1998 Arnaldo Melo vmalloc/vfree used in device_setup to allocate
* kernel memory and copy configuration data to
* kernel space (for big firmwares)
+* May 19, 1999 Arnaldo Melo __initfunc in wanrouter_init
*****************************************************************************/
#include <linux/config.h>
#endif
#ifndef MODULE
-
-int wanrouter_init(void)
+__initfunc(int wanrouter_init(void))
{
int err;
- extern void wanpipe_init(void);
+ extern int wanpipe_init(void),
+ cyclomx_init(void);
printk(KERN_INFO "%s v%u.%u %s\n",
fullname, ROUTER_VERSION, ROUTER_RELEASE, copyright);
err = wanrouter_proc_init();
if (err)
- printk(KERN_ERR "%s: can't create entry in proc filesystem!\n", modname);
+ printk(KERN_ERR "%s: can't create entry in proc filesystem!\n",
+ modname);
/*
* Initialise compiled in boards
#ifdef CONFIG_VENDOR_SANGOMA
wanpipe_init();
#endif
+#ifdef CONFIG_CYCLADES_SYNC
+ cyclomx_init();
+#endif
return err;
}
* Context: process
*/
-
int register_wan_device(wan_device_t* wandev)
{
int err, namelen;
printk(KERN_INFO "%s: registering WAN device %s\n",
modname, wandev->name);
#endif
-
/*
* Register /proc directory entry
*/