IP always defragment.
If you want this, say Y.
-IP: ipautofw masquerade support
-CONFIG_IP_MASQUERADE_IPAUTOFW (Experimental)
- ipautofw is a program by Richard Lynch allowing additional
- support for masquerading protocols which do not (as yet)
- have additional protocol helpers.
- Information and source for ipautofw is available from
- ftp://ftp.netis.com/pub/members/rlynch/
- The ipautofw code is still under development and so is currently
- marked EXPERIMENTAL.
- If you want this, say Y.
+IP: ipautofw masquerading (EXPERIMENTAL)
+CONFIG_IP_MASQUERADE_IPAUTOFW
+ Richard Lynch's ipautofw allows masquerading to work with protocols
+ which do not (as yet) have specific protocol helpers. Its source, and
+ other information, is available at
+ ftp://ftp.netis.com/pub/members/rlynch/.
IP: ICMP masquerading
CONFIG_IP_MASQUERADE_ICMP
the position within the series.
block Floppy disks
- 0 = /dev/fd0 First floppy disk autodetect
- 1 = /dev/fd1 Second floppy disk autodetect
- 2 = /dev/fd2 Third floppy disk autodetect
- 3 = /dev/fd3 Fourth floppy disk autodetect
+ 0 = /dev/fd0 Controller 1, drive 1 autodetect
+ 1 = /dev/fd1 Controller 1, drive 2 autodetect
+ 2 = /dev/fd2 Controller 1, drive 3 autodetect
+ 3 = /dev/fd3 Controller 1, drive 4 autodetect
+ 128 = /dev/fd4 Controller 2, drive 1 autodetect
+ 129 = /dev/fd5 Controller 2, drive 2 autodetect
+ 130 = /dev/fd6 Controller 2, drive 3 autodetect
+ 131 = /dev/fd7 Controller 2, drive 4 autodetect
To specify format, add to the autodetect device number:
0 = /dev/fd? Autodetect format
* apply at our cpl of 0 and the stack ought to be aligned already, and
* we don't need to preserve eflags.
*/
- movl $3, SYMBOL_NAME(x86)
+ /*
+ * A Cyrix/IBM 6x86(L) preserves flags after dividing 5 by 2
+ * (and it _must_ be 5 divided by 2) while other CPUs change
+ * them in undefined ways. We need to know this since we may
+ * need to enable the CPUID instruction at least.
+ */
+ xor %ax,%ax
+ sahf
+ movb $5,%ax
+ movb $2,%bx
+ div %bl
+ lahf
+ cmpb $2,%ah
+ jne ncyrix
+
+ /*
+ * It behaves like a Cyrix/IBM 6x86(L) so put "Cyrix" in the
+ * vendor id field. It may be overwritten later with the
+ * real thing if CPUID works.
+ */
+ movl $0x69727943,SYMBOL_NAME(x86_vendor_id) # low 4 chars
+ movl $0x00000078,SYMBOL_NAME(x86_vendor_id)+4 # next 4 chars
+
+ /*
+ * N.B. The pattern of accesses to 0x22 and 0x23 is *essential*
+ * so do not try to "optimize" it! For the same reason we
+ * do all this with interrupts off.
+ */
+#define setCx86(reg, val) \
+ movb reg,%ax; \
+ outb %ax,$0x22; \
+ movb val,%ax; \
+ outb %ax,$0x23
+
+#define getCx86(reg) \
+ movb reg,%ax; \
+ outb %ax,$0x22; \
+ inb $0x23,%ax
+
+ cli
+ getCx86($0xc3) # get CCR3
+ movb %ax,%cx # Save old value
+ movb %ax,%bx
+ andb $0x0f,%bx # Enable access to all config registers
+ orb $0x10,%bx # by setting bit 4
+ setCx86($0xc3,%bx)
+
+ getCx86($0xe8) # now we can get CCR4
+ orb $0x80,%ax # and set bit 7 (CPUIDEN)
+ movb %ax,%bx # to enable CPUID execution
+ setCx86($0xe8,%bx)
+
+ getCx86($0xfe) # DIR0 : let's check this is a 6x86(L)
+ andb $0xf0,%ax # should be 3xh
+ cmpb $0x30,%ax #
+ jne n6x86
+ getCx86($0xe9) # CCR5 : we reset the SLOP bit
+ andb $0xfd,%ax # so that udelay calculation
+ movb %ax,%bx # is correct on 6x86(L) CPUs
+ setCx86($0xe9,%bx)
+
+n6x86: setCx86($0xc3,%cx) # Restore old CCR3
+ sti
+
+ncyrix: movl $3, SYMBOL_NAME(x86)
pushfl # push EFLAGS
popl %eax # get EFLAGS
movl %eax,%ecx # save original EFLAGS
andb $0x0f, %cl # mask mask revision
movb %cl,SYMBOL_NAME(x86_mask)
movl %edx,SYMBOL_NAME(x86_capability)
+
+ xor %ax,%ax # test again for Cyrix CPU
+ sahf
+ movb $5,%ax
+ movb $2,%bx
+ div %bl
+ lahf
+ cmpb $2,%ah
+ jne ncyrx2 # skip if not Cyrix CPU
+ getCx86($0xff) # DIR1 : let's check the stepping
+ movb %al,SYMBOL_NAME(x86_mask)
+
/* get vendor info */
- xorl %eax, %eax # call CPUID with 0 -> return vendor ID
+ncyrx2: xorl %eax, %eax # call CPUID with 0 -> return vendor ID
.byte 0x0f, 0xa2 # CPUID
movl %ebx,SYMBOL_NAME(x86_vendor_id) # lo 4 chars
movl %edx,SYMBOL_NAME(x86_vendor_id)+4 # next 4 chars
return -EINVAL;
if (from + num > IO_BITMAP_SIZE*32)
return -EINVAL;
- if (!suser())
+ if (!suser() || securelevel > 0)
return -EPERM;
set_bitmap((unsigned long *)current->tss.io_bitmap, from, num, !turn_on);
if (level > 3)
return -EINVAL;
- if (!suser())
+ if (!suser() || securelevel > 0)
return -EPERM;
*(&eflags) = (eflags & 0xffffcfff) | (level << 12);
return 0;
int i;
if (current->ldt) {
- free_page((unsigned long) current->ldt);
+ void * ldt = current->ldt;
current->ldt = NULL;
+ vfree(ldt);
for (i=1 ; i<NR_TASKS ; i++) {
if (task[i] == current) {
set_ldt_desc(gdt+(i<<1)+
return tmp;
}
+void safe_wake_up_process(struct task_struct * p)
+{
+ struct desc_struct * d;
+ unsigned long limit;
+
+ void check(int index, int mask, int value) {
+ unsigned long selector, limit;
+
+ if (!p) return;
+
+ selector = get_stack_long(p, sizeof(long)*index - MAGICNUMBER);
+ if (selector & 4) {
+ d = p->ldt;
+ if (d) limit = get_limit(p->tss.ldt); else limit = 0;
+ } else {
+ d = gdt;
+ limit = 8 * 8;
+ }
+
+ if ((selector & 0xFFF8) >= limit) {
+ d = NULL;
+ force_sig(SIGSEGV, p); p = NULL;
+ } else {
+ d += selector >> 3;
+ if ((d->b & mask) != value ||
+ (d->b >> 13) < (selector & 3)) {
+ force_sig(SIGSEGV, p); p = NULL;
+ }
+ }
+ }
+
+ check(DS, 0x9800, 0x9000); /* Allow present data segments only */
+ check(ES, 0x9800, 0x9000);
+ check(FS, 0x9800, 0x9000);
+ check(GS, 0x9800, 0x9000);
+ check(SS, 0x9A00, 0x9200); /* Stack segment should not be read-only */
+ check(CS, 0x9800, 0x9800); /* Allow present code segments only */
+
+ if (!p) return;
+
+ if (d) {
+ limit = (d->a & 0xFFFF) | (d->b & 0xF0000);
+ if (d->b & 0x800000) {
+ limit <<= 12; limit |= 0xFFF;
+ }
+
+ if (get_stack_long(p, sizeof(long)*EIP - MAGICNUMBER) > limit) {
+ force_sig(SIGSEGV, p); return;
+ }
+ }
+
+ wake_up_process(p);
+}
+
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
else
child->flags &= ~PF_TRACESYS;
child->exit_code = data;
- wake_up_process(child);
+ safe_wake_up_process(child);
/* make sure the single step bit is not set. */
tmp = get_stack_long(child, sizeof(long)*EFL-MAGICNUMBER) & ~TRAP_FLAG;
put_stack_long(child, sizeof(long)*EFL-MAGICNUMBER,tmp);
child->flags &= ~PF_TRACESYS;
tmp = get_stack_long(child, sizeof(long)*EFL-MAGICNUMBER) | TRAP_FLAG;
put_stack_long(child, sizeof(long)*EFL-MAGICNUMBER,tmp);
- wake_up_process(child);
child->exit_code = data;
+ safe_wake_up_process(child);
/* give it a chance to run. */
return 0;
}
if ((unsigned long) data > NSIG)
return -EIO;
child->flags &= ~(PF_PTRACED|PF_TRACESYS);
- wake_up_process(child);
child->exit_code = data;
+ safe_wake_up_process(child);
REMOVE_LINKS(child);
child->p_pptr = child->p_opptr;
SET_LINKS(child);
char x86_vendor_id[13] = "unknown";
+unsigned char Cx86_step = 0;
+static const char *Cx86_type[] = {
+ "unknown", "1.3", "1.4", "2.4", "2.5", "2.6", "2.7 or 3.7", "4.2"
+ };
+
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = -1; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
return NULL;
}
+static const char * Cx86model(void)
+{
+ unsigned char nr6x86 = 0;
+ static const char *model[] = {
+ "unknown", "6x86", "6x86L", "6x86MX", "6x86MXi"
+ };
+ switch (x86) {
+ case 5:
+ nr6x86 = ((x86_capability & (1 << 8)) ? 2 : 1); /* cx8 flag only on 6x86L */
+ break;
+ case 6:
+ nr6x86 = 3;
+ break;
+ default:
+ nr6x86 = 0;
+ }
+ switch (x86_mask) {
+ case 0x03:
+ Cx86_step = 1; /* 6x86MX Rev 1.3 */
+ break;
+ case 0x04:
+ Cx86_step = 2; /* 6x86MX Rev 1.4 */
+ break;
+ case 0x14:
+ Cx86_step = 3; /* 6x86 Rev 2.4 */
+ break;
+ case 0x15:
+ Cx86_step = 4; /* 6x86 Rev 2.5 */
+ break;
+ case 0x16:
+ Cx86_step = 5; /* 6x86 Rev 2.6 */
+ break;
+ case 0x17:
+ Cx86_step = 6; /* 6x86 Rev 2.7 or 3.7 */
+ break;
+ case 0x22:
+ Cx86_step = 7; /* 6x86L Rev 4.2 */
+ break;
+ default:
+ Cx86_step = 0;
+ }
+ return model[nr6x86];
+}
+
static const char * i686model(unsigned int nr)
{
static const char *model[] = {
{
const char *p = NULL;
static char nbuf[12];
- switch (x86) {
- case 4:
- p = i486model(model);
- break;
- case 5:
- p = i586model(model);
- break;
- case 6:
- p = i686model(model);
- break;
+ if (strncmp(x86_vendor_id, "Cyrix", 5) == 0)
+ p = Cx86model();
+ else {
+ switch (x86) {
+ case 4:
+ p = i486model(model);
+ break;
+ case 5:
+ p = i586model(model);
+ break;
+ case 6:
+ p = i686model(model);
+ break;
+ }
}
if (p)
return p;
CD(x86_vendor_id));
if (CD(x86_mask))
- len += sprintf(buffer+len,
- "stepping\t: %d\n",
- CD(x86_mask));
+ if (strncmp(x86_vendor_id, "Cyrix", 5) != 0) {
+ len += sprintf(buffer+len,
+ "stepping\t: %d\n",
+ CD(x86_mask));
+ }
+ else { /* we have a Cyrix */
+ len += sprintf(buffer+len,
+ "stepping\t: %s\n",
+ Cx86_type[Cx86_step]);
+ }
else
len += sprintf(buffer+len,
"stepping\t: unknown\n");
#include <linux/ptrace.h>
#include <linux/unistd.h>
+#include <asm/system.h>
#include <asm/segment.h>
#define _S(nr) (1<<((nr)-1))
asmlinkage int sys_sigreturn(unsigned long __unused)
{
#define COPY(x) regs->x = context.x
+#define CHECK_DATA(x, mask, value) \
+__asm__("larw %0,%%ax\n\t" \
+ "jnz asm_badframe\n\t" \
+ "andl $" mask ",%%eax\n\t" \
+ "cmpl $" value ",%%eax\n\t" \
+ "jne asm_badframe" \
+ : \
+ : "g" (context.x) \
+ : "ax", "cc");
+#define CHECK_CODE(x, y) \
+__asm__("larw %0,%%ax\n\t" \
+ "jnz asm_badframe\n\t" \
+ "andl $0x9800,%%eax\n\t" \
+ "cmpl $0x9800,%%eax\n\t" /* Allow present code segments only */ \
+ "jne asm_badframe\n\t" \
+ "lsll %0,%%eax\n\t" \
+ "cmp %1,%%eax\n\t" /* Check context.eip against the segment limit */ \
+ "jb asm_badframe" \
+ : \
+ : "g" (context.x), "g" (context.y) \
+ : "ax", "cc");
#define COPY_SEG(x) \
+CHECK_DATA(x, "0x9800", "0x9000") /* Allow present data segments only */ \
if ( (context.x & 0xfffc) /* not a NULL selectors */ \
&& (context.x & 0x4) != 0x4 /* not a LDT selector */ \
&& (context.x & 3) != 3 /* not a RPL3 GDT selector */ \
) goto badframe; COPY(x);
-#define COPY_SEG_STRICT(x) \
+#define COPY_STACK(x) \
+CHECK_DATA(x, "0x9A00", "0x9200") /* Stack segment should not be read-only */ \
if (!(context.x & 0xfffc) || (context.x & 3) != 3) goto badframe; COPY(x);
+#define COPY_CODE(x, y) \
+CHECK_CODE(x, y) \
+if (!(context.x & 0xfffc) || (context.x & 3) != 3) goto badframe; \
+COPY(x); COPY(y);
struct sigcontext_struct context;
struct pt_regs * regs;
COPY_SEG(es);
COPY_SEG(fs);
COPY_SEG(gs);
- COPY_SEG_STRICT(ss);
- COPY_SEG_STRICT(cs);
- COPY(eip);
+ COPY_STACK(ss);
+ COPY_CODE(cs, eip);
COPY(ecx); COPY(edx);
COPY(ebx);
COPY(esp); COPY(ebp);
do_exit(SIGSEGV);
}
+void asm_badframe(void)
+{
+ do_exit(SIGSEGV);
+}
+
static inline struct _fpstate * save_i387_hard(struct _fpstate * buf)
{
#ifdef __SMP__
/* Don't use them if a suspend/resume could
corrupt the timer value. This problem
needs more debugging. */
- if (x86_capability & 16) {
- do_gettimeoffset = do_fast_gettimeoffset;
-
- if( strcmp( x86_vendor_id, "AuthenticAMD" ) == 0 ) {
- if( x86 == 5 ) {
- if( x86_model == 0 ) {
- /* turn on cycle counters during power down */
- __asm__ __volatile__ (" movl $0x83, %%ecx \n \
- .byte 0x0f,0x32 \n \
- orl $1,%%eax \n \
- .byte 0x0f,0x30 \n "
- : : : "ax", "cx", "dx" );
- udelay(500);
+ if (x86_capability & 16)
+ if (strncmp(x86_vendor_id, "Cyrix", 5) != 0) {
+ do_gettimeoffset = do_fast_gettimeoffset;
+
+ if( strcmp( x86_vendor_id, "AuthenticAMD" ) == 0 ) {
+ if( x86 == 5 ) {
+ if( x86_model == 0 ) {
+ /* turn on cycle counters during power down */
+ __asm__ __volatile__ (" movl $0x83, %%ecx \n \
+ .byte 0x0f,0x32 \n \
+ orl $1,%%eax \n \
+ .byte 0x0f,0x30 \n "
+ : : : "ax", "cx", "dx" );
+ udelay(500);
+ }
}
- }
- }
+ }
- /* read Pentium cycle counter */
- __asm__(".byte 0x0f,0x31"
- :"=a" (init_timer_cc.low),
- "=d" (init_timer_cc.high));
- irq0.handler = pentium_timer_interrupt;
- }
+ /* read Pentium cycle counter */
+ __asm__(".byte 0x0f,0x31"
+ :"=a" (init_timer_cc.low),
+ "=d" (init_timer_cc.high));
+ irq0.handler = pentium_timer_interrupt;
+ }
#endif
setup_x86_irq(0, &irq0);
}
if (inode->i_count>1 || md_dev[minor].busy>1) /* ioctl : one open channel */
{
- printk ("STOP_MD md%x failed : i_count=%d, busy=%d\n", minor, inode->i_count, md_dev[minor].busy);
+ printk ("STOP_MD md%x failed : i_count=%ld, busy=%d\n", minor, inode->i_count, md_dev[minor].busy);
return -EBUSY;
}
*
* Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
*
+ * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
+ *   Fixed some problems with disk initialization and module initialization.
+ *   Recovered DMA access. Abridged messages. Added support for DTC5051CX,
+ *   WD1002-27X & XEBEC controllers. The driver now uses some jumper settings.
+ *   Added support for manual geometry setting (except Seagate controllers)
+ *   in form:
+ *      xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
+ *   Extended ioctl() support.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/timer.h>
#include <linux/genhd.h>
+#include <linux/ioport.h>
#include <linux/xd.h>
#include <asm/system.h>
#define MAJOR_NR XT_DISK_MAJOR
#include <linux/blk.h>
+#define XD_DONT_USE_DMA		0  /* Initial value. may be overridden using
+				      "nodma" module option */
+#define XD_INIT_DISK_DELAY	3  /* 30 ms delay during disk initialization */
+
+/* Above may need to be increased if a problem with the 2nd drive detection
+   (ST11M controller) or resetting a controller (WD) appears */
+
XD_INFO xd_info[XD_MAXDRIVES];
/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
+#include <asm/page.h>
+/* copied from floppy.c */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size))
+#define xd_dma_mem_free(addr, size) free_pages(addr, __get_order(size))
+static char *xd_dma_buffer = 0;
+
static XD_SIGNATURE xd_sigs[] = {
{ 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
+ { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
{ 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
{ 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
- { 0x0008,"07/15/86 (C) Copyright 1986 Western Digital Corp",xd_wd_init_controller,xd_wd_init_drive," Western Digital 1002AWX1" }, /* Ian Justman, citrus!ianj@csusac.ecs.csus.edu */
- { 0x0008,"06/24/88 (C) Copyright 1988 Western Digital Corp",xd_wd_init_controller,xd_wd_init_drive," Western Digital 1004A27X" }, /* Dave Thaler, thalerd@engin.umich.edu */
- { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Digital WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
+ { 0x0008,"07/15/86 (C) Copyright 1986 Western Digital Corp",xd_wd_init_controller,xd_wd_init_drive," WD 1002AWX1" }, /* Ian Justman, citrus!ianj@csusac.ecs.csus.edu */
+ { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," WD 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
+ { 0x0008,"06/24/88 (C) Copyright 1988 Western Digital Corp",xd_wd_init_controller,xd_wd_init_drive," WD 1004A27X" }, /* Dave Thaler, thalerd@engin.umich.edu */
+ { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
{ 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
{ 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
{ 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
{ 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
+ { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
};
static u_char *xd_bases[] =
{
(u_char *) 0xC8000,(u_char *) 0xCA000,(u_char *) 0xCC000,
- (u_char *) 0xCE000,(u_char *) 0xD0000,(u_char *) 0xD8000,
+ (u_char *) 0xCE000,(u_char *) 0xD0000,(u_char *) 0xD2000,
+ (u_char *) 0xD4000,(u_char *) 0xD6000,(u_char *) 0xD8000,
+ (u_char *) 0xDA000,(u_char *) 0xDC000,(u_char *) 0xDE000,
(u_char *) 0xE0000
};
-static struct hd_struct xd[XD_MAXDRIVES << 6];
+static struct hd_struct xd_struct[XD_MAXDRIVES << 6];
static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES] = { 0, 0 };
static int xd_blocksizes[XD_MAXDRIVES << 6];
static struct gendisk xd_gendisk = {
#else
xd_geninit, /* init function */
#endif
- xd, /* hd struct */
+ xd_struct, /* hd struct */
xd_sizes, /* block sizes */
0, /* number */
(void *) xd_info, /* internal */
};
static struct wait_queue *xd_wait_int = NULL, *xd_wait_open = NULL;
static u_char xd_valid[XD_MAXDRIVES] = { 0,0 };
-static u_char xd_drives = 0, xd_irq = 0, xd_dma = 0, xd_maxsectors;
+static u_char xd_drives = 0, xd_irq = 5, xd_dma = 3, xd_maxsectors;
static u_char xd_override = 0, xd_type = 0;
-static u_short xd_iobase = 0;
+static u_short xd_iobase = 0x320;
+static int xd_geo[XD_MAXDRIVES*3] = { 0,0,0,0,0,0 };
+
+static volatile int xdc_busy = 0;
+static struct wait_queue *xdc_wait = NULL;
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list xd_timer = { NULL, NULL, 0, 0, (timeout_fn) xd_wakeup },
+ xd_watchdog_int = { NULL, NULL, 0, 0, (timeout_fn) xd_watchdog };
+
+static volatile u_char xd_error;
+static int nodma = XD_DONT_USE_DMA;
/* xd_init: register the block device number and set up pointer tables */
int xd_init (void)
if (xd_detect(&controller,&address)) {
printk("xd_geninit: detected a%s controller (type %d) at address %p\n",xd_sigs[controller].name,controller,address);
+ if (check_region(xd_iobase,4)) {
+ printk("xd: Ports at 0x%x are not available\n",xd_iobase);
+ return;
+ }
+ request_region(xd_iobase,4,"xd");
if (controller)
xd_sigs[controller].init_controller(address);
xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
for (i = 0; i < xd_drives; i++)
printk("xd_geninit: drive %d geometry - heads = %d, cylinders = %d, sectors = %d\n",i,xd_info[i].heads,xd_info[i].cylinders,xd_info[i].sectors);
+ }
+ if (xd_drives) {
if (!request_irq(xd_irq,xd_interrupt_handler, 0, "XT harddisk", NULL)) {
if (request_dma(xd_dma,"xd")) {
printk("xd_geninit: unable to get DMA%d\n",xd_dma);
}
for (i = 0; i < xd_drives; i++) {
- xd[i << 6].nr_sects = xd_info[i].heads * xd_info[i].cylinders * xd_info[i].sectors;
+ xd_struct[i << 6].nr_sects = xd_info[i].heads * xd_info[i].cylinders * xd_info[i].sectors;
xd_valid[i] = 1;
}
while (!xd_valid[dev])
sleep_on(&xd_wait_open);
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif /* MODULE */
+
xd_access[dev]++;
return (0);
int code;
sti();
+ if (xdc_busy)
+ return;
while (code = 0, CURRENT) {
INIT_REQUEST; /* do some checking on the request structure */
if (CURRENT_DEV < xd_drives
&& CURRENT->sector + CURRENT->nr_sectors
- <= xd[MINOR(CURRENT->rq_dev)].nr_sects) {
- block = CURRENT->sector + xd[MINOR(CURRENT->rq_dev)].start_sect;
+ <= xd_struct[MINOR(CURRENT->rq_dev)].nr_sects) {
+ block = CURRENT->sector + xd_struct[MINOR(CURRENT->rq_dev)].start_sect;
count = CURRENT->nr_sectors;
switch (CURRENT->cmd) {
}
}
+static int write_fs_long (unsigned long useraddr, long value)
+{
+ int err;
+
+ if (NULL == (long *)useraddr)
+ return -EINVAL;
+ if ((err = verify_area(VERIFY_WRITE, (long *)useraddr, sizeof(long))))
+ return err;
+ put_user((unsigned)value, (long *) useraddr);
+ return 0;
+}
+
/* xd_ioctl: handle device ioctl's */
static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
{
put_user(xd_info[dev].heads, &geometry->heads);
put_user(xd_info[dev].sectors, &geometry->sectors);
put_user(xd_info[dev].cylinders, &geometry->cylinders);
- put_user(xd[MINOR(inode->i_rdev)].start_sect,&geometry->start);
+ put_user(xd_struct[MINOR(inode->i_rdev)].start_sect,&geometry->start);
return (0);
}
return -EINVAL;
read_ahead[MAJOR(inode->i_rdev)] = arg;
return 0;
+ case BLKRAGET:
+ return write_fs_long(arg, read_ahead[MAJOR(inode->i_rdev)]);
case BLKGETSIZE:
if (arg) {
if ((err = verify_area(VERIFY_WRITE,(long *) arg,sizeof(long))))
return (err);
- put_user(xd[MINOR(inode->i_rdev)].nr_sects,(long *) arg);
+ put_user(xd_struct[MINOR(inode->i_rdev)].nr_sects,(long *) arg);
return (0);
}
fsync_dev(inode->i_rdev);
invalidate_buffers(inode->i_rdev);
return 0;
-
+ case HDIO_SET_DMA:
+ if (!suser())
+ return -EACCES;
+ if (xdc_busy)
+ return -EBUSY;
+ nodma = !arg;
+ if (nodma && xd_dma_buffer) {
+ xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
+ xd_dma_buffer = 0;
+ }
+ return 0;
+ case HDIO_GET_DMA:
+ return write_fs_long(arg, !nodma);
+ case HDIO_GET_MULTCOUNT:
+ return write_fs_long(arg, xd_maxsectors);
case BLKRRPART:
return (xd_reread_partitions(inode->i_rdev));
RO_IOCTLS(inode->i_rdev,arg);
if (dev < xd_drives) {
sync_dev(inode->i_rdev);
xd_access[dev]--;
+
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif /* MODULE */
+
}
}
{
u_char cmdblk[6],sense[4];
u_short track,cylinder;
- u_char head,sector,control,mode,temp;
+ u_char head,sector,control,mode = PIO_MODE,temp;
+ char **real_buffer;
+ register int i;
#ifdef DEBUG_READWRITE
printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
#endif /* DEBUG_READWRITE */
control = xd_info[drive].control;
+ if (!xd_dma_buffer)
+ xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
while (count) {
temp = count < xd_maxsectors ? count : xd_maxsectors;
printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
#endif /* DEBUG_READWRITE */
- mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)buffer,temp * 0x200);
+ if (xd_dma_buffer) {
+ mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
+ real_buffer = &xd_dma_buffer;
+ for (i=0; i < (temp * 0x200); i++)
+ xd_dma_buffer[i] = buffer[i];
+ }
+ else
+ real_buffer = &buffer;
+
xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
- switch (xd_command(cmdblk,mode,(u_char *) buffer,(u_char *) buffer,sense,XD_TIMEOUT)) {
+ switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
case 1:
printk("xd_readwrite: timeout, recalibrating drive\n");
xd_recalibrate(drive);
printk(" - no valid disk address\n");
return (0);
}
+ if (xd_dma_buffer)
+ for (i=0; i < (temp * 0x200); i++)
+ buffer[i] = xd_dma_buffer[i];
+
count -= temp, buffer += temp * 0x200, block += temp;
}
return (1);
printk("xd_interrupt_handler: unexpected interrupt\n");
}
-/* xd_dma: set up the DMA controller for a data transfer */
+/* xd_setup_dma: set up the DMA controller for a data transfer */
static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
{
- if (buffer < ((u_char *) 0x1000000 - count)) { /* transfer to address < 16M? */
- if (((u_int) buffer & 0xFFFF0000) != (((u_int) buffer + count) & 0xFFFF0000)) {
+ if (nodma)
+ return (PIO_MODE);
+ if (((u_int) buffer & 0xFFFF0000) != (((u_int) buffer + count) & 0xFFFF0000)) {
#ifdef DEBUG_OTHER
printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
#endif /* DEBUG_OTHER */
- return (PIO_MODE);
- }
- disable_dma(xd_dma);
- clear_dma_ff(xd_dma);
- set_dma_mode(xd_dma,mode);
- set_dma_addr(xd_dma,(u_int) buffer);
- set_dma_count(xd_dma,count);
-
- return (DMA_MODE); /* use DMA and INT */
+ return (PIO_MODE);
}
-#ifdef DEBUG_OTHER
- printk("xd_setup_dma: using PIO, cannot DMA above 16 meg\n");
-#endif /* DEBUG_OTHER */
- return (PIO_MODE);
+ disable_dma(xd_dma);
+ clear_dma_ff(xd_dma);
+ set_dma_mode(xd_dma,mode);
+ set_dma_addr(xd_dma,(u_int) buffer);
+ set_dma_count(xd_dma,count);
+
+ return (DMA_MODE); /* use DMA and INT */
}
/* xd_build: put stuff into an array in a format suitable for the controller */
cmdblk[3] = cylinder & 0xFF;
cmdblk[4] = count;
cmdblk[5] = control;
-
+
return (cmdblk);
}
+/* xd_wakeup is called from timer interrupt */
+static void xd_wakeup (void)
+{
+ wake_up(&xdc_wait);
+}
+
+/* xd_watchdog is called from timer interrupt */
+static void xd_watchdog (void)
+{
+ xd_error = 1;
+ wake_up(&xd_wait_int);
+}
+
/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
{
u_long expiry = jiffies + timeout;
+ int success;
+
+ xdc_busy = 1;
+ while ((success = ((inb(port) & mask) != flags)) && (jiffies < expiry)) {
+ xd_timer.expires = jiffies;
+ cli();
+ add_timer(&xd_timer);
+ sleep_on(&xdc_wait);
+ del_timer(&xd_timer);
+ sti();
+ }
+ xdc_busy = 0;
+ return (success);
+}
- while (((inb(port) & mask) != flags) && (jiffies < expiry))
- ;
-
- return (jiffies >= expiry);
+static inline u_int xd_wait_for_IRQ (void)
+{
+ xd_watchdog_int.expires = jiffies + 8 * HZ;
+ add_timer(&xd_watchdog_int);
+ enable_dma(xd_dma);
+ sleep_on(&xd_wait_int);
+ del_timer(&xd_watchdog_int);
+ xdc_busy = 0;
+ disable_dma(xd_dma);
+ if (xd_error) {
+ printk("xd: missed IRQ - command aborted\n");
+ xd_error = 0;
+ return (1);
+ }
+ return (0);
}
/* xd_command: handle all data transfers necessary for a single command */
switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
case 0:
if (mode == DMA_MODE) {
- enable_dma(xd_dma);
- sleep_on(&xd_wait_int);
- disable_dma(xd_dma);
+ if (xd_wait_for_IRQ())
+ return (1);
} else
outb(outdata ? *outdata++ : 0,XD_DATA);
break;
case STAT_INPUT:
if (mode == DMA_MODE) {
- enable_dma(xd_dma);
- sleep_on(&xd_wait_int);
- disable_dma(xd_dma);
+ if (xd_wait_for_IRQ())
+ return (1);
} else
if (indata)
*indata++ = inb(XD_DATA);
for (i = 0; i < XD_MAXDRIVES; i++) {
xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
if (!xd_command(cmdblk,PIO_MODE,0,0,0,XD_TIMEOUT * 8)) {
+ xd_timer.expires = jiffies + XD_INIT_DISK_DELAY;
+ add_timer(&xd_timer);
+ sleep_on(&xdc_wait);
+
init_drive(count);
count++;
+
+ xd_timer.expires = jiffies + XD_INIT_DISK_DELAY;
+ add_timer(&xd_timer);
+ sleep_on(&xdc_wait);
}
}
return (count);
}
+static void xd_manual_geo_set (u_char drive)
+{
+ xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
+ xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
+ xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
+}
+
static void xd_dtc_init_controller (u_char *address)
{
switch ((u_long) address) {
- case 0xC8000: xd_iobase = 0x320; break;
+ case 0x00000:
+ case 0xC8000: break; /*initial: 0x320 */
case 0xCA000: xd_iobase = 0x324; break;
+ case 0xD0000: /*5150CX*/
+ case 0xD8000: break; /*5150CX*/
default: printk("xd_dtc_init_controller: unsupported BIOS address %p\n",address);
- xd_iobase = 0x320; break;
+ break;
}
- xd_irq = 5; /* the IRQ _can_ be changed on this card, but requires a hardware mod */
- xd_dma = 3;
xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */
outb(0,XD_RESET); /* reset the controller */
}
+
+static void xd_dtc5150cx_init_drive (u_char drive)
+{
+ /* values from controller's BIOS - BIOS chip may be removed */
+ static u_short geometry_table[][4] = {
+ {0x200,8,0x200,0x100},
+ {0x267,2,0x267,0x267},
+ {0x264,4,0x264,0x80},
+ {0x132,4,0x132,0x0},
+ {0x132,2,0x80, 0x132},
+ {0x177,8,0x177,0x0},
+ {0x132,8,0x84, 0x0},
+ {}, /* not used */
+ {0x132,6,0x80, 0x100},
+ {0x200,6,0x100,0x100},
+ {0x264,2,0x264,0x80},
+ {0x280,4,0x280,0x100},
+ {0x2B9,3,0x2B9,0x2B9},
+ {0x2B9,5,0x2B9,0x2B9},
+ {0x280,6,0x280,0x100},
+ {0x132,4,0x132,0x0}};
+ u_char n;
+
+ n = inb(XD_JUMPER);
+ n = (drive ? n : (n >> 2)) & 0x33;
+ n = (n | (n >> 2)) & 0x0F;
+ if (xd_geo[3*drive])
+ xd_manual_geo_set(drive);
+ else
+ if (n != 7) {
+ xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
+ xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
+ xd_info[drive].sectors = 17; /* sectors */
+#if 0
+ xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
+ xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
+ xd_info[drive].ecc = 0x0B; /* ecc length */
+#endif /* 0 */
+ }
+ else {
+ printk("xd%c: undetermined drive geometry\n",'a'+drive);
+ return;
+ }
+ xd_info[drive].control = 5; /* control byte */
+ xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
+ xd_recalibrate(drive);
+}
+
static void xd_dtc_init_drive (u_char drive)
{
u_char cmdblk[6],buf[64];
xd_info[drive].heads = buf[0x0A]; /* heads */
xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */
xd_info[drive].sectors = 17; /* sectors */
+ if (xd_geo[3*drive])
+ xd_manual_geo_set(drive);
#if 0
xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */
xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */
static void xd_wd_init_controller (u_char *address)
{
switch ((u_long) address) {
- case 0xC8000: xd_iobase = 0x320; break;
+ case 0x00000:
+ case 0xC8000: break; /*initial: 0x320 */
case 0xCA000: xd_iobase = 0x324; break;
case 0xCC000: xd_iobase = 0x328; break;
case 0xCE000: xd_iobase = 0x32C; break;
case 0xD0000: xd_iobase = 0x328; break;
case 0xD8000: xd_iobase = 0x32C; break;
default: printk("xd_wd_init_controller: unsupported BIOS address %p\n",address);
- xd_iobase = 0x320; break;
+ break;
}
- xd_irq = 5; /* don't know how to auto-detect this yet */
- xd_dma = 3;
xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */
- /* outb(0,XD_RESET); */ /* reset the controller */
+ outb(0,XD_RESET); /* reset the controller */
+
+ xd_timer.expires = jiffies + XD_INIT_DISK_DELAY;
+ add_timer(&xd_timer);
+ sleep_on(&xdc_wait);
}
static void xd_wd_init_drive (u_char drive)
{
+ /* values from controller's BIOS - BIOS may be disabled */
+ static u_short geometry_table[][4] = {
+ {0x264,4,0x1C2,0x1C2}, /* common part */
+ {0x132,4,0x099,0x0},
+ {0x267,2,0x1C2,0x1C2},
+ {0x267,4,0x1C2,0x1C2},
+
+ {0x334,6,0x335,0x335}, /* 1004 series RLL */
+ {0x30E,4,0x30F,0x3DC},
+ {0x30E,2,0x30F,0x30F},
+ {0x267,4,0x268,0x268},
+
+ {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */
+ {0x3DB,7,0x3DC,0x3DC},
+ {0x264,4,0x265,0x265},
+ {0x267,4,0x268,0x268}};
+
u_char cmdblk[6],buf[0x200];
+ u_char n = 0,rll,jumper_state,use_jumper_geo;
+ u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
+
+ jumper_state = ~(inb(0x322));
+ if (jumper_state & 0x40)
+ xd_irq = 9;
+ rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
if (!xd_command(cmdblk,PIO_MODE,buf,0,0,XD_TIMEOUT * 2)) {
xd_info[drive].heads = buf[0x1AF]; /* heads */
xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */
xd_info[drive].sectors = 17; /* sectors */
+ if (xd_geo[3*drive])
+ xd_manual_geo_set(drive);
#if 0
xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */
xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */
#endif /* 0 */
xd_info[drive].control = buf[0x1B5]; /* control byte */
- xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
+ use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
+ if (xd_geo[3*drive]) {
+ xd_manual_geo_set(drive);
+ xd_info[drive].control = rll ? 7 : 5;
+ }
+ else if (use_jumper_geo) {
+ n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
+ xd_info[drive].cylinders = geometry_table[n][0];
+ xd_info[drive].heads = (u_char)(geometry_table[n][1]);
+ xd_info[drive].control = rll ? 7 : 5;
+#if 0
+ xd_info[drive].rwrite = geometry_table[n][2];
+ xd_info[drive].wprecomp = geometry_table[n][3];
+ xd_info[drive].ecc = 0x0B;
+#endif /* 0 */
+ }
+ if (!wd_1002)
+ if (use_jumper_geo)
+ xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
+ geometry_table[n][2],geometry_table[n][3],0x0B);
+ else
+ xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
+ ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
+ /* 1002 based RLL controller requests converted addressing, but reports physical
+ (physical 26 sec., logical 17 sec.)
+ 1004 based ???? */
+ if (rll & wd_1002) {
+ if ((xd_info[drive].cylinders *= 26,
+ xd_info[drive].cylinders /= 17) > 1023)
+ xd_info[drive].cylinders = 1023; /* 1024 ? */
+#if 0
+ xd_info[drive].rwrite *= 26;
+ xd_info[drive].rwrite /= 17;
+ xd_info[drive].wprecomp *= 26
+ xd_info[drive].wprecomp /= 17;
+#endif /* 0 */
+ }
}
else
printk("xd_wd_init_drive: error reading geometry for drive %d\n",drive);
static void xd_seagate_init_controller (u_char *address)
{
switch ((u_long) address) {
- case 0xC8000: xd_iobase = 0x320; break;
+ case 0x00000:
+ case 0xC8000: break; /*initial: 0x320 */
case 0xD0000: xd_iobase = 0x324; break;
case 0xD8000: xd_iobase = 0x328; break;
case 0xE0000: xd_iobase = 0x32C; break;
default: printk("xd_seagate_init_controller: unsupported BIOS address %p\n",address);
- xd_iobase = 0x320; break;
+ break;
}
- xd_irq = 5; /* the IRQ and DMA channel are fixed on the Seagate controllers */
- xd_dma = 3;
xd_maxsectors = 0x40;
outb(0,XD_RESET); /* reset the controller */
static void xd_omti_init_controller (u_char *address)
{
switch ((u_long) address) {
- case 0xC8000: xd_iobase = 0x320; break;
+ case 0x00000:
+ case 0xC8000: break; /*initial: 0x320 */
case 0xD0000: xd_iobase = 0x324; break;
case 0xD8000: xd_iobase = 0x328; break;
case 0xE0000: xd_iobase = 0x32C; break;
default: printk("xd_omti_init_controller: unsupported BIOS address %p\n",address);
- xd_iobase = 0x320; break;
+ break;
}
- xd_irq = 5; /* the IRQ and DMA channel are fixed on the Omti controllers */
- xd_dma = 3;
xd_maxsectors = 0x40;
outb(0,XD_RESET); /* reset the controller */
xd_info[drive].control = 2;
}
+/* Xebec support (AK) */
+static void xd_xebec_init_controller (u_char *address)
+{
+/* iobase may be set manually in range 0x300 - 0x33C
+ irq may be set manually to 2(9),3,4,5,6,7
+ dma may be set manually to 1,2,3
+ (How to detect them ???)
+BIOS address may be set manually in range 0x0 - 0xF8000
+If you need non-standard settings use the xd=... command */
+
+ switch ((u_long) address) {
+ case 0x00000:
+ case 0xC8000: /* initially: xd_iobase==0x320 */
+ case 0xD0000:
+ case 0xD2000:
+ case 0xD4000:
+ case 0xD6000:
+ case 0xD8000:
+ case 0xDA000:
+ case 0xDC000:
+ case 0xDE000:
+ case 0xE0000: break;
+ default: printk("xd_xebec_init_controller: unsupported BIOS address %p\n",address);
+ break;
+ }
+
+ xd_maxsectors = 0x01;
+ outb(0,XD_RESET); /* reset the controller */
+
+ xd_timer.expires = jiffies + XD_INIT_DISK_DELAY;
+ add_timer(&xd_timer);
+ sleep_on(&xdc_wait);
+}
+
+static void xd_xebec_init_drive (u_char drive)
+{
+ /* values from controller's BIOS - BIOS chip may be removed */
+ static u_short geometry_table[][5] = {
+ {0x132,4,0x080,0x080,0x7},
+ {0x132,4,0x080,0x080,0x17},
+ {0x264,2,0x100,0x100,0x7},
+ {0x264,2,0x100,0x100,0x17},
+ {0x132,8,0x080,0x080,0x7},
+ {0x132,8,0x080,0x080,0x17},
+ {0x264,4,0x100,0x100,0x6},
+ {0x264,4,0x100,0x100,0x17},
+ {0x2BC,5,0x2BC,0x12C,0x6},
+ {0x3A5,4,0x3A5,0x3A5,0x7},
+ {0x26C,6,0x26C,0x26C,0x7},
+ {0x200,8,0x200,0x100,0x17},
+ {0x400,5,0x400,0x400,0x7},
+ {0x400,6,0x400,0x400,0x7},
+ {0x264,8,0x264,0x200,0x17},
+ {0x33E,7,0x33E,0x200,0x7}};
+ u_char n;
+
+ n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry
+ is assumed for BOTH drives */
+ if (xd_geo[3*drive])
+ xd_manual_geo_set(drive);
+ else {
+ xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
+ xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
+ xd_info[drive].sectors = 17; /* sectors */
+#if 0
+ xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
+ xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
+ xd_info[drive].ecc = 0x0B; /* ecc length */
+#endif /* 0 */
+ }
+ xd_info[drive].control = geometry_table[n][4]; /* control byte */
+ xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
+ xd_recalibrate(drive);
+}
+
/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
static void xd_override_init_drive (u_char drive)
u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
u_char cmdblk[6],i;
- for (i = 0; i < 3; i++) {
- while (min[i] != max[i] - 1) {
- test[i] = (min[i] + max[i]) / 2;
- xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
- if (!xd_command(cmdblk,PIO_MODE,0,0,0,XD_TIMEOUT * 2))
- min[i] = test[i];
- else
- max[i] = test[i];
+ if (xd_geo[3*drive])
+ xd_manual_geo_set(drive);
+ else {
+ for (i = 0; i < 3; i++) {
+ while (min[i] != max[i] - 1) {
+ test[i] = (min[i] + max[i]) / 2;
+ xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
+ if (!xd_command(cmdblk,PIO_MODE,0,0,0,XD_TIMEOUT * 2))
+ min[i] = test[i];
+ else
+ max[i] = test[i];
+ }
+ test[i] = min[i];
}
- test[i] = min[i];
+ xd_info[drive].heads = (u_char) min[0] + 1;
+ xd_info[drive].cylinders = (u_short) min[1] + 1;
+ xd_info[drive].sectors = (u_char) min[2] + 1;
}
- xd_info[drive].heads = (u_char) min[0] + 1;
- xd_info[drive].cylinders = (u_short) min[1] + 1;
- xd_info[drive].sectors = (u_char) min[2] + 1;
xd_info[drive].control = 0;
}
-/* xd_setup: initialise from command line parameters */
+/* xd_setup: initialise controller from command line parameters */
void xd_setup (char *command,int *integers)
{
- xd_override = 1;
-
- xd_type = integers[1];
- xd_irq = integers[2];
- xd_iobase = integers[3];
- xd_dma = integers[4];
-
+ switch (integers[0]) {
+ case 4: if (integers[4] < 0)
+ nodma = 1;
+ else if (integers[4] < 8)
+ xd_dma = integers[4];
+ case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
+ xd_iobase = integers[3];
+ case 2: if ((integers[2] > 0) && (integers[2] < 16))
+ xd_irq = integers[2];
+ case 1: xd_override = 1;
+ if ((integers[1] >= 0) && (integers[1] < (sizeof(xd_sigs) / sizeof(xd_sigs[0]))))
+ xd_type = integers[1];
+ case 0: break;
+ default:printk("xd: too many parameters for xd\n");
+ }
xd_maxsectors = 0x01;
}
+#ifndef MODULE
+/* xd_manual_geo_init: initialise drive geometry from command line parameters
+ (used only for WD drives) */
+void xd_manual_geo_init (char *command,int *integers)
+{
+ int i;
+ if (integers[0]%3 != 0) {
+ printk("xd: incorrect number of parameters for xd_geo\n");
+ return;
+ }
+ for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
+ xd_geo[i] = integers[i+1];
+}
+#endif /* MODULE */
+
/* xd_setparam: set the drive characteristics */
static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
{
cmdblk[12] = (u_char) (wprecomp & 0xFF);
cmdblk[13] = ecc;
- if (xd_command(cmdblk,PIO_MODE,0,0,0,XD_TIMEOUT * 2))
+ /* Some controllers require geometry info as data, not command */
+
+ if (xd_command(cmdblk,PIO_MODE,0,&cmdblk[6],0,XD_TIMEOUT * 2))
printk("xd_setparam: error setting characteristics for drive %d\n",drive);
}
#ifdef MODULE
+static int xd[5] = { -1,-1,-1,-1, };
+
+static void xd_done (void)
+{
+ struct gendisk ** gdp;
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ hardsect_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ if (*gdp == &xd_gendisk)
+ break;
+ if (*gdp)
+ *gdp = (*gdp)->next;
+ release_region(xd_iobase,4);
+}
+
int init_module(void)
{
+ int i,count = 0;
int error = xd_init();
+
if (!error)
{
printk(KERN_INFO "XD: Loaded as a module.\n");
+ for (i = 4; i > 0; i--)
+ if(((xd[i] = xd[i-1]) >= 0) && !count)
+ count = i;
+ if((xd[0] = count));
+ xd_setup(NULL, xd);
xd_geninit(&(struct gendisk) { 0,0,0,0,0,0,0,0,0,0,0 });
+ if (!xd_drives) {
+ /* no drives detected - unload module */
+ unregister_blkdev(MAJOR_NR, "xd");
+ xd_done();
+ return (-1);
+ }
+ for (i = 0; i < xd_drives; i++)
+ resetup_one_dev(&xd_gendisk, i);
}
return error;
void cleanup_module(void)
{
+ int partition,dev,start;
+
unregister_blkdev(MAJOR_NR, "xd");
- free_irq(xd_irq, NULL);
- free_dma(xd_dma);
+ for (dev = 0; dev < xd_drives; dev++) {
+ start = dev << xd_gendisk.minor_shift;
+ for (partition = xd_gendisk.max_p - 1; partition >= 0; partition--) {
+ int minor = (start | partition);
+ kdev_t devp = MKDEV(MAJOR_NR, minor);
+ start = dev << xd_gendisk.minor_shift;
+ sync_dev(devp);
+ invalidate_buffers(devp);
+ }
+ }
+ xd_done();
+ if (xd_drives) {
+ free_irq(xd_irq, NULL);
+ free_dma(xd_dma);
+ if (xd_dma_buffer)
+ xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
+ }
}
#endif /* MODULE */
-
/* We need our own cdrom error types! This is a temporary solution. */
+#ifndef ENOMEDIUM
#define ENOMEDIUM EAGAIN /* no medium in removable device */
+#endif
/* We use the open-option O_NONBLOCK to indicate that the
* purpose of opening is only for subsequent ioctl() calls; no device
static int memory_open(struct inode * inode, struct file * filp)
{
+ switch (MINOR(inode->i_rdev)) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ if(securelevel>0)
+ return -EPERM;
+ }
switch (MINOR(inode->i_rdev)) {
case 0:
filp->f_op = &ram_fops;
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
/*
- Written 1996-1997 by Donald Becker.
+ Written 1996-1998 by Donald Becker.
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
*/
static char *version =
-"3c59x.c:v0.46C 10/14/97 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/vortex.html\n";
+"3c59x.c:v0.49 1/2/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/vortex.html\n";
/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
#endif
#define virt_to_bus(addr) ((unsigned long)addr)
#define bus_to_virt(addr) ((void*)addr)
+#define NR_IRQS 16
#else /* 1.3.0 and later */
#define RUN_AT(x) (jiffies + (x))
-#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len)
#endif
#ifdef SA_SHIRQ
#define udelay(microsec) do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
#endif
-#if (LINUX_VERSION_CODE < 0x20123)
+#if LINUX_VERSION_CODE < 0x20115
#define test_and_set_bit(val, addr) set_bit(val, addr)
-#else
+#elif defined(MODULE)
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("3Com 3c590/3c900 series Vortex/Boomerang driver");
MODULE_PARM(debug, "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(compaq_ioaddr, "i");
+MODULE_PARM(compaq_irq, "i");
+MODULE_PARM(compaq_prod_id, "i");
#endif
/* "Knobs" for adjusting internal parameters. */
static int vortex_debug = 1;
#endif
-/* Set iff a MII transceiver on any interface requires mdio preamble. */
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 0;
-/* Caution! These entries must be consistent, with the EISA ones last. */
+/* Caution! These entries must be consistent. */
static const int product_ids[] = {
- 0x5900, 0x5950, 0x5951, 0x5952, 0x9000, 0x9001, 0x9050, 0x9051, 0, 0};
+ 0x5900, 0x5920, 0x5970, 0x5950, 0x5951, 0x5952, 0x9000, 0x9001,
+ 0x9050, 0x9051, 0x9055, 0 };
static const char *product_names[] = {
"3c590 Vortex 10Mbps",
+ "3c592 EISA 10mbps Demon/Vortex",
+ "3c597 EISA Fast Demon/Vortex",
"3c595 Vortex 100baseTX",
"3c595 Vortex 100baseT4",
"3c595 Vortex 100base-MII",
"3c900 Boomerang 10Mbps/Combo",
"3c905 Boomerang 100baseTx",
"3c905 Boomerang 100baseT4",
- "3c592 EISA 10mbps Demon/Vortex",
- "3c597 EISA Fast Demon/Vortex",
+ "3c905B Cyclone 100baseTx",
};
-#define DEMON10_INDEX 8
-#define DEMON100_INDEX 9
/*
Theory of Operation
/* Bits in the general status register. */
enum vortex_status {
- IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
IntReq = 0x0040, StatsFull = 0x0080,
DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
struct boom_rx_desc {
u32 next; /* Last entry points to 0. */
s32 status;
- u32 addr; /* Up to addr/len possible.. */
- s32 length; /* set high bit to indicate last pair. */
+ u32 addr; /* Up to 63 addr/len pairs possible. */
+ s32 length; /* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
};
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20 };
+
struct vortex_private {
char devname[8]; /* "ethN" string, also for kernel debug. */
const char *product_name;
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
struct enet_statistics stats;
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+
+ /* PCI configuration space information. */
+ u8 pci_bus, pci_dev_fn; /* PCI bus location, for power management. */
+ u16 pci_device_id;
+
+ /* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
int options; /* User-settable misc. driver options. */
- int last_rx_packets; /* For media autoselection. */
- unsigned int available_media:8, /* From Wn3_Options */
+ unsigned int
media_override:3, /* Passed-in media type. */
- default_media:3, /* Read from the EEPROM. */
+ default_media:3, /* Read from the EEPROM/Wn3_Config. */
full_duplex:1, autoselect:1,
bus_master:1, /* Vortex can only do a fragment bus-m. */
full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
tx_full:1;
- u16 capabilities; /* Adapter capabilities word. */
- u16 info1, info2; /* Software information information. */
- unsigned char phys[2]; /* MII device addresses. */
+ u16 status_enable;
+ u16 available_media; /* From Wn3_Options. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
};
/* The action to take with a media selection timer tick.
static int vortex_scan(struct device *dev);
static struct device *vortex_found_device(struct device *dev, int ioaddr,
- int irq, int product_index,
+ int irq, int device_id,
int options, int card_idx);
static int vortex_probe1(struct device *dev);
static int vortex_open(struct device *dev);
/* A list of all installed Vortex devices, for removing the driver module. */
static struct device *root_vortex_dev = NULL;
+#ifdef MODULE
/* Variables to work-around the Compaq PCI BIOS32 problem. */
-static int compaq_ioaddr = 0, compaq_irq = 0, compaq_prod_id = 0;
+static int compaq_ioaddr = 0, compaq_irq = 0, compaq_device_id = 0x5900;
-#ifdef MODULE
static int debug = -1;
int
for (;pci_index < 0xff; pci_index++) {
unsigned char pci_irq_line, pci_latency;
- unsigned short pci_command, vendor, device;
+ unsigned short pci_command, new_command, vendor, device;
unsigned int pci_ioaddr;
- int board_index = 0;
if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
pci_index, &pci_bus, &pci_device_fn)
!= PCIBIOS_SUCCESSFUL)
PCI_INTERRUPT_LINE, &pci_irq_line);
pcibios_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
/* Remove I/O space marker in bit 0. */
pci_ioaddr &= ~3;
if (vendor != TCOM_VENDOR_ID)
continue;
- for (board_index = 0; product_ids[board_index]; board_index++) {
- if (device == product_ids[board_index])
- break;
- }
- if (product_ids[board_index] == 0) {
- printk("Unknown 3Com PCI ethernet adapter type %4.4x detected:"
- " not configured.\n", device);
- continue;
- }
if (check_region(pci_ioaddr, VORTEX_TOTAL_SIZE))
continue;
+ /* Activate the card. */
+ new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled this"
+ " device! Updating PCI command %4.4x->%4.4x.\n",
+ pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
dev = vortex_found_device(dev, pci_ioaddr, pci_irq_line,
- board_index, dev && dev->mem_start
+ device, dev && dev->mem_start
? dev->mem_start : options[cards_found],
cards_found);
if (dev) {
- /* Get and check the bus-master and latency values.
- Some PCI BIOSes fail to set the master-enable bit, and
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ /* Get and check the latency values. On the 3c590 series
the latency timer must be set to the maximum value to avoid
data corruption that occurs when the timer expires during
- a transfer -- a bug in the Vortex chip. */
- pcibios_read_config_word(pci_bus, pci_device_fn,
- PCI_COMMAND, &pci_command);
- if ( ! (pci_command & PCI_COMMAND_MASTER)) {
- printk("%s: PCI Master Bit has not been set! "
- " Setting...\n", dev->name);
- pci_command |= PCI_COMMAND_MASTER;
- pcibios_write_config_word(pci_bus, pci_device_fn,
- PCI_COMMAND, pci_command);
- }
+ a transfer -- a bug in the Vortex chip only. */
+ u8 new_latency = (device&0xff00) == 0x5900 ? 248 : 32;
+ vp->pci_bus = pci_bus;
+ vp->pci_dev_fn = pci_device_fn;
+ vp->pci_device_id = device;
+
pcibios_read_config_byte(pci_bus, pci_device_fn,
PCI_LATENCY_TIMER, &pci_latency);
- if (pci_latency != 248) {
+ if (pci_latency < new_latency) {
printk("%s: Overriding PCI latency"
- " timer (CFLT) setting of %d, new value is 248.\n",
- dev->name, pci_latency);
+ " timer (CFLT) setting of %d, new value is %d.\n",
+ dev->name, pci_latency, new_latency);
pcibios_write_config_byte(pci_bus, pci_device_fn,
- PCI_LATENCY_TIMER, 248);
+ PCI_LATENCY_TIMER, new_latency);
}
dev = 0;
cards_found++;
if (EISA_bus) {
static int ioaddr = 0x1000;
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- int product_id, product_index;
+ int device_id;
if (check_region(ioaddr, VORTEX_TOTAL_SIZE))
continue;
/* Check the standard EISA ID register for an encoded '3Com'. */
if (inw(ioaddr + 0xC80) != 0x6d50)
continue;
/* Check for a product that we support, 3c59{2,7} any rev. */
- product_id = inw(ioaddr + 0xC82) & 0xF0FF;
- if (product_id == 0x7059) /* 597 */
- product_index = DEMON100_INDEX;
- else if (product_id == 0x2059) /* 592 */
- product_index = DEMON10_INDEX;
- else
+ device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
+ if ((device_id & 0xFF00) != 0x5900)
continue;
vortex_found_device(dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
- product_index, dev && dev->mem_start
+ device_id, dev && dev->mem_start
? dev->mem_start : options[cards_found],
cards_found);
dev = 0;
}
}
+#ifdef MODULE
/* Special code to work-around the Compaq PCI BIOS32 problem. */
if (compaq_ioaddr) {
- vortex_found_device(dev, compaq_ioaddr, compaq_irq, compaq_prod_id,
+ vortex_found_device(dev, compaq_ioaddr, compaq_irq, compaq_device_id,
dev && dev->mem_start ? dev->mem_start
: options[cards_found], cards_found);
cards_found++;
dev = 0;
}
+#endif
- /* Finally check for a 3c515 on the ISA bus. */
- /* (3c515 support omitted on this version.) */
+ /* 3c515 cards are now supported by the 3c515.c driver. */
return cards_found;
}
static struct device *
vortex_found_device(struct device *dev, int ioaddr, int irq,
- int product_index, int options, int card_idx)
+ int device_id, int options, int card_idx)
{
struct vortex_private *vp;
+ const char *product_name;
+ int board_index = 0;
+
+ for (board_index = 0; product_ids[board_index]; board_index++) {
+ if (device_id == product_ids[board_index])
+ break;
+ }
+ /* Handle products we don't recognize, but might still work with. */
+ if (product_ids[board_index])
+ product_name = product_names[board_index];
+ else if ((device_id & 0xff00) == 0x5900)
+ product_name = "3c590 Vortex";
+ else if ((device_id & 0xfff0) == 0x9000)
+ product_name = "3c900";
+ else if ((device_id & 0xfff0) == 0x9050)
+ product_name = "3c905";
+ else {
+ printk("Unknown 3Com PCI ethernet adapter type %4.4x detected:"
+ " not configured.\n", device_id);
+ return 0;
+ }
#ifdef MODULE
/* Allocate and fill new device structure. */
- int dev_size = sizeof(struct device) +
- sizeof(struct vortex_private) + 15; /* Pad for alignment */
+ {
+ int dev_size = sizeof(struct device) +
+ sizeof(struct vortex_private) + 15; /* Pad for alignment */
- dev = (struct device *) kmalloc(dev_size, GFP_KERNEL);
- memset(dev, 0, dev_size);
+ dev = (struct device *) kmalloc(dev_size, GFP_KERNEL);
+ memset(dev, 0, dev_size);
+ }
/* Align the Rx and Tx ring entries. */
dev->priv = (void *)(((long)dev + sizeof(struct device) + 15) & ~15);
vp = (struct vortex_private *)dev->priv;
dev->base_addr = ioaddr;
dev->irq = irq;
dev->init = vortex_probe1;
- vp->product_name = product_names[product_index];
+ vp->product_name = product_name;
vp->options = options;
if (card_idx >= 0) {
if (full_duplex[card_idx] >= 0)
dev->mtu = mtu;
vp = (struct vortex_private *)dev->priv;
- vp->product_name = product_names[product_index];
+ vp->product_name = product_name;
vp->options = options;
if (options >= 0) {
vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
{
int ioaddr = dev->base_addr;
struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ u16 *ether_addr = (u16 *)dev->dev_addr;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
int i;
- printk("%s: 3Com %s at %#3x,", dev->name,
- vp->product_name, ioaddr);
+ printk("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr);
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
- for (i = 0; i < 0x18; i++) {
- u16 *phys_addr = (u16 *)dev->dev_addr;
+ for (i = 0; i < 0x40; i++) {
int timer;
outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us. for the read to take place. */
- for (timer = 4; timer >= 0; timer--) {
+ for (timer = 10; timer >= 0; timer--) {
udelay(162);
if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
break;
}
eeprom[i] = inw(ioaddr + Wn0EepromData);
- checksum ^= eeprom[i];
- if (i >= 10 && i < 13)
- phys_addr[i - 10] = htons(inw(ioaddr + Wn0EepromData));
}
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */
+ while (i < 0x21)
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
if (checksum != 0x00)
printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+ for (i = 0; i < 3; i++)
+ ether_addr[i] = htons(eeprom[i + 10]);
for (i = 0; i < 6; i++)
printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
printk(", IRQ %d\n", dev->irq);
if (vortex_debug && (dev->irq <= 0 || dev->irq >= NR_IRQS))
printk(" *** Warning: this IRQ is unlikely to work! ***\n");
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+ vp->info2 = eeprom[15];
+ vp->capabilities = eeprom[16];
+
+ if (vp->info1 & 0x8000)
+ vp->full_duplex = 1;
+
{
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
union wn3_config config;
ram_split[config.u.ram_split],
config.u.autoselect ? "autoselect/" : "",
media_tbl[config.u.xcvr].name);
- dev->if_port = config.u.xcvr;
vp->default_media = config.u.xcvr;
vp->autoselect = config.u.autoselect;
}
+
if (vp->media_override != 7) {
printk(" Media override to transceiver type %d (%s).\n",
vp->media_override, media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
- }
+ } else
+ dev->if_port = vp->default_media;
if (dev->if_port == XCVR_MII) {
int phy, phy_idx = 0;
mii_status = mdio_read(ioaddr, phy, 0);
if (mii_status != 0xffff) {
vp->phys[phy_idx++] = phy;
- printk("%s: MII transceiver found at address %d.\n",
- dev->name, phy);
+ printk(" MII transceiver found at address %d.\n", phy);
mdio_sync(ioaddr, 32);
if ((mdio_read(ioaddr, phy, 1) & 0x0040) == 0)
mii_preamble_required = 1;
}
}
if (phy_idx == 0) {
- printk("%s: ***WARNING*** No MII transceivers found!\n",
- dev->name);
- vp->phys[0] = 0;
+ printk(" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
+ vp->advertising = mdio_read(ioaddr, vp->phys[0], 4);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+ vp->advertising &= 0x015F;
+ mdio_write(ioaddr, vp->phys[0], 4, vp->advertising);
+ }
}
}
- vp->info1 = eeprom[13];
- vp->info2 = eeprom[15];
- vp->capabilities = eeprom[16];
- if (vp->capabilities & 0x20) {
- vp->full_bus_master_tx = 1;
- printk(" Enabling bus-master transmits and %s receives.\n",
- (vp->info2 & 1) ? "early" : "whole-frame" );
- vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+ printk(" Enabling bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
}
/* We do a request_region() to register /proc/ioports info. */
return 0;
}
-\f
-/* Read and write the MII registers using software-generated serial
- MDIO protocol. The maxium data clock rate is 2.5 Mhz. */
-#define mdio_delay() udelay(1)
-
-#define MDIO_SHIFT_CLK 0x01
-#define MDIO_DIR_WRITE 0x04
-#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
-#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
-#define MDIO_DATA_READ 0x02
-#define MDIO_ENB_IN 0x00
-
-static void mdio_sync(int ioaddr, int bits)
-{
- int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
- /* Establish sync by sending at least 32 logic ones. */
- while (-- bits >= 0) {
- outw(MDIO_DATA_WRITE1, mdio_addr);
- mdio_delay();
- outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
- }
-}
-static int mdio_read(int ioaddr, int phy_id, int location)
-{
- int i;
- int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
- unsigned int retval = 0;
- int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
-
- if (mii_preamble_required)
- mdio_sync(ioaddr, 32);
-
- /* Shift the read command bits out. */
- for (i = 14; i >= 0; i--) {
- int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outw(dataval, mdio_addr);
- mdio_delay();
- outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
- }
- /* Read the two transition, 16 data, and wire-idle bits. */
- for (i = 19; i > 0; i--) {
- outw(MDIO_ENB_IN, mdio_addr);
- mdio_delay();
- retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
- outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
- }
- return retval>>1 & 0xffff;
-}
-
-static void mdio_write(int ioaddr, int phy_id, int location, int value)
-{
- int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
- int i;
-
- if (mii_preamble_required)
- mdio_sync(ioaddr, 32);
-
- /* Shift the command bits out. */
- for (i = 31; i >= 0; i--) {
- int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outw(dataval, mdio_addr);
- mdio_delay();
- outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
- }
- /* Leave the interface idle. */
- for (i = 1; i >= 0; i--) {
- outw(MDIO_ENB_IN, mdio_addr);
- mdio_delay();
- outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
- mdio_delay();
- }
-
- return;
-}
\f
static int
vp->rx_ring[i].next = virt_to_bus(&vp->rx_ring[i+1]);
vp->rx_ring[i].status = 0; /* Clear complete bit. */
vp->rx_ring[i].length = PKT_BUF_SZ | LAST_FRAG;
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = DEV_ALLOC_SKB(PKT_BUF_SZ);
vp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* Bad news! */
skb->dev = dev; /* Mark as being used by this device. */
+#if LINUX_VERSION_CODE >= 0x10300
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = virt_to_bus(skb->tail);
+#else
+ vp->rx_ring[i].addr = virt_to_bus(skb->data);
+#endif
}
vp->rx_ring[i-1].next = virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
outl(virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
/* Allow status bits to be seen. */
- outw(SetStatusEnb | AdapterFailure|IntReq|StatsFull|TxComplete|
- (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
- (vp->full_bus_master_rx ? UpComplete : RxComplete) |
- (vp->bus_master ? DMADone : 0),
- ioaddr + EL3_CMD);
+ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0);
+ outw(vp->status_enable, ioaddr + EL3_CMD);
/* Ack all pending events, and set active indicator mask. */
outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
- | AdapterFailure | TxComplete
+ | HostError | TxComplete
| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete,
ioaddr + EL3_CMD);
#ifdef notdef
if (vp->full_bus_master_rx) {
printk(" Switching to non-bus-master receives.\n");
- outw(SetStatusEnb | AdapterFailure|IntReq|StatsFull |
+ outw(SetStatusEnb | HostError|IntReq|StatsFull |
(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
RxComplete | (vp->bus_master ? DMADone : 0),
ioaddr + EL3_CMD);
/* End of Michael Sievers <sieversm@mail.desy.de> changes. */
}
-static int
-vortex_start_xmit(struct sk_buff *skb, struct device *dev)
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct device *dev, int status)
{
struct vortex_private *vp = (struct vortex_private *)dev->priv;
int ioaddr = dev->base_addr;
-#ifndef final_version
- if (skb == NULL || skb->len <= 0) {
- printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
- dev->name);
- dev_tint(dev);
- return 0;
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ unsigned char tx_status = inb(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+ if (vortex_debug > 2
+ || (tx_status != 0x88 && vortex_debug > 0))
+ printk("%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ outb(0, ioaddr + TxStatus);
+ outw(TxEnable, ioaddr + EL3_CMD);
}
-#endif
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vortex_debug > 4)
+ printk("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ printk("%s: Updating stats failed, disabling stats as an"
+ " interrupt source.\n", dev->name);
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ printk("\n Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ printk(" %2.2x", inb(ioaddr+reg));
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | TxAvailable | RxComplete | HostError
+ | UpComplete | DownComplete | TxComplete,
+ ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & IntReq)
+		outw(vp->status_enable, ioaddr + EL3_CMD);
+ if (status & HostError) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ if (vortex_debug > 0)
+ printk("%s: Host error, FIFO diagnostic register %4.4x.\n",
+ dev->name, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
+ int j;
+ outw(TotalReset | 0xff, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ /* Re-enable the receiver. */
+ outw(RxEnable, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ } else if (fifo_diag & 0x0400) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | HostError, ioaddr + EL3_CMD);
+ }
+ }
+}
+
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
if (jiffies - dev->trans_start >= TX_TIMEOUT)
/* Clear the Tx status stack. */
{
short tx_status;
- int i = 4;
+ int i = 32;
while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
struct vortex_private *vp = (struct vortex_private *)dev->priv;
int ioaddr = dev->base_addr;
-#ifndef final_version
- if (skb == NULL || skb->len <= 0) {
- printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
- dev->name);
- dev_tint(dev);
- return 0;
- }
-#endif
-
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
if (jiffies - dev->trans_start >= TX_TIMEOUT)
vortex_tx_timeout(dev);
struct vortex_private *lp;
int ioaddr, status;
int latency;
- int i = max_interrupt_work;
+ int work_done = max_interrupt_work;
- if (dev->interrupt)
- printk("%s: Re-entering the interrupt handler.\n", dev->name);
- dev->interrupt = 1;
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
ioaddr = dev->base_addr;
latency = inb(ioaddr + Timer);
if (vortex_debug > 4)
printk("%s: interrupt, status %4.4x, timer %d.\n", dev->name,
status, latency);
-#ifdef notdef
- /* This code guard against bogus hangs, but fails with shared IRQs. */
- if ((status & ~0xE000) == 0x0000) {
- static int donedidthis=0;
- /* Some interrupt controllers store a bogus interrupt from boot-time.
- Ignore a single early interrupt, but don't hang the machine for
- other interrupt problems. */
- if (donedidthis++ > 100) {
- printk("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
- dev->name, status, dev->start);
- FREE_IRQ(dev->irq, dev);
- }
- }
-#endif
-
do {
if (vortex_debug > 5)
printk("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
vortex_rx(dev);
+ if (status & UpComplete) {
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ boomerang_rx(dev);
+ }
if (status & TxAvailable) {
if (vortex_debug > 5)
dev->tbusy = 0;
mark_bh(NET_BH);
}
- if (status & TxComplete) { /* Really "TxError" for us. */
- unsigned char tx_status = inb(ioaddr + TxStatus);
- /* Presumably a tx-timeout. We must merely re-enable. */
- if (vortex_debug > 2
- || (tx_status != 0x88 && vortex_debug > 0))
- printk("%s: Transmit error, Tx status register %2.2x.\n",
- dev->name, tx_status);
- if (tx_status & 0x04) lp->stats.tx_fifo_errors++;
- if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
- outb(0, ioaddr + TxStatus);
- outw(TxEnable, ioaddr + EL3_CMD);
- }
+
if (status & DownComplete) {
unsigned int dirty_tx = lp->dirty_tx;
mark_bh(NET_BH);
}
#endif
- if (status & UpComplete) {
- boomerang_rx(dev);
- outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
- }
- if (status & (AdapterFailure | RxEarly | StatsFull)) {
- /* Handle all uncommon interrupts at once. */
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
- if (status & StatsFull) { /* Empty statistics. */
- static int DoneDidThat = 0;
- if (vortex_debug > 4)
- printk("%s: Updating stats.\n", dev->name);
- update_stats(ioaddr, dev);
- /* DEBUG HACK: Disable statistics as an interrupt source. */
- /* This occurs when we have the wrong media type! */
- if (DoneDidThat == 0 &&
- inw(ioaddr + EL3_STATUS) & StatsFull) {
- int win, reg;
- printk("%s: Updating stats failed, disabling stats as an"
- " interrupt source.\n", dev->name);
- for (win = 0; win < 8; win++) {
- EL3WINDOW(win);
- printk("\n Vortex window %d:", win);
- for (reg = 0; reg < 16; reg++)
- printk(" %2.2x", inb(ioaddr+reg));
- }
- EL3WINDOW(7);
- outw(SetIntrEnb | TxAvailable | RxComplete | AdapterFailure
- | UpComplete | DownComplete | TxComplete,
- ioaddr + EL3_CMD);
- DoneDidThat++;
- }
- }
- if (status & AdapterFailure) {
- u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = inw(ioaddr + Wn4_FIFODiag);
- if (vortex_debug > 0)
- printk("%s: Host error, FIFO diagnostic register %4.4x.\n",
- dev->name, fifo_diag);
- /* Adapter failure requires Tx/Rx reset and reinit. */
- if (fifo_diag & 0x0400) {
- int j;
- outw(TxReset, ioaddr + EL3_CMD);
- for (j = 20; j >= 0 ; j--)
- if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
- break;
- outw(TxEnable, ioaddr + EL3_CMD);
- }
- if (fifo_diag & 0x2000) {
- outw(RxReset, ioaddr + EL3_CMD);
- /* Set the Rx filter to the current state. */
- set_rx_mode(dev);
- outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
- outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
- }
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
+ vortex_error(dev, status);
+
+ if (--work_done < 0) {
+ if ((status & (0x7fe - (UpComplete | DownComplete))) == 0) {
+ /* Just ack these and return. */
+ outw(AckIntr | UpComplete | DownComplete, ioaddr + EL3_CMD);
+ } else {
+ printk("%s: Too much work in interrupt, status %4.4x. "
+ "Temporarily disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ /* Set a timer to reenable interrupts. */
+
+ break;
}
}
-
- if (--i < 0) {
- printk("%s: Too much work in interrupt, status %4.4x. "
- "Disabling functions (%4.4x).\n",
- dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
- /* Disable all pending interrupts. */
- outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
- outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
- break;
- }
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
int entry = vp->cur_rx % RX_RING_SIZE;
int ioaddr = dev->base_addr;
int rx_status;
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
printk(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
if (pkt_len < rx_copybreak
&& (skb = DEV_ALLOC_SKB(pkt_len + 2)) != 0) {
skb->dev = dev;
+#if LINUX_VERSION_CODE >= 0x10300
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
bus_to_virt(vp->rx_ring[entry].addr),
pkt_len);
+#else
+ memcpy(skb->data, bus_to_virt(vp->rx_ring[entry].addr), pkt_len);
+ skb->len = pkt_len;
+#endif
rx_copy++;
} else{
void *temp;
vp->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
+ if (--rx_work_limit < 0)
+ break;
}
/* Refill the Rx ring buffers. */
for (; vp->dirty_rx < vp->cur_rx; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = DEV_ALLOC_SKB(PKT_BUF_SZ);
if (skb == NULL)
break; /* Bad news! */
skb->dev = dev; /* Mark as being used by this device. */
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ outw(UpUnstall, ioaddr + EL3_CMD);
}
return 0;
}
short new_mode;
if (dev->flags & IFF_PROMISC) {
- if (vortex_debug > 3)
+ if (vortex_debug > 0)
printk("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
set_rx_mode(dev);
}
#endif
+
+\f
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define mdio_delay() udelay(1)
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(int ioaddr, int bits)
+{
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(int ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return retval>>1 & 0xffff;
+}
+
+static void mdio_write(int ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ return;
+}
+
\f
#ifdef MODULE
void
#
tristate 'Dummy net driver support' CONFIG_DUMMY
tristate 'EQL (serial line load balancing) support' CONFIG_EQUALIZER
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- tristate 'Frame relay DLCI support (EXPERIMENTAL)' CONFIG_DLCI
- if [ "$CONFIG_DLCI" = "y" -o "$CONFIG_DLCI" = "m" ]; then
- int ' Max open DLCI' CONFIG_DLCI_COUNT 24
- int ' Max DLCI per device' CONFIG_DLCI_MAX 8
- dep_tristate ' SDLA (Sangoma S502/S508) support' CONFIG_SDLA $CONFIG_DLCI
- fi
+tristate 'Frame relay DLCI support' CONFIG_DLCI
+if [ "$CONFIG_DLCI" = "y" -o "$CONFIG_DLCI" = "m" ]; then
+ int ' Max open DLCI' CONFIG_DLCI_COUNT 24
+ int ' Max DLCI per device' CONFIG_DLCI_MAX 8
+ dep_tristate ' SDLA (Sangoma S502/S508) support' CONFIG_SDLA $CONFIG_DLCI
fi
tristate 'PLIP (parallel port) support' CONFIG_PLIP
tristate 'PPP (point-to-point) support' CONFIG_PPP
tristate 'Ansel Communications EISA 3200 support (EXPERIMENTAL)' CONFIG_AC3200
fi
tristate 'Apricot Xen-II on board ethernet' CONFIG_APRICOT
- tristate 'Intel EtherExpress/Pro 100B support' CONFIG_EEXPRESS_PRO100B
tristate 'DE425, DE434, DE435, DE450, DE500 support' CONFIG_DE4X5
tristate 'DECchip Tulip (dc21x4x) PCI support' CONFIG_DEC_ELCP
tristate 'Digi Intl. RightSwitch SE-X support' CONFIG_DGRS
+ tristate 'Intel EtherExpress/Pro 100B support' CONFIG_EEXPRESS_PRO100B
+ tristate 'RTL 8139 support' CONFIG_RTL8139
+ tristate 'SMC EtherPower II support' CONFIG_SMC_EPIC
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
bool 'Zenith Z-Note support (EXPERIMENTAL)' CONFIG_ZNET
fi
fi
fi
+bool 'Ethernet (Gigabit)' CONFIG_GIGAETHER
+if [ "$CONFIG_GIGAETHER" = "y" ]; then
+ bool 'Packet Engines G-NIC PCI Gigabit Ethernet Adapter' CONFIG_YELLOWFIN
+fi
+
bool 'Token Ring driver support' CONFIG_TR
if [ "$CONFIG_TR" = "y" ]; then
tristate 'IBM Tropic chipset based adaptor support' CONFIG_IBMTR
L_OBJS += smc-ultra32.o
CONFIG_8390_BUILTIN = y
else
- ifeq ($(CONFIG_ULTRA),m)
+ ifeq ($(CONFIG_ULTRA32),m)
CONFIG_8390_MODULE = y
M_OBJS += smc-ultra32.o
endif
endif
endif
+ifeq ($(CONFIG_YELLOWFIN),y)
+L_OBJS += yellowfin.o
+else
+ ifeq ($(CONFIG_YELLOWFIN),m)
+ M_OBJS += yellowfin.o
+ endif
+endif
+
+ifeq ($(CONFIG_SMC_EPIC),y)
+L_OBJS += epic100.o
+else
+ ifeq ($(CONFIG_SMC_EPIC),m)
+ M_OBJS += epic100.o
+ endif
+endif
+
+ifeq ($(CONFIG_RTL8139),y)
+L_OBJS += rtl8139.o
+else
+ ifeq ($(CONFIG_RTL8139),m)
+ M_OBJS += rtl8139.o
+ endif
+endif
+
include $(TOPDIR)/Rules.make
clean:
extern int a2065_probe(struct device *);
extern int ariadne_probe(struct device *);
extern int hydra_probe(struct device *);
-
+extern int yellowfin_probe(struct device *);
+extern int epic100_probe(struct device *);
+extern int rtl8139_probe(struct device *);
/* Detachable devices ("pocket adaptors") */
extern int atp_init(struct device *);
extern int de600_probe(struct device *);
return 1; /* ENXIO */
if (1
+#ifdef CONFIG_SMC_EPIC
+ && epic100_probe(dev)
+#endif
+#ifdef CONFIG_YELLOWFIN
+ && yellowfin_probe(dev)
+#endif
+#ifdef CONFIG_RTL8139
+ && rtl8139_probe(dev)
+#endif
#ifdef CONFIG_DGRS
&& dgrs_probe(dev)
#endif
--- /dev/null
+/* epic100.c: A SMC 83c170 EPIC/100 fast ethernet driver for Linux. */
+/*
+ NOTICE: THIS IS THE ALPHA TEST VERSION!
+ Written 1997 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+ All other rights reserved.
+
+ This driver is for the SMC EtherPower II 9432 PCI ethernet adapter based on
+ the SMC83c170.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Support and updates available at
+ http://cesdis.gsfc.nasa.gov/linux/drivers/epic100.html
+*/
+
+static const char *version =
+"epic100.c:v0.10 10/14/97 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/epic100.html\n";
+
+/* A few user-configurable values. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static const int rx_copybreak = 200;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((2000*HZ)/1000)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 128 /* Rounded down to 4 byte units. */
+#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
+
+#include <linux/config.h>
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+ This is only in the support-all-kernels source code. */
+#include <linux/version.h>		/* Evil, but necessary */
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+#define RUN_AT(x) (x) /* What to put in timer->expires. */
+#define DEV_ALLOC_SKB(len) alloc_skb(len, GFP_ATOMIC)
+#define virt_to_bus(addr) ((unsigned long)addr)
+#define bus_to_virt(addr) ((void*)addr)
+
+#else /* 1.3.0 and later */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+#endif
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+#ifdef MODULE
+#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#else
+#undef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT
+#undef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#endif /* 1.3.38 */
+
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#endif
+
+#ifdef SA_SHIRQ
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+#else
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n)
+#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x20123)
+#define test_and_set_bit(val, addr) set_bit(val, addr)
+#else
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+#endif
+
+/* The I/O extent. */
+#define EPIC_TOTAL_SIZE 0x100
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry epic100_drv =
+{"Epic100", epic100_pci_probe, EPIC_TOTAL_SIZE, NULL};
+#endif
+
+static int epic_debug = 1;
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the SMC "EPIC/100", the SMC
+single-chip ethernet controllers for PCI. This chip is used on
+the SMC EtherPower II boards.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+IVb. References
+
+http://www.smc.com/components/catalog/smc83c170.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://www.national.com/pf/DP/DP83840.html
+
+IVc. Errata
+
+*/
+
+#ifndef PCI_VENDOR_ID_SMC
+#define PCI_VENDOR_ID_SMC 0x10B8
+#endif
+#ifndef PCI_DEVICE_ID_SMC_EPIC100
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+#endif
+
+/* The rest of these values should never change. */
+/* Offsets to registers, using the (ugh) SMC names. */
+enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+ TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
+ MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
+ LAN0=64, /* MAC address. */
+ MC0=80, /* Multicast filter table. */
+ RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
+ PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatus {
+ TxIdle=0x40000, RxIdle=0x20000,
+ CntFull=0x0200, TxUnderrun=0x0100,
+ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
+};
+
+/* The EPIC100 Rx and Tx buffer descriptors. */
+
+struct epic_tx_desc {
+ s16 status;
+ u16 txlength;
+ u32 bufaddr;
+ u16 buflength;
+ u16 control;
+ u32 next;
+};
+
+struct epic_rx_desc {
+ s16 status;
+ u16 rxlength;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+struct epic_private {
+ char devname[8]; /* Used only for kernel debugging. */
+ const char *product_name;
+ struct device *next_module;
+ struct epic_rx_desc rx_ring[RX_RING_SIZE];
+ struct epic_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ int chip_id;
+ int revision;
+ struct enet_statistics stats;
+ struct timer_list timer; /* Media selection timer. */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned char mc_filter[8];
+ signed char phys[4]; /* MII device addresses. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ int pad0, pad1; /* Used for 8-byte alignment */
+};
+
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+#ifdef MODULE
+/* Used to pass the full-duplex flag, etc. */
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+#endif
+
+static struct device *epic100_probe1(struct device *dev, int ioaddr, int irq,
+ int chip_id, int options, int card_idx);
+static int epic_open(struct device *dev);
+static int read_eeprom(int ioaddr, int location);
+static int mii_read(int ioaddr, int phy_id, int location);
+static void epic_timer(unsigned long data);
+static void epic_tx_timeout(struct device *dev);
+static void epic_init_ring(struct device *dev);
+static int epic_start_xmit(struct sk_buff *skb, struct device *dev);
+static int epic_rx(struct device *dev);
+static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int epic_close(struct device *dev);
+static struct enet_statistics *epic_get_stats(struct device *dev);
+#ifdef NEW_MULTICAST
+static void set_rx_mode(struct device *dev);
+#else
+static void set_rx_mode(struct device *dev, int num_addrs, void *addrs);
+#endif
+
+\f
+
+#ifdef MODULE
+/* A list of all installed EPIC devices, for removing the driver module. */
+static struct device *root_epic_dev = NULL;
+#endif
+
+int epic100_probe(struct device *dev)
+{
+ int cards_found = 0;
+ static int pci_index = 0; /* Static, for multiple probe calls. */
+
+ /* Ideally we would detect all network cards in slot order. That would
+	   be best done by a central PCI probe dispatch, which wouldn't work
+ well with the current structure. So instead we detect just the
+ Epic cards in slot order. */
+
+ if (pcibios_present()) {
+ unsigned char pci_bus, pci_device_fn;
+
+ for (;pci_index < 0xff; pci_index++) {
+ unsigned char pci_irq_line, pci_latency;
+ unsigned short pci_command, vendor, device;
+ unsigned int pci_ioaddr, chip_idx = 0;
+
+ if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+#ifdef REVERSE_PROBE_ORDER
+ 0xff - pci_index,
+#else
+ pci_index,
+#endif
+ &pci_bus, &pci_device_fn)
+ != PCIBIOS_SUCCESSFUL)
+ break;
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_VENDOR_ID, &vendor);
+ if (vendor != PCI_VENDOR_ID_SMC)
+ continue;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_DEVICE_ID, &device);
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+
+ if (device != PCI_DEVICE_ID_SMC_EPIC100) {
+ printk("Unknown SMC PCI ethernet chip type %4.4x detected:"
+ " not configured.\n", device);
+ continue;
+ }
+ if (epic_debug > 2)
+ printk("Found SMC PCI EPIC/100 at I/O %#x, IRQ %d.\n",
+ pci_ioaddr, pci_irq_line);
+
+ if (check_region(pci_ioaddr, EPIC_TOTAL_SIZE))
+ continue;
+
+#ifdef MODULE
+ dev = epic100_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
+ options[cards_found], cards_found);
+#else
+ dev = epic100_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
+ dev ? dev->mem_start : 0, -1);
+#endif
+
+ if (dev) {
+ /* Get and check the bus-master and latency values. */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+ printk(" PCI Master Bit has not been set! Setting...\n");
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command);
+ }
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < 10) {
+ printk(" PCI latency timer (CFLT) is unreasonably low at %d."
+ " Setting to 255 clocks.\n", pci_latency);
+ pcibios_write_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, 255);
+ } else if (epic_debug > 1)
+ printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
+ dev = 0;
+ cards_found++;
+ }
+ }
+ }
+
+#if defined (MODULE)
+ return cards_found;
+#else
+ return cards_found ? 0 : -ENODEV;
+#endif
+}
+
+static struct device *epic100_probe1(struct device *dev, int ioaddr, int irq,
+ int chip_id, int options, int card_idx)
+{
+ static int did_version = 0; /* Already printed version info. */
+ struct epic_private *tp;
+ int i;
+
+ if (epic_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ dev = init_etherdev(dev, 0);
+
+ printk("%s: SMC EPIC/100 at %#3x, IRQ %d, ", dev->name, ioaddr, irq);
+
+ /* Bring the chip out of low-power mode. */
+ outl(0x0200, ioaddr + GENCTL);
+ /* Magic?! If we don't set this bit the MII interface won't work. */
+ outl(0x0008, ioaddr + TEST1);
+
+ /* This could also be read from the EEPROM. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = inw(ioaddr + LAN0 + i*4);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+ if (epic_debug > 1) {
+ printk("%s: EEPROM contents\n", dev->name);
+ for (i = 0; i < 64; i++)
+ printk(" %4.4x%s", read_eeprom(ioaddr, i), i % 16 == 15 ? "\n" : "");
+ }
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, EPIC_TOTAL_SIZE, "SMC EPIC/100");
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* The data structures must be quadword aligned. */
+ tp = kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA);
+ memset(tp, 0, sizeof(*tp));
+ dev->priv = tp;
+
+#ifdef MODULE
+ tp->next_module = root_epic_dev;
+ root_epic_dev = dev;
+#endif
+
+ tp->chip_id = chip_id;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+ {
+ int phy, phy_idx;
+ for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys);
+ phy++) {
+ int mii_status = mii_read(ioaddr, phy, 0);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ tp->phys[phy_idx++] = phy;
+ printk("%s: MII transceiver found at address %d.\n",
+ dev->name, phy);
+ }
+ }
+ if (phy_idx == 0) {
+ printk("%s: ***WARNING***: No MII transceiver found!\n",
+ dev->name);
+ /* Use the known PHY address of the EPII. */
+ tp->phys[0] = 3;
+ }
+ }
+
+ /* Leave the chip in low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+
+ /* The lower four bits are the media type. */
+ if (options > 0) {
+ tp->full_duplex = (options & 16) ? 1 : 0;
+ tp->default_port = options & 15;
+ if (tp->default_port)
+ tp->medialock = 1;
+ }
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ tp->full_duplex = full_duplex[card_idx];
+ }
+
+ /* The Epic-specific entries in the device structure. */
+ dev->open = &epic_open;
+ dev->hard_start_xmit = &epic_start_xmit;
+ dev->stop = &epic_close;
+ dev->get_stats = &epic_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+
+ return dev;
+}
+\f
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x09
+#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
+#define EE_ENB (0x0001 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ The 1.2 code is a "nasty" timing loop, but PC compatible machines are
+ *supposed* to delay an ISA-compatible period for the SLOW_DOWN_IO macro. */
+#ifdef _LINUX_DELAY_H
+#define eeprom_delay(nanosec) udelay((nanosec + 999)/1000)
+#else
+#define eeprom_delay(nanosec) do { int _i = 3; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+#endif
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int read_eeprom(int ioaddr, int location)	/* Bit-bang one 16-bit word out of the serial EEPROM at 'location'. */
+{
+	int i;
+	int retval = 0;
+	int ee_addr = ioaddr + EECTL;	/* EEPROM control register */
+	int read_cmd = location | EE_READ_CMD;
+
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	outl(EE_ENB, ee_addr);	/* assert chip select */
+
+	/* Shift the read command bits out, most significant bit first. */
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		outl(EE_ENB | dataval, ee_addr);
+		eeprom_delay(100);
+		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay(150);
+		outl(EE_ENB | dataval, ee_addr);	/* Finish EEPROM a clock tick. */
+		eeprom_delay(250);
+	}
+	outl(EE_ENB, ee_addr);
+
+	for (i = 16; i > 0; i--) {
+		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay(100);
+		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);	/* sample data-out while clock is high */
+		outl(EE_ENB, ee_addr);
+		eeprom_delay(100);
+	}
+
+	/* Terminate the EEPROM access by dropping chip select. */
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	return retval;
+}
+
+#define MII_READOP	1
+#define MII_WRITEOP	2
+static int mii_read(int ioaddr, int phy_id, int location)	/* Read one 16-bit MII register from the given PHY. */
+{
+	int i;
+
+	outl((phy_id << 9) | (location << 4) | MII_READOP, ioaddr + MIICtrl);
+	/* Busy-wait for the READOP bit to clear; typically < 50 ticks, give up after 4000. */
+	for (i = 4000; i > 0; i--)
+		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0)
+			break;
+	return inw(ioaddr + MIIData);
+}
+
+\f
+static int
+epic_open(struct device *dev)	/* Bring the interface up: reset chip, claim IRQ, init rings, start Rx/Tx. */
+{
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+	int i;
+	int mii_reg5;
+	int full_duplex = 0;
+
+	/* Soft reset the chip. */
+	outl(0x0001, ioaddr + GENCTL);
+
+#ifdef SA_SHIRQ
+	if (request_irq(dev->irq, &epic_interrupt, SA_SHIRQ,
+					"SMC EPIC/100", dev)) {
+		return -EAGAIN;
+	}
+#else
+	if (irq2dev_map[dev->irq] != NULL
+		|| (irq2dev_map[dev->irq] = dev) == NULL
+		|| dev->irq == 0
+		|| request_irq(dev->irq, &epic_interrupt, 0, "SMC EPIC/100")) {
+		return -EAGAIN;
+	}
+#endif
+
+	MOD_INC_USE_COUNT;
+
+	epic_init_ring(dev);
+
+	/* This next line by Ken Yamaguchi.. ?? (magic register write, purpose undocumented) */
+	outl(0x8, ioaddr + 0x1c);
+
+	/* Pull the chip out of low-power mode, enable interrupts, and set for PCI read multiple. */
+	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+
+	for (i = 0; i < 3; i++)		/* load station address, 16 bits per LAN register */
+		outl(((u16*)dev->dev_addr)[i], ioaddr + LAN0 + i*4);
+
+	outl(TX_FIFO_THRESH, ioaddr + TxThresh);
+	full_duplex = tp->full_duplex;
+
+	mii_reg5 = mii_read(ioaddr, tp->phys[0], 5);	/* MII reg 5 -- presumably link-partner ability; verify 0x0100 vs databook */
+	if (mii_reg5 != 0xffff && (mii_reg5 & 0x0100)) {
+		full_duplex = 1;
+		if (epic_debug > 1)
+			printk("%s: Setting %s-duplex based on MII xcvr %d"
+				   " register read of %4.4x.\n", dev->name,
+				   full_duplex ? "full" : "half", tp->phys[0],
+				   mii_read(ioaddr, tp->phys[0], 5));
+	}
+
+	outl(full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+	outl(virt_to_bus(tp->rx_ring), ioaddr + PRxCDAR);	/* hand the descriptor ring bases to the chip */
+	outl(virt_to_bus(tp->tx_ring), ioaddr + PTxCDAR);
+
+	/* Start the chip's Rx process. */
+	set_rx_mode(dev);
+	outl(0x000A, ioaddr + COMMAND);
+
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+
+	/* Enable interrupts by setting the interrupt mask. */
+	outl(CntFull | TxUnderrun | TxDone
+		 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
+		 ioaddr + INTMASK);
+
+	if (epic_debug > 1)
+		printk("%s: epic_open() ioaddr %4.4x IRQ %d status %4.4x %s-duplex.\n",
+			   dev->name, ioaddr, dev->irq, inl(ioaddr + GENCTL),
+			   full_duplex ? "full" : "half");
+
+	/* Set the timer to switch to check for link beat and perhaps switch
+	   to an alternate media type. */
+	init_timer(&tp->timer);
+	tp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
+	tp->timer.data = (unsigned long)dev;
+	tp->timer.function = &epic_timer;	/* timer handler */
+	add_timer(&tp->timer);
+
+	return 0;
+}
+
+static void epic_timer(unsigned long data)	/* Media-selection timer tick; currently only prints debug state. */
+{
+	struct device *dev = (struct device *)data;
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+	int next_tick = 0;	/* never set non-zero below, so the timer does not re-arm itself */
+
+	if (epic_debug > 3) {
+		printk("%s: Media selection tick, Tx status %8.8x.\n",
+			   dev->name, inl(ioaddr + TxSTAT));
+		printk("%s: Other registers are IntMask %4.4x IntStatus %4.4x RxStatus"
+			   " %4.4x.\n",
+			   dev->name, inl(ioaddr + INTMASK), inl(ioaddr + INTSTAT),
+			   inl(ioaddr + RxSTAT));
+	}
+
+	if (next_tick) {
+		tp->timer.expires = RUN_AT(next_tick);
+		add_timer(&tp->timer);
+	}
+}
+
+static void epic_tx_timeout(struct device *dev)	/* Called when a Tx has been pending longer than TX_TIMEOUT. */
+{
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+
+	if (epic_debug > 0) {
+		printk("%s: Transmit timeout using MII device, Tx status %4.4x.\n",
+			   dev->name, inw(ioaddr + TxSTAT));
+		if (epic_debug > 1) {
+			printk("%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
+				   dev->name, tp->dirty_tx, tp->cur_tx);
+		}
+	}
+	/* Perhaps stop and restart the chip's Tx processes . */
+	/* Trigger a transmit demand, hoping the chip merely stalled. */
+	outl(0x0004, dev->base_addr + COMMAND);
+
+	dev->trans_start = jiffies;
+	tp->stats.tx_errors++;
+	return;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+epic_init_ring(struct device *dev)	/* Reset ring indices and (re)allocate the Rx buffers. */
+{
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int i;
+
+	tp->tx_full = 0;
+	tp->cur_rx = tp->cur_tx = 0;
+	tp->dirty_rx = tp->dirty_tx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		tp->rx_ring[i].status = 0x8000;	/* Owned by Epic chip */
+		tp->rx_ring[i].buflength = PKT_BUF_SZ;
+		{
+			/* Note the receive buffer must be longword aligned.
+			   dev_alloc_skb() provides 16 byte alignment. But do *not*
+			   use skb_reserve() to align the IP header! */
+			struct sk_buff *skb;
+			skb = DEV_ALLOC_SKB(PKT_BUF_SZ);
+			tp->rx_skbuff[i] = skb;
+			if (skb == NULL)
+				break; /* Bad news! */
+			skb->dev = dev; /* Mark as being used by this device. */
+#if LINUX_VERSION_CODE > 0x10300
+			skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+			tp->rx_ring[i].bufaddr = virt_to_bus(skb->tail);
+#else
+			tp->rx_ring[i].bufaddr = virt_to_bus(skb->data);
+#endif
+		}
+		tp->rx_ring[i].next = virt_to_bus(&tp->rx_ring[i+1]);
+	}
+	/* Mark the last entry as wrapping the ring; guard i==0 (first alloc failed) to avoid rx_ring[-1]. */
+	if (i > 0) tp->rx_ring[i-1].next = virt_to_bus(&tp->rx_ring[0]);
+
+	/* The Tx buffer descriptor is filled in as needed, but we
+	   do need to clear the ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		tp->tx_skbuff[i] = 0;
+		tp->tx_ring[i].status = 0x0000;
+		tp->tx_ring[i].next = virt_to_bus(&tp->tx_ring[i+1]);
+	}
+	tp->tx_ring[i-1].next = virt_to_bus(&tp->tx_ring[0]);
+}
+
+static int
+epic_start_xmit(struct sk_buff *skb, struct device *dev)	/* Queue one packet on the Tx ring; 0 = accepted, 1 = busy. */
+{
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int entry;
+	u32 flag;
+
+#ifndef final_version
+	if (skb == NULL || skb->len <= 0) {
+		printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+			   dev->name);
+		dev_tint(dev);
+		return 0;
+	}
+#endif
+
+	/* Block a timer-based transmit from overlapping. This could better be
+	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+		if (jiffies - dev->trans_start < TX_TIMEOUT)
+			return 1;
+		epic_tx_timeout(dev);
+		return 1;
+	}
+
+	/* Caution: the write order is important here, set the base address
+	   with the "ownership" bits last. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % TX_RING_SIZE;
+
+	tp->tx_skbuff[entry] = skb;
+	tp->tx_ring[entry].txlength = (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN);	/* pad short frames to the ethernet minimum */
+	tp->tx_ring[entry].bufaddr = virt_to_bus(skb->data);
+	tp->tx_ring[entry].buflength = skb->len;
+
+	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+		flag = 0x10; /* No interrupt */
+		dev->tbusy = 0;
+	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+		flag = 0x14; /* Tx-done intr. */
+		dev->tbusy = 0;
+	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+		flag = 0x10; /* No Tx-done intr. */
+		dev->tbusy = 0;
+	} else {
+		/* Leave room for two additional entries. */
+		flag = 0x14; /* Tx-done intr. */
+		tp->tx_full = 1;
+	}
+
+	tp->tx_ring[entry].control = flag;
+	tp->tx_ring[entry].status = 0x8000;	/* Pass ownership to the chip. */
+	tp->cur_tx++;
+	/* Trigger an immediate transmit demand. */
+	outl(0x0004, dev->base_addr + COMMAND);
+
+	dev->trans_start = jiffies;
+	if (epic_debug > 4)
+		printk("%s: Queued Tx packet size %d to slot %d, "
+			   "flag %2.2x Tx status %8.8x.\n",
+			   dev->name, (int)skb->len, entry, flag,
+			   inl(dev->base_addr + TxSTAT));
+
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void epic_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs)	/* ISR: all Rx work plus Tx-completion cleanup. */
+{
+#ifdef SA_SHIRQ
+	struct device *dev = (struct device *)dev_instance;
+#else
+	struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+	struct epic_private *lp;
+	int status, ioaddr, boguscnt = max_interrupt_work;
+
+	if (dev == NULL) {
+		printk ("epic_interrupt(): irq %d for unknown device.\n", irq);
+		return;
+	}
+
+	ioaddr = dev->base_addr;
+	lp = (struct epic_private *)dev->priv;
+	if (dev->interrupt)
+		printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+	dev->interrupt = 1;
+
+	do {
+		status = inl(ioaddr + INTSTAT);
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		outl(status & 0x00007fff, ioaddr + INTSTAT);
+
+		if (epic_debug > 4)
+			printk("%s: interrupt interrupt=%#8.8x new intstat=%#8.8x.\n",
+				   dev->name, status, inl(ioaddr + INTSTAT));
+
+		if ((status & (RxDone | TxEmpty | TxDone)) == 0)
+			break;
+
+		if (status & RxDone) /* Rx interrupt */
+			epic_rx(dev);
+
+		if (status & (TxEmpty | TxDone)) {
+			int dirty_tx;
+
+			for (dirty_tx = lp->dirty_tx; dirty_tx < lp->cur_tx; dirty_tx++) {	/* reap finished Tx slots */
+				int entry = dirty_tx % TX_RING_SIZE;
+				int txstatus = lp->tx_ring[entry].status;
+
+				if (txstatus < 0)	/* ownership bit 0x8000 still set -- assumes 16-bit signed status; confirm */
+					break; /* It still hasn't been Txed */
+
+				if ( ! (txstatus & 0x0001)) {
+					/* There was an major error, log it. */
+#ifndef final_version
+					if (epic_debug > 1)
+						printk("%s: Transmit error, Tx status %8.8x.\n",
+							   dev->name, txstatus);
+#endif
+					lp->stats.tx_errors++;
+					if (txstatus & 0x1050) lp->stats.tx_aborted_errors++;
+					if (txstatus & 0x0008) lp->stats.tx_carrier_errors++;
+					if (txstatus & 0x0040) lp->stats.tx_window_errors++;
+					if (txstatus & 0x0010) lp->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+					if (txstatus & 0x1000) lp->stats.collisions16++;
+#endif
+				} else {
+#ifdef ETHER_STATS
+					if ((txstatus & 0x0002) != 0) lp->stats.tx_deferred++;
+#endif
+					lp->stats.collisions += (txstatus >> 8) & 15;
+					lp->stats.tx_packets++;
+				}
+
+				/* Free the original skb. */
+				dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+				lp->tx_skbuff[entry] = 0;
+			}
+
+#ifndef final_version
+			if (lp->cur_tx - dirty_tx > TX_RING_SIZE) {
+				printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+					   dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
+				dirty_tx += TX_RING_SIZE;
+			}
+#endif
+
+			if (lp->tx_full && dev->tbusy
+				&& dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+				/* The ring is no longer full, clear tbusy. */
+				lp->tx_full = 0;
+				dev->tbusy = 0;
+				mark_bh(NET_BH);	/* wake the network bottom half to resume queueing */
+			}
+
+			lp->dirty_tx = dirty_tx;
+		}
+
+		/* Check uncommon events all at once. */
+		if (status & (CntFull | TxUnderrun | RxOverflow)) {
+			/* Always update the error counts to avoid overhead later. */
+			lp->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+			lp->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+			lp->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+			if (status & TxUnderrun) { /* Tx FIFO underflow. */
+				lp->stats.tx_fifo_errors++;
+				/* Restart the transmit process. */
+				outl(0x0080, ioaddr + COMMAND);
+			}
+			if (status & RxOverflow) { /* Missed a Rx frame. */
+				lp->stats.rx_errors++;
+			}
+			/* Clear all error sources. */
+			outl(status & 0x7f18, ioaddr + INTSTAT);
+		}
+		if (--boguscnt < 0) {
+			printk("%s: Too much work at interrupt, IntrStatus=0x%8.8x.\n",
+				   dev->name, status);
+			/* Clear all interrupt sources. */
+			outl(0x0001ffff, ioaddr + INTSTAT);
+			break;
+		}
+	} while (1);
+
+	if (epic_debug > 3)
+		printk("%s: exiting interrupt, intr_status=%#4.4x.\n",
+			   dev->name, inl(ioaddr + INTSTAT));
+
+	/* Code that should never be run! Perhaps remove after testing.. */
+	{
+		static int stopit = 10;
+		if (dev->start == 0 && --stopit < 0) {
+			printk("%s: Emergency stop, looping startup interrupt.\n",
+				   dev->name);
+			FREE_IRQ(irq, dev);
+		}
+	}
+
+	dev->interrupt = 0;
+	return;
+}
+
+static int
+epic_rx(struct device *dev)	/* Drain completed Rx descriptors, passing packets up the stack. */
+{
+	struct epic_private *lp = (struct epic_private *)dev->priv;
+	int entry = lp->cur_rx % RX_RING_SIZE;
+
+	if (epic_debug > 4)
+		printk(" In epic_rx(), entry %d %8.8x.\n", entry,
+			   lp->rx_ring[entry].status);
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while (lp->rx_ring[entry].status >= 0) {	/* ownership bit 0x8000 clear => chip is done with it */
+		int status = lp->rx_ring[entry].status;
+
+		if (epic_debug > 4)
+			printk(" epic_rx() status was %8.8x.\n", status);
+		if (status & 0x2000) {
+			printk("%s: Oversized Ethernet frame spanned multiple buffers,"
+				   " status %4.4x!\n", dev->name, status);
+			lp->stats.rx_length_errors++;
+		} else if (status & 0x0006) {
+			/* Rx Frame errors are counted in hardware. */
+			lp->stats.rx_errors++;
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = lp->rx_ring[entry].rxlength - 4;
+			struct sk_buff *skb;
+			int rx_in_place = 0;
+
+			/* Check if the packet is long enough to just accept without
+			   copying to a properly sized skbuff. */
+			if (pkt_len > rx_copybreak) {
+				struct sk_buff *newskb;
+				char *temp;
+
+				/* Pass up the skb already on the Rx ring. */
+				skb = lp->rx_skbuff[entry];
+				temp = skb_put(skb, pkt_len);
+				if (bus_to_virt(lp->rx_ring[entry].bufaddr) != temp)
+					printk("%s: Warning -- the skbuff addresses do not match"
+						   " in epic_rx: %p vs. %p / %p.\n", dev->name,
+						   bus_to_virt(lp->rx_ring[entry].bufaddr),
+						   skb->head, temp);
+				/* Get a fresh skbuff to replace the filled one. */
+				newskb = DEV_ALLOC_SKB(PKT_BUF_SZ);
+				if (newskb) {
+					rx_in_place = 1;
+					lp->rx_skbuff[entry] = newskb;
+					newskb->dev = dev;
+#if LINUX_VERSION_CODE > 0x10300
+					/* Align IP on 16 byte boundaries */
+					skb_reserve(newskb, 2);
+					lp->rx_ring[entry].bufaddr = virt_to_bus(newskb->tail);
+#else
+					lp->rx_ring[entry].bufaddr = virt_to_bus(newskb->data);
+#endif
+				} else /* No memory, drop the packet. */
+					skb = 0;
+			} else
+				skb = DEV_ALLOC_SKB(pkt_len + 2);	/* small packet: copy into a right-sized skb */
+			if (skb == NULL) {
+				int i;
+				printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+				/* Check that at least two ring entries are free.
+				   If not, free one and mark stats->rx_dropped++. */
+				for (i = 0; i < RX_RING_SIZE; i++)
+					if (lp->rx_ring[(entry+i) % RX_RING_SIZE].status < 0)
+						break;
+
+				if (i > RX_RING_SIZE -2) {
+					lp->stats.rx_dropped++;
+					lp->rx_ring[entry].status = 0x8000;
+					lp->cur_rx++;
+				}
+				break;
+			}
+			skb->dev = dev;
+			if (! rx_in_place) {
+				skb_reserve(skb, 2); /* 16 byte align the data fields */
+				memcpy(skb_put(skb, pkt_len),
+					   bus_to_virt(lp->rx_ring[entry].bufaddr), pkt_len);
+			}
+#if LINUX_VERSION_CODE > 0x10300
+			skb->protocol = eth_type_trans(skb, dev);
+#else
+			skb->len = pkt_len;
+#endif
+			netif_rx(skb);
+			lp->stats.rx_packets++;
+		}
+
+		lp->rx_ring[entry].status = 0x8000;	/* hand the descriptor back to the chip */
+		entry = (++lp->cur_rx) % RX_RING_SIZE;
+	}
+
+	return 0;
+}
+
+static int
+epic_close(struct device *dev)	/* Shut the interface down and release IRQ, timer, and all ring skbs. */
+{
+	int ioaddr = dev->base_addr;
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int i;
+
+	dev->start = 0;
+	dev->tbusy = 1;
+
+	if (epic_debug > 1)
+		printk("%s: Shutting down ethercard, status was %2.2x.\n",
+			   dev->name, inl(ioaddr + INTSTAT));
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outl(0x00000000, ioaddr + INTMASK);
+	/* Stop the chip's Tx and Rx DMA processes. */
+	outw(0x0061, ioaddr + COMMAND);
+
+	/* Update the error counts. */
+	tp->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+	tp->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+	tp->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+	del_timer(&tp->timer);
+
+#ifdef SA_SHIRQ
+	free_irq(dev->irq, dev);
+#else
+	free_irq(dev->irq);
+	irq2dev_map[dev->irq] = 0;
+#endif
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb = tp->rx_skbuff[i];
+		tp->rx_skbuff[i] = 0;
+		tp->rx_ring[i].status = 0; /* Not owned by Epic chip. */
+		tp->rx_ring[i].buflength = 0;
+		tp->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
+		if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+			skb->free = 1;
+#endif
+			dev_kfree_skb(skb, FREE_WRITE);
+		}
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (tp->tx_skbuff[i])
+			dev_kfree_skb(tp->tx_skbuff[i], FREE_WRITE);
+		tp->tx_skbuff[i] = 0;
+	}
+
+
+	/* Green! Leave the chip in low-power mode. */
+	outl(0x0008, ioaddr + GENCTL);
+
+	MOD_DEC_USE_COUNT;
+
+	return 0;
+}
+
+static struct enet_statistics *
+epic_get_stats(struct device *dev)	/* Return accumulated statistics, refreshing the HW error counters. */
+{
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	int ioaddr = dev->base_addr;
+
+	if (dev->start) {	/* only touch chip registers while the interface is up */
+		/* Update the error counts. */
+		tp->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+		tp->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+		tp->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+	}
+
+	return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+   N.B. Do not use for bulk data, use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)	/* bitwise little-endian CRC-32 over 'length' bytes */
+{
+	unsigned int crc = 0xffffffff;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {	/* process LSB first */
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
+}
+
+
+#ifdef NEW_MULTICAST
+static void set_rx_mode(struct device *dev)
+#else
+static void set_rx_mode(struct device *dev, int num_addrs, void *addrs)
+#endif
+{
+	int ioaddr = dev->base_addr;
+	struct epic_private *tp = (struct epic_private *)dev->priv;
+	unsigned char mc_filter[8]; /* Multicast hash filter */
+	int i;
+
+	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+		outl(0x002C, ioaddr + RxCtrl);
+		/* Unconditionally log net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		memset(mc_filter, 0xff, sizeof(mc_filter));
+	} else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+		/* There is apparently a chip bug, so the multicast filter
+		   is never enabled. */
+		/* Too many to filter perfectly -- accept all multicasts. */
+		memset(mc_filter, 0xff, sizeof(mc_filter));
+		outl(0x000C, ioaddr + RxCtrl);
+	} else if (dev->mc_count == 0) {
+		outl(0x0004, ioaddr + RxCtrl);
+		return;
+	} else { /* Never executed, for now. */
+		struct dev_mc_list *mclist;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next)
+			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+					mc_filter);
+	}
+	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
+	if (memcmp(mc_filter, tp->mc_filter, sizeof(mc_filter))) {
+		for (i = 0; i < 4; i++)
+			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+		memcpy(tp->mc_filter, mc_filter, sizeof(mc_filter));
+	}
+	return;
+}
+\f
+#ifdef MODULE
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+
+int
+init_module(void)	/* Module entry point: probe for cards; 0 on success, -ENODEV if none found. */
+{
+	int cards_found;
+
+	if (debug >= 0)
+		epic_debug = debug;	/* honor the module's 'debug' parameter */
+
+	root_epic_dev = NULL;
+	cards_found = epic100_probe(0);
+
+	return cards_found ? 0 : -ENODEV;
+}
+
+void
+cleanup_module(void)	/* Module exit: unregister and free every device on the probe list. */
+{
+	struct device *next_dev;
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_epic_dev) {
+		next_dev = ((struct epic_private *)root_epic_dev->priv)->next_module;
+		unregister_netdev(root_epic_dev);
+		release_region(root_epic_dev->base_addr, EPIC_TOTAL_SIZE);
+		kfree(root_epic_dev);
+		root_epic_dev = next_dev;
+	}
+}
+
+#endif /* MODULE */
+\f
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c epic100.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
equalizer_t *eql = (equalizer_t *) dev->priv;
struct device *slave_dev = 0;
slave_t *slave;
- struct sk_buff *skb2;
if (skb == NULL)
return 0;
static int eth16i_tx(struct sk_buff *skb, struct device *dev);
static void eth16i_rx(struct device *dev);
static void eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static void eth16i_multicast(struct device *dev, int num_addrs, void *addrs);
+static void eth16i_multicast(struct device *dev);
static void eth16i_select_regbank(unsigned char regbank, short ioaddr);
static void eth16i_initialize(struct device *dev);
static struct enet_statistics *eth16i_get_stats(struct device *dev);
return;
}
-static void eth16i_multicast(struct device *dev, int num_addrs, void *addrs)
+static void eth16i_multicast(struct device *dev)
{
short ioaddr = dev->base_addr;
static unsigned char pci_irq_line = 0;
int ne_probe(struct device *dev);
+#ifdef CONFIG_PCI
static int ne_probe_pci(struct device *dev);
+#endif
static int ne_probe1(struct device *dev, int ioaddr);
static int ne_open(struct device *dev);
/* Tack on our header */
new_skb->h.iph = (struct iphdr *) skb_push(new_skb, tunnel_hlen);
- new_skb->mac.raw = new_skb->ip_hdr;
+ new_skb->mac.raw = (void *)new_skb->ip_hdr;
/* Free the old packet, we no longer need it */
dev_kfree_skb(skb, FREE_WRITE);
--- /dev/null
+/* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
+/*
+ Written 1997 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+ All other rights reserved.
+
+ This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
+ chips.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Support and updates available at
+ http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html
+*/
+
+static const char *version =
+"rtl8139.c:v0.13 10/27/97 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html\n";
+
+/* A few user-configurable values. */
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K */
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE 1536
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
+#define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
+#define TX_DMA_BURST 4
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((2000*HZ)/1000)
+
+#include <linux/config.h>
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+ This is only in the support-all-kernels source code. */
+#include <linux/version.h> /* Evil, but necessary */
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+#define RUN_AT(x) (x) /* What to put in timer->expires. */
+#define DEV_ALLOC_SKB(len) alloc_skb((len), GFP_ATOMIC)
+#define virt_to_bus(addr) ((unsigned long)(addr))
+#define bus_to_virt(addr) ((void*)(addr))
+
+#else /* 1.3.0 and later */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb((len) + 2)
+#endif
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+#ifdef MODULE
+#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#else
+#undef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT
+#undef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#endif /* 1.3.38 */
+
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#endif
+
+#ifdef SA_SHIRQ
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+#else
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n)
+#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+#endif
+
+#if (LINUX_VERSION_CODE < 0x20123)
+#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+
+/* The I/O extent. */
+#define RTL8129_TOTAL_SIZE 0x80
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry rtl8139_drv =
+{"RTL8139", rtl8139_probe, RTL8129_TOTAL_SIZE, NULL};
+#endif
+
+static int rtl8129_debug = 1;
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the RealTek RTL8129, the RealTek Fast
+Ethernet controllers for PCI. This chip is used on a few clone boards.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Rx Ring buffers
+
+The receive unit uses a single linear ring buffer rather than the more
+common (and more efficient) descriptor-based architecture. Incoming frames
+are sequentially stored into the Rx region, and the host copies them into
+skbuffs.
+
+Comment: While it is theoretically possible to process many frames in place,
+any delay in Rx processing would cause us to drop frames. More importantly,
+the Linux protocol stack is not designed to operate in this manner.
+
+IIIb. Tx operation
+
+The RTL8129 uses a fixed set of four Tx descriptors in register space.
+In a stunningly bad design choice, Tx frames must be 32 bit aligned. Linux
+aligns the IP header on word boundaries, and 14 byte ethernet header means
+that almost all frames will need to be copied to an alignment buffer.
+
+IVb. References
+
+http://www.realtek.com.tw/cn/cn.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+
+IVc. Errata
+
+*/
+
+#ifndef PCI_VENDOR_ID_REALTEK
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#endif
+#ifndef PCI_DEVICE_ID_REALTEK_8129
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#endif
+#ifndef PCI_DEVICE_ID_REALTEK_8139
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+#endif
+
+/* The rest of these values should never change. */
+#define NUM_TX_DESC 4 /* Number of Tx descriptor registers. */
+
+/* Symbolic offsets to registers. */
+enum RTL8129_registers {
+ MAC0=0, /* Ethernet hardware address. */
+ MAR0=8, /* Multicast filter. */
+ TxStat0=0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0=0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
+ ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
+ IntrMask=0x3C, IntrStatus=0x3E,
+ TxConfig=0x40, RxConfig=0x44,
+ Timer=0x48, /* A general-purpose counter. */
+ RxMissed=0x4C, /* 24 bits valid, write clears. */
+ Cfg9346=0x50, Config0=0x51, Config1=0x52,
+ FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
+ MultiIntr=0x5C, TxSummary=0x60,
+ BMCR=0x62, BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68, NWayExpansion=0x6A,
+};
+
+enum ChipCmdBits {
+ CmdReset=0x10, CmdRxEnb=0x08, CmdTxEnb=0x04, RxBufEmpty=0x01, };
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr=0x8000, PCSTimeout=0x4000,
+ RxFIFOOver=0x40, RxUnderrun=0x20, RxOverflow=0x10,
+ TxErr=0x08, TxOK=0x04, RxErr=0x02, RxOK=0x01,
+};
+enum TxStatusBits {
+ TxHostOwns=0x2000, TxUnderrun=0x4000, TxStatOK=0x8000,
+ TxOutOfWindow=0x20000000, TxAborted=0x40000000, TxCarrierLost=0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast=0x8000, RxPhysical=0x4000, RxBroadcast=0x2000,
+ RxBadSymbol=0x0020, RxRunt=0x0010, RxTooLong=0x0008, RxCRCErr=0x0004,
+ RxBadAlign=0x0002, RxStatusOK=0x0001,
+};
+
+struct rtl8129_private {
+	char devname[8]; /* Used only for kernel debugging. */
+	const char *product_name;
+	struct device *next_module;	/* link in the driver's device list, for module unload */
+	int chip_id;	/* PCI device ID (0x8129 or 0x8139) */
+	int chip_revision;
+	struct enet_statistics stats;
+	struct timer_list timer; /* Media selection timer. */
+	unsigned int cur_rx, cur_tx; /* The next free and used entries */
+	unsigned int dirty_rx, dirty_tx;
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	struct sk_buff* tx_skbuff[NUM_TX_DESC];
+	unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+	unsigned char *rx_ring;
+	unsigned char *tx_bufs; /* Tx bounce buffer region. */
+	unsigned char mc_filter[8]; /* Current multicast filter. */
+	char phys[4]; /* MII device addresses. */
+	unsigned int tx_full:1; /* The Tx queue is full. */
+	unsigned int full_duplex:1; /* Full-duplex operation requested. */
+	unsigned int default_port:4; /* Last dev->if_port value. */
+	unsigned int media2:4; /* Secondary monitored media port. */
+	unsigned int medialock:1; /* Don't sense media type. */
+	unsigned int mediasense:1; /* Media sensing in progress. */
+};
+
+#ifdef MODULE
+/* Used to pass the full-duplex flag, etc. */
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+#if LINUX_VERSION_CODE > 0x20118
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(max_interrupt_work, "i");
+#endif
+#endif
+
+static struct device *rtl8129_probe1(struct device *dev, int ioaddr, int irq,
+ int chip_id, int options, int card_idx);
+static int rtl8129_open(struct device *dev);
+static int read_eeprom(int ioaddr, int location);
+static int mdio_read(int ioaddr, int phy_id, int location);
+static void rtl8129_timer(unsigned long data);
+static void rtl8129_tx_timeout(struct device *dev);
+static void rtl8129_init_ring(struct device *dev);
+static int rtl8129_start_xmit(struct sk_buff *skb, struct device *dev);
+static int rtl8129_rx(struct device *dev);
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int rtl8129_close(struct device *dev);
+static struct enet_statistics *rtl8129_get_stats(struct device *dev);
+#ifdef NEW_MULTICAST
+static void set_rx_mode(struct device *dev);
+#else
+static void set_rx_mode(struct device *dev, int num_addrs, void *addrs);
+#endif
+
+\f
+
+#ifdef MODULE
+/* A list of all installed RTL8129 devices, for removing the driver module. */
+static struct device *root_rtl8129_dev = NULL;
+#endif
+
+int rtl8139_probe(struct device *dev)	/* Scan the PCI bus for RealTek 8129/8139 chips and set each one up. */
+{
+	int cards_found = 0;
+	static int pci_index = 0; /* Static, for multiple probe calls. */
+
+	/* Ideally we would detect all network cards in slot order. That would
+	   be best done a central PCI probe dispatch, which wouldn't work
+	   well with the current structure. So instead we detect just the
+	   Rtl81*9 cards in slot order. */
+
+	if (pcibios_present()) {
+		unsigned char pci_bus, pci_device_fn;
+
+		for (;pci_index < 0xff; pci_index++) {
+			unsigned char pci_irq_line, pci_latency;
+			unsigned short pci_command, vendor, device;
+			unsigned int pci_ioaddr;
+
+			if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+#ifdef REVERSE_PROBE_ORDER
+									0xff - pci_index,
+#else
+									pci_index,
+#endif
+									&pci_bus, &pci_device_fn)
+				!= PCIBIOS_SUCCESSFUL)
+				break;
+			pcibios_read_config_word(pci_bus, pci_device_fn,
+									 PCI_VENDOR_ID, &vendor);
+			if (vendor != PCI_VENDOR_ID_REALTEK)
+				continue;
+
+			pcibios_read_config_word(pci_bus, pci_device_fn,
+									 PCI_DEVICE_ID, &device);
+			pcibios_read_config_byte(pci_bus, pci_device_fn,
+									 PCI_INTERRUPT_LINE, &pci_irq_line);
+			pcibios_read_config_dword(pci_bus, pci_device_fn,
+									  PCI_BASE_ADDRESS_0, &pci_ioaddr);
+			/* Remove I/O space marker in bit 0. */
+			pci_ioaddr &= ~3;
+
+			if (device != PCI_DEVICE_ID_REALTEK_8129
+				&& device != PCI_DEVICE_ID_REALTEK_8139) {
+				printk("Unknown RealTek PCI ethernet chip type %4.4x detected:"
+					   " not configured.\n", device);
+				continue;
+			}
+			if (check_region(pci_ioaddr, RTL8129_TOTAL_SIZE))
+				continue;	/* I/O region already claimed by another driver */
+
+#ifdef MODULE
+			dev = rtl8129_probe1(dev, pci_ioaddr, pci_irq_line, device,
+								 options[cards_found], cards_found);
+#else
+			dev = rtl8129_probe1(dev, pci_ioaddr, pci_irq_line, device,
+								 dev ? dev->mem_start : 0, -1);
+#endif
+
+			if (dev) {
+				/* Get and check the bus-master and latency values. */
+				pcibios_read_config_word(pci_bus, pci_device_fn,
+										 PCI_COMMAND, &pci_command);
+				if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+					printk(" PCI Master Bit has not been set! Setting...\n");
+					pci_command |= PCI_COMMAND_MASTER;
+					pcibios_write_config_word(pci_bus, pci_device_fn,
+											  PCI_COMMAND, pci_command);
+				}
+				pcibios_read_config_byte(pci_bus, pci_device_fn,
+										 PCI_LATENCY_TIMER, &pci_latency);
+				if (pci_latency < 10) {
+					printk(" PCI latency timer (CFLT) is unreasonably low "
+						   "at %d. Setting to 64 clocks.\n", pci_latency);
+					pcibios_write_config_byte(pci_bus, pci_device_fn,
+											  PCI_LATENCY_TIMER, 64);
+				} else if (rtl8129_debug > 1)
+					printk(" PCI latency timer (CFLT) is %#x.\n",
+						   pci_latency);
+				dev = 0;	/* force a fresh device allocation on the next iteration */
+				cards_found++;
+			}
+		}
+	}
+
+#if defined (MODULE)
+	return cards_found;
+#else
+	return cards_found ? 0 : -ENODEV;
+#endif
+}
+
+static struct device *rtl8129_probe1(struct device *dev, int ioaddr, int irq,
+ int chip_id, int options, int card_idx)
+{
+ static int did_version = 0; /* Already printed version info. */
+ struct rtl8129_private *tp;
+ int i;
+
+ if (rtl8129_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ dev = init_etherdev(dev, 0);
+
+ printk("%s: RealTek RTL%x at %#3x, IRQ %d, ",
+ dev->name, chip_id, ioaddr, irq);
+
+ /* Bring the chip out of low-power mode. */
+ outb(0x00, ioaddr + Config1);
+
+ /* Perhaps this should be read from the EEPROM? */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + MAC0 + i);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+ if (rtl8129_debug > 1) {
+ printk("%s: EEPROM contents\n", dev->name);
+ for (i = 0; i < 64; i++)
+ printk(" %4.4x%s", read_eeprom(ioaddr, i), i%16 == 15 ? "\n" : "");
+ }
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, RTL8129_TOTAL_SIZE, "RealTek RTL8129/39 Fast Ethernet");
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Some data structures must be quadword aligned. */
+ tp = kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA);
+ memset(tp, 0, sizeof(*tp));
+ dev->priv = tp;
+
+#ifdef MODULE
+ tp->next_module = root_rtl8129_dev;
+ root_rtl8129_dev = dev;
+#endif
+
+ tp->chip_id = chip_id;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+ if (chip_id == 0x8129) {
+ int phy, phy_idx;
+ for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys);
+ phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ tp->phys[phy_idx++] = phy;
+ printk("%s: MII transceiver found at address %d.\n",
+ dev->name, phy);
+ }
+ }
+ if (phy_idx == 0) {
+ printk("%s: No MII transceivers found! Assuming SYM transceiver.\n",
+ dev->name);
+ tp->phys[0] = -1;
+ }
+ } else {
+ tp->phys[0] = -1;
+ }
+
+ /* Put the chip into low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(0x03, ioaddr + Config1);
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ /* The lower four bits are the media type. */
+ if (options > 0) {
+ tp->full_duplex = (options & 16) ? 1 : 0;
+ tp->default_port = options & 15;
+ if (tp->default_port)
+ tp->medialock = 1;
+ }
+#ifdef MODULE
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ tp->full_duplex = full_duplex[card_idx];
+ }
+#endif
+
+ /* The Rtl8129-specific entries in the device structure. */
+ dev->open = &rtl8129_open;
+ dev->hard_start_xmit = &rtl8129_start_xmit;
+ dev->stop = &rtl8129_close;
+ dev->get_stats = &rtl8129_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+
+ return dev;
+}
+\f
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ The 1.2 code is a "nasty" timing loop, but PC compatible machines are
+ *supposed* to delay an ISA-compatible period for the SLOW_DOWN_IO macro. */
+#ifdef _LINUX_DELAY_H
+#define eeprom_delay(nanosec) udelay((nanosec + 999)/1000)
+#else
+#define eeprom_delay(nanosec) do { int _i = 3; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+#endif
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | EE_READ_CMD;
+
+ outb(EE_ENB & ~EE_CS, ee_addr);
+ outb(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_ENB | dataval, ee_addr);
+ eeprom_delay(100);
+ outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay(150);
+ outb(EE_ENB | dataval, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay(250);
+ }
+ outb(EE_ENB, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay(100);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outb(EE_ENB, ee_addr);
+ eeprom_delay(100);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(~EE_CS, ee_addr);
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol. The maximum data clock rate is 2.5 MHz. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#ifdef _LINUX_DELAY_H
+#define mdio_delay() udelay(1) /* Really 400ns. */
+#else
+#define mdio_delay() __SLOW_DOWN_IO;
+#endif
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync(int ioaddr)
+{
+ int i;
+ int mdio_addr = ioaddr + MII_SMI;
+
+ for (i = 32; i >= 0; i--) {
+ outb(MDIO_DIR | MDIO_DATA_OUT, mdio_addr);
+ mdio_delay();
+ outb(MDIO_DIR | MDIO_DATA_OUT | MDIO_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+static int mdio_read(int ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int retval = 0;
+ int mdio_addr = ioaddr + MII_SMI;
+
+ mdio_sync(ioaddr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval =
+ (read_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ outb(MDIO_DIR | dataval, mdio_addr);
+ mdio_delay();
+ outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ outb(MDIO_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+\f
+static int
+rtl8129_open(struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+ int full_duplex = 0;
+
+ /* Soft reset the chip. */
+ outb(CmdReset, ioaddr + ChipCmd);
+
+#ifdef SA_SHIRQ
+ if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ,
+ "RealTek RTL8129/39 Fast Ethernet", dev)) {
+ return -EAGAIN;
+ }
+#else
+ if (irq2dev_map[dev->irq] != NULL
+ || (irq2dev_map[dev->irq] = dev) == NULL
+ || dev->irq == 0
+ || request_irq(dev->irq, &rtl8129_interrupt, 0, "RTL8129")) {
+ return -EAGAIN;
+ }
+#endif
+
+ MOD_INC_USE_COUNT;
+
+ tp->tx_bufs = kmalloc(TX_BUF_SIZE * NUM_TX_DESC, GFP_KERNEL);
+ tp->rx_ring = kmalloc(RX_BUF_LEN + 16, GFP_KERNEL);
+ if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+ if (tp->tx_bufs)
+ kfree(tp->tx_bufs);
+ if (rtl8129_debug > 0)
+ printk("%s: Couldn't allocate a %d byte receive ring.\n",
+ dev->name, RX_BUF_LEN);
+ return -ENOMEM;
+ }
+ rtl8129_init_ring(dev);
+
+#ifndef final_version
+ /* Used to monitor rx ring overflow. */
+ memset(tp->rx_ring + RX_BUF_LEN, 0xcc, 16);
+#endif
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
+ break;
+#ifndef final_version
+ if (rtl8129_debug > 2)
+ printk("%s: reset finished with status %2.2x after %d loops.\n",
+ dev->name, inb(ioaddr + ChipCmd), 1000-i);
+#endif
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + MAC0 + i);
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) | (RX_DMA_BURST<<8),
+ ioaddr + RxConfig);
+ outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
+
+ full_duplex = tp->full_duplex;
+ if (tp->phys[0] >= 0 || tp->chip_id == 0x8139) {
+ u16 mii_reg5;
+ if (tp->chip_id == 0x8139)
+ mii_reg5 = inw(ioaddr + NWayLPAR);
+ else
+ mii_reg5 = mdio_read(ioaddr, tp->phys[0], 5);
+ if (mii_reg5 == 0xffff)
+ ; /* Not there */
+ else if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+ full_duplex = 1;
+ if (rtl8129_debug > 1)
+ printk("%s: Setting %s%s-duplex based on"
+ " auto-negotiated partner ability %4.4x.\n", dev->name,
+ mii_reg5 == 0 ? "" :
+ (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+ full_duplex ? "full" : "half", mii_reg5);
+ }
+
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
+
+ /* Start the chip's Tx and Rx process. */
+ outl(0, ioaddr + RxMissed);
+ set_rx_mode(dev);
+
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+#ifndef final_version
+ if (rtl8129_debug > 1)
+ printk("%s: In rtl8129_open() Tx/Rx Config %8.8x/%8.8x"
+ " Chip Config %2.2x/%2.2x.\n",
+ dev->name, inl(ioaddr + TxConfig), inl(ioaddr + RxConfig),
+ inb(ioaddr + Config0), inb(ioaddr + Config1));
+#endif
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+ | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+
+ if (rtl8129_debug > 1)
+ printk("%s: rtl8129_open() ioaddr %4.4x IRQ %d GP Pins %2.2x %s-duplex.\n",
+ dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
+ full_duplex ? "full" : "half");
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&tp->timer);
+ tp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = &rtl8129_timer; /* timer handler */
+ add_timer(&tp->timer);
+
+ return 0;
+}
+
+static void rtl8129_timer(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int next_tick = 0;
+
+ if (tp->chip_id == 0x8139) {
+ u16 mii_reg5 = inw(ioaddr + NWayLPAR);
+ if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+ if ( ! tp->full_duplex) {
+ tp->full_duplex = 1;
+ if (rtl8129_debug > 0)
+ printk("%s: Switching to full-duplex based on link "
+ "partner ability of %4.4x.\n",
+ dev->name, mii_reg5);
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+ }
+ }
+ if (rtl8129_debug > 2) {
+ if (tp->chip_id == 0x8129)
+ printk("%s: Media selection tick, general purpose pins %2.2x.\n",
+ dev->name, inb(ioaddr + GPPinData));
+ else
+ printk("%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, inw(ioaddr + NWayLPAR));
+ printk("%s: Other registers are IntMask %4.4x IntStatus %4.4x"
+ " RxStatus %4.4x.\n",
+ dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
+ inl(ioaddr + RxEarlyStatus));
+ printk("%s: Chip config %2.2x %2.2x.\n",
+ dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
+ }
+
+ if (next_tick) {
+ tp->timer.expires = RUN_AT(next_tick);
+ add_timer(&tp->timer);
+ }
+}
+
+static void rtl8129_tx_timeout(struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (rtl8129_debug > 1)
+ printk("%s: Transmit timeout using MII device.\n", dev->name);
+ for (i = 0; i < NUM_TX_DESC; i++)
+ printk("%s: Tx descriptor %d is %8.8x.%s\n",
+ dev->name, i, inl(ioaddr + TxStat0),
+ i == tp->dirty_tx ? " (queue head)" : "");
+ /* Perhaps stop and restart the chip's Tx processes . */
+
+ /* Todo: We should figure out what went wrong. */
+
+ dev->trans_start = jiffies;
+ tp->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+rtl8129_init_ring(struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_skbuff[i] = 0;
+ tp->tx_buf[i] = &tp->tx_bufs[i*TX_BUF_SIZE];
+ }
+}
+
+static int
+rtl8129_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+
+#ifndef final_version
+ if (skb == NULL || skb->len <= 0) {
+ printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+ dev->name);
+ dev_tint(dev);
+ return 0;
+ }
+#endif
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ if (jiffies - dev->trans_start < TX_TIMEOUT)
+ return 1;
+ rtl8129_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % NUM_TX_DESC;
+
+ tp->tx_skbuff[entry] = skb;
+ if ((long)skb->data & 3) { /* Must use alignment buffer. */
+ memcpy(tp->tx_buf[entry], skb->data, skb->len);
+ outl(virt_to_bus(tp->tx_buf[entry]), ioaddr + TxAddr0 + entry*4);
+ } else
+ outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
+ /* Note: the chip doesn't have auto-pad! */
+ outl(((TX_FIFO_THRESH<<11) & 0x003f0000) |
+ (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
+ ioaddr + TxStat0 + entry*4);
+
+ if (++tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {/* Typical path */
+ dev->tbusy = 0;
+ } else {
+ tp->tx_full = 1;
+ }
+
+ dev->trans_start = jiffies;
+ if (rtl8129_debug > 4)
+ printk("%s: Queued Tx packet at %p size %ld to slot %d.\n",
+ dev->name, skb->data, skb->len, entry);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void rtl8129_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs)
+{
+#ifdef SA_SHIRQ
+ struct device *dev = (struct device *)dev_instance;
+#else
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+ struct rtl8129_private *tp;
+ int ioaddr, boguscnt = max_interrupt_work;
+ int status;
+
+ if (dev == NULL) {
+ printk ("rtl8129_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ tp = (struct rtl8129_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ do {
+ status = inw(ioaddr + IntrStatus);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(status, ioaddr + IntrStatus);
+
+ if (rtl8129_debug > 4)
+ printk("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
+ dev->name, status, inw(ioaddr + IntrStatus));
+
+ if ((status & (PCIErr|PCSTimeout|RxUnderrun|RxOverflow|RxFIFOOver
+ |TxErr|TxOK|RxErr|RxOK)) == 0)
+ break;
+
+ if (status & (RxOK|RxUnderrun|RxOverflow|RxFIFOOver))/* Rx interrupt */
+ rtl8129_rx(dev);
+
+ if (status & (TxOK | TxErr)) {
+ int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx; dirty_tx < tp->cur_tx; dirty_tx++) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus = inl(ioaddr + TxStat0 + entry*4);
+
+ if ( ! (txstatus & TxHostOwns))
+ break; /* It still hasn't been Txed */
+
+ /* Note: TxCarrierLost is always asserted at 100mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (rtl8129_debug > 1)
+ printk("%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+#endif
+ tp->stats.tx_errors++;
+ if (txstatus&TxAborted) tp->stats.tx_aborted_errors++;
+ if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
+ if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+ if ((txstatus & 0x0f000000) == 0x0f000000)
+ tp->stats.collisions16++;
+#endif
+ } else {
+#ifdef ETHER_STATS
+ /* No count for tp->stats.tx_deferred */
+#endif
+ if (txstatus & TxUnderrun) {
+ /* Todo: increase the Tx FIFO threshold. */
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_kfree_skb(tp->tx_skbuff[entry], FREE_WRITE);
+ tp->tx_skbuff[entry] = 0;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
+ dirty_tx += NUM_TX_DESC;
+ }
+#endif
+
+ if (tp->tx_full && dev->tbusy
+ && dirty_tx > tp->cur_tx - NUM_TX_DESC) {
+ /* The ring is no longer full, clear tbusy. */
+ tp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ tp->dirty_tx = dirty_tx;
+ }
+
+ /* Check uncommon events with one test. */
+ if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
+ |TxErr|RxErr)) {
+ /* Update the error count. */
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+
+ if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+ tp->stats.rx_errors++;
+
+ if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
+ if (status & (RxUnderrun|RxFIFOOver)) tp->stats.rx_fifo_errors++;
+ if (status & RxOverflow) {
+ tp->stats.rx_over_errors++;
+ tp->cur_rx = inw(ioaddr + RxBufAddr) % RX_BUF_LEN;
+ outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
+ }
+ /* Error sources cleared above. */
+ }
+ if (--boguscnt < 0) {
+ printk("%s: Too much work at interrupt, IntrStatus=0x%4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ outw(0xffff, ioaddr + IntrStatus);
+ break;
+ }
+ } while (1);
+
+ if (rtl8129_debug > 3)
+ printk("%s: exiting interrupt, intr_status=%#4.4x.\n",
+ dev->name, inl(ioaddr + IntrStatus));
+
+ /* Code that should never be run! Perhaps remove after testing.. */
+ {
+ static int stopit = 10;
+ if (dev->start == 0 && --stopit < 0) {
+ printk("%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+ FREE_IRQ(irq, dev);
+ }
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* Todo: The data sheet doesn't describe the Rx ring at all, so I'm winging
+ it here until I have a chip to play with. 8/30/97 */
+static int
+rtl8129_rx(struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned char *rx_ring = tp->rx_ring;
+ u16 cur_rx = tp->cur_rx;
+
+ if (rtl8129_debug > 4)
+ printk("%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+ inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+
+ while ((inb(ioaddr + ChipCmd) & 1) == 0) {
+ u16 ring_offset = cur_rx % RX_BUF_LEN;
+ u32 rx_status = *(u32*)(rx_ring + ring_offset);
+ u16 rx_size = rx_status >> 16;
+
+ if (rtl8129_debug > 4) {
+ int i;
+ printk("%s: rtl8129_rx() status %4.4x, size %4.4x, cur %4.4x.\n",
+ dev->name, rx_status, rx_size, cur_rx);
+ printk("%s: Frame contents ", dev->name);
+ for (i = 0; i < 70; i++)
+ printk(" %2.2x", rx_ring[ring_offset + i]);
+ printk(".\n");
+ }
+ if (rx_status & RxTooLong) {
+ if (rtl8129_debug > 0)
+ printk("%s: Oversized Ethernet frame, status %4.4x!\n",
+ dev->name, rx_status);
+ tp->stats.rx_length_errors++;
+ } else if (rx_status &
+ (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
+ if (rtl8129_debug > 1)
+ printk("%s: Ethernet frame had errors,"
+ " status %4.4x.\n", dev->name, rx_status);
+ tp->stats.rx_errors++;
+ if (rx_status & (RxBadSymbol|RxBadAlign))
+ tp->stats.rx_frame_errors++;
+ if (rx_status & (RxRunt|RxTooLong)) tp->stats.rx_length_errors++;
+ if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++;
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ struct sk_buff *skb;
+
+ skb = DEV_ALLOC_SKB(rx_size + 2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ /* We should check that some rx space is free.
+ If not, free one and mark stats->rx_dropped++. */
+ tp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP fields. */
+ if (ring_offset+rx_size+4 > RX_BUF_LEN) {
+ int semi_count = RX_BUF_LEN - ring_offset - 4;
+ memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
+ semi_count);
+ memcpy(skb_put(skb, rx_size-semi_count), rx_ring,
+ rx_size-semi_count);
+ if (rtl8129_debug > 4) {
+ int i;
+ printk("%s: Frame wrap @%d", dev->name, semi_count);
+ for (i = 0; i < 16; i++)
+ printk(" %2.2x", rx_ring[i]);
+ printk(".\n");
+ memset(rx_ring, 0xcc, 16);
+ }
+ } else
+ memcpy(skb_put(skb, rx_size), &rx_ring[ring_offset + 4],
+ rx_size);
+#if LINUX_VERSION_CODE >= 0x10300
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ skb->len = rx_size;
+#endif
+ netif_rx(skb);
+ tp->stats.rx_packets++;
+ }
+
+ cur_rx += rx_size + 4;
+ cur_rx = (cur_rx + 3) & ~3;
+ outw(cur_rx - 16, ioaddr + RxBufPtr);
+ }
+ if (rtl8129_debug > 4)
+ printk("%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+ inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+ tp->cur_rx = cur_rx;
+ return 0;
+}
+
+static int
+rtl8129_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (rtl8129_debug > 1)
+ printk("%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrMask);
+
+ /* Stop the chip's Tx and Rx DMA processes. */
+ outb(0x00, ioaddr + ChipCmd);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+
+ del_timer(&tp->timer);
+
+#ifdef SA_SHIRQ
+ free_irq(dev->irq, dev);
+#else
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+#ifndef final_version
+ /* Used to monitor rx ring overflow. */
+ for (i = 0; i < 16; i++)
+ if (tp->rx_ring[RX_BUF_LEN+i] != 0xcc) {
+ printk("%s: Rx ring overflowed! Values are ", dev->name);
+ for (i = 0; i < 16; i++)
+ printk(" %2.2x", tp->rx_ring[RX_BUF_LEN + i]);
+ printk(".\n");
+ break;
+ }
+#endif
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (tp->tx_skbuff[i])
+ dev_kfree_skb(tp->tx_skbuff[i], FREE_WRITE);
+ tp->tx_skbuff[i] = 0;
+ }
+ kfree(tp->rx_ring);
+ kfree(tp->tx_bufs);
+
+ /* Green! Put the chip in low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(0x03, ioaddr + Config1);
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+rtl8129_get_stats(struct device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->start) {
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+ }
+
+ return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode(struct device *dev)
+#else
+set_rx_mode(struct device *dev, int num_addrs, void *addrs)
+#endif
+{
+ int ioaddr = dev->base_addr;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(0x0F, ioaddr + RxConfig);
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(0x0E, ioaddr + RxConfig);
+ } else if (dev->mc_count == 0) {
+ outb(0x0A, ioaddr + RxConfig);
+ return;
+ } else {
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ }
+ /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+ if (memcmp(mc_filter, tp->mc_filter, sizeof(mc_filter))) {
+ for (i = 0; i < 2; i++)
+ outl(((u32 *)mc_filter)[i], ioaddr + MAR0 + i*4);
+ memcpy(tp->mc_filter, mc_filter, sizeof(mc_filter));
+ }
+ if (rtl8129_debug > 3)
+ printk("%s: set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
+ dev->name, dev->flags, inl(ioaddr + RxConfig));
+ return;
+}
+\f
+#ifdef MODULE
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+
+int
+init_module(void)
+{
+ int cards_found;
+
+ if (debug >= 0)
+ rtl8129_debug = debug;
+
+ root_rtl8129_dev = NULL;
+ cards_found = rtl8139_probe(0);
+
+ return cards_found ? 0 : -ENODEV;
+}
+
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_rtl8129_dev) {
+ next_dev = ((struct rtl8129_private *)root_rtl8129_dev->priv)->next_module;
+ unregister_netdev(root_rtl8129_dev);
+ release_region(root_rtl8129_dev->base_addr, RTL8129_TOTAL_SIZE);
+ kfree(root_rtl8129_dev);
+ root_rtl8129_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+\f
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c rtl8139.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
--- /dev/null
+/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+/*
+ Written 1997-1998 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Support and updates available at
+ http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
+*/
+
+static const char *version = "yellowfin.c:v0.11 1/21/97 becker@cesdis.gsfc.nasa.gov\n";
+
+/* A few user-configurable values. */
+
+static int max_interrupt_work = 20;
+static int min_pci_latency = 64;
+static int mtu = 0;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+/* System-wide count of bogus-rx frames. */
+static int bogus_rx = 0;
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#elif defined(YF_NEW)
+static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
+static int fifo_cfg = 0x0028;
+#else
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static const int rx_copybreak = 100;
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((2000*HZ)/1000)
+
+#include <linux/config.h>
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+ This is only in the support-all-kernels source code. */
+#include <linux/version.h> /* Evil, but necessary */
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+#define RUN_AT(x) (x) /* What to put in timer->expires. */
+#define DEV_ALLOC_SKB(len) alloc_skb(len, GFP_ATOMIC)
+#define virt_to_bus(addr) ((unsigned long)addr)
+#define bus_to_virt(addr) ((void*)addr)
+
+#else /* 1.3.0 and later */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+#endif
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+#ifdef MODULE
+#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#else
+#undef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT
+#undef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#endif /* 1.3.38 */
+
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#endif
+#if (LINUX_VERSION_CODE >= 0x20100)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#ifdef SA_SHIRQ
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+#else
+#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+#endif
+#if (LINUX_VERSION_CODE < 0x20123)
+#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+
+static const char *card_name = "Yellowfin G-NIC Gbit Ethernet";
+
+/* The PCI I/O space extent. */
+#define YELLOWFIN_TOTAL_SIZE 0x100
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry yellowfin_drv =
+{card_name, yellowfin_pci_probe, YELLOWFIN_TOTAL_SIZE, NULL};
+#endif
+
+#ifdef YELLOWFIN_DEBUG
+int yellowfin_debug = YELLOWFIN_DEBUG;
+#else
+int yellowfin_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Yellowfin" Gigabit
+Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
+PCI card.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
+This is a descriptor list scheme similar to that used by the EEPro100 and
+Tulip. This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Yellowfin as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.
+
+IIIC. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'yp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
+
+IVb. References
+
+Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+IVc. Errata
+
+See Packet Engines confidential appendix.
+
+*/
+\f
+/* A few values that may be tweaked. */
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#ifndef PCI_VENDOR_ID_PKT_ENG /* To be defined in linux/pci.h */
+#define PCI_VENDOR_ID_PKT_ENG 0x1000 /* Hmm, likely number.. */
+#define PCI_DEVICE_ID_YELLOWFIN 0x0702
+#endif
+
+/* The rest of these values should never change. */
+
+static void yellowfin_timer(unsigned long data);
+
/* Offsets to the Yellowfin registers. Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, Cnfg=0xA0, RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
};

/* The Yellowfin Rx and Tx buffer descriptors, fetched by the chip over DMA. */
struct yellowfin_desc {
	u16 request_cnt;	/* Buffer/transfer length requested. */
	u16 cmd;		/* Command plus branch/interrupt/wait bits (desc_cmd_bits). */
	u32 addr;		/* Bus address of the data buffer. */
	u32 branch_addr;	/* Bus address of the next descriptor in the ring. */
	u16 result_cnt;		/* Residual count: frame size = request_cnt - result_cnt. */
	u16 status;		/* Completion status; non-zero once the descriptor is done. */
};

/* Per-packet Tx completion record written by a CMD_TXSTATUS descriptor.
   tx_errs doubles as the "done" flag and the error/collision bits in the
   interrupt handler.  NOTE(review): exact semantics of tx_cnt,
   total_tx_cnt and paused are not visible here -- they only appear in
   debug dumps; confirm against the chip specification. */
struct tx_status_words {
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x1000, CMD_RX_BUF=0x2000, CMD_TXSTATUS=0x3000,
	CMD_NOP=0x6000, CMD_STOP=0x7000,
	BRANCH_ALWAYS=0x0C, INTR_ALWAYS=0x30, WAIT_ALWAYS=0x03,
	BRANCH_IFTRUE=0x04,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
/* Driver-private state, hung off dev->priv.
   The descriptor rings MUST remain the first members: yellowfin_probe1()
   rounds the kmalloc() result up to a 32-byte boundary, so only leading
   members get the intended cache-line alignment. */
struct yellowfin_private {
	/* Descriptor rings first for alignment. Tx requires a second descriptor
	   for status. */
	struct yellowfin_desc rx_ring[RX_RING_SIZE];
	struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
	const char *product_name;
	struct device *next_module;	/* Chain for module unload (root_yellowfin_dev). */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct tx_status_words tx_status[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	int chip_id;
	struct enet_statistics stats;
	struct timer_list timer;	/* Media selection timer. */
	int in_interrupt;		/* Re-entry guard for the interrupt handler. */
	unsigned int cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int medialock:1;	/* Do not sense media. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	u32 pad[4];			/* Used for 32-byte alignment */
};
+
+#ifdef MODULE
+/* Used to pass the media type, etc. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+#if LINUX_VERSION_CODE > 0x20115
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(min_pci_latency, "i");
+MODULE_PARM(mtu, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+#endif
+
+#endif
+
+static struct device *yellowfin_probe1(struct device *dev, int ioaddr, int irq,
+ int chip_id, int options);
+static int yellowfin_open(struct device *dev);
+static void yellowfin_timer(unsigned long data);
+static void yellowfin_tx_timeout(struct device *dev);
+static void yellowfin_init_ring(struct device *dev);
+static int yellowfin_start_xmit(struct sk_buff *skb, struct device *dev);
+static int yellowfin_rx(struct device *dev);
+static void yellowfin_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs);
+static int yellowfin_close(struct device *dev);
+static struct enet_statistics *yellowfin_get_stats(struct device *dev);
+#ifdef NEW_MULTICAST
+static void set_rx_mode(struct device *dev);
+#else
+static void set_rx_mode(struct device *dev, int num_addrs, void *addrs);
+#endif
+
+\f
+
+#ifdef MODULE
+/* A list of all installed Yellowfin devices, for removing the driver module. */
+static struct device *root_yellowfin_dev = NULL;
+#endif
+
/* Scan the PCI bus (via the PCI BIOS / bios32 interface) for Packet
   Engines Yellowfin G-NIC boards and initialize each one found through
   yellowfin_probe1().  Also enables bus mastering and raises a too-low
   latency timer on each found board.
   Returns the number of cards found when built as a module, else 0.
   NOTE(review): 'min_pci_latency' and 'options[]' are defined elsewhere
   in this file. */
int yellowfin_probe(struct device *dev)
{
	int cards_found = 0;
	static int pci_index = 0;	/* Static, for multiple probe calls. */

	/* Ideally we would detect all network cards in slot order. That would
	   be best done by a central PCI probe dispatch, which wouldn't work
	   well with the current structure. So instead we detect just the
	   Yellowfin cards in slot order. */

	if (pcibios_present()) {
		unsigned char pci_bus, pci_device_fn;

		for (;pci_index < 0xff; pci_index++) {
			unsigned char pci_irq_line, pci_latency;
			unsigned short pci_command, vendor, device;
			/* chip_idx is always 0: only one chip type is known. */
			unsigned int pci_ioaddr, chip_idx = 0;

#ifdef REVERSE_PROBE_ORDER
			if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
									0xfe - pci_index,
									&pci_bus, &pci_device_fn)
				!= PCIBIOS_SUCCESSFUL)
				continue;
#else
			if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
									pci_index,
									&pci_bus, &pci_device_fn)
				!= PCIBIOS_SUCCESSFUL)
				break;
#endif
			pcibios_read_config_word(pci_bus, pci_device_fn,
									 PCI_VENDOR_ID, &vendor);
			pcibios_read_config_word(pci_bus, pci_device_fn,
									 PCI_DEVICE_ID, &device);
			pcibios_read_config_byte(pci_bus, pci_device_fn,
									 PCI_INTERRUPT_LINE, &pci_irq_line);
			pcibios_read_config_dword(pci_bus, pci_device_fn,
									  PCI_BASE_ADDRESS_0, &pci_ioaddr);
			/* Remove I/O space marker in bit 0. */
			pci_ioaddr &= ~3;

			if (vendor != PCI_VENDOR_ID_PKT_ENG)
				continue;

			if (device != PCI_DEVICE_ID_YELLOWFIN)
				continue;

			if (yellowfin_debug > 2)
				printk("Found Packet Engines Yellowfin G-NIC at I/O %#x, IRQ %d.\n",
					   pci_ioaddr, pci_irq_line);

			if (check_region(pci_ioaddr, YELLOWFIN_TOTAL_SIZE))
				continue;

#ifdef MODULE
			dev = yellowfin_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
								   cards_found < MAX_UNITS ? options[cards_found] : 0);
#else
			dev = yellowfin_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
								   dev ? dev->mem_start : 0);
#endif

			if (dev) {
				/* Get and check the bus-master and latency values. */
				pcibios_read_config_word(pci_bus, pci_device_fn,
										 PCI_COMMAND, &pci_command);
				if ( ! (pci_command & PCI_COMMAND_MASTER)) {
					printk(" PCI Master Bit has not been set! Setting...\n");
					pci_command |= PCI_COMMAND_MASTER;
					pcibios_write_config_word(pci_bus, pci_device_fn,
											  PCI_COMMAND, pci_command);
				}
				pcibios_read_config_byte(pci_bus, pci_device_fn,
										 PCI_LATENCY_TIMER, &pci_latency);
				if (pci_latency < min_pci_latency) {
					printk(" PCI latency timer (CFLT) is unreasonably low at %d."
						   " Setting to %d clocks.\n",
						   pci_latency, min_pci_latency);
					pcibios_write_config_byte(pci_bus, pci_device_fn,
											  PCI_LATENCY_TIMER, min_pci_latency);
				} else if (yellowfin_debug > 1)
					printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
				/* Forget this dev so the next loop iteration allocates
				   a fresh one for any further card. */
				dev = 0;
				cards_found++;
			}
		}
	}

#if defined (MODULE)
	return cards_found;
#else
	return 0;
#endif
}
+
/* Initialize one detected board: register the net device, read the
   station address from the chip's StnAddr registers, reset the chip,
   allocate the aligned private state, and install the driver entry
   points.  'options' carries overrides: low 4 bits select the media
   port, bit 4 forces full duplex. */
static struct device *yellowfin_probe1(struct device *dev, int ioaddr, int irq,
									   int chip_id, int options)
{
	static int did_version = 0;	/* Already printed version info. */
	struct yellowfin_private *yp;
	int i;

	if (yellowfin_debug > 0 && did_version++ == 0)
		printk(version);

	dev = init_etherdev(dev, sizeof(struct yellowfin_private));

	printk("%s: P-E Yellowfin type %8x at %#3x, ",
		   dev->name, inl(ioaddr + ChipRev), ioaddr);

	/* Report the 6-byte station address: 5 bytes in the loop, the 6th
	   (i == 5 after the loop) in the printk below. */
	for (i = 0; i < 5; i++)
			printk("%2.2x:", inb(ioaddr + StnAddr + i));
	printk("%2.2x, IRQ %d.\n", inb(ioaddr + StnAddr + i), irq);
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + StnAddr + i);

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);


	/* We do a request_region() only to register /proc/ioports info. */
	request_region(ioaddr, YELLOWFIN_TOTAL_SIZE, card_name);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned.
	   NOTE(review): the kmalloc() result is not checked for NULL, and
	   rounding up discards the original pointer so this memory can never
	   be kfree()'d -- TODO: keep the raw pointer and check it. */
	yp = (void *)(((long)kmalloc(sizeof(*yp), GFP_KERNEL) + 31) & ~31);
	memset(yp, 0, sizeof(*yp));
	dev->priv = yp;

#ifdef MODULE
	yp->next_module = root_yellowfin_dev;
	root_yellowfin_dev = dev;
#endif

	yp->chip_id = chip_id;

	yp->full_duplex = 1;
#ifdef YELLOWFIN_DEFAULT_MEDIA
	yp->default_port = YELLOWFIN_DEFAULT_MEDIA;
#endif
#ifdef YELLOWFIN_NO_MEDIA_SWITCH
	yp->medialock = 1;
#endif

	/* The lower four bits are the media type. */
	if (options > 0) {
		yp->full_duplex = (options & 16) ? 1 : 0;
		yp->default_port = options & 15;
		if (yp->default_port)
			yp->medialock = 1;
	}

	/* The Yellowfin-specific entries in the device structure. */
	dev->open = &yellowfin_open;
	dev->hard_start_xmit = &yellowfin_start_xmit;
	dev->stop = &yellowfin_close;
	dev->get_stats = &yellowfin_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	if (mtu)
		dev->mtu = mtu;

	/* todo: Reset the xcvr interface and turn on heartbeat. */

	return dev;
}
+
+\f
/* Bring the interface up: reset the chip, claim the IRQ, build the
   descriptor rings, program the DMA/FIFO/flow-control registers, enable
   interrupts, start both DMA channels and arm the media timer.
   Returns 0 on success, -EAGAIN if the IRQ cannot be claimed.
   The register write order below follows the hardware bring-up sequence
   and should not be rearranged. */
static int
yellowfin_open(struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int ioaddr = dev->base_addr;

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

#ifdef SA_SHIRQ
	if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ,
					card_name, dev)) {
		return -EAGAIN;
	}
#else
	if (irq2dev_map[dev->irq] != NULL
		|| (irq2dev_map[dev->irq] = dev) == NULL
		|| dev->irq == 0
		|| request_irq(dev->irq, &yellowfin_interrupt, 0, card_name)) {
		return -EAGAIN;
	}
#endif

	if (yellowfin_debug > 1)
		printk("%s: yellowfin_open() irq %d.\n", dev->name, dev->irq);

	MOD_INC_USE_COUNT;

	yellowfin_init_ring(dev);

	outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
	outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	outl(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	outl(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	outl(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	outl(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	outl(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	outl(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many registers this will
	   eventually be converted to an offset/value list.
	   (dma_ctrl and fifo_cfg are tunables defined elsewhere in this file.) */
	outl(dma_ctrl, ioaddr + DMACtrl);
	outw(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	outl(0x0030FFFF, ioaddr + FlowCtrl);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	dev->tbusy = 0;
	dev->interrupt = 0;
	yp->in_interrupt = 0;

	/* We are always in full-duplex mode with the current chip! */
	yp->full_duplex = 1;

	/* Setting the Rx mode will start the Rx process. */
	outw(0x01CD | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
#ifdef NEW_MULTICAST
	set_rx_mode(dev);
#else
	set_rx_mode(dev, 0, 0);
#endif

	dev->start = 1;

	/* Enable interrupts by setting the interrupt mask. */
	outw(0x81ff, ioaddr + IntrEnb);	/* See enum intr_status_bits */
	outw(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */
	outl(0x80008000, ioaddr + RxCtrl);	/* Start Rx and Tx channels. */
	outl(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		printk("%s: Done yellowfin_open().\n",
			   dev->name);
	}
	/* Set the timer to check for link beat. */
	init_timer(&yp->timer);
	yp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	yp->timer.data = (unsigned long)dev;
	yp->timer.function = &yellowfin_timer;	/* timer handler */
	add_timer(&yp->timer);

	return 0;
}
+
+static void yellowfin_timer(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int next_tick = 0;
+
+ if (yellowfin_debug > 3) {
+ printk("%s: Yellowfin timer tick, status %8.8x.\n",
+ dev->name, inl(ioaddr + IntrStatus));
+ }
+ if (next_tick) {
+ yp->timer.expires = RUN_AT(next_tick);
+ add_timer(&yp->timer);
+ }
+}
+
/* Called from yellowfin_start_xmit() when a packet has been queued for
   longer than TX_TIMEOUT.  Dumps ring state (except on Alpha, where the
   int casts would truncate pointers) and counts the error.
   NOTE(review): the chip is not actually reset or restarted here -- see
   the to-do comments below. */
static void yellowfin_tx_timeout(struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int ioaddr = dev->base_addr;

	printk("%s: Yellowfin transmit timed out, status %8.8x, resetting...\n",
		   dev->name, inl(ioaddr));

#ifndef __alpha__
	{
		int i;
		printk(" Rx ring %8.8x: ", (int)yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)yp->rx_ring[i].status);
		printk("\n Tx ring %8.8x: ", (int)yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x /%4.4x", yp->tx_status[i].tx_errs, yp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Stop and restart the chip's Tx processes . */

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	yp->stats.tx_errors++;
	return;
}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+yellowfin_init_ring(struct device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+ yp->dirty_rx = yp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ int pkt_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ yp->rx_ring[i].request_cnt = pkt_buf_sz;
+ yp->rx_ring[i].cmd = CMD_RX_BUF | INTR_ALWAYS;
+
+ skb = DEV_ALLOC_SKB(pkt_buf_sz);
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+#if LINUX_VERSION_CODE > 0x10300
+ yp->rx_ring[i].addr = virt_to_bus(skb->tail);
+#else
+ yp->rx_ring[i].addr = virt_to_bus(skb->data);
+#endif
+ yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i+1]);
+ }
+ /* Mark the last entry as wrapping the ring. */
+ yp->rx_ring[i-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
+ yp->rx_ring[i-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);
+
+/*#define NO_TXSTATS*/
+#ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = 0;
+ yp->tx_ring[i].cmd = CMD_STOP;
+ yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
+ }
+ yp->tx_ring[--i].cmd = CMD_STOP | BRANCH_ALWAYS; /* Wrap ring */
+ yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
+#else
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE*2; i++) {
+ yp->tx_skbuff[i/2] = 0;
+ yp->tx_ring[i].cmd = CMD_STOP; /* Branch on Tx error. */
+ yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
+ i++;
+ yp->tx_ring[i].cmd = CMD_TXSTATUS; /* Interrupt, no wait. */
+ yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
+ yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2]);
+ yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].cmd = CMD_TXSTATUS | BRANCH_ALWAYS | INTR_ALWAYS;
+ yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
+#endif
+}
+
/* Queue one packet for transmission.  Returns 0 on success, 1 when the
   caller should requeue (device busy), per the 1.3/2.0 driver model with
   dev->tbusy as the mutual-exclusion flag.  The descriptor 'cmd' write
   that hands ownership to the chip is deliberately last. */
static int
yellowfin_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping. This could better be
	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		if (jiffies - dev->trans_start < TX_TIMEOUT)
			return 1;
		yellowfin_tx_timeout(dev);
		return 1;
	}

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
	yp->tx_ring[entry].request_cnt = skb->len;
	yp->tx_ring[entry].addr = virt_to_bus(skb->data);
	yp->tx_ring[entry].status = 0;
	if (entry >= TX_RING_SIZE-1) {
		yp->tx_ring[0].cmd = CMD_STOP; /* New stop command. */
		yp->tx_ring[TX_RING_SIZE-1].cmd = CMD_TX_PKT | BRANCH_ALWAYS;
	} else {
		yp->tx_ring[entry+1].cmd = CMD_STOP; /* New stop command. */
		yp->tx_ring[entry].cmd = CMD_TX_PKT | BRANCH_IFTRUE;
	}
	yp->cur_tx++;
#else
	/* Descriptor pairs: even index = data, odd index = status write. */
	yp->tx_ring[entry<<1].request_cnt = skb->len;
	yp->tx_ring[entry<<1].addr = virt_to_bus(skb->data);
	/* The input_last (status-write) command is constant, but we must rewrite
	   the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].cmd = CMD_STOP;
	}
	/* Final step -- overwrite the old 'stop' command.
	   Interrupt only on every sixth packet to reduce interrupt load. */

	yp->tx_ring[entry<<1].cmd =
		(entry % 6) == 0 ? CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
		CMD_TX_PKT | BRANCH_IFTRUE;
#endif

	/* Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
		dev->tbusy = 0;	/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk("%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void yellowfin_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs)
+{
+#ifdef SA_SHIRQ /* Use the now-standard shared IRQ implementation. */
+ struct device *dev = (struct device *)dev_instance;
+#else
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+
+ struct yellowfin_private *lp;
+ int ioaddr, boguscnt = max_interrupt_work;
+
+ if (dev == NULL) {
+ printk ("yellowfin_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct yellowfin_private *)dev->priv;
+ if (test_and_set_bit(0, (void*)&lp->in_interrupt)) {
+ dev->interrupt = 1;
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ do {
+ u16 intr_status = inw(ioaddr + IntrClear);
+ unsigned dirty_tx = lp->dirty_tx;
+
+ if (yellowfin_debug > 4)
+ printk("%s: Yellowfin interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & (IntrRxDone | IntrEarlyRx))
+ yellowfin_rx(dev);
+
+#ifdef NO_TXSTATS
+ for (; dirty_tx < lp->cur_tx; dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ if (lp->tx_ring[entry].status == 0)
+ break;
+ /* Free the original skb. */
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ lp->stats.tx_packets++;
+ }
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 4) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ lp->dirty_tx = dirty_tx;
+#else
+ if (intr_status & IntrTxDone
+ || lp->tx_status[dirty_tx % TX_RING_SIZE].tx_errs) {
+
+ for (dirty_tx = lp->dirty_tx; dirty_tx < lp->cur_tx; dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+ u16 tx_errs = lp->tx_status[entry].tx_errs;
+
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+ if (tx_errs & 0xF8100000) {
+ /* There was an major error, log it. */
+#ifndef final_version
+ if (yellowfin_debug > 1)
+ printk("%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+ lp->stats.tx_errors++;
+ if (tx_errs & 0xF800) lp->stats.tx_aborted_errors++;
+ if (tx_errs & 0x0800) lp->stats.tx_carrier_errors++;
+ if (tx_errs & 0x2000) lp->stats.tx_window_errors++;
+ if (tx_errs & 0x8000) lp->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (tx_errs & 0x1000) lp->stats.collisions16++;
+#endif
+ } else {
+#ifdef ETHER_STATS
+ if (status & 0x0400) lp->stats.tx_deferred++;
+#endif
+ lp->stats.collisions += tx_errs & 15;
+ lp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ /* Mark status as empty. */
+ lp->tx_status[entry].tx_errs = 0;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+#endif
+
+ /* Log errors and other events. */
+ if (intr_status & 0x2ee) { /* Abnormal error summary. */
+ printk("%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear what to do here. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ lp->stats.tx_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ lp->stats.rx_errors++;
+ }
+ if (--boguscnt < 0) {
+ printk("%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (yellowfin_debug > 3)
+ printk("%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ /* Code that should never be run! Perhaps remove after testing.. */
+ {
+ static int stopit = 10;
+ if (dev->start == 0 && --stopit < 0) {
+ printk("%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+#ifdef SA_SHIRQ
+ free_irq(irq, dev);
+#else
+ free_irq(irq);
+#endif
+ }
+ }
+
+ dev->interrupt = 0;
+ lp->in_interrupt = 0;
+ return;
+}
+
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation.
   Walks the Rx ring from cur_rx: for each completed descriptor it reads
   the chip-appended trailer status word (last 2 bytes of the received
   data), counts errors, and hands good frames up -- in place for frames
   larger than rx_copybreak, copied into a fresh skbuff otherwise.
   NOTE(review): 'lp' and 'yp' alias the same private struct; one of them
   should be dropped. */
static int
yellowfin_rx(struct device *dev)
{
	struct yellowfin_private *lp = (struct yellowfin_private *)dev->priv;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int entry = lp->cur_rx % RX_RING_SIZE;
	int boguscnt = 20;	/* Limit on work per call. */

	if (yellowfin_debug > 4) {
		printk(" In yellowfin_rx(), entry %d status %4.4x.\n", entry,
			   yp->rx_ring[entry].status);
		printk(" #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x.\n",
			   entry, yp->rx_ring[entry].cmd,
			   yp->rx_ring[entry].request_cnt, yp->rx_ring[entry].addr,
			   yp->rx_ring[entry].result_cnt, yp->rx_ring[entry].status);
	}


	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (yp->rx_ring[entry].status) {
		/* Todo: optimize this mess. */
		u16 desc_status = yp->rx_ring[entry].status;
		struct yellowfin_desc *desc = &lp->rx_ring[entry];
		/* Bytes actually DMAed = requested - residual count. */
		int frm_size = desc->request_cnt - desc->result_cnt;
		u8 *buf_addr = bus_to_virt(lp->rx_ring[entry].addr);
		/* The chip appends a 16-bit status word at the end of the data. */
		s16 frame_status = *(s16*)&(buf_addr[frm_size - 2]);

		if (yellowfin_debug > 4)
			printk(" yellowfin_rx() status was %4.4x.\n", frame_status);
		if (--boguscnt < 0)
			break;
		if ( ! (desc_status & RX_EOP)) {
			printk("%s: Oversized Ethernet frame spanned multiple buffers,"
				   " status %4.4x!\n", dev->name, desc_status);
			lp->stats.rx_length_errors++;
		} else if (frame_status & 0x0038) {
			/* There was an error. */
			if (yellowfin_debug > 3)
				printk(" yellowfin_rx() Rx error was %4.4x.\n", frame_status);
			lp->stats.rx_errors++;
			if (frame_status & 0x0060) lp->stats.rx_length_errors++;
			if (frame_status & 0x0008) lp->stats.rx_frame_errors++;
			if (frame_status & 0x0010) lp->stats.rx_crc_errors++;
			if (frame_status < 0) lp->stats.rx_dropped++;
#ifdef YF_PROTOTYPE	/* Support for prototype hardware errata. */
		} else if (memcmp(bus_to_virt(lp->rx_ring[entry].addr),
						  dev->dev_addr, 6) != 0
				   && memcmp(bus_to_virt(lp->rx_ring[entry].addr),
							 "\377\377\377\377\377\377", 6) != 0) {
			printk("%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
				   dev->name,
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[0],
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[1],
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[2],
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[3],
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[4],
				   ((char *)bus_to_virt(lp->rx_ring[entry].addr))[5]);
			bogus_rx++;
#endif
		} else {
			/* Trailer layout (from the arithmetic below): an 8-byte
			   trailer plus 'bogus_cnt' pad bytes follow the payload.
			   NOTE(review): confirm against the chip specification. */
			u8 bogus_cnt = buf_addr[frm_size - 8];
			short pkt_len = frm_size - 8 - bogus_cnt;
			struct sk_buff *skb;
			int rx_in_place = 0;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				char *temp;

				/* Get a fresh skbuff to replace the filled one. */
				newskb = DEV_ALLOC_SKB(dev->mtu <= 1500 ? PKT_BUF_SZ
									   : dev->mtu + 32);
				if (newskb == NULL) {
					skb = 0;	/* No memory, drop the packet. */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb = lp->rx_skbuff[entry];
				temp = skb_put(skb, pkt_len);
				if (bus_to_virt(lp->rx_ring[entry].addr) != temp)
					printk("%s: Warning -- the skbuff addresses do not match"
						   " in yellowfin_rx: %p vs. %p / %p.\n", dev->name,
						   bus_to_virt(lp->rx_ring[entry].addr),
						   skb->head, temp);
				rx_in_place = 1;
				lp->rx_skbuff[entry] = newskb;
				newskb->dev = dev;
				skb_reserve(newskb, 2);	/* 16 byte align IP header */
				lp->rx_ring[entry].addr = virt_to_bus(newskb->tail);
			} else
				skb = DEV_ALLOC_SKB(pkt_len + 2);
			memory_squeeze:
			if (skb == NULL) {
				printk("%s: Memory squeeze, deferring packet.\n", dev->name);
				/* todo: Check that at least two ring entries are free.
				   If not, free one and mark stats->rx_dropped++. */
				break;
			}
			skb->dev = dev;
			if (! rx_in_place) {
				skb_reserve(skb, 2);	/* 16 byte align the data fields */
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(lp->rx_ring[entry].addr), pkt_len);
			}
#if LINUX_VERSION_CODE > 0x10300
			skb->protocol = eth_type_trans(skb, dev);
#else
			skb->len = pkt_len;
#endif
			netif_rx(skb);
			lp->stats.rx_packets++;
		}

		/* Mark this entry as being the end-of-list, and the prior entry
		   as now valid. */
		lp->rx_ring[entry].cmd = CMD_STOP;
		yp->rx_ring[entry].status = 0;
		{
			int prev_entry = entry - 1;
			if (prev_entry < 0)
				lp->rx_ring[RX_RING_SIZE - 1].cmd =
					CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
			else
				lp->rx_ring[prev_entry].cmd = CMD_RX_BUF | INTR_ALWAYS;
		}
		entry = (++lp->cur_rx) % RX_RING_SIZE;
	}
	/* todo: restart Rx engine if stopped. For now we just make the Rx ring
	   large enough to avoid this. */

	return 0;
}
+
/* Shut the interface down: mask interrupts, stop both DMA channels,
   delete the media timer, release the IRQ and free every ring skbuff.
   Optionally dumps the complete ring state (x86 only, since the int
   casts in the dump would truncate 64-bit pointers). */
static int
yellowfin_close(struct device *dev)
{
	int ioaddr = dev->base_addr;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int i;

	dev->start = 0;
	dev->tbusy = 1;

	if (yellowfin_debug > 1) {
		printk("%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
			   dev->name, inw(ioaddr + TxStatus),
			   inw(ioaddr + RxStatus), inl(ioaddr + IntrStatus));
		printk("%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	outw(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	outl(0x80000000, ioaddr + RxCtrl);
	outl(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#ifdef __i386__
	if (yellowfin_debug > 2) {
		printk("\n Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(" %c #%d desc. %4.4x %4.4x %8.8x %8.8x %4.4x %4.4x.\n",
				   inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].cmd,
				   yp->tx_ring[i].request_cnt, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr,
				   yp->tx_ring[i].result_cnt, yp->tx_ring[i].status);
		printk(" Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk("\n Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(" %c #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x\n",
				   inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].cmd,
				   yp->rx_ring[i].request_cnt, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_cnt, yp->rx_ring[i].status);
			if (yellowfin_debug > 5) {
				/* NOTE(review): treats the bus address in .addr as a
				   virtual pointer -- only valid where they coincide. */
				if (*(u8*)yp->rx_ring[i].addr != 0x69) {
					int j;
					for (j = 0; j < 0x50; j++)
						printk(" %4.4x", ((u16*)yp->rx_ring[i].addr)[j]);
					printk("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

#ifdef SA_SHIRQ
	free_irq(dev->irq, dev);
#else
	free_irq(dev->irq);
	irq2dev_map[dev->irq] = 0;
#endif

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].cmd = CMD_STOP;
		yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (yp->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
			yp->rx_skbuff[i]->free = 1;
#endif
			dev_kfree_skb(yp->rx_skbuff[i], FREE_WRITE);
		}
		yp->rx_skbuff[i] = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i], FREE_WRITE);
		yp->tx_skbuff[i] = 0;
	}

#ifdef YF_PROTOTYPE	/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		printk("%s: Received %d frames that we should not have.\n",
			   dev->name, bogus_rx);
	}
#endif
	MOD_DEC_USE_COUNT;

	return 0;
}
+
+static struct enet_statistics *
+yellowfin_get_stats(struct device *dev)
+{
+	/* Statistics are accumulated in the driver's private area. */
+	return &((struct yellowfin_private *)dev->priv)->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+   N.B. Do not use for bulk data, use a table-based routine instead.
+   This is common code and should be moved to net/core/crc.c */
+/* 0xedb88320 is the bit-reversed form of the IEEE 802.3 CRC-32
+   polynomial 0x04c11db7, as used by the LSB-first algorithm below. */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+/* Compute the CRC-32 of the first 'length' bytes at 'data',
+   processing each octet least-significant bit first.  Only the
+   (truncated) CRC of short prefixes of an ethernet address is
+   needed here, hence the bitwise (non-table) implementation. */
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+	unsigned int crc = 0xffffffff;	/* Initial value. */
+	while(--length >= 0) {
+		unsigned char current_octet = *data++;
+		int bit;
+		/* Fold in one octet, one bit per iteration, LSB first. */
+		for (bit = 8; --bit >= 0; current_octet >>= 1) {
+			if ((crc ^ current_octet) & 1) {
+				crc >>= 1;
+				crc ^= ethernet_polynomial_le;
+			} else
+				crc >>= 1;
+		}
+	}
+	return crc;
+}
+
+
+/* Program the chip's Rx address filter from the interface state:
+ * promiscuous, all-multicast, a 64-bit multicast hash filter, or
+ * plain unicast/broadcast.  The Rx process is stopped (Cnfg bit
+ * 0x1000) around the register updates and restarted afterwards.
+ */
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode(struct device *dev)
+#else
+set_rx_mode(struct device *dev, int num_addrs, void *addrs)	/* extra args unused */
+#endif
+{
+	int ioaddr = dev->base_addr;
+	u16 cfg_value = inw(ioaddr + Cnfg);
+
+	/* Stop the Rx process to change any value. */
+	outw(cfg_value & ~0x1000, ioaddr + Cnfg);
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		outw(0x000F, ioaddr + AddrMode);
+	} else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to filter well, or accept all multicasts. */
+		outw(0x000B, ioaddr + AddrMode);
+	} else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+		struct dev_mc_list *mclist;
+		u16 hash_table[4];
+		int i;
+		memset(hash_table, 0, sizeof(hash_table));
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+			 i++, mclist = mclist->next) {
+			/* Due to a bug in the early chip versions, multiple filter
+			   slots must be set for each address. */
+			set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
+					hash_table);
+			set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
+					hash_table);
+			set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
+					hash_table);
+			set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
+					hash_table);
+		}
+		/* Copy the hash table to the chip. */
+		for (i = 0; i < 4; i++)
+			outw(hash_table[i], ioaddr + HashTbl + i*2);
+		outw(0x0003, ioaddr + AddrMode);
+	} else {					/* Normal, unicast/broadcast-only mode. */
+		outw(0x0001, ioaddr + AddrMode);
+	}
+	/* Restart the Rx process. */
+	outw(cfg_value | 0x1000, ioaddr + Cnfg);
+}
+\f
+#ifdef MODULE
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+
+int
+init_module(void)
+{
+	/* Honor a "debug=" module parameter, if one was supplied. */
+	if (debug >= 0)
+		yellowfin_debug = debug;
+
+	/* Probe for cards; the list head is (re)built by the probe. */
+	root_yellowfin_dev = NULL;
+
+	/* Succeed only if at least one adapter was found. */
+	return yellowfin_probe(0) ? 0 : -ENODEV;
+}
+
+/* Module unload: walk the singly linked list of probed cards,
+   unregistering each device, releasing its I/O region, and freeing
+   its memory.  The list link lives in the private area, so it must
+   be read before the device (and its private area) is freed. */
+void
+cleanup_module(void)
+{
+	struct device *next_dev;
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (root_yellowfin_dev) {
+		next_dev = ((struct yellowfin_private *)root_yellowfin_dev->priv)->next_module;
+		unregister_netdev(root_yellowfin_dev);
+		release_region(root_yellowfin_dev->base_addr, YELLOWFIN_TOTAL_SIZE);
+		kfree(root_yellowfin_dev);
+		root_yellowfin_dev = next_dev;
+	}
+}
+
+#endif /* MODULE */
+\f
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
case BLKRRPART: /* Re-read partition tables */
return revalidate_scsidisk(dev, 1);
+
+ RO_IOCTLS(dev, arg);
+
default:
return scsi_ioctl(rscsi_disks[MINOR(dev) >> 4].device , cmd, (void *) arg);
}
return;
}
if (inode->i_count != 1) {
- printk("free_inode: inode has count=%d\n",inode->i_count);
+ printk("free_inode: inode has count=%ld\n",inode->i_count);
return;
}
if (inode->i_nlink) {
return;
}
if (inode->i_count > 1) {
- printk ("ext2_free_inode: inode has count=%d\n",
+ printk ("ext2_free_inode: inode has count=%ld\n",
inode->i_count);
return;
}
return;
}
if (inode->i_count != 1) {
- printk("free_inode: inode has count=%d\n",inode->i_count);
+ printk("free_inode: inode has count=%ld\n",inode->i_count);
return;
}
if (inode->i_nlink) {
lock_super(sb);
if (inode->i_count > 1) {
-printk("ncp_put_inode: inode in use device %s, inode %ld, count=%d\n",
+printk("ncp_put_inode: inode in use device %s, inode %ld, count=%ld\n",
kdevname(inode->i_dev), inode->i_ino, inode->i_count);
goto unlock;
}
__u32 mtime = inode->i_mtime;
if (inode->i_count > 1) {
- printk("smb_put_inode: in use device %s, inode %ld count=%d\n",
+ printk("smb_put_inode: in use device %s, inode %ld count=%ld\n",
kdevname(inode->i_dev), inode->i_ino, inode->i_count);
return;
}
return;
}
if (inode->i_count != 1) {
- printk("sysv_free_inode: inode has count=%d\n", inode->i_count);
+ printk("sysv_free_inode: inode has count=%ld\n", inode->i_count);
return;
}
if (inode->i_nlink) {
void ufs_print_inode(struct inode * inode)
{
- printk("ino %lu mode 0%6.6o lk %d uid %d gid %d sz %lu blks %lu cnt %u\n",
+ printk("ino %lu mode 0%6.6o lk %d uid %d gid %d sz %lu blks %lu cnt %lu\n",
inode->i_ino, inode->i_mode, inode->i_nlink, inode->i_uid, inode->i_gid, inode->i_size, inode->i_blocks, inode->i_count);
printk(" db <0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x>\n",
inode->u.ufs_i.ui_db[0], inode->u.ufs_i.ui_db[1],
struct inode *i_hash_next, *i_hash_prev;
struct inode *i_bound_to, *i_bound_by;
struct inode *i_mount;
- unsigned short i_count;
+ unsigned long i_count; /* needs to be > (address_space * tasks)>>pagebits */
unsigned short i_flags;
unsigned char i_lock;
unsigned char i_dirt;
#define CMD_SEEK 0x0B /* seek */
/* Controller specific commands */
-#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X only?) */
+#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */
#define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */
#define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */
#define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */
#define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */
#define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */
#define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */
+#define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) */
/* Bits for command status byte */
#define CSB_ERROR 0x02 /* error */
u_char control;
} XD_INFO;
-#define HDIO_GETGEO 0x0301 /* get drive geometry */
+#define HDIO_GETGEO 0x0301 /* get drive geometry */
+#define HDIO_GET_MULTCOUNT 0x0304 /* get limit of multiblock transfer */
+#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
+#define HDIO_GET_DMA 0x030b /* get use-dma flag */
/* this structure is returned to the HDIO_GETGEO ioctl */
typedef struct {
} XD_SIGNATURE;
void xd_setup (char *command,int *integers);
+#ifndef MODULE
+void xd_manual_geo_init (char *command,int *integers);
+#endif /* MODULE */
static u_char xd_detect (u_char *controller,u_char **address);
static u_char xd_initdrives (void (*init_drive)(u_char drive));
static void xd_geninit (struct gendisk *);
static void xd_interrupt_handler (int irq, void *dev_id, struct pt_regs *regs);
static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
+static void xd_wakeup (void);
+static void xd_watchdog (void);
static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
/* card specific setup and geometry gathering code */
static void xd_dtc_init_controller (u_char *address);
+static void xd_dtc5150cx_init_drive (u_char drive);
static void xd_dtc_init_drive (u_char drive);
static void xd_wd_init_controller (u_char *address);
static void xd_wd_init_drive (u_char drive);
static void xd_seagate_init_drive (u_char drive);
static void xd_omti_init_controller (u_char *address);
static void xd_omti_init_drive (u_char drive);
+static void xd_xebec_init_controller (u_char *address);
+static void xd_xebec_init_drive (u_char drive);
static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
static void xd_override_init_drive (u_char drive);
extern void lp_setup(char *str, int *ints);
extern void eth_setup(char *str, int *ints);
extern void xd_setup(char *str, int *ints);
+extern void xd_manual_geo_init(char *str, int *ints);
extern void floppy_setup(char *str, int *ints);
extern void st_setup(char *str, int *ints);
extern void st0x_setup(char *str, int *ints);
#endif
#ifdef CONFIG_BLK_DEV_XD
{ "xd=", xd_setup },
+ { "xd_geo=", xd_manual_geo_init },
#endif
#ifdef CONFIG_BLK_DEV_FD
{ "floppy=", floppy_setup },
{ "hdb", 0x0340 },
{ "hdc", 0x1600 },
{ "hdd", 0x1640 },
+ { "hde", 0x2100 },
+ { "hdf", 0x2140 },
+	{ "hdg", 0x2200 },
+	{ "hdh", 0x2240 },
{ "sda", 0x0800 },
{ "sdb", 0x0810 },
{ "sdc", 0x0820 },
{ "sdd", 0x0830 },
{ "sde", 0x0840 },
+ { "sdf", 0x0850 },
+ { "sdg", 0x0860 },
+ { "sdh", 0x0870 },
+ { "sdi", 0x0880 },
+ { "sdj", 0x0890 },
+ { "sdk", 0x08a0 },
+ { "sdl", 0x08b0 },
+ { "sdm", 0x08c0 },
+ { "sdn", 0x08d0 },
+ { "sdo", 0x08e0 },
+ { "sdp", 0x08f0 },
{ "fd", 0x0200 },
{ "xda", 0x0d00 },
{ "xdb", 0x0d40 },
int sspace = sizeof(struct module) + MOD_MAX_NAME;
char name[MOD_MAX_NAME];
- if (!suser())
+ if (!suser() || securelevel > 0)
return -EPERM;
if (module_name == NULL || size == 0)
return -EINVAL;
int error;
struct mod_routines rt;
- if (!suser())
+ if (!suser() || securelevel > 0)
return -EPERM;
#ifdef __i386__
char name[MOD_MAX_NAME];
int error;
- if (!suser())
+ if (!suser() || securelevel > 0)
return -EPERM;
/* else */
if (module_name != NULL) {
int i;
int * groups;
- if (gidsetsize < 0)
+ /* Avoid an integer overflow on systems with 32 bit gid_t (Alpha) */
+ if (gidsetsize & ~0x3FFFFFFF)
return -EINVAL;
groups = current->groups;
for (i = 0 ; i < NGROUPS ; i++) {
struct ctl_table_header *tmp;
void *context;
- if (nlen == 0 || nlen >= CTL_MAXNAME)
+ if (nlen <= 0 || nlen >= CTL_MAXNAME)
return -ENOTDIR;
error = verify_area(VERIFY_READ,name,nlen*sizeof(int));
if (newval)
op |= 002;
if (ctl_perm(table, op))
- return -EPERM;
+ if( table->data != &securelevel || current->euid)
+ return -EPERM;
if (table->strategy) {
rc = table->strategy(table, name, nlen, oldval, oldlenp,
return tmp;
}
-void unregister_sysctl_table(struct ctl_table_header * table)
+void unregister_sysctl_table(struct ctl_table_header * header)
{
- DLIST_DELETE(table, ctl_entry);
+ DLIST_DELETE(header, ctl_entry);
#ifdef CONFIG_PROC_FS
- unregister_proc_table(table->ctl_table, &proc_sys_root);
+ unregister_proc_table(header->ctl_table, &proc_sys_root);
#endif
+ kfree(header);
}
/*
unregister_proc_table(table->child, de);
}
proc_unregister(root, de->low_ino);
+ table->de = NULL;
kfree(de);
}
}
if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
kill_proc(pid, SIGBUS, 1);
} else {
- if (page_map->count != 1)
- return 0;
if (!(entry = get_swap_page()))
return 0;
vma->vm_mm->rss--;
extern __inline__ void dev_load(const char *name)
{
- if(!dev_get(name)) {
+ if(!dev_get(name) && suser()) {
#ifdef CONFIG_NET_ALIAS
const char *sptr;
{
int old_flags = dev->flags;
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
/*
* We are not allowed to potentially close/unload
* a device until we get this lock.
{
if(dev->set_mac_address==NULL)
return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
ret=dev->set_mac_address(dev,&ifr.ifr_addr);
}
else
case SIOCSIFHWADDR:
if(dev->set_mac_address==NULL)
return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
if(ifr.ifr_hwaddr.sa_family!=dev->type)
return -EINVAL;
ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
if [ "$CONFIG_IP_MASQUERADE" != "n" ]; then
comment 'Protocol-specific masquerading support will be built as modules.'
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- bool 'IP: ipautofw masq support' CONFIG_IP_MASQUERADE_IPAUTOFW
+ bool 'IP: ipautofw masquerading (EXPERIMENTAL)' CONFIG_IP_MASQUERADE_IPAUTOFW
fi
bool 'IP: ICMP masquerading' CONFIG_IP_MASQUERADE_ICMP
fi
#ifdef CONFIG_IP_MASQUERADE
__u32 premasq_saddr = iph->saddr;
__u16 premasq_sport = 0;
- __u16 *portptr;
+ __u16 *portptr=NULL;
long premasq_len_diff = skb->len;
if (iph->protocol==IPPROTO_UDP ||
if(mtu<8)
{
/* It's wrong but it's better than nothing */
- icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
+ icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,htons(dev->mtu), dev);
ip_statistics.IpFragFails++;
return;
}
restore_flags(flags);
if (hh && atomic_dec_and_test(&hh->hh_refcnt))
kfree_s(hh, sizeof(struct hh_cache));
- kfree_s(rt, sizeof(struct rt_table));
+ kfree_s(rt, sizeof(struct rtable));
return;
}
rt->rt_next = rt_free_queue;
sti();
if (hh && atomic_dec_and_test(&hh->hh_refcnt))
kfree_s(hh, sizeof(struct hh_cache));
- kfree_s(rt, sizeof(struct rt_table));
+ kfree_s(rt, sizeof(struct rtable));
#if RT_CACHE_DEBUG >= 2
printk("rt_kick_free_queue: %08x is free\n", daddr);
#endif
if (type == ICMP_SOURCE_QUENCH)
{
- /*
- * FIXME:
- * Follow BSD for now and just reduce cong_window to 1 again.
- * It is possible that we just want to reduce the
- * window by 1/2, or that we want to reduce ssthresh by 1/2
- * here as well.
- */
- sk->cong_window = 1;
- sk->cong_count = 0;
- sk->high_seq = sk->sent_seq;
+ /* Current practice says these frames are bad, plus the drops
+ will account right anyway. If we act on this we stall doubly */
return;
}
if(sk->ip_xmit_timeout==TIME_KEEPOPEN)
tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
}
- return 1;
+ return 0;
}