say M here and read Documentation/modules.txt. This is recommended.
The module will be called rtl8139.o.
-Alternative RealTek 8139 driver (8139too) support
+Alternative RealTek 8129/8139 driver (8139too) support
CONFIG_RTL8139TOO
- This is a sophisticated, multi platform driver for RealTek 8139x
- based Fast Ethernet cards. It tries to work around several not
+ This is a sophisticated, multi-platform driver for RealTek 8129 and
+ 8139x based Fast Ethernet cards. It tries to work around several not
well documented hardware bugs in these chips and is also
- usually faster than the original driver. However, 8129 is not
- supported.
- If you are sure you have a RTL8139-based card, choose this driver.
+ usually faster than the original driver.
+ If you have one of these cards, choose this driver.
You can find more information in the Ethernet-HOWTO, available by
anonymous FTP from ftp://metalab.unc.edu/pub/Linux/docs/HOWTO .
If you want to compile this driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read Documentation/modules.txt. This is recommended.
- The module will be called rtl8139too.o .
+ The module will be called 8139too.o .
Use PIO instead of MMIO
CONFIG_8139TOO_PIO
since hardly documented by the manufacturer.
If unsure, say n.
+Support for RealTek 8129
+CONFIG_8139TOO_8129
+ To include 8129 support, say y here. The RTL-8129 chip is basically
+ the same as the RTL-8139 except for the fact that boards with this
+ chip use an external Ethernet transceiver.
+
SiS 900/7016 support
CONFIG_SIS900
This is a driver for the Fast Ethernet PCI network cards based on
answer N.
QNX4 filesystem support (EXPERIMENTAL)
-CONFIG_QNX4FS_FS
+CONFIG_QNX4FS_FS
This is the filesystem used by the operating system QNX 4. Say Y if
you intend to mount QNX hard disks or floppies. Unless you say Y to
"QNX4FS write support" below, you will only be able to read
Loopback MIDI device support
CONFIG_SOUND_VMIDI
+ Support for MIDI loopback on port 1 or 2.
Yamaha YMF7xx PCI audio (native mode)
CONFIG_SOUND_YMFPCI
of PCI sound chips. This includes the Allegro sound chip that is
a lighter version of the Maestro3.
+Intel ICH audio support
+CONFIG_SOUND_ICH
+ Support for integral audio in Intel's I/O Controller Hub (ICH) chipset,
+ as used on the 810/820/840 motherboards.
+
Are you using a crosscompiler
CONFIG_CROSSCOMPILE
Say Y here if you are compiling the kernel on a different
ignore this option.
Toshiba Laptop support
-CONFIG_TOSHIBA
- If you intend to run this the kernel on a Toshiba portable say yes
- here. This adds a driver to safely access the System Management
- Mode of the CPU on Toshiba portables. The System Management Mode
+CONFIG_TOSHIBA
+ This adds a driver to safely access the System Management Mode
+ of the CPU on Toshiba portables. The System Management Mode
is used to set the BIOS and power saving options on Toshiba portables.
For information on utilities to make use of this driver see the
- Toshiba Linux utilities website at:
- http://www.buzzard.org.uk/toshiba/
+ Toshiba Linux utilities web site at:
+ <http://www.buzzard.org.uk/toshiba/>
+
+ Say Y if you intend to run this kernel on a Toshiba portable.
+ Say N otherwise.
CPiA Video For Linux
CONFIG_VIDEO_CPIA
VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 20
-EXTRAVERSION = pre6
+EXTRAVERSION = pre8
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
+#include <asm/hwrpb.h>
+#include <asm/machvec.h>
#include "proto.h"
static alist * int_name[] = {inta_name, intl_name, ints_name, intm_name};
+static int opDEC_testing = 0;
+static int opDEC_fix = 0;
+static unsigned long opDEC_test_pc = 0;
+
+/*
+ * Probe how the firmware (SRM PALcode) reports opDEC faults.  With
+ * opDEC_testing set, we deliberately execute an instruction that is
+ * illegal on this implementation (cvttq/svm faults on EV4) and record,
+ * in opDEC_test_pc, the PC of that faulting instruction: br loads the
+ * address of the addq at label 1, and +8 lands on the cvttq itself.
+ * The trap handler compares the fault PC it receives against this
+ * value; a match means the firmware returned the faulting PC instead
+ * of the following instruction's PC (as the architecture requires),
+ * so the handler enables a +4 PC fixup (opDEC_fix).
+ * Runs under the big kernel lock so the test flag/PC are not raced.
+ */
+static void
+opDEC_check(void)
+{
+ unsigned long test_pc;
+
+ lock_kernel();
+ opDEC_testing = 1;
+
+ __asm__ __volatile__(
+ " br %0,1f\n"
+ "1: addq %0,8,%0\n"
+ " stq %0,%1\n"
+ " cvttq/svm $f31,$f31\n"
+ : "=&r"(test_pc), "=m"(opDEC_test_pc)
+ : );
+
+ opDEC_testing = 0;
+ unlock_kernel();
+}
+
static char *
assoc(int fcode, alist * a)
{
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
{
- die_if_kernel("Instruction fault", ®s, type, 0);
+ if (!opDEC_testing || type != 4) {
+ die_if_kernel("Instruction fault", ®s, type, 0);
+ }
switch (type) {
case 0: /* breakpoint */
if (ptrace_cancel_bpt(current)) {
case 4: /* opDEC */
if (implver() == IMPLVER_EV4) {
+ /* Some versions of SRM do not handle
+ the opDEC properly - they return the PC of the
+ opDEC fault, not the instruction after as the
+ Alpha architecture requires. Here we fix it up.
+ We do this by intentionally causing an opDEC
+ fault during the boot sequence and testing if
+ we get the correct PC. If not, we set a flag
+ to correct it every time through.
+ */
+ if (opDEC_testing && regs.pc == opDEC_test_pc) {
+ opDEC_fix = 4;
+ printk("opDEC fixup enabled.\n");
+ }
+
+ regs.pc += opDEC_fix;
+
/* EV4 does not implement anything except normal
rounding. Everything else will come here as
an illegal instruction. Emulate them. */
wrent(entUna, 4);
wrent(entSys, 5);
wrent(entDbg, 6);
+
+ /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
+ * a bug in the handling of the opDEC fault. Fix it up.
+ */
+ if (implver() == IMPLVER_EV4) {
+ opDEC_check();
+ }
}
#
mainmenu_name "Linux Kernel Configuration"
+define_bool CONFIG_X86 y
+
mainmenu_option next_comment
comment 'Code maturity level options'
bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
panic("could not set ID");
}
+/*
+ * Read the trigger mode of one ISA IRQ from the ELCR (Edge/Level
+ * Control Register) pair at I/O ports 0x4d0/0x4d1: one bit per IRQ,
+ * IRQs 0-7 in port 0x4d0 and 8-15 in 0x4d1.  Returns nonzero if the
+ * IRQ is configured level-sensitive, 0 if edge-triggered.
+ */
+static int __init ELCR_trigger(unsigned int irq)
+{
+ unsigned int port;
+
+ port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+}
+
static void __init construct_default_ISA_mptable(void)
{
int i, pos = 0;
+ int ELCR_fallback = 0;
const int bus_type = (mpc_default_type == 2 || mpc_default_type == 3 ||
mpc_default_type == 6) ? MP_BUS_EISA : MP_BUS_ISA;
+ /*
+ * If true, we have an ISA/PCI system with no IRQ entries
+ * in the MP table. To prevent the PCI interrupts from being set up
+ * incorrectly, we try to use the ELCR. The sanity check to see if
+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
+ * never be level sensitive, so we simply see if the ELCR agrees.
+ * If it does, we assume it's valid.
+ */
+ if (mpc_default_type == 5) {
+ printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
+
+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
+ printk("ELCR contains invalid data... not using ELCR\n");
+ else {
+ printk("Using ELCR to identify PCI interrupts\n");
+ ELCR_fallback = 1;
+ }
+ }
+
for (i = 0; i < 16; i++) {
if (!IO_APIC_IRQ(i))
continue;
mp_irqs[pos].mpc_irqtype = mp_INT;
- mp_irqs[pos].mpc_irqflag = 0; /* default */
+
+ if (ELCR_fallback) {
+ /*
+ * If the ELCR indicates a level-sensitive interrupt, we
+ * copy that information over to the MP table in the
+ * irqflag field (level sensitive, active high polarity).
+ */
+ if (ELCR_trigger(i))
+ mp_irqs[pos].mpc_irqflag = 13;
+ else
+ mp_irqs[pos].mpc_irqflag = 0;
+ }
+ else
+ mp_irqs[pos].mpc_irqflag = 0; /* default */
+
mp_irqs[pos].mpc_srcbus = 0;
mp_irqs[pos].mpc_srcbusirq = i;
mp_irqs[pos].mpc_dstapic = 0;
#undef DEBUG_ELF
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_irix_library(int fd);
+static int load_irix_library(struct file *file);
static int irix_core_dump(long signr, struct pt_regs * regs, struct file *);
extern int dump_fpu (elf_fpregset_t *);
/* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
*/
-static inline int do_load_irix_binary(struct linux_binprm * bprm,
- struct pt_regs * regs)
+static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
struct elfhdr elf_ex, interp_elf_ex;
struct dentry *interpreter_dentry;
sys_close(elf_exec_fileno);
current->personality = PER_IRIX32;
+ set_binfmt(&irix_format);
if (current->exec_domain && current->exec_domain->module)
__MOD_DEC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_DEC_USE_COUNT(current->binfmt->module);
current->exec_domain = lookup_exec_domain(current->personality);
- current->binfmt = &irix_format;
if (current->exec_domain && current->exec_domain->module)
__MOD_INC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_INC_USE_COUNT(current->binfmt->module);
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
return retval;
}
-static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_irix_binary(bprm, regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
/* This is really simpleminded and specialized - we are loading an
* a.out library that is given an ELF header.
*/
-static inline int do_load_irix_library(struct file *file)
+static int load_irix_library(struct file *file)
{
struct elfhdr elf_ex;
struct elf_phdr *elf_phdata = NULL;
return 0;
}
-static int load_irix_library(int fd)
-{
- int retval = -EACCES;
- struct file *file;
-
- MOD_INC_USE_COUNT;
- file = fget(fd);
- if (file) {
- retval = do_load_irix_library(file);
- fput(file);
- }
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
/* Called through irix_syssgi() to map an elf image given an FD,
* a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many
* phdrs there are in the USER_PHDRP array. We return the vaddr the
-/* $Id: signal.c,v 1.91.2.3 2001/06/18 12:26:13 davem Exp $
+/* $Id: signal.c,v 1.91.2.4 2001/06/19 16:49:41 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
#include <asm/pgtable.h>
static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
-static int load_aout32_library(int fd);
+static int load_aout32_library(struct file *file);
static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *);
extern void dump_thread(struct pt_regs *, struct user *);
* libraries. There is no binary dependent code anywhere else.
*/
-static inline int do_load_aout32_binary(struct linux_binprm * bprm,
- struct pt_regs * regs)
+static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
struct exec ex;
struct file * file;
}
}
beyond_if:
+ set_binfmt(&aout32_format);
if (current->exec_domain && current->exec_domain->module)
__MOD_DEC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_DEC_USE_COUNT(current->binfmt->module);
current->exec_domain = lookup_exec_domain(current->personality);
- current->binfmt = &aout32_format;
if (current->exec_domain && current->exec_domain->module)
__MOD_INC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_INC_USE_COUNT(current->binfmt->module);
set_brk(current->mm->start_brk, current->mm->brk);
return 0;
}
-
-static int
-load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_aout32_binary(bprm, regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
-static inline int
-do_load_aout32_library(int fd)
+static int load_aout32_library(struct file *file)
{
- struct file * file;
struct inode * inode;
unsigned long bss, start_addr, len;
unsigned long error;
loff_t offset = 0;
struct exec ex;
- retval = -EACCES;
- file = fget(fd);
- if (!file)
- goto out;
- if (!file->f_op)
- goto out_putf;
inode = file->f_dentry->d_inode;
retval = -ENOEXEC;
retval = 0;
out_putf:
- fput(file);
-out:
return retval;
}
-static int
-load_aout32_library(int fd)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_aout32_library(fd);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
-
__initfunc(int init_aout32_binfmt(void))
{
return register_binfmt(&aout32_format);
-/* $Id: psycho.c,v 1.85.2.12 2001/05/16 07:28:41 davem Exp $
+/* $Id: psycho.c,v 1.85.2.13 2001/06/28 01:31:12 davem Exp $
* psycho.c: Ultra/AX U2P PCI controller support.
*
* Copyright (C) 1997 David S. Miller (davem@caipfs.rutgers.edu)
/* Slot determination is only slightly complex. Handle
* the easy case first.
+ *
+ * Basically, device number zero on the top-level bus is
+ * always the PCI host controller. Slot 0 is then device 1.
+ * PBM A supports two external slots (0 and 1), and PBM B
+ * supports 4 external slots (0, 1, 2, and 3). On-board PCI
+ * devices are wired to device numbers outside of these
+ * ranges. -DaveM
*/
if(pdev->bus->number == pbm->pci_first_busno) {
- if(pbm == &pbm->parent->pbm_A)
- slot = (pdev->devfn >> 3) - 1;
- else
- slot = (pdev->devfn >> 3) - 2;
+ slot = (pdev->devfn >> 3) - 1;
} else {
/* Underneath a bridge, use slot number of parent
* bridge.
*/
- if(pbm == &pbm->parent->pbm_A)
- slot = (pdev->bus->self->devfn >> 3) - 1;
- else
- slot = (pdev->bus->self->devfn >> 3) - 2;
+ slot = (pdev->bus->self->devfn >> 3) - 1;
/* Use low slot number bits of child as IRQ line. */
line = (pdev->devfn >> 3) & 0x03;
-/* $Id: signal.c,v 1.38.2.2 2001/06/18 12:26:13 davem Exp $
+/* $Id: signal.c,v 1.38.2.3 2001/06/19 16:49:41 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
-/* $Id: signal32.c,v 1.47.2.4 2001/06/18 12:26:13 davem Exp $
+/* $Id: signal32.c,v 1.47.2.5 2001/06/19 16:49:42 davem Exp $
* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
-/* $Id: sys_sparc32.c,v 1.107.2.16 2000/12/20 02:45:00 anton Exp $
+/* $Id: sys_sparc32.c,v 1.107.2.17 2001/07/06 05:04:31 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
pt_rewind(unit);
return 0;
+ case MTWEOF:
+ pt_write_fm(unit);
+ return 0;
+
default:
printk("%s: Unimplemented mt_op %d\n",PT.name,
mtop.mt_op);
i=verify_area(VERIFY_READ, (void *) arg, sizeof(struct cdrom_read_audio));
if (i) RETURN_UP(i);
copy_from_user(&read_audio, (void *) arg, sizeof(struct cdrom_read_audio));
- if (read_audio.nframes>D_S[d].sbp_audsiz) RETURN_UP(-EINVAL);
+ if (read_audio.nframes < 0 || read_audio.nframes>D_S[d].sbp_audsiz) RETURN_UP(-EINVAL);
i=verify_area(VERIFY_WRITE, read_audio.buf,
read_audio.nframes*CD_FRAMESIZE_RAW);
if (i) RETURN_UP(i);
tristate ' Acquire SBC Watchdog Timer' CONFIG_ACQUIRE_WDT
tristate ' Advantech SBC Watchdog Timer' CONFIG_ADVANTECH_WDT
tristate ' Berkshire Products PC Watchdog' CONFIG_PCWATCHDOG
- tristate ' Intel i810 TCO timer / Watchdog' CONFIG_I810_TCO
+ tristate ' Intel i810 TCO timer / Watchdog' CONFIG_I810_TCO
tristate ' Mixcom Watchdog' CONFIG_MIXCOMWD
tristate ' SBC-60XX Watchdog Timer' CONFIG_60XX_WDT
tristate ' Software Watchdog' CONFIG_SOFT_WATCHDOG
bool ' AMD Irongate support' CONFIG_AGP_AMD
bool ' Generic SiS support' CONFIG_AGP_SIS
bool ' ALI M1541 support' CONFIG_AGP_ALI
+ bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS
fi
source drivers/char/drm/Config.in
fi
break;
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
return 0;
}
break;
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
return 0;
}
u32 mode;
enum chipset_type type;
enum aper_size_type size_type;
- u32 *key_list;
+ unsigned long *key_list;
atomic_t current_memory_agp;
atomic_t agp_in_use;
int max_memory_agp; /* in number of pages */
int (*remove_memory) (agp_memory *, off_t, int);
agp_memory *(*alloc_by_type) (size_t, int);
void (*free_by_type) (agp_memory *);
+ unsigned long (*agp_alloc_page) (void);
+ void (*agp_destroy_page) (unsigned long);
};
#define OUTREG32(mmap, addr, val) writel((val),(mmap + (addr)))
#define OUTREG16(mmap, addr, val) writew((val),(mmap + (addr)))
-#define OUTREG8 (mmap, addr, val) writeb((val),(mmap + (addr)))
+#define OUTREG8(mmap, addr, val) writeb((val),(mmap + (addr)))
#define INREG32(mmap, addr) readl(mmap + (addr))
#define INREG16(mmap, addr) readw(mmap + (addr))
-#define INREG8 (mmap, addr) readb(mmap + (addr))
+#define INREG8(mmap, addr) readb(mmap + (addr))
#define CACHE_FLUSH agp_bridge.cache_flush
#define A_SIZE_8(x) ((aper_size_info_8 *) x)
#ifndef PCI_DEVICE_ID_VIA_8363_0
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#endif
+#ifndef PCI_DEVICE_ID_AL_M1621_0
+#define PCI_DEVICE_ID_AL_M1621_0 0x1621
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1631_0
+#define PCI_DEVICE_ID_AL_M1631_0 0x1631
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1632_0
+#define PCI_DEVICE_ID_AL_M1632_0 0x1632
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1641_0
+#define PCI_DEVICE_ID_AL_M1641_0 0x1641
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1647_0
+#define PCI_DEVICE_ID_AL_M1647_0 0x1647
+#endif
+#ifndef PCI_DEVICE_ID_AL_M1651_0
+#define PCI_DEVICE_ID_AL_M1651_0 0x1651
+#endif
/* intel register */
#define ALI_AGPCTRL 0xb8
#define ALI_ATTBASE 0xbc
#define ALI_TLBCTRL 0xc0
+#define ALI_TAGCTRL 0xc4
+#define ALI_CACHE_FLUSH_CTRL 0xD0
+#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
+#define ALI_CACHE_FLUSH_EN 0x100
+
+
+/* Serverworks Registers */
+#define SVWRKS_APSIZE 0x10
+#define SVWRKS_SIZE_MASK 0xfe000000
+
+#define SVWRKS_MMBASE 0x14
+#define SVWRKS_CACHING 0x4b
+#define SVWRKS_FEATURE 0x68
+
+/* func 1 registers */
+#define SVWRKS_AGP_ENABLE 0x60
+#define SVWRKS_COMMAND 0x04
+
+/* Memory mapped registers */
+#define SVWRKS_GART_CACHE 0x02
+#define SVWRKS_GATTBASE 0x04
+#define SVWRKS_TLBFLUSH 0x10
+#define SVWRKS_POSTFLUSH 0x14
+#define SVWRKS_DIRFLUSH 0x0c
#endif /* _AGP_BACKEND_PRIV_H */
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/init.h>
{
#if defined(__i386__)
asm volatile ("wbinvd":::"memory");
-#elif defined(__alpha__)
+#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__)
/* ??? I wonder if we'll really need to flush caches, or if the
core logic can manage to keep the system coherent. The ARM
speaks only of using `cflush' to get things in memory in
MOD_DEC_USE_COUNT;
}
-/*
- * Basic Page Allocation Routines -
- * These routines handle page allocation
- * and by default they reserve the allocated
- * memory. They also handle incrementing the
- * current_memory_agp value, Which is checked
- * against a maximum value.
- */
-
-static unsigned long agp_alloc_page(void)
-{
- void *pt;
-
- pt = (void *) __get_free_page(GFP_KERNEL);
- if (pt == NULL) {
- return 0;
- }
- atomic_inc(&mem_map[MAP_NR(pt)].count);
- set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
- atomic_inc(&agp_bridge.current_memory_agp);
- return (unsigned long) pt;
-}
-
-static void agp_destroy_page(unsigned long page)
-{
- void *pt = (void *) page;
-
- if (pt == NULL) {
- return;
- }
- atomic_dec(&mem_map[MAP_NR(pt)].count);
- clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
-#if 0
- wake_up(&mem_map[MAP_NR(pt)].wait);
-#endif
- free_page((unsigned long) pt);
- atomic_dec(&agp_bridge.current_memory_agp);
-}
-
-/* End Basic Page Allocation Routines */
-
/*
* Generic routines for handling agp_memory structures -
* They use the basic page allocation routines to do the
if (curr->page_count != 0) {
for (i = 0; i < curr->page_count; i++) {
curr->memory[i] &= ~(0x00000fff);
- agp_destroy_page((unsigned long)
+ agp_bridge.agp_destroy_page((unsigned long)
phys_to_virt(curr->memory[i]));
}
}
return NULL;
}
for (i = 0; i < page_count; i++) {
- new->memory[i] = agp_alloc_page();
+ new->memory[i] = agp_bridge.agp_alloc_page();
if (new->memory[i] == 0) {
/* Free this structure */
kfree(curr);
}
+/*
+ * Basic Page Allocation Routines -
+ * These routines handle page allocation
+ * and by default they reserve the allocated
+ * memory. They also handle incrementing the
+ * current_memory_agp value, Which is checked
+ * against a maximum value.
+ */
+
+/*
+ * Default per-page allocator for AGP memory.  Grabs one free page,
+ * raises its reference count and sets PG_locked so the VM leaves it
+ * alone while it is mapped into the aperture, and bumps the
+ * current_memory_agp usage counter.  Returns the page's kernel
+ * virtual address, or 0 on allocation failure.
+ */
+static unsigned long agp_generic_alloc_page(void)
+{
+ void *pt;
+
+ pt = (void *) __get_free_page(GFP_KERNEL);
+ if (pt == NULL) {
+ return 0;
+ }
+ atomic_inc(&mem_map[MAP_NR(pt)].count);
+ set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ atomic_inc(&agp_bridge.current_memory_agp);
+ return (unsigned long) pt;
+}
+
+/*
+ * Default per-page release: undoes agp_generic_alloc_page.  Drops the
+ * extra reference, clears PG_locked, frees the page and decrements the
+ * current_memory_agp usage counter.  A zero/NULL page is a no-op.
+ */
+static void agp_generic_destroy_page(unsigned long page)
+{
+ void *pt = (void *) page;
+
+ if (pt == NULL) {
+ return;
+ }
+ atomic_dec(&mem_map[MAP_NR(pt)].count);
+ clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ free_page((unsigned long) pt);
+ atomic_dec(&agp_bridge.current_memory_agp);
+}
+
+/* End Basic Page Allocation Routines */
+
void agp_enable(u32 mode)
{
agp_bridge.agp_enable(mode);
return NULL;
}
MOD_INC_USE_COUNT;
- new->memory[0] = agp_alloc_page();
+ new->memory[0] = agp_bridge.agp_alloc_page();
if (new->memory[0] == 0) {
/* Free this structure */
{
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
- agp_destroy_page((unsigned long)
+ agp_bridge.agp_destroy_page((unsigned long)
phys_to_virt(curr->memory[0]));
vfree(curr->memory);
}
agp_bridge.remove_memory = intel_i810_remove_entries;
agp_bridge.alloc_by_type = intel_i810_alloc_by_type;
agp_bridge.free_by_type = intel_i810_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
return 0;
}
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
return 0;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
return 0;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
return 0;
}
agp_bridge.remove_memory = amd_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
return 0;
u32 temp;
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000090));
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000010));
+// clear tag
+ pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
+ ((temp & 0xfffffff0) | 0x00000001|0x00000002));
}
static void ali_cleanup(void)
previous_size = A_SIZE_32(agp_bridge.previous_size);
pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000090));
+// clear tag
+ pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL,
+ ((temp & 0xffffff00) | 0x00000001|0x00000002));
+
+ pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
- previous_size->size_value);
+ ((temp & 0x00000ff0) | previous_size->size_value));
}
static int ali_configure(void)
current_size = A_SIZE_32(agp_bridge.current_size);
/* aperture size and gatt addr */
- pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE,
- agp_bridge.gatt_bus_addr | current_size->size_value);
+ pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+ temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000))
+ | (current_size->size_value & 0xf));
+ pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp);
/* tlb control */
- pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
- pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
- ((temp & 0xffffff00) | 0x00000010));
+
+ /*
+ * Question: Jeff, ALi's patch deletes this:
+ *
+ * pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+ * pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL,
+ * ((temp & 0xffffff00) | 0x00000010));
+ *
+ * and replaces it with the following, which seems to duplicate the
+ * next couple of lines below it. I suspect this was an oversight,
+ * but you might want to check up on this?
+ */
+
+ pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* address to map to */
pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp);
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+#if 0
+ if (agp_bridge.type == ALI_M1541) {
+ u32 nlvm_addr = 0;
+
+ switch (current_size->size_value) {
+ case 0: break;
+ case 1: nlvm_addr = 0x100000;break;
+ case 2: nlvm_addr = 0x200000;break;
+ case 3: nlvm_addr = 0x400000;break;
+ case 4: nlvm_addr = 0x800000;break;
+ case 6: nlvm_addr = 0x1000000;break;
+ case 7: nlvm_addr = 0x2000000;break;
+ case 8: nlvm_addr = 0x4000000;break;
+ case 9: nlvm_addr = 0x8000000;break;
+ case 10: nlvm_addr = 0x10000000;break;
+ default: break;
+ }
+ nlvm_addr--;
+ nlvm_addr&=0xfff00000;
+
+ nlvm_addr+= agp_bridge.gart_bus_addr;
+ nlvm_addr|=(agp_bridge.gart_bus_addr>>12);
+ printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
+ }
+#endif
+
+ pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp);
+ temp &= 0xffffff7f; //enable TLB
+ pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp);
+
return 0;
}
return addr | agp_bridge.masks[0].mask;
}
+/*
+ * Cache flush hook for ALi chipsets.  Always performs the generic CPU
+ * cache flush; on the M1541 it additionally walks every page of the
+ * GATT (1 << page_order pages of the current aperture size) and pokes
+ * each page's bus address, with ALI_CACHE_FLUSH_EN set, into the
+ * chipset's ALI_CACHE_FLUSH_CTRL config register — presumably to force
+ * the chipset to invalidate its cached copy of that GATT page
+ * (NOTE(review): exact register semantics per ALi datasheet).
+ */
+static void ali_cache_flush(void)
+{
+ global_cache_flush();
+
+ if (agp_bridge.type == ALI_M1541) {
+ int i, page_count;
+ u32 temp;
+
+ page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order;
+ for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
+ pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ (agp_bridge.gatt_bus_addr + i)) |
+ ALI_CACHE_FLUSH_EN));
+ }
+ }
+}
+
+/*
+ * ALi-specific page allocator.  Same bookkeeping as
+ * agp_generic_alloc_page (refcount, PG_locked, current_memory_agp),
+ * then flushes CPU caches and, on the M1541, writes the new page's
+ * physical address into ALI_CACHE_FLUSH_CTRL with ALI_CACHE_FLUSH_EN
+ * so the chipset drops any stale cached data for it.  Returns the
+ * page's kernel virtual address, or 0 on failure.
+ */
+static unsigned long ali_alloc_page(void)
+{
+ void *pt;
+ u32 temp;
+
+ pt = (void *) __get_free_page(GFP_KERNEL);
+ if (pt == NULL)
+ return 0;
+
+ atomic_inc(&mem_map[MAP_NR(pt)].count);
+ set_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ atomic_inc(&agp_bridge.current_memory_agp);
+
+ global_cache_flush();
+
+ if (agp_bridge.type == ALI_M1541) {
+ pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ virt_to_phys((void *)pt)) |
+ ALI_CACHE_FLUSH_EN ));
+ }
+ return (unsigned long) pt;
+}
+
+/*
+ * ALi-specific page release: mirror of ali_alloc_page.  Flushes CPU
+ * caches and, on the M1541, issues a chipset cache flush for the
+ * page's physical address before dropping the reference, clearing
+ * PG_locked, freeing the page and decrementing current_memory_agp.
+ * A zero/NULL page is a no-op.
+ */
+static void ali_destroy_page(unsigned long page)
+{
+ u32 temp;
+ void *pt = (void *) page;
+
+ if (pt == NULL)
+ return;
+
+ global_cache_flush();
+
+ if (agp_bridge.type == ALI_M1541) {
+ pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp);
+ pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL,
+ (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
+ virt_to_phys((void *)pt)) |
+ ALI_CACHE_FLUSH_EN));
+ }
+
+ atomic_dec(&mem_map[MAP_NR(pt)].count);
+ clear_bit(PG_locked, &mem_map[MAP_NR(pt)].flags);
+ free_page((unsigned long) pt);
+ atomic_dec(&agp_bridge.current_memory_agp);
+}
/* Setup function */
static gatt_mask ali_generic_masks[] =
agp_bridge.tlb_flush = ali_tlbflush;
agp_bridge.mask_memory = ali_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
- agp_bridge.cache_flush = global_cache_flush;
+ agp_bridge.cache_flush = ali_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = agp_generic_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = ali_alloc_page;
+ agp_bridge.agp_destroy_page = ali_destroy_page;
return 0;
#endif /* CONFIG_AGP_ALI */
+#ifdef CONFIG_AGP_SWORKS
+typedef struct _serverworks_page_map {
+ unsigned long *real;
+ unsigned long *remapped;
+} serverworks_page_map;
-/* per-chipset initialization data.
- * note -- all chipsets for a single vendor MUST be grouped together
- */
-static struct {
- unsigned short device_id; /* first, to make table easier to read */
- unsigned short vendor_id;
- enum chipset_type chipset;
- const char *vendor_name;
- const char *chipset_name;
- int (*chipset_setup) (struct pci_dev *pdev);
-} agp_bridge_info[] __initdata = {
+static struct _serverworks_private {
+ struct pci_dev *svrwrks_dev; /* device one */
+ volatile u8 *registers;
+ serverworks_page_map **gatt_pages;
+ int num_tables;
+ serverworks_page_map scratch_dir;
-#ifdef CONFIG_AGP_ALI
- { PCI_DEVICE_ID_AL_M1541_0,
- PCI_VENDOR_ID_AL,
- ALI_M1541,
- "Ali",
- "M1541",
- ali_generic_setup },
- { 0,
- PCI_VENDOR_ID_AL,
- ALI_GENERIC,
- "Ali",
- "Generic",
- ali_generic_setup },
-#endif /* CONFIG_AGP_ALI */
+ int gart_addr_ofs;
+ int mm_addr_ofs;
+} serverworks_private;
-#ifdef CONFIG_AGP_AMD
- { PCI_DEVICE_ID_AMD_IRONGATE_0,
- PCI_VENDOR_ID_AMD,
- AMD_IRONGATE,
- "AMD",
- "Irongate",
- amd_irongate_setup },
- { 0,
- PCI_VENDOR_ID_AMD,
- AMD_GENERIC,
- "AMD",
- "Generic",
- amd_irongate_setup },
-#endif /* CONFIG_AGP_AMD */
+static int serverworks_create_page_map(serverworks_page_map *page_map)
+{
+ int i;
-#ifdef CONFIG_AGP_INTEL
- { PCI_DEVICE_ID_INTEL_82443LX_0,
- PCI_VENDOR_ID_INTEL,
- INTEL_LX,
- "Intel",
- "440LX",
- intel_generic_setup },
- { PCI_DEVICE_ID_INTEL_82443BX_0,
- PCI_VENDOR_ID_INTEL,
- INTEL_BX,
- "Intel",
- "440BX",
- intel_generic_setup },
- { PCI_DEVICE_ID_INTEL_82443GX_0,
- PCI_VENDOR_ID_INTEL,
- INTEL_GX,
- "Intel",
- "440GX",
- intel_generic_setup },
- /* could we add support for PCI_DEVICE_ID_INTEL_815_1 too ? */
- { PCI_DEVICE_ID_INTEL_815_0,
- PCI_VENDOR_ID_INTEL,
- INTEL_I815,
- "Intel",
- "i815",
- intel_generic_setup },
- { PCI_DEVICE_ID_INTEL_840_0,
- PCI_VENDOR_ID_INTEL,
- INTEL_I840,
- "Intel",
- "i840",
- intel_840_setup },
- { 0,
- PCI_VENDOR_ID_INTEL,
- INTEL_GENERIC,
- "Intel",
- "Generic",
- intel_generic_setup },
-#endif /* CONFIG_AGP_INTEL */
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL) {
+ return -ENOMEM;
+ }
+ set_bit(PG_reserved, &mem_map[MAP_NR(page_map->real)].flags);
+ CACHE_FLUSH();
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ PAGE_SIZE);
+ if (page_map->remapped == NULL) {
+ clear_bit(PG_reserved,
+ &mem_map[MAP_NR(page_map->real)].flags);
+ free_page((unsigned long) page_map->real);
+ page_map->real = NULL;
+ return -ENOMEM;
+ }
+ CACHE_FLUSH();
-#ifdef CONFIG_AGP_SIS
- { PCI_DEVICE_ID_SI_630,
- PCI_VENDOR_ID_SI,
- SIS_GENERIC,
- "SiS",
- "630",
- sis_generic_setup },
- { PCI_DEVICE_ID_SI_540,
- PCI_VENDOR_ID_SI,
- SIS_GENERIC,
- "SiS",
- "540",
- sis_generic_setup },
- { PCI_DEVICE_ID_SI_620,
- PCI_VENDOR_ID_SI,
- SIS_GENERIC,
- "SiS",
- "620",
- sis_generic_setup },
- { PCI_DEVICE_ID_SI_530,
- PCI_VENDOR_ID_SI,
- SIS_GENERIC,
- "SiS",
- "530",
- sis_generic_setup },
- { PCI_DEVICE_ID_SI_630,
- PCI_VENDOR_ID_SI,
- SIS_GENERIC,
- "SiS",
- "Generic",
- sis_generic_setup },
- { PCI_DEVICE_ID_SI_540,
+ for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ page_map->remapped[i] = agp_bridge.scratch_page;
+ }
+
+ return 0;
+}
+
+static void serverworks_free_page_map(serverworks_page_map *page_map)
+{
+ iounmap(page_map->remapped);
+ clear_bit(PG_reserved,
+ &mem_map[MAP_NR(page_map->real)].flags);
+ free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+ int i;
+ serverworks_page_map **tables;
+ serverworks_page_map *entry;
+
+ tables = serverworks_private.gatt_pages;
+ for(i = 0; i < serverworks_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL) {
+ serverworks_free_page_map(entry);
+ }
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+ serverworks_page_map **tables;
+ serverworks_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kmalloc((nr_tables + 1) * sizeof(serverworks_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL) {
+ return -ENOMEM;
+ }
+ memset(tables, 0, sizeof(serverworks_page_map *) * (nr_tables + 1));
+ for (i = 0; i < nr_tables; i++) {
+ entry = kmalloc(sizeof(serverworks_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ memset(entry, 0, sizeof(serverworks_page_map));
+ tables[i] = entry;
+ retval = serverworks_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ serverworks_private.num_tables = nr_tables;
+ serverworks_private.gatt_pages = tables;
+
+ if (retval != 0) serverworks_free_gatt_pages();
+
+ return retval;
+}
+
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
+
+static int serverworks_create_gatt_table(void)
+{
+ aper_size_info_lvl2 *value;
+ serverworks_page_map page_dir;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge.current_size);
+ retval = serverworks_create_page_map(&page_dir);
+ if (retval != 0) {
+ return retval;
+ }
+ retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+ /* Create a fake scratch directory */
+ for(i = 0; i < 1024; i++) {
+ serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page;
+ page_dir.remapped[i] =
+ virt_to_bus(serverworks_private.scratch_dir.real);
+ page_dir.remapped[i] |= 0x00000001;
+ }
+
+ retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge.gatt_table_real = page_dir.real;
+ agp_bridge.gatt_table = page_dir.remapped;
+ agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the alpha, b/c its
+ * used to program the agp master not the cpu
+ */
+
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* Calculate the agp offset */
+
+ for(i = 0; i < value->num_entries / 1024; i++) {
+ page_dir.remapped[i] =
+ virt_to_bus(serverworks_private.gatt_pages[i]->real);
+ page_dir.remapped[i] |= 0x00000001;
+ }
+
+ return 0;
+}
+
+static int serverworks_free_gatt_table(void)
+{
+ serverworks_page_map page_dir;
+
+ page_dir.real = agp_bridge.gatt_table_real;
+ page_dir.remapped = agp_bridge.gatt_table;
+
+ serverworks_free_gatt_pages();
+ serverworks_free_page_map(&page_dir);
+ return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ u32 temp2;
+ aper_size_info_lvl2 *values;
+
+ values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp);
+ pci_write_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ 0xfe000000);
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp2);
+ pci_write_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ temp);
+ temp2 &= SVWRKS_SIZE_MASK;
+
+ for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+ if (temp2 == values[i].size_value) {
+ agp_bridge.previous_size =
+ agp_bridge.current_size = (void *) (values + i);
+
+ agp_bridge.aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int serverworks_configure(void)
+{
+ aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u8 enable_reg;
+ u8 cap_ptr;
+ u32 cap_id;
+ u16 cap_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge.current_size);
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs,
+ &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+ OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
+
+ OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE,
+ agp_bridge.gatt_bus_addr);
+
+ cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
+ cap_reg &= ~0x0007;
+ cap_reg |= 0x4;
+ OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
+
+ pci_read_config_byte(serverworks_private.svrwrks_dev,
+ SVWRKS_AGP_ENABLE, &enable_reg);
+ enable_reg |= 0x1; /* Agp Enable bit */
+ pci_write_config_byte(serverworks_private.svrwrks_dev,
+ SVWRKS_AGP_ENABLE, enable_reg);
+ agp_bridge.tlb_flush(NULL);
+
+ pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr);
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ agp_bridge.capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 4,
+ &agp_bridge.mode);
+
+ pci_read_config_byte(agp_bridge.dev,
+ SVWRKS_CACHING,
+ &enable_reg);
+ enable_reg &= ~0x3;
+ pci_write_config_byte(agp_bridge.dev,
+ SVWRKS_CACHING,
+ enable_reg);
+
+ pci_read_config_byte(agp_bridge.dev,
+ SVWRKS_FEATURE,
+ &enable_reg);
+ enable_reg |= (1<<6);
+ pci_write_config_byte(agp_bridge.dev,
+ SVWRKS_FEATURE,
+ enable_reg);
+
+ return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+ iounmap((void *) serverworks_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses
+ * written to the GATT, and flushing them individually. However
+ * currently it just flushes the whole table. Which is probably
+ * more efficient, since agp_memory blocks can be a large number of
+ * entries.
+ */
+
+static void serverworks_tlbflush(agp_memory * temp)
+{
+ unsigned long end;
+
+ OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01);
+ end = jiffies + 3*HZ;
+ while(INREG8(serverworks_private.registers,
+ SVWRKS_POSTFLUSH) == 0x01) {
+ if((signed)(end - jiffies) <= 0) {
+ printk(KERN_ERR "Posted write buffer flush took more "
+ "than 3 seconds\n");
+ }
+ }
+ OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001);
+ end = jiffies + 3*HZ;
+ while(INREG32(serverworks_private.registers,
+ SVWRKS_DIRFLUSH) == 0x00000001) {
+ if((signed)(end - jiffies) <= 0) {
+ printk(KERN_ERR "TLB flush took more "
+ "than 3 seconds\n");
+ }
+ }
+}
+
+static unsigned long serverworks_mask_memory(unsigned long addr, int type)
+{
+ /* Only type 0 is supported by the serverworks chipsets */
+
+ return addr | agp_bridge.masks[0].mask;
+}
+
+static int serverworks_insert_memory(agp_memory * mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ CACHE_FLUSH();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
+ }
+ agp_bridge.tlb_flush(mem);
+ return 0;
+}
+
+static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
+ CACHE_FLUSH();
+ agp_bridge.tlb_flush(mem);
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ cur_gatt[GET_GATT_OFF(addr)] =
+ (unsigned long) agp_bridge.scratch_page;
+ }
+
+ agp_bridge.tlb_flush(mem);
+ return 0;
+}
+
+static gatt_mask serverworks_masks[] =
+{
+ {0x00000001, 0}
+};
+
+static aper_size_info_lvl2 serverworks_sizes[7] =
+{
+ {2048, 524288, 0x80000000},
+ {1024, 262144, 0xc0000000},
+ {512, 131072, 0xe0000000},
+ {256, 65536, 0xf0000000},
+ {128, 32768, 0xf8000000},
+ {64, 16384, 0xfc000000},
+ {32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(u32 mode)
+{
+ struct pci_dev *device = NULL;
+ u32 command, scratch, cap_id;
+ u8 cap_ptr;
+
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 4,
+ &command);
+
+ /*
+ * PASS1: go through all devices that claim to be
+ * AGP devices and collect their data.
+ */
+
+ while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+ device)) != NULL) {
+ pci_read_config_dword(device, 0x04, &scratch);
+
+ if (!(scratch & 0x00100000))
+ continue;
+
+ pci_read_config_byte(device, 0x34, &cap_ptr);
+
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(device,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ if (cap_ptr != 0x00) {
+ /*
+ * Ok, here we have an AGP device. Disable impossible
+ * settings, and adjust the readqueue to the minimum.
+ */
+
+ pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+ /* adjust RQ depth */
+ command =
+ ((command & ~0xff000000) |
+ min((mode & 0xff000000),
+ min((command & 0xff000000),
+ (scratch & 0xff000000))));
+
+ /* disable SBA if it's not supported */
+ if (!((command & 0x00000200) &&
+ (scratch & 0x00000200) &&
+ (mode & 0x00000200)))
+ command &= ~0x00000200;
+
+ /* disable FW */
+ command &= ~0x00000010;
+
+ command &= ~0x00000008;
+
+ if (!((command & 4) &&
+ (scratch & 4) &&
+ (mode & 4)))
+ command &= ~0x00000004;
+
+ if (!((command & 2) &&
+ (scratch & 2) &&
+ (mode & 2)))
+ command &= ~0x00000002;
+
+ if (!((command & 1) &&
+ (scratch & 1) &&
+ (mode & 1)))
+ command &= ~0x00000001;
+ }
+ }
+ /*
+ * PASS2: Figure out the 4X/2X/1X setting and enable the
+ * target (our motherboard chipset).
+ */
+
+ if (command & 4) {
+ command &= ~3; /* 4X */
+ }
+ if (command & 2) {
+ command &= ~5; /* 2X */
+ }
+ if (command & 1) {
+ command &= ~6; /* 1X */
+ }
+ command |= 0x00000100;
+
+ pci_write_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 8,
+ command);
+
+ /*
+ * PASS3: Go through all AGP devices and update the
+ * command registers.
+ */
+
+ while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+ device)) != NULL) {
+ pci_read_config_dword(device, 0x04, &scratch);
+
+ if (!(scratch & 0x00100000))
+ continue;
+
+ pci_read_config_byte(device, 0x34, &cap_ptr);
+
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(device,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ if (cap_ptr != 0x00)
+ pci_write_config_dword(device, cap_ptr + 8, command);
+ }
+}
+
+static int __init serverworks_setup (struct pci_dev *pdev)
+{
+ u32 temp;
+ u32 temp2;
+
+ serverworks_private.svrwrks_dev = pdev;
+
+ agp_bridge.masks = serverworks_masks;
+ agp_bridge.num_of_masks = 1;
+ agp_bridge.aperture_sizes = (void *) serverworks_sizes;
+ agp_bridge.size_type = LVL2_APER_SIZE;
+ agp_bridge.num_aperture_sizes = 7;
+ agp_bridge.dev_private_data = (void *) &serverworks_private;
+ agp_bridge.needs_scratch_page = TRUE;
+ agp_bridge.configure = serverworks_configure;
+ agp_bridge.fetch_size = serverworks_fetch_size;
+ agp_bridge.cleanup = serverworks_cleanup;
+ agp_bridge.tlb_flush = serverworks_tlbflush;
+ agp_bridge.mask_memory = serverworks_mask_memory;
+ agp_bridge.agp_enable = serverworks_agp_enable;
+ agp_bridge.cache_flush = global_cache_flush;
+ agp_bridge.create_gatt_table = serverworks_create_gatt_table;
+ agp_bridge.free_gatt_table = serverworks_free_gatt_table;
+ agp_bridge.insert_memory = serverworks_insert_memory;
+ agp_bridge.remove_memory = serverworks_remove_memory;
+ agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+ agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+
+ pci_read_config_dword(agp_bridge.dev,
+ SVWRKS_APSIZE,
+ &temp);
+
+ serverworks_private.gart_addr_ofs = 0x10;
+
+ if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(agp_bridge.dev,
+ SVWRKS_APSIZE + 4,
+ &temp2);
+ if(temp2 != 0) {
+ printk("Detected 64 bit aperture address, but top "
+ "bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ serverworks_private.mm_addr_ofs = 0x18;
+ } else {
+ serverworks_private.mm_addr_ofs = 0x14;
+ }
+
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs,
+ &temp);
+ if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs + 4,
+ &temp2);
+ if(temp2 != 0) {
+ printk("Detected 64 bit MMIO address, but top "
+ "bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_AGP_SWORKS */
+
+
+/* per-chipset initialization data.
+ * note -- all chipsets for a single vendor MUST be grouped together
+ */
+static struct {
+ unsigned short device_id; /* first, to make table easier to read */
+ unsigned short vendor_id;
+ enum chipset_type chipset;
+ const char *vendor_name;
+ const char *chipset_name;
+ int (*chipset_setup) (struct pci_dev *pdev);
+} agp_bridge_info[] __initdata = {
+
+#ifdef CONFIG_AGP_ALI
+ { PCI_DEVICE_ID_AL_M1541_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1541,
+ "Ali",
+ "M1541",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1621_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1621,
+ "Ali",
+ "M1621",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1631_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1631,
+ "Ali",
+ "M1631",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1632_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1632,
+ "Ali",
+ "M1632",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1641_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1641,
+ "Ali",
+ "M1641",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1647_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1647,
+ "Ali",
+ "M1647",
+ ali_generic_setup },
+ { PCI_DEVICE_ID_AL_M1651_0,
+ PCI_VENDOR_ID_AL,
+ ALI_M1651,
+ "Ali",
+ "M1651",
+ ali_generic_setup },
+ { 0,
+ PCI_VENDOR_ID_AL,
+ ALI_GENERIC,
+ "Ali",
+ "Generic",
+ ali_generic_setup },
+#endif /* CONFIG_AGP_ALI */
+
+#ifdef CONFIG_AGP_AMD
+ { PCI_DEVICE_ID_AMD_IRONGATE_0,
+ PCI_VENDOR_ID_AMD,
+ AMD_IRONGATE,
+ "AMD",
+ "Irongate",
+ amd_irongate_setup },
+ { 0,
+ PCI_VENDOR_ID_AMD,
+ AMD_GENERIC,
+ "AMD",
+ "Generic",
+ amd_irongate_setup },
+#endif /* CONFIG_AGP_AMD */
+
+#ifdef CONFIG_AGP_INTEL
+ { PCI_DEVICE_ID_INTEL_82443LX_0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_LX,
+ "Intel",
+ "440LX",
+ intel_generic_setup },
+ { PCI_DEVICE_ID_INTEL_82443BX_0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_BX,
+ "Intel",
+ "440BX",
+ intel_generic_setup },
+ { PCI_DEVICE_ID_INTEL_82443GX_0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_GX,
+ "Intel",
+ "440GX",
+ intel_generic_setup },
+ /* could we add support for PCI_DEVICE_ID_INTEL_815_1 too ? */
+ { PCI_DEVICE_ID_INTEL_815_0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_I815,
+ "Intel",
+ "i815",
+ intel_generic_setup },
+ { PCI_DEVICE_ID_INTEL_840_0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_I840,
+ "Intel",
+ "i840",
+ intel_840_setup },
+ { 0,
+ PCI_VENDOR_ID_INTEL,
+ INTEL_GENERIC,
+ "Intel",
+ "Generic",
+ intel_generic_setup },
+#endif /* CONFIG_AGP_INTEL */
+
+#ifdef CONFIG_AGP_SIS
+ { PCI_DEVICE_ID_SI_630,
+ PCI_VENDOR_ID_SI,
+ SIS_GENERIC,
+ "SiS",
+ "630",
+ sis_generic_setup },
+ { PCI_DEVICE_ID_SI_540,
+ PCI_VENDOR_ID_SI,
+ SIS_GENERIC,
+ "SiS",
+ "540",
+ sis_generic_setup },
+ { PCI_DEVICE_ID_SI_620,
+ PCI_VENDOR_ID_SI,
+ SIS_GENERIC,
+ "SiS",
+ "620",
+ sis_generic_setup },
+ { PCI_DEVICE_ID_SI_530,
+ PCI_VENDOR_ID_SI,
+ SIS_GENERIC,
+ "SiS",
+ "530",
+ sis_generic_setup },
+ { PCI_DEVICE_ID_SI_630,
+ PCI_VENDOR_ID_SI,
+ SIS_GENERIC,
+ "SiS",
+ "Generic",
+ sis_generic_setup },
+ { PCI_DEVICE_ID_SI_540,
PCI_VENDOR_ID_SI,
SIS_GENERIC,
"SiS",
while ((i < arraysize (agp_bridge_info)) &&
(agp_bridge_info[i].vendor_id == pdev->vendor)) {
if (pdev->device == agp_bridge_info[i].device_id) {
+#ifdef CONFIG_AGP_ALI
+ if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) {
+ u8 hidden_1621_id;
+
+ pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
+ switch (hidden_1621_id) {
+ case 0x31:
+ agp_bridge_info[i].chipset_name="M1631";
+ break;
+ case 0x32:
+ agp_bridge_info[i].chipset_name="M1632";
+ break;
+ case 0x41:
+ agp_bridge_info[i].chipset_name="M1641";
+ break;
+ case 0x43:
+ break;
+ case 0x47:
+ agp_bridge_info[i].chipset_name="M1647";
+ break;
+ case 0x51:
+ agp_bridge_info[i].chipset_name="M1651";
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+
printk (KERN_INFO PFX "Detected %s %s chipset\n",
agp_bridge_info[i].vendor_name,
agp_bridge_info[i].chipset_name);
if (i810_dev == NULL) {
printk(KERN_ERR PFX "agpgart: Detected an "
"Intel i815, but could not find the"
- " secondary device.\n");
+ " secondary device. Assuming a "
+ "non-integrated video card.\n");
agp_bridge.type = NOT_SUPPORTED;
return -ENODEV;
}
}
#endif /* CONFIG_AGP_I810 */
+#ifdef CONFIG_AGP_SWORKS
+ /* Everything is on func 1 here so we are hardcoding function one */
+ if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) {
+ struct pci_dev *bridge_dev;
+
+ bridge_dev = pci_find_slot ((unsigned int)dev->bus->number,
+ PCI_DEVFN(0, 1));
+ if(bridge_dev == NULL) {
+ printk(KERN_INFO PFX "agpgart: Detected a Serverworks "
+ "Chipset, but could not find the secondary "
+ "device.\n");
+ return -ENODEV;
+ }
+
+ switch (dev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_HE:
+ agp_bridge.type = SVWRKS_HE;
+ return serverworks_setup(bridge_dev);
+
+ case PCI_DEVICE_ID_SERVERWORKS_LE:
+ case 0x0007:
+ agp_bridge.type = SVWRKS_LE;
+ return serverworks_setup(bridge_dev);
+
+ default:
+ if(agp_try_unsupported) {
+ agp_bridge.type = SVWRKS_GENERIC;
+ return serverworks_setup(bridge_dev);
+ }
+ break;
+ }
+ }
+
+#endif /* CONFIG_AGP_SWORKS */
+
/* find capndx */
pci_read_config_dword(dev, 0x04, &scratch);
if (!(scratch & 0x00100000))
}
if (agp_bridge.needs_scratch_page == TRUE) {
- agp_bridge.scratch_page = agp_alloc_page();
+ agp_bridge.scratch_page = agp_bridge.agp_alloc_page();
if (agp_bridge.scratch_page == 0) {
printk(KERN_ERR PFX "unable to get memory for "
err_out:
if (agp_bridge.needs_scratch_page == TRUE) {
agp_bridge.scratch_page &= ~(0x00000fff);
- agp_destroy_page((unsigned long)
+ agp_bridge.agp_destroy_page((unsigned long)
phys_to_virt(agp_bridge.scratch_page));
}
if (got_gatt)
if (agp_bridge.needs_scratch_page == TRUE) {
agp_bridge.scratch_page &= ~(0x00000fff);
- agp_destroy_page((unsigned long)
+ agp_bridge.agp_destroy_page((unsigned long)
phys_to_virt(agp_bridge.scratch_page));
}
}
if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) {
return -EFAULT;
}
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(agp_segment))
+ return -EFAULT;
+
client = agp_find_client_by_pid(reserve.pid);
if (reserve.seg_count == 0) {
} else {
agp_segment *segment;
+ if (reserve.seg_count >= 16384)
+ return -EINVAL;
+
segment = kmalloc((sizeof(agp_segment) * reserve.seg_count),
GFP_KERNEL);
vfree(vcp);
return -EFAULT;
}
- } else if (vw.clipcount) {
+ } else if (vw.clipcount > 2048)
+ return -ENOMEM;
+ else {
if((vcp=vmalloc(sizeof(struct video_clip)*
(vw.clipcount))) == NULL)
return -ENOMEM;
case ALI_GENERIC: head->chipset = "ALi"; break;
case ALI_M1541: head->chipset = "ALi M1541"; break;
+ case ALI_M1621: head->chipset = "ALi M1621"; break;
+ case ALI_M1631: head->chipset = "ALi M1631"; break;
+ case ALI_M1632: head->chipset = "ALi M1632"; break;
+ case ALI_M1641: head->chipset = "ALi M1641"; break;
+ case ALI_M1647: head->chipset = "ALi M1647"; break;
+ case ALI_M1651: head->chipset = "ALi M1651"; break;
+ case SVWRKS_GENERIC: head->chipset = "Serverworks Generic";
+ break;
+ case SVWRKS_HE: head->chipset = "Serverworks HE"; break;
+ case SVWRKS_LE: head->chipset = "Serverworks LE"; break;
default: head->chipset = "Unknown"; break;
}
return -ENOMEM; /* May only call once for each order */
}
+ if(count < 0 || count > 4096)
+ {
+ up(&dev->struct_sem);
+ return -EINVAL;
+ }
+
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
- down(&dev->struct_sem);
queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
+ if(queue == NULL)
+ return -ENOMEM;
+
memset(queue, 0, sizeof(*queue));
+ down(&dev->struct_sem);
atomic_set(&queue->use_count, 1);
++dev->queue_count;
{
int i;
- dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+ if (!(dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER))) {
+ printk(KERN_ERR "drm_dma_setup: can't drm_alloc dev->dma");
+ return;
+ }
memset(dev->dma, 0, sizeof(*dev->dma));
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+ if(priv == NULL)
+ return -ENOMEM;
memset(priv, 0, sizeof(*priv));
+
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
-
+
+ if(count < 0 || count > 4096)
+ {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
buf_priv = buf->dev_private;
if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM;
+ /* Stopping end users copying their data to the entire kernel
+ is good.. */
+ if (d.used < 0 || d.used > buf->total)
+ return -EINVAL;
+
copy_from_user_ret(buf_priv->virtual, d.address, d.used, -EFAULT);
sarea_priv->last_dispatch = (int) hw_status[5];
if (dev->unique_len || dev->unique) return -EBUSY;
copy_from_user_ret(&u, (drm_unique_t *)arg, sizeof(u), -EFAULT);
- if (!u.unique_len) return -EINVAL;
+ if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
dev->unique_len = u.unique_len;
dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
-
+
+ /* This isn't necessarily a good limit, but we have to stop a dumb
+ 32 bit overflow problem below */
+
+ if ( count < 0 || count > 4096)
+ {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
return -ENOMEM; /* May only call once for each order */
}
+ if(count < 0 || count > 4096)
+ {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
-
+
+ /* Might be a poor limit, but take that up with XFree86
+ if it's a problem */
+
+ if(count < 0 || count > 4096)
+ {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
/* First we read the data in from the file system into a temp buffer */
+ memoff(ch);
+ restore_flags(flags);
+
if (bytesAvailable)
{ /* Begin bytesAvailable */
post_fep_init.
--------------------------------------------------------------------- */
buf = ch->tmp_buf;
- memoff(ch);
- restore_flags(flags);
} /* End from_user */
break;
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
return 0;
mixcomwd_ping();
break;
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
return 0;
}
struct moxa_str *ch;
int len, port;
unsigned long flags;
- unsigned char *temp;
ch = (struct moxa_str *) tty->driver_data;
if (ch == NULL)
return (0);
port = ch->port;
save_flags(flags);
- cli();
if (from_user) {
- copy_from_user(moxaXmitBuff, buf, count);
- temp = moxaXmitBuff;
- } else
- temp = (unsigned char *) buf;
- len = MoxaPortWriteData(port, temp, count);
- restore_flags(flags);
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+ down(&moxaBuffSem);
+ if (copy_from_user(moxaXmitBuff, buf, count)) {
+ len = -EFAULT;
+ } else {
+ cli();
+ len = MoxaPortWriteData(port, moxaXmitBuff, count);
+ restore_flags(flags);
+ }
+ up(&moxaBuffSem);
+ if (len < 0)
+ return len;
+ } else {
+ cli();
+ len = MoxaPortWriteData(port, (unsigned char *) buf, count);
+ restore_flags(flags);
+ }
+
/*********************************************
if ( !(ch->statusflags & LOWWAIT) &&
((len != count) || (MoxaPortTxFree(port) <= 100)) )
unsigned long baseAddr;
int i;
+ if(len > sizeof(moxaBuff))
+ return -EINVAL;
if(copy_from_user(moxaBuff, tmp, len))
return -EFAULT;
baseAddr = moxaBaseAddr[cardno];
current_params.tap_interval = new.tap_interval;
return 0;
}
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
switch(cmd) {
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
case WDIOC_GETSUPPORT:
i = copy_to_user((void*)arg, &ident, sizeof(ident));
char *ret;
ret = kmalloc(6, GFP_KERNEL);
+ if(ret == NULL)
+ return NULL;
while((count < 3) && (!found)) {
outb_p(0x80, current_readport + 2);
ten = send_command(0x82);
hund = send_command(0x83);
minor = send_command(0x84);
- }
-
- if (found)
sprintf(ret, "%c.%c%c%c", one, ten, hund, minor);
+ }
else
sprintf(ret, "ERROR");
#ifdef MODULE
void cleanup_module(void)
{
+ misc_deregister(&pcwd_miscdev);
/* Disable the board */
if (revision == PCWD_REVISION_C) {
outb_p(0xA5, current_readport + 3);
outb_p(0xA5, current_readport + 3);
}
- misc_deregister(&pcwd_miscdev);
if (supports_temp)
misc_deregister(&temp_miscdev);
if (!tty || !port->xmit_buf || !tmp_buf)
return 0;
- if (from_user)
+ save_flags(flags);
+ if (from_user) {
down(&tmp_buf_sem);
+ while (1) {
+ cli();
+ c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - port->xmit_head));
+ if (c <= 0)
+ break;
- save_flags(flags);
- while (1) {
- cli();
- c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0)
- break;
+ c -= copy_from_user(tmp_buf, buf, c);
+ if (!c) {
+ if (!total)
+ total = -EFAULT;
+ break;
+ }
- if (from_user) {
- copy_from_user(tmp_buf, buf, c);
+ cli();
c = MIN(c, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
SERIAL_XMIT_SIZE - port->xmit_head));
memcpy(port->xmit_buf + port->xmit_head, tmp_buf, c);
- } else
+ port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
+ port->xmit_cnt += c;
+ restore_flags(flags);
+
+ buf += c;
+ count -= c;
+ total += c;
+ }
+ up(&tmp_buf_sem);
+ } else {
+ while (1) {
+ cli();
+ c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - port->xmit_head));
+ if (c <= 0) {
+ restore_flags(flags);
+ break;
+ }
+
memcpy(port->xmit_buf + port->xmit_head, buf, c);
- port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- port->xmit_cnt += c;
- restore_flags(flags);
- buf += c;
- count -= c;
- total += c;
+ port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
+ port->xmit_cnt += c;
+ restore_flags(flags);
+
+ buf += c;
+ count -= c;
+ total += c;
+ }
}
- if (from_user)
- up(&tmp_buf_sem);
+
+ cli();
if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
!(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
rc_out(bp, CD180_IER, port->IER);
}
restore_flags(flags);
+
return total;
}
/* now scan */
for(ofs = 0; ofs != count; ofs++)
- if(buf[ofs] == 'V')
+ {
+ char c;
+ if(get_user(c, buf+ofs))
+ return -EFAULT;
+ if(c == 'V')
wdt_expect_close = 1;
-
+ }
/* Well, anyhow someone wrote to us, we should return that favour */
next_heartbeat = jiffies + WDT_HEARTBEAT;
}
switch(cmd)
{
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
case WDIOC_GETSUPPORT:
if(copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)))
return -EFAULT;
if (!tty || !port->xmit_buf || !tmp_buf)
return 0;
- if (from_user)
+ save_flags(flags);
+ if (from_user) {
down(&tmp_buf_sem);
+ while (1) {
+ c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - port->xmit_head));
+ if (c <= 0)
+ break;
- save_flags(flags);
- while (1) {
- cli();
- c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0)
- break;
+ c -= copy_from_user(tmp_buf, buf, c);
+ if (!c) {
+ if (!total)
+ total = -EFAULT;
+ break;
+ }
- if (from_user) {
- copy_from_user(tmp_buf, buf, c);
+ cli();
c = MIN(c, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
+ SERIAL_XMIT_SIZE - port->xmit_head));
memcpy(port->xmit_buf + port->xmit_head, tmp_buf, c);
- } else
+ port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
+ port->xmit_cnt += c;
+ restore_flags(flags);
+
+ buf += c;
+ count -= c;
+ total += c;
+ }
+ up(&tmp_buf_sem);
+ } else {
+ while (1) {
+ cli();
+ c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - port->xmit_head));
+ if (c <= 0) {
+ restore_flags(flags);
+ break;
+ }
memcpy(port->xmit_buf + port->xmit_head, buf, c);
- port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- port->xmit_cnt += c;
- restore_flags(flags);
- buf += c;
- count -= c;
- total += c;
+ port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
+ port->xmit_cnt += c;
+ restore_flags(flags);
+
+ buf += c;
+ count -= c;
+ total += c;
+ }
}
- if (from_user)
- up(&tmp_buf_sem);
+
+ cli();
if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
!(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
switch(cmd)
{
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
case WDIOC_GETSUPPORT:
return copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident))?-EFAULT:0;
switch(cmd)
{
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
case WDIOC_GETSUPPORT:
return copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident))?-EFAULT:0;
*
*/
-#include <sys/types.h>
-
+#include "eicon.h"
#include "sys.h"
#include "idi.h"
#include "divas.h"
#include "adapter.h"
#include "uxio.h"
-#define PCI_COMMAND 0x04
-#define PCI_STATUS 0x06
-#define PCI_LATENCY 0x0D
#define PCI_BADDR0 0x10
#define PCI_BADDR1 0x14
#define PCI_BADDR2 0x18
*/
+#include "eicon.h"
#include "sys.h"
#include "idi.h"
#include "constant.h"
#include "pr_pc.h"
#include "uxio.h"
-#include <sys/types.h>
-
-#define MAX_ADDR_LEN
#define DIVAS_LOAD_CMD 0x02
#define DIVAS_START_CMD 0x03
#define DIVAS_IRQ_RESET 0xC18
#define DIVAS_IRQ_RESET_VAL 0xFE
-#define PCI_COMMAND 0x04
-#define PCI_STATUS 0x06
-#define PCI_LATENCY 0x0D
-#define PCI_INTERRUPT 0x3C
-
#define TEST_INT_DIVAS 0x11
#define TEST_INT_DIVAS_BRI 0x12
#define TEST_INT_DIVAS_Q 0x13
void DIVA_DIDD_Read( DESCRIPTOR *table, int tablelength )
{
- bzero(table, tablelength);
+ memset(table, 0, tablelength);
if (tablelength > sizeof(DIDD_Table))
tablelength = sizeof(DIDD_Table);
}
if (tablelength > 0)
- bcopy((caddr_t)DIDD_Table, (caddr_t)table, tablelength);
+ bcopy((void *)DIDD_Table, (void *)table, tablelength);
return;
}
if (tablelength > sizeof(DIDD_Table))
tablelength = sizeof(DIDD_Table);
- bcopy((caddr_t)table, (caddr_t)DIDD_Table, tablelength);
+ bcopy((void *)table, (void *)DIDD_Table, tablelength);
return;
}
{
DESCRIPTOR d[32];
- bzero(d, sizeof(d));
+ memset(d, 0, sizeof(d));
d[0].type = IDI_DIMAINT; /* identify the DIMAINT entry */
d[0].channels = 0; /* zero channels associated with dimaint*/
int DivasCardNew(dia_card_t *card_info)
{
card_t *card;
- byte b;
static boolean_t first_call = TRUE;
boolean_t NeedISRandReset = FALSE;
return -1;
}
- b = card->cfg.irq;
-
- UxPciConfigWrite(card->hw, sizeof(b), PCI_INTERRUPT, &b);
-
if (card_info->card_type != DIA_CARD_TYPE_DIVA_SERVER_Q)
{
if ((*card->card_reset)(card))
return -1;
}
- bzero(card->e_tbl, sizeof(E_INFO) * num_entities);
+ memset(card->e_tbl, 0, sizeof(E_INFO) * num_entities);
card->e_max = num_entities;
DIVA_DIDD_Read(d, sizeof(d));
{
int i;
- bzero(card_list, sizeof(dia_card_list_t));
+ memset(card_list, 0, sizeof(dia_card_list_t));
for(i = 0; i < DivasCardNext; i++)
{
-/* $Id: eicon.h,v 1.23.6.2 2001/02/13 11:43:30 kai Exp $
+/* $Id: eicon.h,v 1.23.6.4 2001/06/09 15:14:16 kai Exp $
*
* ISDN low-level module for Eicon active ISDN-Cards.
*
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/ctype.h>
+#include <linux/pci.h>
#include <linux/isdn.h>
#include <linux/isdnif.h>
-/* $Id: eicon_mod.c,v 1.37.6.4 2001/02/16 09:09:50 armin Exp $
+/* $Id: eicon_mod.c,v 1.37.6.5 2001/07/17 19:42:31 armin Exp $
*
* ISDN lowlevel-module for Eicon active cards.
*
static eicon_card *cards = (eicon_card *) NULL; /* glob. var , contains
start of card-list */
-static char *eicon_revision = "$Revision: 1.37.6.4 $";
+static char *eicon_revision = "$Revision: 1.37.6.5 $";
extern char *eicon_pci_revision;
extern char *eicon_isa_revision;
static void
eicon_freecard(eicon_card *card) {
int i;
- struct sk_buff *skb;
for(i = 0; i < (card->nchannels + 1); i++) {
- while((skb = skb_dequeue(&card->bch[i].e.X)))
- dev_kfree_skb(skb);
- while((skb = skb_dequeue(&card->bch[i].e.R)))
- dev_kfree_skb(skb);
+ skb_queue_purge(&card->bch[i].e.X);
+ skb_queue_purge(&card->bch[i].e.R);
}
- while((skb = skb_dequeue(&card->sndq)))
- dev_kfree_skb(skb);
- while((skb = skb_dequeue(&card->rcvq)))
- dev_kfree_skb(skb);
- while((skb = skb_dequeue(&card->rackq)))
- dev_kfree_skb(skb);
- while((skb = skb_dequeue(&card->sackq)))
- dev_kfree_skb(skb);
- while((skb = skb_dequeue(&card->statq)))
- dev_kfree_skb(skb);
+ skb_queue_purge(&card->sndq);
+ skb_queue_purge(&card->rcvq);
+ skb_queue_purge(&card->rackq);
+ skb_queue_purge(&card->sackq);
+ skb_queue_purge(&card->statq);
#ifdef CONFIG_ISDN_DRV_EICON_PCI
kfree(card->sbufp);
*/
+#include "eicon.h"
#include "sys.h"
#include <stdarg.h>
#undef MAX
#undef MIN
-#include <sys/types.h>
-#include <sys/param.h>
-
#include "divas.h"
#include "divalog.h"
#include "uxio.h"
/* clear log entry */
- bzero((caddr_t) &log, sizeof(klog_t));
+ memset((void *) &log, 0, sizeof(klog_t));
log.card = -1;
log.type = KLOG_TEXT_MSG;
#include <linux/kernel.h>
#include <linux/ioport.h>
-#define HW_ID_EICON_PCI 0x1133
-#define HW_ID_DIVA_SERVER_P 0xE014
-#define HW_ID_DIVA_SERVER_B_ST 0xE010
-#define HW_ID_DIVA_SERVER_B_U 0xE013
-#define HW_ID_DIVA_SERVER_Q 0xE012
-
struct file_operations Divas_fops;
int Divas_major;
}
#endif
-void bzero(void *pDataArea, dword dwLength)
-{
- memset(pDataArea, 0, dwLength);
-}
-
int Divas4BRIInitPCI(card_t *card, dia_card_t *cfg)
{
/* Use UxPciConfigWrite routines to initialise PCI config space */
}
m_count++;
- bzero(new_klog,sizeof(KNODE));
+ memset(new_klog, 0, sizeof(KNODE));
/* Set head & tail to point to the new Msg Struct */
head=tail=new_klog;
}
m_count++;
- bzero(new_klog,sizeof(KNODE));
+ memset(new_klog, 0, sizeof(KNODE));
/* Let last Msg Struct point to new Msg Struct & inc tail */
tail->next=new_klog;
{
klog_t klog;
- bzero(&klog, sizeof(klog));
+ memset(&klog, 0, sizeof(klog));
klog.time_stamp = UxTimeGet();
};
void bcopy(void *pSource, void *pDest, dword dwLength);
-void bzero(void *pDataArea, dword dwLength);
/*
x = (card_xlog_t *) b;
- bzero(&klog, sizeof(klog));
+ memset(&klog, 0, sizeof(klog));
klog.time_stamp = (dword) x->time_hi;
klog.time_stamp = (klog.time_stamp << 16) | (dword) x->time_lo;
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/ioctl.h>
+#include <linux/vmalloc.h>
#include <linux/ppp_defs.h>
static int isdn_timer_cnt1 = 0;
static int isdn_timer_cnt2 = 0;
static int isdn_timer_cnt3 = 0;
-static int isdn_timer_cnt4 = 0;
static void
isdn_timer_funct(ulong dummy)
isdn_timer_cnt2 = 0;
if (tf & ISDN_TIMER_NETHANGUP)
isdn_net_autohup();
- if (++isdn_timer_cnt3 > ISDN_TIMER_RINGING) {
+ if (++isdn_timer_cnt3 >= ISDN_TIMER_RINGING) {
isdn_timer_cnt3 = 0;
if (tf & ISDN_TIMER_MODEMRING)
isdn_tty_modem_ring();
}
- if (++isdn_timer_cnt4 > ISDN_TIMER_KEEPINT) {
- isdn_timer_cnt4 = 0;
- if (tf & ISDN_TIMER_KEEPALIVE)
- isdn_net_slarp_out();
- }
if (tf & ISDN_TIMER_CARRIER)
isdn_tty_carrier_timeout();
}
void
isdn_timer_ctrl(int tf, int onoff)
{
- int flags;
+ int flags, old_tflags;
save_flags(flags);
cli();
isdn_timer_cnt1 = 0;
isdn_timer_cnt2 = 0;
}
+ old_tflags = dev->tflags;
if (onoff)
dev->tflags |= tf;
else
dev->tflags &= ~tf;
- if (dev->tflags)
+ if (dev->tflags && !old_tflags)
mod_timer(&dev->timer, jiffies+ISDN_TIMER_RES);
restore_flags(flags);
}
retval = -ENODEV;
goto out;
}
- if (minor < ISDN_MINOR_CTRL) {
+ if (minor <= ISDN_MINOR_BMAX) {
printk(KERN_WARNING "isdn_read minor %d obsolete!\n", minor);
drvidx = isdn_minor2drv(minor);
if (drvidx < 0) {
if (!dev->drivers)
return -ENODEV;
- if (minor < ISDN_MINOR_CTRL) {
+ if (minor <= ISDN_MINOR_BMAX) {
printk(KERN_WARNING "isdn_write minor %d obsolete!\n", minor);
drvidx = isdn_minor2drv(minor);
if (drvidx < 0) {
}
if (!dev->drivers)
return -ENODEV;
- if (minor < ISDN_MINOR_CTRL) {
+ if (minor <= ISDN_MINOR_BMAX) {
drvidx = isdn_minor2drv(minor);
if (drvidx < 0)
return -ENODEV;
}
if (!dev->channels)
goto out;
- if (minor < ISDN_MINOR_CTRL) {
+ if (minor <= ISDN_MINOR_BMAX) {
printk(KERN_WARNING "isdn_open minor %d obsolete!\n", minor);
drvidx = isdn_minor2drv(minor);
if (drvidx < 0)
goto out;
}
isdn_unlock_drivers();
- if (minor < ISDN_MINOR_CTRL)
+ if (minor <= ISDN_MINOR_BMAX)
goto out;
if (minor <= ISDN_MINOR_CTRLMAX) {
if (dev->profd == current)
*
*/
+/* Jan 2001: fix CISCO HDLC Bjoern A. Zeeb <i4l@zabbadoz.net>
+ * for info on the protocol, see
+ * http://i4l.zabbadoz.net/i4l/cisco-hdlc.txt
+ */
+
#include <linux/config.h>
#define __NO_VERSION__
#include <linux/module.h>
int isdn_net_force_dial_lp(isdn_net_local *);
static int isdn_net_start_xmit(struct sk_buff *, struct device *);
-char *isdn_net_revision = "$Revision: 1.140.6.3 $";
+static void isdn_net_ciscohdlck_connected(isdn_net_local *lp);
+static void isdn_net_ciscohdlck_disconnected(isdn_net_local *lp);
+
+char *isdn_net_revision = "$Revision: 1.140.6.6 $";
/*
* Code for raw-networking over ISDN
pops -> disconn_ind(cprot);
#endif /* CONFIG_ISDN_X25 */
if ((!lp->dialstate) && (lp->flags & ISDN_NET_CONNECTED)) {
+ if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK)
+ isdn_net_ciscohdlck_disconnected(lp);
#ifdef CONFIG_ISDN_PPP
if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
isdn_ppp_free(lp);
lp->dialstate = 0;
isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 1);
if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK)
- isdn_timer_ctrl(ISDN_TIMER_KEEPALIVE, 1);
+ isdn_net_ciscohdlck_connected(lp);
if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) {
if (lp->master) { /* is lp a slave? */
isdn_net_dev *nd = ((isdn_net_local *)lp->master->priv)->netdev;
isdn_net_hangup(&p->dev);
break;
}
- if (!strcmp(lp->dial->num, "LEASED")) {
+ if (!strncmp(lp->dial->num, "LEASED", strlen("LEASED"))) {
restore_flags(flags);
lp->dialstate = 4;
printk(KERN_INFO "%s: Open leased line ...\n", lp->name);
return htons(ETH_P_802_2);
}
-static void
-isdn_net_slarp_send(isdn_net_local *lp, int is_reply)
+
+/*
+ * CISCO HDLC keepalive specific stuff
+ */
+static struct sk_buff*
+isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
{
unsigned short hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
- struct sk_buff *skb = alloc_skb(hl + sizeof(cisco_hdr) + sizeof(cisco_slarp), GFP_ATOMIC);
- unsigned long t = (jiffies / HZ * 1000000);
- cisco_hdr *ch;
- cisco_slarp *s;
+ struct sk_buff *skb;
+ skb = alloc_skb(hl + len, GFP_ATOMIC);
if (!skb) {
- printk(KERN_WARNING
- "%s: Could not allocate SLARP reply\n", lp->name);
- return;
+ printk("isdn out of mem at %s:%d!\n", __FILE__, __LINE__);
+ return 0;
}
skb_reserve(skb, hl);
- ch = (cisco_hdr *)skb_put(skb, sizeof(cisco_hdr));
- ch->addr = CISCO_ADDR_UNICAST;
- ch->ctrl = 0;
- ch->type = htons(CISCO_TYPE_SLARP);
- s = (cisco_slarp *)skb_put(skb, sizeof(cisco_slarp));
- if (is_reply) {
- s->code = htonl(CISCO_SLARP_REPLY);
- memset(&s->slarp.reply.ifaddr, 0, sizeof(__u32));
- memset(&s->slarp.reply.netmask, 0, sizeof(__u32));
- } else {
- lp->cisco_myseq++;
- s->code = htonl(CISCO_SLARP_KEEPALIVE);
- s->slarp.keepalive.my_seq = htonl(lp->cisco_myseq);
- s->slarp.keepalive.your_seq = htonl(lp->cisco_yourseq);
- }
- s->rel = 0xffff;
- s->t1 = t >> 16;
- s->t0 = t & 0xffff;
- isdn_net_write_super(lp, skb);
+ return skb;
}
-static void
-isdn_net_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
+/* cisco hdlck device private ioctls */
+static int
+isdn_ciscohdlck_dev_ioctl(struct device *dev, struct ifreq *ifr, int cmd)
{
- cisco_slarp *s = (cisco_slarp *)skb->data;
+ isdn_net_local *lp = (isdn_net_local *) dev->priv;
+ unsigned long len = 0;
+ unsigned long expires = 0;
+ int tmp = 0;
+ int period = lp->cisco_keepalive_period;
+ char debserint = lp->cisco_debserint;
+ int rc = 0;
+
+ if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
+ return -EINVAL;
- switch (ntohl(s->code)) {
- case CISCO_SLARP_REQUEST:
- isdn_net_slarp_send(lp, 1);
+ switch (cmd) {
+ /* get/set keepalive period */
+ case SIOCGKEEPPERIOD:
+ len = (unsigned long)sizeof(lp->cisco_keepalive_period);
+ if (copy_to_user((char *)ifr->ifr_ifru.ifru_data,
+ (int *)&lp->cisco_keepalive_period, len))
+ rc = -EFAULT;
break;
- case CISCO_SLARP_REPLY:
- /* Ignore replies */
+ case SIOCSKEEPPERIOD:
+ tmp = lp->cisco_keepalive_period;
+ len = (unsigned long)sizeof(lp->cisco_keepalive_period);
+ if (copy_from_user((int *)&period,
+ (char *)ifr->ifr_ifru.ifru_data, len))
+ rc = -EFAULT;
+ if ((period > 0) && (period <= 32767))
+ lp->cisco_keepalive_period = period;
+ else
+ rc = -EINVAL;
+ if (!rc && (tmp != lp->cisco_keepalive_period)) {
+ expires = (unsigned long)(jiffies +
+ lp->cisco_keepalive_period * HZ);
+ mod_timer(&lp->cisco_timer, expires);
+ printk(KERN_INFO "%s: Keepalive period set "
+ "to %d seconds.\n",
+ lp->name, lp->cisco_keepalive_period);
+ }
break;
- case CISCO_SLARP_KEEPALIVE:
- lp->cisco_yourseq = s->slarp.keepalive.my_seq;
- if (ntohl(s->slarp.keepalive.my_seq == lp->cisco_myseq)) {
- if (lp->cisco_loop++ == 2) {
- printk(KERN_WARNING "%s: Keepalive Loop\n",
- lp->name);
- lp->cisco_myseq ^= jiffies;
- }
- } else
- lp->cisco_loop = 0;
+
+ /* get/set debugging */
+ case SIOCGDEBSERINT:
+ len = (unsigned long)sizeof(lp->cisco_debserint);
+ if (copy_to_user((char *)ifr->ifr_ifru.ifru_data,
+ (char *)&lp->cisco_debserint, len))
+ rc = -EFAULT;
+ break;
+ case SIOCSDEBSERINT:
+ len = (unsigned long)sizeof(lp->cisco_debserint);
+ if (copy_from_user((char *)&debserint,
+ (char *)ifr->ifr_ifru.ifru_data, len))
+ rc = -EFAULT;
+ if ((debserint >= 0) && (debserint <= 64))
+ lp->cisco_debserint = debserint;
+ else
+ rc = -EINVAL;
+ break;
+
+ default:
+ rc = -EINVAL;
break;
}
- kfree_skb(skb);
+ return (rc);
}
-/*
- * Called every 10 sec. via timer-interrupt if
- * any network-interface has Cisco-Keepalive-Encapsulation
- * and is online.
- * Send Keepalive-Packet and re-schedule.
- */
-void
-isdn_net_slarp_out(void)
+/* called via cisco_timer.function */
+static void
+isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
{
- isdn_net_dev *p = dev->netdev;
- int anymore = 0;
+ isdn_net_local *lp = (isdn_net_local *) data;
+ struct sk_buff *skb;
+ unsigned char *p;
+ unsigned long last_cisco_myseq = lp->cisco_myseq;
+ int myseq_diff = 0;
- while (p) {
- isdn_net_local *l = p->local;
- if ((l->p_encap == ISDN_NET_ENCAP_CISCOHDLCK) &&
- (l->flags & ISDN_NET_CONNECTED) &&
- (!l->dialstate) ) {
- anymore = 1;
- isdn_net_slarp_send(l, 0);
+ if (!(lp->flags & ISDN_NET_CONNECTED) || lp->dialstate) {
+ printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
+ return;
+ }
+ lp->cisco_myseq++;
+
+ myseq_diff = (lp->cisco_myseq - lp->cisco_mineseen);
+ if ((lp->cisco_line_state) && ((myseq_diff >= 3)||(myseq_diff <= -3))) {
+ /* line up -> down */
+ lp->cisco_line_state = 0;
+ printk (KERN_WARNING
+ "UPDOWN: Line protocol on Interface %s,"
+ " changed state to down\n", lp->name);
+ /* should stop routing higher-level data across */
+ } else if ((!lp->cisco_line_state) &&
+ (myseq_diff >= 0) && (myseq_diff <= 2)) {
+ /* line down -> up */
+ lp->cisco_line_state = 1;
+ printk (KERN_WARNING
+ "UPDOWN: Line protocol on Interface %s,"
+ " changed state to up\n", lp->name);
+ /* restart routing higher-level data across */
+ }
+
+ if (lp->cisco_debserint)
+ printk (KERN_DEBUG "%s: HDLC "
+ "myseq %lu, mineseen %lu%c, yourseen %lu, %s\n",
+ lp->name, last_cisco_myseq, lp->cisco_mineseen,
+ ((last_cisco_myseq == lp->cisco_mineseen) ? '*' : 040),
+ lp->cisco_yourseq,
+ ((lp->cisco_line_state) ? "line up" : "line down"));
+
+ skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
+ if (!skb)
+ return;
+
+ p = skb_put(skb, 4 + 14);
+
+ /* cisco header */
+ p += put_u8 (p, CISCO_ADDR_UNICAST);
+ p += put_u8 (p, CISCO_CTRL);
+ p += put_u16(p, CISCO_TYPE_SLARP);
+
+ /* slarp keepalive */
+ p += put_u32(p, CISCO_SLARP_KEEPALIVE);
+ p += put_u32(p, lp->cisco_myseq);
+ p += put_u32(p, lp->cisco_yourseq);
+ p += put_u16(p, 0xffff); // reliability, always 0xffff
+
+ isdn_net_write_super(lp, skb);
+
+ lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
+
+ add_timer(&lp->cisco_timer);
+}
+
+static void
+isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
+{
+ struct sk_buff *skb;
+ unsigned char *p;
+
+ skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
+ if (!skb)
+ return;
+
+ p = skb_put(skb, 4 + 14);
+
+ /* cisco header */
+ p += put_u8 (p, CISCO_ADDR_UNICAST);
+ p += put_u8 (p, CISCO_CTRL);
+ p += put_u16(p, CISCO_TYPE_SLARP);
+
+ /* slarp request */
+ p += put_u32(p, CISCO_SLARP_REQUEST);
+ p += put_u32(p, 0); // address
+ p += put_u32(p, 0); // netmask
+ p += put_u16(p, 0); // unused
+
+ isdn_net_write_super(lp, skb);
+}
+
+static void
+isdn_net_ciscohdlck_connected(isdn_net_local *lp)
+{
+ lp->cisco_myseq = 0;
+ lp->cisco_mineseen = 0;
+ lp->cisco_yourseq = 0;
+ lp->cisco_keepalive_period = ISDN_TIMER_KEEPINT;
+ lp->cisco_last_slarp_in = 0;
+ lp->cisco_line_state = 0;
+ lp->cisco_debserint = 0;
+
+ /* send slarp request because interface/seq.no.s reset */
+ isdn_net_ciscohdlck_slarp_send_request(lp);
+
+ init_timer(&lp->cisco_timer);
+ lp->cisco_timer.data = (unsigned long) lp;
+ lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+ lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
+ add_timer(&lp->cisco_timer);
+}
+
+static void
+isdn_net_ciscohdlck_disconnected(isdn_net_local *lp)
+{
+ del_timer(&lp->cisco_timer);
+}
+
+static void
+isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
+{
+ struct sk_buff *skb;
+ unsigned char *p;
+ struct in_device *in_dev = NULL;
+ u32 addr = 0; /* local ipv4 address */
+ u32 mask = 0; /* local netmask */
+
+ if ((in_dev = lp->netdev->dev.ip_ptr) != NULL) {
+ /* take primary(first) address of interface */
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+ if (ifa != NULL) {
+ addr = ifa->ifa_local;
+ mask = ifa->ifa_mask;
}
- p = (isdn_net_dev *) p->next;
}
- isdn_timer_ctrl(ISDN_TIMER_KEEPALIVE, anymore);
+
+ skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
+ if (!skb)
+ return;
+
+ p = skb_put(skb, 4 + 14);
+
+ /* cisco header */
+ p += put_u8 (p, CISCO_ADDR_UNICAST);
+ p += put_u8 (p, CISCO_CTRL);
+ p += put_u16(p, CISCO_TYPE_SLARP);
+
+ /* slarp reply, send own ip/netmask; if values are nonsense remote
+ * should think we are unable to provide it with an address via SLARP */
+ p += put_u32(p, CISCO_SLARP_REPLY);
+ p += put_u32(p, addr); // address
+ p += put_u32(p, mask); // netmask
+ p += put_u16(p, 0); // unused
+
+ isdn_net_write_super(lp, skb);
+}
+
+static void
+isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
+{
+ unsigned char *p;
+ int period;
+ __u32 code;
+ __u32 my_seq, addr;
+ __u32 your_seq, mask;
+ __u16 unused;
+
+ if (skb->len < 14)
+ return;
+
+ p = skb->data;
+ p += get_u32(p, &code);
+
+ switch (code) {
+ case CISCO_SLARP_REQUEST:
+ lp->cisco_yourseq = 0;
+ isdn_net_ciscohdlck_slarp_send_reply(lp);
+ break;
+ case CISCO_SLARP_REPLY:
+ /* Ignore replies - at least for now */
+ if (lp->cisco_debserint) {
+ p += get_u32(p, &addr);
+ p += get_u32(p, &mask);
+ p += get_u16(p, &unused);
+ printk(KERN_DEBUG "%s: got slarp reply (%ul/%ul) - "
+ "ignored\n", lp->name, addr, mask);
+ }
+ break;
+ case CISCO_SLARP_KEEPALIVE:
+ period = (int)((jiffies - lp->cisco_last_slarp_in
+ + HZ/2 - 1) / HZ);
+ if (lp->cisco_debserint &&
+ (period != lp->cisco_keepalive_period) &&
+ lp->cisco_last_slarp_in) {
+ printk(KERN_DEBUG "%s: Keepalive period mismatch - "
+ "is %d but should be %d.\n",
+ lp->name, period, lp->cisco_keepalive_period);
+ }
+ lp->cisco_last_slarp_in = jiffies;
+ p += get_u32(p, &my_seq);
+ p += get_u32(p, &your_seq);
+ p += get_u16(p, &unused);
+ lp->cisco_yourseq = my_seq;
+ lp->cisco_mineseen = your_seq;
+ break;
+ }
+}
+
+static void
+isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
+{
+ unsigned char *p;
+ __u8 addr;
+ __u8 ctrl;
+ __u16 type;
+
+ if (skb->len < 4)
+ goto out_free;
+
+ p = skb->data;
+ p += get_u8 (p, &addr);
+ p += get_u8 (p, &ctrl);
+ p += get_u16(p, &type);
+ skb_pull(skb, 4);
+
+ if (addr != CISCO_ADDR_UNICAST && addr != CISCO_ADDR_BROADCAST) {
+ printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
+ lp->name, addr);
+ goto out_free;
+ }
+ if (ctrl != CISCO_CTRL) {
+ printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
+ lp->name, ctrl);
+ goto out_free;
+ }
+
+ switch (type) {
+ case CISCO_TYPE_INET:
+ skb->protocol = htons(ETH_P_IP);
+ netif_rx(skb);
+ break;
+ case CISCO_TYPE_SLARP:
+ isdn_net_ciscohdlck_slarp_in(lp, skb);
+ goto out_free;
+ default:
+ printk(KERN_WARNING "%s: Unknown Cisco type 0x%04x\n",
+ lp->name, type);
+ goto out_free;
+ }
+ return;
+
+ out_free:
+ kfree_skb(skb);
}
/*
#ifdef CONFIG_ISDN_X25
struct concap_proto *cprot = lp -> netdev -> cprot;
#endif
- cisco_hdr *ch;
-
lp->transcount += skb->len;
lp->stats.rx_packets++;
skb->protocol = htons(ETH_P_IP);
break;
case ISDN_NET_ENCAP_CISCOHDLCK:
- ch = (cisco_hdr *)skb->data;
- if ((ch->addr != CISCO_ADDR_UNICAST) &&
- (ch->addr != CISCO_ADDR_BROADCAST) ) {
- printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
- lp->name, ch->addr);
- kfree_skb(skb);
- return;
- }
- if (ch->ctrl != 0) {
- printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
- lp->name, ch->ctrl);
- kfree_skb(skb);
- return;
- }
- switch (ntohs(ch->type)) {
- case CISCO_TYPE_INET:
- skb_pull(skb, 4);
- skb->protocol = htons(ETH_P_IP);
- break;
- case CISCO_TYPE_SLARP:
- skb_pull(skb, 4);
- isdn_net_slarp_in(olp, skb);
- return;
- default:
- printk(KERN_WARNING "%s: Unknown Cisco type 0x%04x\n",
- lp->name, ch->type);
- kfree_skb(skb);
- return;
- }
- break;
+ isdn_net_ciscohdlck_receive(lp, skb);
+ return;
case ISDN_NET_ENCAP_CISCOHDLC:
/* CISCO-HDLC IP with type field and fake I-frame-header */
skb_pull(skb, 2);
void *daddr, void *saddr, unsigned plen)
{
isdn_net_local *lp = dev->priv;
+ unsigned char *p;
ushort len = 0;
switch (lp->p_encap) {
len = 2;
break;
case ISDN_NET_ENCAP_CISCOHDLC:
- skb_push(skb, 4);
- skb->data[0] = 0x0f;
- skb->data[1] = 0x00;
- *((ushort *) & skb->data[2]) = htons(type);
+ case ISDN_NET_ENCAP_CISCOHDLCK:
+ p = skb_push(skb, 4);
+ p += put_u8 (p, CISCO_ADDR_UNICAST);
+ p += put_u8 (p, CISCO_CTRL);
+ p += put_u16(p, type);
len = 4;
break;
#ifdef CONFIG_ISDN_X25
ndev->stop = &isdn_net_close;
ndev->get_stats = &isdn_net_get_stats;
ndev->rebuild_header = &isdn_net_rebuild_header;
-#ifdef CONFIG_ISDN_PPP
- ndev->do_ioctl = isdn_ppp_dev_ioctl;
-#endif
+ ndev->do_ioctl = NULL;
return 0;
}
#else
p->dev.type = ARPHRD_PPP; /* change ARP type */
p->dev.addr_len = 0;
+ p->dev.do_ioctl = isdn_ppp_dev_ioctl;
#endif
break;
case ISDN_NET_ENCAP_X25IFACE:
p->dev.addr_len = 0;
#endif
break;
+ case ISDN_NET_ENCAP_CISCOHDLCK:
+ p->dev.do_ioctl = isdn_ciscohdlck_dev_ioctl;
+ break;
default:
if( cfg->p_encap >= 0 &&
cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP )
-/* $Id: isdn_net.h,v 1.19 2000/06/21 09:54:29 keil Exp $
+/* $Id: isdn_net.h,v 1.19.6.1 2001/04/20 02:41:58 keil Exp $
* header for Linux ISDN subsystem, network related functions (linklevel).
*
* Definitions for Cisco-HDLC header.
*/
-typedef struct cisco_hdr {
- __u8 addr; /* unicast/broadcast */
- __u8 ctrl; /* Always 0 */
- __u16 type; /* IP-typefield */
-} cisco_hdr;
-
-typedef struct cisco_slarp {
- __u32 code; /* SLREQ/SLREPLY/KEEPALIVE */
- union {
- struct {
- __u32 ifaddr; /* My interface address */
- __u32 netmask; /* My interface netmask */
- } reply;
- struct {
- __u32 my_seq; /* Packet sequence number */
- __u32 your_seq;
- } keepalive;
- } slarp;
- __u16 rel; /* Always 0xffff */
- __u16 t1; /* Uptime in usec >> 16 */
- __u16 t0; /* Uptime in usec & 0xffff */
-} cisco_slarp;
-
#define CISCO_ADDR_UNICAST 0x0f
#define CISCO_ADDR_BROADCAST 0x8f
+#define CISCO_CTRL 0x00
#define CISCO_TYPE_INET 0x0800
#define CISCO_TYPE_SLARP 0x8035
#define CISCO_SLARP_REPLY 0
extern int isdn_net_force_dial(char *);
extern isdn_net_dev *isdn_net_findif(char *);
extern int isdn_net_rcv_skb(int, struct sk_buff *);
-extern void isdn_net_slarp_out(void);
extern int isdn_net_dial_req(isdn_net_local *);
extern void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb);
extern void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb);
spin_unlock_irqrestore(&master_lp->netdev->queue_lock, flags);
}
+static inline int
+put_u8(unsigned char *p, __u8 x)
+{
+ p[0] = x;
+ return 1;
+}
+
+static inline int
+put_u16(unsigned char *p, __u16 x)
+{
+ p[0] = x >> 8;
+ p[1] = x;
+ return 2;
+}
+
+static inline int
+put_u32(unsigned char *p, __u32 x)
+{
+ p[0] = x >> 24;
+ p[1] = x >> 16;
+ p[2] = x >> 8;
+ p[3] = x;
+ return 4;
+}
+
+static inline int
+get_u8(unsigned char *p, __u8 *x)
+{
+ *x = p[0];
+ return 1;
+}
+
+static inline int
+get_u16(unsigned char *p, __u16 *x)
+{
+ *x = (p[0] << 8) + p[1];
+ return 2;
+}
+
+static inline int
+get_u32(unsigned char *p, __u32 *x)
+{
+ *x = (p[0] << 24) + (p[1] << 16) + (p[2] << 8) + p[3];
+ return 4;
+}
+
-/* $Id: isdn_ppp.c,v 1.85.6.2 2001/01/23 17:45:02 kai Exp $
+/* $Id: isdn_ppp.c,v 1.85.6.5 2001/05/26 15:19:56 kai Exp $
*
* Linux ISDN subsystem, functions for synchronous PPP (linklevel).
*
static int isdn_ppp_bundle(struct ippp_struct *, int unit);
#endif /* CONFIG_ISDN_MPP */
-char *isdn_ppp_revision = "$Revision: 1.85.6.2 $";
+char *isdn_ppp_revision = "$Revision: 1.85.6.5 $";
static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];
int unit = 0;
long flags;
struct ippp_struct *is;
+ int retval;
save_flags(flags);
cli();
if (i >= ISDN_MAX_CHANNELS) {
restore_flags(flags);
printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n");
- return -1;
+ retval = -1;
+ goto out;
}
unit = isdn_ppp_if_get_unit(lp->name); /* get unit number from interface name .. ugly! */
if (unit < 0) {
printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n", lp->name);
- return -1;
+ retval = -1;
+ goto out;
}
lp->ppp_slot = i;
is->unit = unit;
is->state = IPPP_OPEN | IPPP_ASSIGNED; /* assigned to a netdevice but not connected */
#ifdef CONFIG_ISDN_MPP
- if (isdn_ppp_mp_init(lp, NULL) < 0)
- return -ENOMEM;
+ retval = isdn_ppp_mp_init(lp, NULL);
+ if (retval < 0)
+ goto out;
#endif /* CONFIG_ISDN_MPP */
- restore_flags(flags);
+ retval = lp->ppp_slot;
- return lp->ppp_slot;
+ out:
+ restore_flags(flags);
+ return retval;
}
/*
}
skb_reserve(skb, hl);
if (copy_from_user(skb_put(skb, count), buf, count))
+ {
+ kfree_skb(skb);
return -EFAULT;
+ }
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32,is->unit,lp->ppp_slot);
kmalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
for (j = 0; j < i; j++)
- kfree(ippp_table[i]);
+ kfree(ippp_table[j]);
return -1;
}
memset((char *) ippp_table[i], 0, sizeof(struct ippp_struct));
switch(skb->data[0]) {
case CCP_CONFREQ:
+ if(is->debug & 0x10)
+ printk(KERN_DEBUG "Disable compression here!\n");
+ if(proto == PPP_CCP)
+ mis->compflags &= ~SC_COMP_ON;
+ else
+ is->compflags &= ~SC_LINK_COMP_ON;
+ break;
case CCP_TERMREQ:
case CCP_TERMACK:
if(is->debug & 0x10)
/* TODO: Clean this up with new Reset semantics */
+/* I believe the CCP handling as-is is done wrong. Compressed frames
+ * should only be sent/received after CCP reaches UP state, which means
+ * both sides have sent CONF_ACK. Currently, we handle both directions
+ * independently, which means we may accept compressed frames too early
+ * (supposedly not a problem), but may also mean we send compressed frames
+ * too early, which may turn out to be a problem.
+ * This part of state machine should actually be handled by (i)pppd, but
+ * that's too big of a change now. --kai
+ */
+
+
static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb)
{
struct ippp_struct *mis,*is = ippp_table[lp->ppp_slot];
switch(data[2]) {
case CCP_CONFREQ:
+ if(is->debug & 0x10)
+ printk(KERN_DEBUG "Disable decompression here!\n");
+ if(proto == PPP_CCP)
+ is->compflags &= ~SC_DECOMP_ON;
+ else
+ is->compflags &= ~SC_LINK_DECOMP_ON;
+ break;
case CCP_TERMREQ:
case CCP_TERMACK:
if(is->debug & 0x10)
-/* $Id: isdn_tty.c,v 1.94.6.2 2001/06/09 15:14:15 kai Exp $
+/* $Id: isdn_tty.c,v 1.94.6.3 2001/07/03 14:48:25 kai Exp $
* Linux ISDN subsystem, tty functions and AT-command emulator (linklevel).
*
static int si2bit[8] =
{4, 1, 4, 4, 4, 4, 4, 4};
-char *isdn_tty_revision = "$Revision: 1.94.6.2 $";
+char *isdn_tty_revision = "$Revision: 1.94.6.3 $";
/* isdn_tty_try_read() is called from within isdn_tty_rcv_skb()
status = info->lsr;
restore_flags(flags);
result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- put_user(result, (uint *) value);
- return 0;
+ return put_user(result, (uint *) value);
}
| ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
| ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
| ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
- put_user(result, (uint *) value);
- return 0;
+ return put_user(result, (uint *) value);
}
static int
uint arg;
int pre_dtr;
- get_user(arg, (uint *) value);
+ if (get_user(arg, (uint *) value))
+ return -EFAULT;
switch (cmd) {
case TIOCMBIS:
#ifdef ISDN_DEBUG_MODEM_IOCTL
uint cmd, ulong arg)
{
modem_info *info = (modem_info *) tty->driver_data;
- int error;
int retval;
if (isdn_tty_paranoia_check(info, tty->device, "isdn_tty_ioctl"))
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCGSOFTCAR\n", info->line);
#endif
- error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long));
- if (error)
- return error;
- put_user(C_CLOCAL(tty) ? 1 : 0, (ulong *) arg);
- return 0;
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (ulong *) arg);
case TIOCSSOFTCAR:
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCSSOFTCAR\n", info->line);
#endif
- error = verify_area(VERIFY_READ, (void *) arg, sizeof(long));
- if (error)
- return error;
- get_user(arg, (ulong *) arg);
+ if (get_user(arg, (ulong *) arg))
+ return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
#endif
- error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(uint));
- if (error)
- return error;
return isdn_tty_get_modem_info(info, (uint *) arg);
case TIOCMBIS:
case TIOCMBIC:
case TIOCMSET:
- error = verify_area(VERIFY_READ, (void *) arg, sizeof(uint));
- if (error)
- return error;
return isdn_tty_set_modem_info(info, cmd, (uint *) arg);
case TIOCSERGETLSR: /* Get line status register */
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line);
#endif
- error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(uint));
- if (error)
- return error;
- else
- return isdn_tty_get_lsr_info(info, (uint *) arg);
+ return isdn_tty_get_lsr_info(info, (uint *) arg);
default:
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on ttyi%d\n", cmd, info->line);
8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
- Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
-
- Much code comes from Donald Becker's rtl8139.c driver,
- versions 1.13 and older. This driver was originally based
- on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
-
- -----<snip>-----
-
- Written 1997-2001 by Donald Becker.
- This software may be used and distributed according to the
- terms of the GNU General Public License (GPL), incorporated
- herein by reference. Drivers based on or derived from this
- code fall under the GPL and must retain the authorship,
- copyright and license notice. This file is not a complete
- program and may only be used when the entire operating
- system is licensed under the GPL.
-
- This driver is for boards based on the RTL8129 and RTL8139
- PCI ethernet chips.
-
- The author may be reached as becker@scyld.com, or C/O Scyld
- Computing Corporation 410 Severn Ave., Suite 210 Annapolis
- MD 21403
-
- Support and updates available at
- http://www.scyld.com/network/rtl8139.html
-
- Twister-tuning table provided by Kinston
- <shangh@realtek.com.tw>.
-
- -----<snip>-----
+ Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+ Copyright 2000,2001 Jeff Garzik
+
+ Much code comes from Donald Becker's rtl8139.c driver,
+ versions 1.13 and older. This driver was originally based
+ on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
+
+ -----<snip>-----
+
+ Written 1997-2001 by Donald Becker.
+ This software may be used and distributed according to the
+ terms of the GNU General Public License (GPL), incorporated
+ herein by reference. Drivers based on or derived from this
+ code fall under the GPL and must retain the authorship,
+ copyright and license notice. This file is not a complete
+ program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139
+ PCI ethernet chips.
+
+ The author may be reached as becker@scyld.com, or C/O Scyld
+ Computing Corporation 410 Severn Ave., Suite 210 Annapolis
+ MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston
+ <shangh@realtek.com.tw>.
+
+ -----<snip>-----
This software may be used and distributed according to the terms
- of the GNU Public License, incorporated herein by reference.
+ of the GNU General Public License, incorporated herein by reference.
Contributors:
problem by having an MMIO register write be immediately followed by
an MMIO register read.
-2) The RTL-8129 is only supported in Donald Becker's rtl8139 driver.
-
*/
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.18-pre4"
+
+
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
#include <asm/io.h>
-#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
-
-#define RTL8139_VERSION "0.9.14-2.2"
-#define MODNAME "8139too"
-#define RTL8139_DRIVER_NAME MODNAME " FastEthernet driver " RTL8139_VERSION
-#define RTL8139_AUTHOR "Jeff Garzik <jgarzik@mandrakesoft.com>"
-#define PFX MODNAME ": "
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
#ifdef RTL8139_DEBUG
/* note: prints function name for you */
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
#else
-#define DPRINTK(fmt, args...)
+# define DPRINTK(fmt, args...)
#endif
#ifdef RTL8139_NDEBUG
-#define assert(expr) do {} while (0)
+# define assert(expr) do {} while (0)
#else
-#define assert(expr) \
+# define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n", \
#expr,__FILE__,__FUNCTION__,__LINE__); \
#endif
#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
-#define netif_wake_queue(dev) do { clear_bit(0, &dev->tbusy); mark_bh(NET_BH); } while(0)
+#define netif_wake_queue(dev) do { clear_bit(0, &dev->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
static inline void netif_start_queue(struct device *dev)
(rtl8139_pci_resource_end((dev_priv),(bar)) - \
rtl8139_pci_resource_start((dev_priv),(bar)) + 1))
+#define RTL8139_PCI_ANY_ID 0xffff
+/* Walk the global pci_devices list and return the first device after FROM
+ * (or the head of the list, if FROM is NULL) whose vendor and device IDs
+ * match.  RTL8139_PCI_ANY_ID acts as a wildcard for either field.
+ * Returns NULL when no further match exists. */
+static inline struct pci_dev *rtl8139_pci_find_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
+{
+	struct pci_dev *dev;
+
+	for (dev = from ? from->next : pci_devices; dev; dev = dev->next) {
+		int vendor_ok = (dev->vendor == vendor) || (vendor == RTL8139_PCI_ANY_ID);
+		int device_ok = (dev->device == device) || (device == RTL8139_PCI_ANY_ID);
+		if (vendor_ok && device_ok)
+			break;
+	}
+	return dev;
+}
+
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define SIOCETHTOOL 0x8946 /* Ethtool interface */
+#define SIOCGMIIPHY 0x8947 /* Get address of MII PHY in use. */
+#define SIOCGMIIREG 0x8948 /* Read MII PHY register. */
+#define SIOCSMIIREG 0x8949 /* Write MII PHY register. */
+#define ETHTOOL_GSET 0x00000001 /* Get settings. */
+#define ETHTOOL_SSET 0x00000002 /* Set settings, privileged. */
+#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */
+
+/* these strings are set to whatever the driver author decides... */
+/* NOTE(review): local copy of the ETHTOOL_GDRVINFO reply structure —
+ * presumably mirrors <linux/ethtool.h>, which this (2.2) kernel lacks;
+ * verify the layout against the mainline header before changing it. */
+struct ethtool_drvinfo {
+	u32	cmd;
+	char	driver[32];	/* driver short name, "tulip", "eepro100" */
+	char	version[32];	/* driver version string */
+	char	fw_version[32];	/* firmware version string, if applicable */
+	char	bus_info[32];	/* Bus info for this interface.  For PCI
+				 * devices, use pci_dev->slot_name. */
+	char	reserved1[32];
+	char	reserved2[32];
+};
+
+/* This structure is used in all SIOCxMIIxxx ioctl calls */
+/* NOTE(review): local copy for kernels without <linux/mii.h>; field order
+ * must match the mainline definition so the ioctl ABI stays compatible. */
+struct mii_ioctl_data {
+	u16	phy_id;		/* MII PHY address */
+	u16	reg_num;	/* MII register number */
+	u16	val_in;		/* value to write (SIOCSMIIREG) */
+	u16	val_out;	/* value read back (SIOCGMIIREG) */
+};
/* A few user-configurable values. */
/* media options */
#define RX_BUF_PAD 16
#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+#define RX_EARLY_THRESH 2
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
-#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
-#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (HZ/2)
+#define TX_TIMEOUT (6*HZ)
enum {
+ RTL_PCI_64BIT = 0x0100,
HAS_MII_XCVR = 0x010000,
HAS_CHIP_XCVR = 0x020000,
HAS_LNK_CHNG = 0x040000,
RTL8139 = 0,
RTL8139_CB,
SMC1211TX,
- /*MPX5030,*/
+ MPX5030,
DELTA8139,
ADDTRON8139,
DFE538TX,
} board_info[] = {
{ "RealTek RTL8139 Fast Ethernet", RTL8139_CAPS },
{ "RealTek RTL8139B PCI/CardBus", RTL8139_CAPS },
- { "SMC1211TX EZCard 10/100 (RealTek RTL8139)", RTL8139_CAPS },
-/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)", RTL8139_CAPS },*/
+ { "SMC1211TX EZCard 10/100 (RealTek RTL8139)", RTL8139_CAPS | (RTL_PCI_64BIT | 2)},
+ { "Accton MPX5030 (RealTek RTL8139)", RTL8139_CAPS | (RTL_PCI_64BIT | 2)},
{ "Delta Electronics 8139 10/100BaseTX", RTL8139_CAPS },
{ "Addtron Technolgy 8139 10/100BaseTX", RTL8139_CAPS },
- { "D-Link DFE-538TX (RealTek RTL8139)", RTL8139_CAPS },
+ { "D-Link DFE-538TX (RealTek RTL8139)", RTL8139_CAPS | (RTL_PCI_64BIT | 1)},
{ "RealTek RTL8129", RTL8129_CAPS },
};
{0x10ec, 0x8139, RTL8139 },
{0x10ec, 0x8138, RTL8139_CB },
{0x1113, 0x1211, SMC1211TX },
-/* {0x1113, 0x1211, MPX5030 },*/
+ {0x1113, 0x1211, MPX5030 },
{0x1500, 0x1360, DELTA8139 },
{0x4033, 0x1360, ADDTRON8139 },
{0x1186, 0x1300, DFE538TX },
+#ifdef CONFIG_8139TOO_8129
{0x10ec, 0x8129, RTL8129 },
+#endif
+ /* some crazy card report invalid vendor IDs,
+ * catch this. RTL8139_PCI_ANY_ID means match anything
+ * 1KJD 2001-04-08
+ */
+ {RTL8139_PCI_ANY_ID, 0x8139, RTL8139 },
{0,}
};
TxOK = 0x04,
RxErr = 0x02,
RxOK = 0x01,
+
+ RxAckBits = RxFIFOOver | RxOverflow | RxOK,
};
+
enum TxStatusBits {
TxHostOwns = 0x2000,
TxUnderrun = 0x4000,
Cfg1_VPD_Enable = 0x02,
Cfg1_PIO = 0x04,
Cfg1_MMIO = 0x08,
- Cfg1_LWAKE = 0x10,
+ LWAKE = 0x10, /* not on 8139, 8139A */
Cfg1_Driver_Load = 0x20,
Cfg1_LED0 = 0x40,
Cfg1_LED1 = 0x80,
+ SLEEP = (1 << 1), /* only on 8139, 8139A */
+ PWRDN = (1 << 0), /* only on 8139, 8139A */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+ LWPTN = (1 << 2), /* not on 8139, 8139A */
};
enum RxConfigBits {
CH_8139C,
} chip_t;
+enum chip_flags {
+ HasHltClk = (1 << 0),
+ HasLWake = (1 << 1),
+};
+
/* directly indexed by chip_t, above */
const static struct {
const char *name;
u8 version; /* from RTL8139C docs */
u32 RxConfigMask; /* should clear the bits supported by this chip */
+ u32 flags;
} rtl_chip_info[] = {
{ "RTL-8139",
0x40,
0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ HasHltClk,
},
{ "RTL-8139 rev K",
0x60,
0xf0fe0040,
+ HasHltClk,
},
{ "RTL-8139A",
0x70,
0xf0fe0040,
+ HasHltClk, /* XXX undocumented? */
},
{ "RTL-8139B",
0x78,
- 0xf0fc0040
+ 0xf0fc0040,
+ HasLWake,
},
{ "RTL-8130",
0x7C,
0xf0fe0040, /* XXX copied from RTL8139A, verify */
+ HasLWake,
},
{ "RTL-8139C",
0x74,
0xf0fc0040, /* XXX copied from RTL8139B, verify */
+ HasLWake,
},
};
+/* Driver-private event counters, kept beside the standard net stats. */
+struct rtl_extra_stats {
+	unsigned long early_rx;		/* presumably counts early-Rx events — confirm at increment site */
+	unsigned long tx_buf_mapped;	/* presumably counts Tx bounce-buffer mappings — confirm at increment site */
+	unsigned long tx_timeouts;	/* bumped in rtl8139_tx_timeout() */
+};
struct rtl8139_private {
void *mmio_addr;
unsigned char *tx_bufs; /* Tx bounce buffer region. */
dma_addr_t rx_ring_dma;
dma_addr_t tx_bufs_dma;
- char phys[4]; /* MII device addresses. */
+ signed char phys[4]; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
char twistie, twist_row, twist_col; /* Twister tune state. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
pid_t thr_pid;
wait_queue_head_t thr_wait;
struct semaphore thr_exited;
+ u32 rx_config;
+ struct rtl_extra_stats xstats;
};
-
+#define RTL8139_AUTHOR "Jeff Garzik <jgarzik@mandrakesoft.com>"
+MODULE_AUTHOR (RTL8139_AUTHOR);
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_PARM (multicast_filter_limit, "i");
+MODULE_PARM (max_interrupt_work, "i");
+MODULE_PARM (media, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM (full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (max_interrupt_work, "8139too maximum events handled per interrupt");
+MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
static int read_eeprom (void *ioaddr, int location, int addr_len);
static int rtl8139_open (struct device *dev);
static void rtl8139_interrupt (int irq, void *dev_instance,
struct pt_regs *regs);
static int rtl8139_close (struct device *dev);
-static int mii_ioctl (struct device *dev, struct ifreq *rq, int cmd);
+static int netdev_ioctl (struct device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *rtl8139_get_stats (struct device *dev);
static inline u32 ether_crc (int length, unsigned char *data);
static void rtl8139_set_rx_mode (struct device *dev);
#define RTL_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+#define MMIO_FLUSH_AUDIT_COMPLETE 1
#if MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
TxErr | TxOK | RxErr | RxOK;
static const unsigned int rtl8139_rx_config =
- RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
+ (RX_EARLY_THRESH << RxCfgEarlyRxShift) | RxCfgRcv32K | RxNoWrap |
(RX_FIFO_THRESH << RxCfgFIFOShift) |
(RX_DMA_BURST << RxCfgDMAShift);
-static int rtl8139_init_board (struct pci_dev *pdev, struct device **dev_out,
- void **ioaddr_out)
+/* Tear down a device allocated by rtl8139_init_board(): unmap MMIO (when
+ * not using PIO), poison the memory in debug builds, and free the combined
+ * device + private allocation.
+ * NOTE(review): this does NOT call unregister_netdev() or release the PIO
+ * region requested in rtl8139_init_board(); init_etherdev() registers the
+ * device, so confirm every caller (including the init_board error path)
+ * unregisters and releases resources first. */
+static void __rtl8139_cleanup_dev (struct device *dev)
{
-	void *ioaddr = NULL;
+	struct rtl8139_private *tp;
+	struct pci_dev *pdev;
+
+	assert (dev != NULL);
+	assert (dev->priv != NULL);
+
+	tp = dev->priv;
+	assert (tp->pci_dev != NULL);
+	pdev = tp->pci_dev;
+
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr)
+		iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+#ifndef RTL8139_NDEBUG
+	/* poison memory before freeing */
+	memset (dev, 0xBC,
+		sizeof (struct device) +
+		sizeof (struct rtl8139_private));
+#endif /* RTL8139_NDEBUG */
+
+	kfree (dev);
+}
+
+
+/* Issue a soft reset and poll until the chip clears CmdReset.
+ * Polls at most 1000 times with 10us delays (~10ms total); on timeout it
+ * returns silently without reporting the failure. */
+static void rtl8139_chip_reset (void *ioaddr)
+{
+	int i;
+
+	/* Soft reset the chip. */
+	RTL_W8 (ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--) {
+		barrier();	/* force a fresh read of ChipCmd each iteration */
+		if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+			break;
+		udelay (10);
+	}
+}
+
+
+static int rtl8139_init_board (struct pci_dev *pdev,
+ struct device **dev_out,
+ u32 hwflags)
+{
+ void *ioaddr;
struct device *dev;
struct rtl8139_private *tp;
u8 tmp8;
- int rc, i;
+ int rc;
+ unsigned int i;
u32 pio_start, pio_end, pio_flags, pio_len;
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ unsigned short pci_command, new_command;
u32 tmp;
- u16 pci_command, new_command;
DPRINTK ("ENTER\n");
assert (pdev != NULL);
- assert (ioaddr_out != NULL);
- *ioaddr_out = NULL;
*dev_out = NULL;
- /* dev zeroed in init_etherdev */
+ /* dev and dev->priv zeroed in init_etherdev */
dev = init_etherdev (NULL, sizeof (*tp));
if (dev == NULL) {
- printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+ printk (KERN_ERR PFX "Unable to alloc new ethernet device\n");
DPRINTK ("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
tp = dev->priv;
+ tp->pci_dev = pdev;
rtl8139_read_pci_resources(pdev, &tp->rtl8139_pci_resource[0]);
mmio_end = rtl8139_pci_resource_end (tp, 1);
mmio_flags = rtl8139_pci_resource_flags (tp, 1);
mmio_len = rtl8139_pci_resource_len (tp, 1);
+#ifdef __alpha__
+ /* handle 64 bit address space mapping */
+ if (hwflags & RTL_PCI_64BIT) {
+ u32 pci_ext64;
+ pcibios_read_config_dword(PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ ((hwflags & 0x0f) << 2) + 0x10, &pci_ext64);
+ pio_start |= (unsigned long) pci_ext64 << 32;
+ pio_end |= (unsigned long) pci_ext64 << 32;
+ mmio_start |= (unsigned long) pci_ext64 << 32;
+ mmio_end |= (unsigned long) pci_ext64 << 32;
+ }
+#endif
/* set this immediately, we need to know before
* we talk to the chip directly */
- DPRINTK("PIO region size == 0x%02X\n", pio_len);
- DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
- if (pio_len == RTL8139B_IO_SIZE)
- tp->chipset = CH_8139B;
+ DPRINTK("PIO region size at 0x%08lx == 0x%02X\n", (unsigned long) pio_start, pio_len);
+ DPRINTK("MMIO region size at 0x%08lx == 0x%02lX\n", (unsigned long) mmio_start, mmio_len);
+#ifdef USE_IO_OPS
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
- printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
		printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
-
- /* make sure PCI base addr 1 is MMIO */
- if (!(mmio_flags & IORESOURCE_MEM)) {
- printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n");
+ /* check for weird/broken PCI region reporting */
+ if (pio_len < RTL_MIN_IO_SIZE) {
		printk (KERN_ERR PFX "Invalid PCI I/O region size(s), aborting\n");
rc = -ENODEV;
goto err_out;
}
-
- /* check for weird/broken PCI region reporting */
- if ((pio_len < RTL_MIN_IO_SIZE) ||
- (mmio_len < RTL_MIN_IO_SIZE)) {
- printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+#else
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(mmio_flags & IORESOURCE_MEM)) {
		printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out;
}
-
- /* make sure our PIO region in PCI space is available */
- if (check_region(pio_start, pio_len)) {
- printk (KERN_ERR PFX "no I/O resource available, aborting\n");
- rc = -EBUSY;
+ if (mmio_len < RTL_MIN_IO_SIZE) {
		printk (KERN_ERR PFX "Invalid PCI mem region size(s), aborting\n");
+ rc = -ENODEV;
goto err_out;
}
+#endif
request_region(pio_start, pio_len, dev->name);
pci_set_master (pdev);
#ifdef USE_IO_OPS
ioaddr = (void *) pio_start;
+ dev->base_addr = pio_start;
#else
/* ioremap MMIO region */
- ioaddr = ioremap_nocache(mmio_start, mmio_len);
-
+ ioaddr = ioremap_nocache(mmio_start, mmio_len);
if (ioaddr == NULL) {
printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
rc = -EIO;
- goto err_out_free_mmio;
+ goto err_out;
}
+ dev->base_addr = (long) ioaddr;
#endif /* USE_IO_OPS */
+ tp->mmio_addr = ioaddr;
/* Activate the card: fix for brain-damaged Win98 BIOSes. */
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
new_command = pci_command | PCI_USES_IO | PCI_USES_MEM | PCI_USES_MASTER;
if (pci_command != new_command)
pci_write_config_word(pdev, PCI_COMMAND, new_command);
-
- /* Soft reset the chip. */
- RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
-
- /* Check that the chip has finished the reset. */
- for (i = 10000; i > 0; i--)
- if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
- break;
- else
- udelay (10);
- /* Bring the chip out of low-power mode. */
- if (tp->chipset == CH_8139B) {
- RTL_W8 (Config1, RTL_R8 (Config1) & ~(1<<4));
- RTL_W8 (Config4, RTL_R8 (Config4) & ~(1<<2));
- } else {
- /* handle RTL8139A and RTL8139 cases */
- /* XXX from becker driver. is this right?? */
- RTL_W8 (Config1, 0);
- }
+ /* Bring old chips out of low-power mode. */
+ RTL_W8 (HltClk, 'R');
- /* make sure chip thinks PIO and MMIO are enabled */
- tmp8 = RTL_R8 (Config1);
- if ((tmp8 & Cfg1_PIO) == 0) {
- printk (KERN_ERR PFX "PIO not enabled, Cfg1=%02X, aborting\n", tmp8);
- rc = -EIO;
- goto err_out_iounmap;
- }
- if ((tmp8 & Cfg1_MMIO) == 0) {
- printk (KERN_ERR PFX "MMIO not enabled, Cfg1=%02X, aborting\n", tmp8);
+ /* check for missing/broken hardware */
+ if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+ printk (KERN_ERR PFX "slot %u: Chip not responding, ignoring board\n",
+ PCI_SLOT(pdev->devfn));
rc = -EIO;
- goto err_out_iounmap;
+ goto err_out;
}
/* identify chip attached to board */
tp->chipset,
rtl_chip_info[tp->chipset].name);
+ if (tp->chipset >= CH_8139B) {
+ u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+ DPRINTK("PCI PM wakeup\n");
+ if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+ (tmp8 & LWAKE))
+ new_tmp8 &= ~LWAKE;
+ new_tmp8 |= Cfg1_PM_Enable;
+ if (new_tmp8 != tmp8) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W8 (Config1, new_tmp8);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+ tmp8 = RTL_R8 (Config4);
+ if (tmp8 & LWPTN)
+ RTL_W8 (Config4, tmp8 & ~LWPTN);
+ }
+ } else {
+ DPRINTK("Old chip wakeup\n");
+ tmp8 = RTL_R8 (Config1);
+ tmp8 &= ~(SLEEP | PWRDN);
+ RTL_W8 (Config1, tmp8);
+ }
+
+ rtl8139_chip_reset (ioaddr);
+
DPRINTK ("EXIT, returning 0\n");
- *ioaddr_out = ioaddr;
*dev_out = dev;
return 0;
-err_out_iounmap:
- assert (ioaddr > 0);
-#ifndef USE_IO_OPS
- iounmap (ioaddr);
-#endif /* !USE_IO_OPS */
-err_out_free_mmio:
- release_region (pio_start, pio_len);
err_out:
- unregister_netdev (dev);
- kfree (dev);
+ __rtl8139_cleanup_dev (dev);
DPRINTK ("EXIT, returning %d\n", rc);
return rc;
}
struct device *dev = NULL;
struct rtl8139_private *tp;
int i, addr_len, option;
- void *ioaddr = NULL;
+ void *ioaddr;
static int board_idx = -1;
- u8 tmp;
DPRINTK ("ENTER\n");
board_idx++;
- i = rtl8139_init_board (pdev, &dev, &ioaddr);
+ i = rtl8139_init_board (pdev, &dev, board_info[ent->board].hw_flags);
if (i < 0) {
DPRINTK ("EXIT, returning %d\n", i);
return i;
}
tp = dev->priv;
+ ioaddr = tp->mmio_addr;
assert (ioaddr != NULL);
assert (dev != NULL);
dev->stop = rtl8139_close;
dev->get_stats = rtl8139_get_stats;
dev->set_multicast_list = rtl8139_set_rx_mode;
- dev->do_ioctl = mii_ioctl;
+ dev->do_ioctl = netdev_ioctl;
dev->irq = pdev->irq;
- dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in init_etherdev */
tp = dev->priv;
/* note: tp->chipset set in rtl8139_init_board */
tp->drv_flags = board_info[ent->board].hw_flags;
- tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
spin_lock_init (&tp->lock);
init_waitqueue_head (&tp->thr_wait);
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
takes too much time. */
+#ifdef CONFIG_8139TOO_8129
if (tp->drv_flags & HAS_MII_XCVR) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
int mii_status = mdio_read(dev, phy, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
+ u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
- tp->advertising = mdio_read(dev, phy, 4);
printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
"advertising %4.4x.\n",
- dev->name, phy, mii_status, tp->advertising);
+ dev->name, phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
tp->phys[0] = 32;
}
} else
+#endif
tp->phys[0] = 32;
- /* Put the chip into low-power mode. */
- RTL_W8_F (Cfg9346, Cfg9346_Unlock);
-
- tmp = RTL_R8 (Config1) & Config1Clear;
- tmp |= (tp->chipset == CH_8139B) ? 3 : 1; /* Enable PM/VPD */
- RTL_W8_F (Config1, tmp);
-
- RTL_W8_F (HltClk, 'H'); /* 'R' would leave the clock running. */
-
/* The lower four bits are the media type. */
option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
if (option > 0) {
if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
tp->full_duplex = full_duplex[board_idx];
if (tp->full_duplex) {
- printk (KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
/* Changing the MII-advertised media because might prevent
re-connection. */
tp->duplex_lock = 1;
}
rtl8139_device_tab[rtl8139_device_count++] = dev;
-
+ /* Put the chip into low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
DPRINTK ("EXIT - returning 0\n");
return 0;
}
unregister_netdev (dev);
-#ifndef USE_IO_OPS
- iounmap (np->mmio_addr);
-#endif /* !USE_IO_OPS */
-
- release_region (rtl8139_pci_resource_start (np, 0),
- rtl8139_pci_resource_len (np, 0));
-#ifndef RTL8139_NDEBUG
- /* poison memory before freeing */
- memset (dev, 0xBC,
- sizeof (struct device) +
- sizeof (struct rtl8139_private));
-#endif /* RTL8139_NDEBUG */
-
- kfree (dev);
+ __rtl8139_cleanup_dev (dev);
DPRINTK ("EXIT\n");
}
+
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
0
};
-
+#ifdef CONFIG_8139TOO_8129
/* Syncronize the MII management interface by shifting 32 one bits out. */
static void mdio_sync (void *mdio_addr)
{
DPRINTK ("EXIT\n");
}
-
+#endif
static int mdio_read (struct device *dev, int phy_id, int location)
{
struct rtl8139_private *tp = dev->priv;
- void *mdio_addr = tp->mmio_addr + Config4;
- int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int retval = 0;
+#ifdef CONFIG_8139TOO_8129
int i;
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+#endif
DPRINTK ("ENTER\n");
return location < 8 && mii_2_8139_map[location] ?
readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
}
+
+#ifdef CONFIG_8139TOO_8129
mdio_sync (mdio_addr);
/* Shift the read command bits out. */
for (i = 15; i >= 0; i--) {
writeb (MDIO_CLK, mdio_addr);
mdio_delay (mdio_addr);
}
+#endif
DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff);
return (retval >> 1) & 0xffff;
int value)
{
struct rtl8139_private *tp = dev->priv;
+#ifdef CONFIG_8139TOO_8129
+ int i;
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
- int i;
+#endif
DPRINTK ("ENTER\n");
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
void *ioaddr = tp->mmio_addr;
if (location == 0) {
- RTL_W8_F (Cfg9346, Cfg9346_Unlock);
- RTL_W16_F (BasicModeCtrl, value);
- RTL_W8_F (Cfg9346, Cfg9346_Lock);
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W16 (BasicModeCtrl, value);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
} else if (location < 8 && mii_2_8139_map[location])
- RTL_W16_F (mii_2_8139_map[location], value);
+ RTL_W16 (mii_2_8139_map[location], value);
return;
}
+
+#ifdef CONFIG_8139TOO_8129
mdio_sync (mdio_addr);
/* Shift the command bits out. */
writeb (dataval | MDIO_CLK, mdio_addr);
mdio_delay (mdio_addr);
}
-
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
writeb (0, mdio_addr);
writeb (MDIO_CLK, mdio_addr);
mdio_delay (mdio_addr);
}
- return;
+#endif
}
DPRINTK ("%s: rtl8139_open() ioaddr %#lx IRQ %d"
" GP Pins %2.2x %s-duplex.\n",
- dev->name, pci_resource_start (tp, 1),
+ dev->name, (unsigned long) tp->mmio_addr,
dev->irq, RTL_R8 (MediaStatus),
tp->full_duplex ? "full" : "half");
}
+/* Read MII register 5 (link partner ability) on the first transceiver and
+ * set full-duplex mode if the partner advertises 100baseTx-FD or 10T-FD.
+ * phys[0] == 32 denotes the internal transceiver (mdio_read maps ids > 31
+ * to internal registers), so the >= 0 guard normally passes.
+ * NOTE(review): the status printk below also runs when mii_reg5 == 0xffff
+ * ("not there") — only the duplex update is skipped; confirm that is
+ * intentional. */
+static void rtl_check_media (struct device *dev)
+{
+	struct rtl8139_private *tp = dev->priv;
+
+	DPRINTK("ENTER\n");
+
+	if (tp->phys[0] >= 0) {
+		u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+		if (mii_reg5 == 0xffff)
+			;	/* Not there */
+		else if ((mii_reg5 & 0x0100) == 0x0100
+			 || (mii_reg5 & 0x00C0) == 0x0040)
+			tp->full_duplex = 1;
+
+		printk (KERN_INFO"%s: Setting %s%s-duplex based on"
+			" auto-negotiated partner ability %4.4x.\n",
+			dev->name, mii_reg5 == 0 ? "" :
+			(mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+			tp->full_duplex ? "full" : "half", mii_reg5);
+	}
+}
+
/* Start the hardware at open or resume. */
static void rtl8139_hw_start (struct device *dev)
{
DPRINTK ("ENTER\n");
- /* Soft reset the chip. */
- RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
- udelay (100);
+ /* Bring old chips out of low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'R');
- /* Check that the chip has finished the reset. */
- for (i = 1000; i > 0; i--)
- if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
- break;
+ rtl8139_chip_reset (ioaddr);
/* unlock Config[01234] and BMCR register writes */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
/* Must enable Tx/Rx before setting transfer thresholds! */
- RTL_W8_F (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
- i = rtl8139_rx_config |
- (RTL_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- RTL_W32_F (RxConfig, i);
+ tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+ RTL_W32 (RxConfig, tp->rx_config);
/* Check this value: the documentation for IFG contradicts ifself. */
RTL_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
tp->cur_rx = 0;
-
- /* This is check_duplex() */
- if (tp->phys[0] >= 0 || (tp->drv_flags & HAS_MII_XCVR)) {
- u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
- if (mii_reg5 == 0xffff)
- ; /* Not there */
- else if ((mii_reg5 & 0x0100) == 0x0100
- || (mii_reg5 & 0x00C0) == 0x0040)
- tp->full_duplex = 1;
- printk(KERN_INFO"%s: Setting %s%s-duplex based on"
- " auto-negotiated partner ability %4.4x.\n", dev->name,
- mii_reg5 == 0 ? "" :
- (mii_reg5 & 0x0180) ? "100Mbps " : "10Mbps ",
- tp->full_duplex ? "full" : "half", mii_reg5);
- }
-
- if (tp->chipset >= CH_8139A) {
- tmp = RTL_R8 (Config1) & Config1Clear;
- tmp |= Cfg1_Driver_Load;
- tmp |= (tp->chipset == CH_8139B) ? 3 : 1; /* Enable PM/VPD */
- RTL_W8_F (Config1, tmp);
- } else {
- u8 foo = RTL_R8 (Config1) & Config1Clear;
- RTL_W8 (Config1, tp->full_duplex ? (foo|0x60) : (foo|0x20));
- }
+
+ rtl_check_media (dev);
if (tp->chipset >= CH_8139B) {
- tmp = RTL_R8 (Config4) & ~(1<<2);
- /* chip will clear Rx FIFO overflow automatically */
- tmp |= (1<<7);
- RTL_W8 (Config4, tmp);
-
/* disable magic packet scanning, which is enabled
- * when PM is enabled above (Config1) */
+ * when PM is enabled in Config1 */
RTL_W8 (Config3, RTL_R8 (Config3) & ~(1<<5));
}
+ DPRINTK("init buffer addresses\n");
+
/* Lock Config[01234] and BMCR register writes */
- RTL_W8_F (Cfg9346, Cfg9346_Lock);
- udelay (10);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
for (i = 0; i < NUM_TX_DESC; i++)
RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
- RTL_W32_F (RxMissed, 0);
+ RTL_W32 (RxMissed, 0);
rtl8139_set_rx_mode (dev);
RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
/* make sure RxTx has started */
- RTL_W8_F (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ tmp = RTL_R8 (ChipCmd);
+ if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
/* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16_F (IntrMask, rtl8139_intr_mask);
+ RTL_W16 (IntrMask, rtl8139_intr_mask);
netif_start_queue (dev);
|| (mii_reg5 & 0x01C0) == 0x0040;
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
- printk (KERN_INFO
- "%s: Setting %s-duplex based on MII #%d link"
- " partner ability of %4.4x.\n", dev->name,
- tp->full_duplex ? "full" : "half",
- tp->phys[0], mii_reg5);
+
+ if (mii_reg5) {
+ printk (KERN_INFO
+ "%s: Setting %s-duplex based on MII #%d link"
+ " partner ability of %4.4x.\n",
+ dev->name,
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_reg5);
+ } else {
+ printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n",
+ dev->name);
+ }
+#if 0
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
}
}
struct rtl8139_private *tp = (struct rtl8139_private *) dev->priv;
unsigned long timeout;
- lock_kernel();
- daemonize ();
exit_files(current);
- unlock_kernel();
-
+ daemonize ();
spin_lock_irq(¤t->sigmask_lock);
sigemptyset(¤t->blocked);
recalc_sigpending(current);
current->comm[sizeof(current->comm) - 1] = '\0';
while (1) {
+ rtl8139_thread_iter (dev, tp, tp->mmio_addr);
timeout = next_tick;
do {
timeout = interruptible_sleep_on_timeout (&tp->thr_wait, timeout);
break;
rtnl_lock ();
- rtl8139_thread_iter (dev, tp, tp->mmio_addr);
rtnl_unlock ();
}
void *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
+ unsigned long flags;
DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
"media %2.2x.\n", dev->name,
RTL_R16 (IntrStatus),
RTL_R8 (MediaStatus));
+ tp->xstats.tx_timeouts++;
+
/* disable Tx ASAP, if not already */
tmp8 = RTL_R8 (ChipCmd);
if (tmp8 & CmdTxEnb)
- RTL_W8 (ChipCmd, tmp8 & ~CmdTxEnb);
+ RTL_W8 (ChipCmd, CmdRxEnb);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
/* Emit info to figure out what went wrong. */
- printk (KERN_DEBUG "%s: Tx queue start entry %ld dirty entry %ld.\n",
+ printk (KERN_DEBUG "%s: Tx queue start entry %ld dirty entry %ld.\n",
dev->name, tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
" (queue head)" : "");
/* Stop a shared interrupt from scavenging while we are. */
- spin_lock_irq (&tp->lock);
+ spin_lock_irqsave (&tp->lock, flags);
rtl8139_tx_clear (tp);
- spin_unlock_irq (&tp->lock);
+ spin_unlock_irqrestore (&tp->lock, flags);
/* ...and finally, reset everything */
rtl8139_hw_start (dev);
+
+ netif_wake_queue (dev);
}
{
struct rtl8139_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
- int entry;
+ unsigned int entry;
+ u32 dma_addr;
if (dev->tbusy) {
/* If this happens network layer tells us we're broken. */
- if (jiffies - dev->trans_start > TX_TIMEOUT)
+ if (jiffies - dev->trans_start > TX_TIMEOUT) {
rtl8139_tx_timeout(dev);
+ if (skb) kfree_skb(skb);
+ return 0;
+ }
}
+ mb();
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % NUM_TX_DESC;
assert (tp->tx_info[entry].skb == NULL);
tp->tx_info[entry].skb = skb;
- if ((long) skb->data & 3) { /* Must use alignment buffer. */
- /* tp->tx_info[entry].mapping = 0 */
+ if ((long) skb->data & 3) { /* Must use alignment buffer. */
+ /* tp->tx_info[entry].mapping = 0; */
memcpy (tp->tx_buf[entry], skb->data, skb->len);
- RTL_W32 (TxAddr0 + (entry * 4),
- tp->tx_bufs_dma + (tp->tx_buf[entry] - tp->tx_bufs));
+ dma_addr = tp->tx_bufs_dma + (tp->tx_buf[entry] - tp->tx_bufs);
} else {
- RTL_W32 (TxAddr0 + (entry * 4), virt_to_bus(skb->data));
+ dma_addr = virt_to_bus(skb->data);
}
/* Note: the chip doesn't have auto-pad! */
- RTL_W32 (TxStatus0 + (entry * sizeof(u32)),
- tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ spin_lock_irq(&tp->lock);
+ RTL_W32_F (TxAddr0 + (entry * 4), dma_addr);
+ RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
+ tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ spin_unlock_irq(&tp->lock);
dev->trans_start = jiffies;
- spin_lock_irq (&tp->lock);
-
tp->cur_tx++;
+ mb();
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
netif_stop_queue (dev);
- spin_unlock_irq (&tp->lock);
-
DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
dev->name, skb->data, skb->len, entry);
tp->stats.tx_errors++;
if (txstatus & TxAborted) {
tp->stats.tx_aborted_errors++;
- RTL_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
+ RTL_W32_F (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
}
if (txstatus & TxCarrierLost)
tp->stats.tx_carrier_errors++;
#ifndef RTL8139_NDEBUG
if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
printk (KERN_ERR "%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev->name, dirty_tx, tp->cur_tx);
dirty_tx += NUM_TX_DESC;
}
#endif /* RTL8139_NDEBUG */
/* only wake the queue if we did work, and the queue is stopped */
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
+ mb();
if (netif_queue_stopped (dev))
netif_wake_queue (dev);
}
}
+
/* TODO: clean this up! Rx reset need not be this intensive */
static void rtl8139_rx_err (u32 rx_status, struct device *dev,
struct rtl8139_private *tp, void *ioaddr)
tp->cur_rx = 0;
/* disable receive */
- tmp8 = RTL_R8 (ChipCmd) & ChipCmdClear;
- RTL_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+ RTL_W8 (ChipCmd, CmdTxEnb);
/* A.C.: Reset the multicast list. */
rtl8139_set_rx_mode (dev);
/* XXX potentially temporary hack to
* restart hung receiver */
while (--tmp_work > 0) {
+ barrier();
tmp8 = RTL_R8 (ChipCmd);
if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
break;
- RTL_W8_F (ChipCmd,
- (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
}
/* G.S.: Re-enable receiver */
}
-/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
- field alignments and semantics. */
static void rtl8139_rx_interrupt (struct device *dev,
struct rtl8139_private *tp, void *ioaddr)
{
unsigned int pkt_size;
struct sk_buff *skb;
+ rmb();
+
/* read size+status of next frame from DMA ring buffer */
rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
rx_size = rx_status >> 16;
}
#endif
- /* E. Gill */
- /* Note from BSD driver:
- * Here's a totally undocumented fact for you. When the
- * RealTek chip is in the process of copying a packet into
- * RAM for you, the length will be 0xfff0. If you spot a
- * packet header with this value, you need to stop. The
- * datasheet makes absolutely no mention of this and
- * RealTek should be shot for this.
- */
- if (rx_size == 0xfff0)
+ if (rx_size == 0xfff0) { /* Early Rx in progress */
+ tp->xstats.early_rx++;
break;
+ }
/* If Rx err or invalid rx_size/rx_status received
* (which happens if we get lost in the ring),
* Rx processing.
*/
if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+ (rx_size < 8) ||
(!(rx_status & RxStatusOK))) {
rtl8139_rx_err (rx_status, dev, tp, ioaddr);
return;
netif_rx (skb);
mark_bh(NET_BH);
tp->stats.rx_bytes += pkt_size ;
+ dev->last_rx = jiffies;
tp->stats.rx_packets++;
} else {
printk (KERN_WARNING
}
cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
- RTL_W16_F (RxBufPtr, cur_rx - 16);
+ RTL_W16 (RxBufPtr, cur_rx - 16);
+
+ if (RTL_R16 (IntrStatus) & RxAckBits)
+ RTL_W16_F (IntrStatus, RxAckBits);
}
DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- RTL_R16 (RxBufAddr),
- RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+ " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
+ RTL_R16 (RxBufAddr),
+ RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
tp->cur_rx = cur_rx;
+
+ if ((RTL_R8 (ChipCmd) & RxBufEmpty) &&
+ (RTL_R16 (IntrStatus) & RxAckBits))
+ RTL_W16_F (IntrStatus, RxAckBits);
}
void *ioaddr,
int status, int link_changed)
{
- printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
+ dev->name, status);
assert (dev != NULL);
assert (tp != NULL);
|| tp->duplex_lock;
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
+#if 0
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
}
status &= ~RxUnderrun;
}
tp->stats.rx_length_errors++;
if (status & (RxUnderrun | RxFIFOOver))
tp->stats.rx_fifo_errors++;
- if (status & RxOverflow) {
- tp->stats.rx_over_errors++;
- tp->cur_rx = RTL_R16 (RxBufAddr) % RX_BUF_LEN;
- RTL_W16_F (RxBufPtr, tp->cur_rx - 16);
- }
if (status & PCIErr) {
u16 pci_cmd_status;
pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
struct rtl8139_private *tp = dev->priv;
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
- int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
+ int ackstat, status;
+ int link_changed = 0; /* avoid bogus "uninit" warning */
+
+ spin_lock (&tp->lock);
do {
status = RTL_R16 (IntrStatus);
if (status & RxUnderrun)
link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
- /* E. Gill */
- /* In case of an RxFIFOOver we must also clear the RxOverflow
- bit to avoid dropping frames for ever. Believe me, I got a
- lot of troubles copying huge data (approximately 2 RxFIFOOver
- errors per 1GB data transfer).
- The following is written in the 'p-guide.pdf' file (RTL8139(A/B)
- Programming guide V0.1, from 1999/1/15) on page 9 from REALTEC.
- -----------------------------------------------------------
- 2. RxFIFOOvw handling:
- When RxFIFOOvw occurs, all incoming packets are discarded.
- Clear ISR(RxFIFOOvw) doesn't dismiss RxFIFOOvw event. To
- dismiss RxFIFOOvw event, the ISR(RxBufOvw) must be written
- with a '1'.
- -----------------------------------------------------------
- Unfortunately I was not able to find any reason for the
- RxFIFOOver error (I got the feeling this depends on the
- CPU speed, lower CPU speed --> more errors).
- After clearing the RxOverflow bit the transfer of the
- packet was repeated and all data are error free transfered */
- RTL_W16_F (IntrStatus, (status & RxFIFOOver) ? (status | RxOverflow) : status);
-
- DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
- dev->name, status,
- RTL_R16 (IntrStatus));
-
- if ((status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow
- | RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
+ /* The chip takes special action when we clear RxAckBits,
+ * so we clear them later in rtl8139_rx_interrupt
+ */
+ ackstat = status & ~RxAckBits;
+ RTL_W16 (IntrStatus, ackstat);
+
+ DPRINTK ("%s: interrupt status=%#4.4x ackstat=%#4.4x new intstat=%#4.4x.\n",
+ dev->name, ackstat, status, RTL_R16 (IntrStatus));
+
+ if ((status &
+ (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
break;
+ if (netif_running (dev) && (status & RxAckBits))
+ rtl8139_rx_interrupt (dev, tp, ioaddr);
+
/* Check uncommon events with one test. */
if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
RxFIFOOver | TxErr | RxErr))
rtl8139_weird_interrupt (dev, tp, ioaddr,
status, link_changed);
- if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
- rtl8139_rx_interrupt (dev, tp, ioaddr);
-
- if (status & (TxOK | TxErr)) {
- spin_lock (&tp->lock);
+ if (netif_running (dev) && (status & (TxOK | TxErr)))
rtl8139_tx_interrupt (dev, tp, ioaddr);
- spin_unlock (&tp->lock);
- }
boguscnt--;
} while (boguscnt > 0);
if (boguscnt <= 0) {
- printk (KERN_WARNING
- "%s: Too much work at interrupt, "
- "IntrStatus=0x%4.4x.\n", dev->name,
- status);
+ printk (KERN_WARNING "%s: Too much work at interrupt, "
+ "IntrStatus=0x%4.4x.\n", dev->name, status);
/* Clear all interrupt sources. */
RTL_W16 (IntrStatus, 0xffff);
}
+ spin_unlock (&tp->lock);
+
DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
dev->name, RTL_R16 (IntrStatus));
}
{
struct rtl8139_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
- int i;
int ret = 0;
+ int i;
+ unsigned long flags;
DPRINTK ("ENTER\n");
DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
dev->name, RTL_R16 (IntrStatus));
- spin_lock_irq(&tp->lock);
+ spin_lock_irqsave (&tp->lock, flags);
/* Stop the chip's Tx and Rx DMA processes. */
- RTL_W8 (ChipCmd, (RTL_R8 (ChipCmd) & ChipCmdClear));
+ RTL_W8 (ChipCmd, 0);
/* Disable interrupts by clearing the interrupt mask. */
- RTL_W16 (IntrMask, 0x0000);
+ RTL_W16 (IntrMask, 0);
/* Update the error counts. */
tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
RTL_W32 (RxMissed, 0);
- spin_unlock_irq (&tp->lock);
+ spin_unlock_irqrestore (&tp->lock, flags);
synchronize_irq ();
-
- /* snooze for a small bit */
- if (current->need_resched)
- schedule ();
-
free_irq (dev->irq, dev);
rtl8139_tx_clear (tp);
/* Green! Put the chip in low-power mode. */
RTL_W8 (Cfg9346, Cfg9346_Unlock);
- RTL_W8 (Config1, 0x03);
- RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
- MOD_DEC_USE_COUNT;
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+ MOD_DEC_USE_COUNT;
DPRINTK ("EXIT\n");
return 0;
}
-static int mii_ioctl (struct device *dev, struct ifreq *rq, int cmd)
+/* Handle the SIOCETHTOOL ioctl.  Only ETHTOOL_GDRVINFO (report driver
+ * name, version and PCI slot) is implemented; every other ethtool
+ * command returns -EOPNOTSUPP.  @useraddr points at the user-space
+ * ethtool request block, whose first u32 is the command code. */
+static int netdev_ethtool_ioctl (struct device *dev, void *useraddr)
+{
+	struct rtl8139_private *np = dev->priv;
+	u32 ethcmd;
+
+	/* fetch the command code; fault on a bad user pointer */
+	if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
+		return -EFAULT;
+
+	switch (ethcmd) {
+	case ETHTOOL_GDRVINFO:
+	{
+		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+		strcpy (info.driver, DRV_NAME);
+		strcpy (info.version, DRV_VERSION);
+		sprintf(info.bus_info, "PCI slot #%u", PCI_SLOT(np->pci_dev->devfn));
+		if (copy_to_user (useraddr, &info, sizeof (info)))
+			return -EFAULT;
+		return 0;
+	}
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl (struct device *dev, struct ifreq *rq, int cmd)
{
struct rtl8139_private *tp = dev->priv;
- u16 *data = (u16 *) & rq->ifr_data;
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
int rc = 0;
+ int phy = tp->phys[0] & 0x3f;
DPRINTK ("ENTER\n");
+#ifdef notdef
+ data->phy_id &= 0x1f;
+ data->reg_num &= 0x1f;
+#endif
+
switch (cmd) {
- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
- data[0] = tp->phys[0] & 0x3f;
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+
+ case SIOCGMIIPHY: /* Get the address of the PHY in use. */
+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
+ data->phy_id = phy;
/* Fall Through */
- case SIOCDEVPRIVATE + 1: /* Read the specified MII register. */
- data[3] = mdio_read (dev, data[0], data[1] & 0x1f);
+ case SIOCGMIIREG: /* Read the specified MII register. */
+ case SIOCDEVPRIVATE+1: /* binary compat, remove in 2.5 */
+ data->val_out = mdio_read (dev, data->phy_id, data->reg_num);
break;
- case SIOCDEVPRIVATE + 2: /* Write the specified MII register */
+ case SIOCSMIIREG: /* Write the specified MII register */
+ case SIOCDEVPRIVATE+2: /* binary compat, remove in 2.5 */
if (!capable (CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
- if (data[0] == tp->phys[0]) {
- u16 value = data[2];
- switch (data[1]) {
+ if (data->phy_id == phy) {
+ u16 value = data->val_in;
+ switch (data->reg_num) {
case 0:
/* Check for autonegotiation on or reset. */
tp->medialock = (value & 0x9000) ? 0 : 1;
if (tp->medialock)
tp->full_duplex = (value & 0x0100) ? 1 : 0;
break;
- case 4: tp->advertising = value; break;
+ case 4: /* tp->advertising = value; */ break;
}
}
- mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
- break;
+ mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
+ break;
default:
rc = -EOPNOTSUPP;
{
struct rtl8139_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
+ unsigned long flags;
DPRINTK ("ENTER\n");
if (netif_running(dev)) {
- spin_lock_irq(&tp->lock);
+ spin_lock_irqsave (&tp->lock, flags);
tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
RTL_W32 (RxMissed, 0);
- spin_unlock_irq(&tp->lock);
+ spin_unlock_irqrestore (&tp->lock, flags);
}
DPRINTK ("EXIT\n");
return &tp->stats;
}
-
-
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
ethernet_polynomial : 0);
}
- DPRINTK ("EXIT\n");
+ DPRINTK ("EXIT, returning %u\n", crc);
return crc;
}
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next)
- set_bit (ether_crc (ETH_ALEN, mclist->dmi_addr) >> 26,
- mc_filter);
+ i++, mclist = mclist->next) {
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
+ rx_mode |= AcceptMulticast;
+ }
}
spin_lock_irqsave (&tp->lock, flags);
/* We can safely update without stopping the chip. */
- tmp = rtl8139_rx_config | rx_mode |
- (RTL_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- RTL_W32_F (RxConfig, tmp);
+ tmp = rtl8139_rx_config | rx_mode;
+ if (tp->rx_config != tmp) {
+ RTL_W32 (RxConfig, tmp);
+ tp->rx_config = tmp;
+ }
RTL_W32_F (MAR0 + 0, mc_filter[0]);
RTL_W32_F (MAR0 + 4, mc_filter[1]);
{
struct pci_dev *pcidev;
struct pci_device_id *pdid;
+ struct rtl8139_private *tp;
+ int i, initialized;
if (rtl8139_initialized) return -ENODEV;
for (pdid=&rtl8139_pci_tbl[0]; pdid->vendor != 0; pdid++) {
- for (pcidev = pci_find_device(pdid->vendor, pdid->device, NULL); pcidev != NULL; pcidev = pci_find_device(pdid->vendor, pdid->device, pcidev)) {
- if (rtl8139_init_one(pcidev, pdid) != 0) {
- printk(KERN_ERR "Error initializing PCI device %08lX.\n", (unsigned long) pcidev);
+ for (pcidev = rtl8139_pci_find_device(pdid->vendor, pdid->device, NULL); pcidev != NULL; pcidev = rtl8139_pci_find_device(pdid->vendor, pdid->device, pcidev)) {
+ initialized = 0;
+ for (i=0; i<rtl8139_device_count; i++) {
+ tp = (struct rtl8139_private *) rtl8139_device_tab[i]->priv;
+ if (tp->pci_dev == pcidev)
+ initialized = 1;
}
+ if (!initialized)
+ if (rtl8139_init_one(pcidev, pdid) != 0)
+ printk(KERN_ERR "Error initializing PCI device %08lX.\n", (unsigned long) pcidev);
}
}
rtl8139_initialized++;
#ifdef MODULE
-MODULE_AUTHOR (RTL8139_AUTHOR);
-MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
-MODULE_PARM (multicast_filter_limit, "i");
-MODULE_PARM (max_interrupt_work, "i");
-MODULE_PARM (media, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM (full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-
int init_module(void)
{
return rtl8139_init();
tristate 'RealTek 8129/8139 (not 8019/8029!) support' CONFIG_RTL8139
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- tristate 'Alternative RealTek 8139 driver (8139too) support' CONFIG_RTL8139TOO
+ tristate 'Alternative RealTek 8129/8139 driver (8139too) support' CONFIG_RTL8139TOO
if [ "$CONFIG_RTL8139TOO" != "n" ]; then
bool ' Use PIO instead of MMIO' CONFIG_8139TOO_PIO
bool ' Support for automatic channel equalization' CONFIG_8139TOO_TUNE_TWISTER
+ bool ' Support for 8129' CONFIG_8139TOO_8129
fi
fi
bool 'Other ISA cards' CONFIG_NET_ISA
bool 'EISA, VLB, PCI and on board controllers' CONFIG_NET_EISA
if [ "$CONFIG_NET_EISA" = "y" ]; then
tristate 'AMD PCnet32 (VLB and PCI) support' CONFIG_PCNET32
+ tristate 'Adaptec Starfire/DuraLAN support' CONFIG_ADAPTEC_STARFIRE
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- tristate 'Adaptec Starfire support (EXPERIMENTAL)' CONFIG_ADAPTEC_STARFIRE
tristate 'Ansel Communications EISA 3200 support (EXPERIMENTAL)' CONFIG_AC3200
fi
tristate 'Apricot Xen-II on board Ethernet' CONFIG_APRICOT
*/
static int bpq_check_devices(struct device *dev)
{
- struct bpqdev *bpq, *bpq_prev;
+ struct bpqdev *bpq, *bpq_prev, *bpq_next;
int result = 0;
unsigned long flags;
bpq_prev = NULL;
- for (bpq = bpq_devices; bpq != NULL; bpq = bpq->next) {
+ for (bpq = bpq_devices; bpq != NULL; bpq = bpq_next) {
+ bpq_next = bpq->next;
if (!dev_get(bpq->ethname)) {
if (bpq_prev)
bpq_prev->next = bpq->next;
unregister_netdevice(&bpq->axdev);
kfree(bpq);
}
-
- bpq_prev = bpq;
+ else
+ bpq_prev = bpq;
}
restore_flags(flags);
static unsigned char *SCC_DriverName = "scc";
-static struct irqflags { unsigned char used : 1; } Ivec[16];
+static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS]; /* information per channel */
printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);
flag=" ";
- for (k = 0; k < 16; k++)
+ for (k = 0; k < NR_IRQS; k++)
if (Ivec[k].used)
{
printk("%s%d", flag, k);
if (hwcfg.irq == 2) hwcfg.irq = 9;
+	/* Valid indices into Ivec[NR_IRQS] are 0..NR_IRQS-1, so reject
+	 * irq == NR_IRQS as well (the original '>' test was off by one). */
+	if (hwcfg.irq < 0 || hwcfg.irq >= NR_IRQS)
+		return -EINVAL;
+
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
{
if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT, "AX.25 SCC", NULL))
}
}
- for (k=0; k < 16 ; k++)
+ for (k=0; k < NR_IRQS ; k++)
if (Ivec[k].used) free_irq(k, NULL);
restore_flags(flags);
/* MCE and interface config reg */
write_codec(dev, 0x49, fdx ? 0x8 : 0xc);
outb(0xb, WSS_CODEC_IA(dev->base_addr)); /* leave MCE */
- if (SCSTATE->crystal && !fullcalib)
+ if (SCSTATE->crystal && !fullcalib) {
+ restore_flags(flags);
return 0;
+ }
/*
* wait for ACI start
*/
*/
static int lapbeth_check_devices(struct device *dev)
{
- struct lapbethdev *lapbeth, *lapbeth_prev;
+ struct lapbethdev *lapbeth, *lapbeth_prev, *lapbeth_next;
int result = 0;
unsigned long flags;
lapbeth_prev = NULL;
- for (lapbeth = lapbeth_devices; lapbeth != NULL; lapbeth = lapbeth->next) {
+ for (lapbeth = lapbeth_devices; lapbeth != NULL; lapbeth = lapbeth_next) {
+ lapbeth_next = lapbeth->next;
if (!dev_get(lapbeth->ethname)) {
if (lapbeth_prev)
lapbeth_prev->next = lapbeth->next;
unregister_netdev(&lapbeth->axdev);
kfree(lapbeth);
}
-
- lapbeth_prev = lapbeth;
+ else
+ lapbeth_prev = lapbeth;
}
restore_flags(flags);
break;
}
- LMC_COPY_FROM_USER(data, xc.data, xc.len);
+	if(copy_from_user(data, xc.data, xc.len))
+	{
+		kfree(data);
+		/* copy_from_user failure means a bad user-space pointer,
+		 * so report -EFAULT rather than -ENOMEM */
+		ret = -EFAULT;
+		break;
+	}
printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
--- /dev/null
+#ifndef __STARFIRE_KCOMP22_H
+#define __STARFIRE_KCOMP22_H
+
+#include <linux/kcomp.h>
+#include <asm/io.h>
+
+/* MII constants */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_RESET 0x8000 /* Reset the DP83840 */
+
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
+
+#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
+#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
+#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
+#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
+
+/* MII ioctls */
+#define SIOCGMIIPHY 0x8947 /* Get address of MII PHY in use. */
+#define SIOCGMIIREG 0x8948 /* Read MII PHY register. */
+#define SIOCSMIIREG 0x8949 /* Write MII PHY register. */
+
+/* This structure is used in all SIOCxMIIxxx ioctl calls */
+struct mii_ioctl_data {
+ u16 phy_id;
+ u16 reg_num;
+ u16 val_in;
+ u16 val_out;
+};
+
+/* ethtool stuff */
+#define SIOCETHTOOL 0x8946 /* Ethtool interface */
+
+/* This should work for both 32 and 64 bit userland. */
+struct ethtool_cmd {
+ u32 cmd;
+ u32 supported; /* Features this interface supports */
+ u32 advertising; /* Features this interface advertises */
+ u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
+ u8 duplex; /* Duplex, half or full */
+ u8 port; /* Which connector port */
+ u8 phy_address;
+	u8	transceiver;	/* Which transceiver to use */
+ u8 autoneg; /* Enable or disable autonegotiation */
+ u32 maxtxpkt; /* Tx pkts before generating tx int */
+ u32 maxrxpkt; /* Rx pkts before generating rx int */
+ u32 reserved[4];
+};
+
+/* these strings are set to whatever the driver author decides... */
+struct ethtool_drvinfo {
+ u32 cmd;
+ char driver[32]; /* driver short name, "tulip", "eepro100" */
+ char version[32]; /* driver version string */
+ char fw_version[32]; /* firmware version string, if applicable */
+ char bus_info[32]; /* Bus info for this interface. For PCI
+ * devices, use pci_dev->slot_name. */
+ char reserved1[32];
+ char reserved2[32];
+};
+
+/* CMDs currently supported */
+#define ETHTOOL_GSET 0x00000001 /* Get settings. */
+#define ETHTOOL_SSET 0x00000002 /* Set settings, privileged. */
+#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */
+
+/* Indicates what features are supported by the interface. */
+#define SUPPORTED_10baseT_Half (1 << 0)
+#define SUPPORTED_10baseT_Full (1 << 1)
+#define SUPPORTED_100baseT_Half (1 << 2)
+#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_1000baseT_Half (1 << 4)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_Autoneg (1 << 6)
+#define SUPPORTED_TP (1 << 7)
+#define SUPPORTED_AUI (1 << 8)
+#define SUPPORTED_MII (1 << 9)
+#define SUPPORTED_FIBRE (1 << 10)
+
+/* Indicates what features are advertised by the interface. */
+#define ADVERTISED_10baseT_Half (1 << 0)
+#define ADVERTISED_10baseT_Full (1 << 1)
+#define ADVERTISED_100baseT_Half (1 << 2)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_1000baseT_Half (1 << 4)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_Autoneg (1 << 6)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_AUI (1 << 8)
+#define ADVERTISED_MII (1 << 9)
+#define ADVERTISED_FIBRE (1 << 10)
+
+/* The forced speed, 10Mb, 100Mb, gigabit. */
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+
+/* Which connector port. */
+#define PORT_TP 0x00
+#define PORT_AUI 0x01
+#define PORT_MII 0x02
+#define PORT_FIBRE 0x03
+#define PORT_BNC 0x04
+
+/* Which transceiver to use. */
+#define XCVR_INTERNAL 0x00
+#define XCVR_EXTERNAL 0x01
+
+/* Enable or disable autonegotiation. If this is set to enable,
+ * the forced link modes above are completely ignored.
+ */
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+
+static LIST_HEAD(pci_drivers);
+
+struct pci_driver_mapping {
+ struct pci_dev *dev;
+ struct pci_driver *drv;
+ void *driver_data;
+};
+
+struct pci_device_id {
+ unsigned int vendor, device;
+ unsigned int subvendor, subdevice;
+ unsigned int class, class_mask;
+ unsigned long driver_data;
+};
+
+struct pci_driver {
+ struct list_head node;
+ struct pci_dev *dev;
+ char *name;
+ const struct pci_device_id *id_table; /* NULL if wants all devices */
+ int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
+ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
+ void (*suspend)(struct pci_dev *dev); /* Device suspended */
+ void (*resume)(struct pci_dev *dev); /* Device woken up */
+};
+
+#define PCI_MAX_MAPPINGS 16
+static struct pci_driver_mapping drvmap [PCI_MAX_MAPPINGS] = { { NULL, } , };
+
+#define __devinit __init
+#define __devinitdata __initdata
+#define __devexit
+#define MODULE_DEVICE_TABLE(foo,bar)
+#define SET_MODULE_OWNER(dev)
+#define COMPAT_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define COMPAT_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#define PCI_ANY_ID (~0)
+#define IORESOURCE_MEM 2
+#define PCI_DMA_FROMDEVICE 0
+#define PCI_DMA_TODEVICE 0
+#define PCI_SLOT_NAME(pci_dev) ""
+
+#define pci_request_regions(pdev, name) 0
+#define pci_release_regions(pdev) do {} while(0)
+#define del_timer_sync(timer) del_timer(timer)
+#define alloc_etherdev(size) init_etherdev(NULL, size)
+#define register_netdev(dev) 0
+
+/* 2.2 compat: emulate pci_alloc_consistent() with kmalloc().  Returns a
+ * kernel-virtual pointer and stores the matching bus address in
+ * *dma_handle, or returns NULL (leaving *dma_handle untouched) when the
+ * allocation fails.  The original code passed a NULL pointer to
+ * virt_to_bus() on failure and wrote a meaningless bus address. */
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+					 dma_addr_t *dma_handle)
+{
+	void *virt_ptr;
+
+	virt_ptr = kmalloc(size, GFP_KERNEL);
+	if (virt_ptr == NULL)
+		return NULL;
+	*dma_handle = virt_to_bus(virt_ptr);
+	return virt_ptr;
+}
+#define pci_free_consistent(cookie, size, ptr, dma_ptr) kfree(ptr)
+#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+#define pci_dma_sync_single(cookie, address, size, dir)
+#undef pci_resource_flags
+#define pci_resource_flags(dev, i) \
+ ((dev->base_address[i] & IORESOURCE_IO) ? IORESOURCE_IO : IORESOURCE_MEM)
+
+/* Return the driver_data previously stored for @dev with
+ * pci_set_drvdata(), or NULL if @dev has no entry in the compat
+ * drvmap table. */
+static void * pci_get_drvdata (struct pci_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < PCI_MAX_MAPPINGS; i++)
+		if (drvmap[i].dev == dev)
+			return drvmap[i].driver_data;
+
+	return NULL;
+}
+
+/* Store @driver_data in @dev's slot of the compat drvmap table.
+ * Silently does nothing if @dev was never claimed (i.e. it has no
+ * drvmap entry created by pci_announce_device()). */
+static void pci_set_drvdata (struct pci_dev *dev, void *driver_data)
+{
+	int i;
+
+	for (i = 0; i < PCI_MAX_MAPPINGS; i++)
+		if (drvmap[i].dev == dev) {
+			drvmap[i].driver_data = driver_data;
+			return;
+		}
+}
+
+/* Walk the driver's @ids table and return the first entry that matches
+ * @dev, or NULL if none does.  The table ends with an entry whose
+ * vendor, subvendor and class_mask are all zero.  PCI_ANY_ID acts as a
+ * wildcard for each field.  The subsystem IDs are read from PCI config
+ * space here because the 2.2-era struct pci_dev does not cache them. */
+static const struct pci_device_id * __init
+pci_compat_match_device(const struct pci_device_id *ids, struct pci_dev *dev)
+{
+	u16 subsystem_vendor, subsystem_device;
+
+	pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vendor);
+	pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &subsystem_device);
+
+	while (ids->vendor || ids->subvendor || ids->class_mask) {
+		if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
+		    (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
+		    (ids->subvendor == PCI_ANY_ID || ids->subvendor == subsystem_vendor) &&
+		    (ids->subdevice == PCI_ANY_ID || ids->subdevice == subsystem_device) &&
+		    !((ids->class ^ dev->class) & ids->class_mask))
+			return ids;
+		ids++;
+	}
+	return NULL;
+}
+
+/* Offer @dev to @drv: match it against the driver's id table (a NULL
+ * table matches everything), reserve a free slot in the compat drvmap
+ * table, then call the driver's probe() hook.  Returns 1 if the driver
+ * accepted the device; 0 on no id match, no free slot, or probe
+ * failure (the reserved slot is released again in that last case). */
+static int __init
+pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
+{
+	const struct pci_device_id *id;
+	int found, i;
+
+	if (drv->id_table) {
+		id = pci_compat_match_device(drv->id_table, dev);
+		if (!id)
+			return 0;
+	} else
+		id = NULL;
+
+	/* claim the first free drvmap slot for this device */
+	found = 0;
+	for (i = 0; i < PCI_MAX_MAPPINGS; i++)
+		if (!drvmap[i].dev) {
+			drvmap[i].dev = dev;
+			drvmap[i].drv = drv;
+			found = 1;
+			break;
+		}
+
+	if (!found)
+		return 0;
+
+	if (drv->probe(dev, id) >= 0)
+		return 1;
+
+	/* clean up: probe rejected the device, release the slot */
+	drvmap[i].dev = NULL;
+	return 0;
+}
+
+/* 2.2 compat: register @drv, then walk the global pci_devices list and
+ * offer every device not already present in drvmap to the driver.
+ * Returns the number of devices the driver successfully attached to. */
+static int __init
+pci_register_driver(struct pci_driver *drv)
+{
+	struct pci_dev *dev;
+	int count = 0, found, i;
+	list_add_tail(&drv->node, &pci_drivers);
+	for (dev = pci_devices; dev; dev = dev->next) {
+		/* skip devices already claimed (by any driver) */
+		found = 0;
+		for (i = 0; i < PCI_MAX_MAPPINGS && !found; i++)
+			if (drvmap[i].dev == dev)
+				found = 1;
+		if (!found)
+			count += pci_announce_device(drv, dev);
+	}
+	return count;
+}
+
+/* 2.2 compat: unregister @drv, calling its remove() hook for every
+ * device it claimed and releasing the matching drvmap slots.  The slot
+ * test checks drvmap[i].drv as well as .dev: without it, unregistering
+ * one driver would invoke its remove() on devices that were actually
+ * claimed by a different registered driver. */
+static void
+pci_unregister_driver(struct pci_driver *drv)
+{
+	struct pci_dev *dev;
+	int i, found;
+	list_del(&drv->node);
+	for (dev = pci_devices; dev; dev = dev->next) {
+		found = 0;
+		for (i = 0; i < PCI_MAX_MAPPINGS; i++)
+			if (drvmap[i].dev == dev && drvmap[i].drv == drv) {
+				found = 1;
+				break;
+			}
+		if (found) {
+			if (drv->remove)
+				drv->remove(dev);
+			drvmap[i].dev = NULL;
+		}
+	}
+}
+
+/* 2.2 compat: succeed (return 0) only if pci_register_driver() bound
+ * the driver to at least one device; otherwise return -ENODEV so the
+ * module load fails. */
+static inline int pci_module_init(struct pci_driver *drv)
+{
+	if (pci_register_driver(drv))
+		return 0;
+	return -ENODEV;
+}
+
+static struct pci_driver starfire_driver;
+
+/* 2.2-style probe entry point: run the PCI driver registration exactly
+ * once; any later call reports -ENODEV.  The @dev argument is unused
+ * here — devices are discovered through pci_module_init(). */
+int __init starfire_probe(struct net_device *dev)
+{
+	static int __initdata probed = 0;
+
+	if (probed)
+		return -ENODEV;
+	probed++;
+
+	return pci_module_init(&starfire_driver);
+}
+
+#define init_tx_timer(dev, func, timeout)
+#define kick_tx_timer(dev, func, timeout) \
+ if (netif_queue_stopped(dev)) { \
+ /* If this happens network layer tells us we're broken. */ \
+ if (jiffies - dev->trans_start > timeout) \
+ func(dev); \
+ }
+
+#define netif_start_if(dev) dev->start = 1
+#define netif_stop_if(dev) dev->start = 0
+
+#endif /* __STARFIRE_KCOMP22_H */
/*
Written 1998-2000 by Donald Becker.
+ Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
+ send all bug reports to me, and not to Donald Becker, as this code
+ has been modified quite a bit from Donald's original version.
+
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
- Quell bogus error messages, inform about the Tx threshold
- Removed #ifdef CONFIG_PCI, this driver is PCI only
+ LK1.2.9 (Ion Badulescu)
+ - Merged Jeff Garzik's changes from 2.4.4-pre5
+ - Added 2.2.x compatibility stuff required by the above changes
+
+ LK1.2.9a (Ion Badulescu)
+ - More updates from Jeff Garzik
+
+ LK1.3.0 (Ion Badulescu)
+ - Merged zerocopy support
+
+ LK1.3.1 (Ion Badulescu)
+ - Added ethtool support
+ - Added GPIO (media change) interrupt support
+
+ LK1.3.2 (Ion Badulescu)
+ - Fixed 2.2.x compatibility issues introduced in 1.3.1
+ - Fixed ethtool ioctl returning uninitialized memory
+
+ LK1.3.3 (Ion Badulescu)
+ - Initialize the TxMode register properly
+ - Don't dereference dev->priv after freeing it
+
TODO:
- implement tx_timeout() properly
- - support ethtool
*/
-/* These identify the driver base version and may not be removed. */
-static const char version1[] =
-"starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n";
-static const char version2[] =
-" Updates and info at http://www.scyld.com/network/starfire.html\n";
-
-static const char version3[] =
-" (unofficial 2.2.x kernel port, version 1.2.8, March 7, 2001)\n";
-
-/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
+#define DRV_NAME "starfire"
+#define DRV_VERSION "1.03+LK1.3.3"
+#define DRV_RELDATE "July 05, 2001"
/*
* Adaptec's license for their Novell drivers (which is where I got the
- * firmware files) does not allow to redistribute them. Thus, we can't
- * include them with this driver.
+ * firmware files) does not allow one to redistribute them. Thus, we can't
+ * include the firmware with this driver.
*
- * However, an end-user is allowed to download and use them, after
- * converting them to C header files using starfire_firmware.pl.
- * Once that's done, the #undef must be changed into a #define
+ * However, an end-user is allowed to download and use it, after
+ * converting it to C header files using starfire_firmware.pl.
+ * Once that's done, the #undef below must be changed into a #define
* for this driver to really use the firmware. Note that Rx/Tx
* hardware TCP checksumming is not possible without the firmware.
*
* of length 1. If and when this is fixed, the #define below can be removed.
*/
#define HAS_BROKEN_FIRMWARE
+/*
+ * ZEROCOPY is enabled automatically (below) when the kernel provides the
+ * zero-copy infrastructure (MAX_SKB_FRAGS) and the firmware is in use.
+ */
+#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
+#define ZEROCOPY
+#endif
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
/* Used for tuning interrupt latency vs. overhead. */
-static int interrupt_mitigation = 0x0;
+static int interrupt_mitigation;
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
-static int mtu = 0;
+static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
The Starfire has a 512 element hash table based on the Ethernet CRC. */
-static int multicast_filter_limit = 32;
+static int multicast_filter_limit = 512;
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
/*
* packets as the starfire doesn't allow for misaligned DMAs ;-(
* 23/10/2000 - Jes
*
- * Neither does the Alpha. -Ion
+ * The Alpha and the Sparc don't allow unaligned loads, either. -Ion
*/
-#if defined(__ia64__) || defined(__alpha__)
+#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
-static int rx_copybreak = 0;
+static int rx_copybreak /* = 0 */;
#endif
/* Used to pass the media type, etc.
The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
-static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[MAX_UNITS] = {0, };
+static int full_duplex[MAX_UNITS] = {0, };
/* Operational parameters that are set at compile time. */
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (2*HZ)
-
+#define TX_TIMEOUT (2 * HZ)
+
+#ifdef ZEROCOPY
+#if MAX_SKB_FRAGS <= 6
+#define MAX_STARFIRE_FRAGS 6
+#else /* MAX_STARFIRE_FRAGS > 6 */
+#warning This driver will not work with more than 6 skb fragments.
+#warning Turning off zerocopy support.
+#undef ZEROCOPY
+#endif /* MAX_STARFIRE_FRAGS > 6 */
+#endif /* ZEROCOPY */
+
+#ifdef ZEROCOPY
+#define skb_first_frag_len(skb) skb_headlen(skb)
+#else /* not ZEROCOPY */
#define skb_first_frag_len(skb) (skb->len)
-
-#if !defined(__OPTIMIZE__)
-#warning You must compile this file with the correct options!
-#warning See the last lines of the source file.
-#error You must compile this driver with "-O".
-#endif
+#endif /* not ZEROCOPY */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/bitops.h>
-#include <asm/io.h>
+#include <asm/uaccess.h>
#ifdef HAS_FIRMWARE
#include "starfire_firmware.h"
#endif /* HAS_FIRMWARE */
+/* 2.2.x compatibility code */
+#if LINUX_VERSION_CODE < 0x20300
+
+#include "starfire-kcomp22.h"
+
+#else /* LINUX_VERSION_CODE > 0x20300 */
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#define COMPAT_MOD_INC_USE_COUNT
+#define COMPAT_MOD_DEC_USE_COUNT
+
+#define init_tx_timer(dev, func, timeout) \
+ dev->tx_timeout = func; \
+ dev->watchdog_timeo = timeout;
+#define kick_tx_timer(dev, func, timeout)
+
+#define netif_start_if(dev)
+#define netif_stop_if(dev)
+
+#define PCI_SLOT_NAME(pci_dev) (pci_dev)->slot_name
+
+#endif /* LINUX_VERSION_CODE > 0x20300 */
+/* end of compatibility code */
+
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
+KERN_INFO " Updates and info at http://www.scyld.com/network/starfire.html\n"
+KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(interrupt_mitigation, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "Starfire maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "Starfire MTU (all boards)");
+MODULE_PARM_DESC(debug, "Starfire debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "Starfire copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "Starfire: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "Starfire full duplex setting(s) (1)");
/*
Theory of Operation
each structure. There are far too many to document here.
For transmit this driver uses type 0/1 transmit descriptors (depending
-on the presence of the zerocopy patches), and relies on automatic
+on the presence of the zerocopy infrastructure), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.
phase of receive.
A notable aspect of operation is that unaligned buffers are not permitted by
-the Starfire hardware. The IP header at offset 14 in an ethernet frame thus
+the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machine
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
\f
-/* 2.2.x compatibility code */
-#if LINUX_VERSION_CODE < 0x20300
-#include <linux/kcomp.h>
-
-static LIST_HEAD(pci_drivers);
-
-struct pci_driver_mapping {
- struct pci_dev *dev;
- struct pci_driver *drv;
- void *driver_data;
-};
-
-struct pci_device_id {
- unsigned int vendor, device;
- unsigned int subvendor, subdevice;
- unsigned int class, class_mask;
- unsigned long driver_data;
-};
-
-struct pci_driver {
- struct list_head node;
- struct pci_dev *dev;
- char *name;
- const struct pci_device_id *id_table; /* NULL if wants all devices */
- int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
- void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
- void (*suspend)(struct pci_dev *dev); /* Device suspended */
- void (*resume)(struct pci_dev *dev); /* Device woken up */
-};
-
-#define PCI_MAX_MAPPINGS 16
-static struct pci_driver_mapping drvmap [PCI_MAX_MAPPINGS] = { { NULL, } , };
-
-#define __devinit __init
-#define __devinitdata __initdata
-#define __devexit
-#define MODULE_DEVICE_TABLE(foo,bar)
-#define SET_MODULE_OWNER(dev)
-#define COMPAT_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
-#define COMPAT_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#define PCI_ANY_ID (~0)
-#define IORESOURCE_MEM 2
-#define PCI_DMA_FROMDEVICE 0
-#define PCI_DMA_TODEVICE 0
-
-#define request_mem_region(addr, size, name) ((void *)1)
-#define release_mem_region(addr, size)
-#define del_timer_sync(timer) del_timer(timer)
-
-static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- void *virt_ptr;
-
- virt_ptr = kmalloc(size, GFP_KERNEL);
- *dma_handle = virt_to_bus(virt_ptr);
- return virt_ptr;
-}
-#define pci_free_consistent(cookie, size, ptr, dma_ptr) kfree(ptr)
-#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
-#define pci_unmap_single(cookie, address, size, dir)
-#define pci_dma_sync_single(cookie, address, size, dir)
-#undef pci_resource_flags
-#define pci_resource_flags(dev, i) \
- ((dev->base_address[i] & IORESOURCE_IO) ? IORESOURCE_IO : IORESOURCE_MEM)
-
-static void * pci_get_drvdata (struct pci_dev *dev)
-{
- int i;
-
- for (i = 0; i < PCI_MAX_MAPPINGS; i++)
- if (drvmap[i].dev == dev)
- return drvmap[i].driver_data;
-
- return NULL;
-}
-
-static void pci_set_drvdata (struct pci_dev *dev, void *driver_data)
-{
- int i;
-
- for (i = 0; i < PCI_MAX_MAPPINGS; i++)
- if (drvmap[i].dev == dev) {
- drvmap[i].driver_data = driver_data;
- return;
- }
-}
-
-static const struct pci_device_id * __init
-pci_compat_match_device(const struct pci_device_id *ids, struct pci_dev *dev)
-{
- u16 subsystem_vendor, subsystem_device;
-
- pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vendor);
- pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &subsystem_device);
-
- while (ids->vendor || ids->subvendor || ids->class_mask) {
- if ((ids->vendor == PCI_ANY_ID || ids->vendor == dev->vendor) &&
- (ids->device == PCI_ANY_ID || ids->device == dev->device) &&
- (ids->subvendor == PCI_ANY_ID || ids->subvendor == subsystem_vendor) &&
- (ids->subdevice == PCI_ANY_ID || ids->subdevice == subsystem_device) &&
- !((ids->class ^ dev->class) & ids->class_mask))
- return ids;
- ids++;
- }
- return NULL;
-}
-
-static int __init
-pci_announce_device(struct pci_driver *drv, struct pci_dev *dev)
-{
- const struct pci_device_id *id;
- int found, i;
-
- if (drv->id_table) {
- id = pci_compat_match_device(drv->id_table, dev);
- if (!id)
- return 0;
- } else
- id = NULL;
-
- found = 0;
- for (i = 0; i < PCI_MAX_MAPPINGS; i++)
- if (!drvmap[i].dev) {
- drvmap[i].dev = dev;
- drvmap[i].drv = drv;
- found = 1;
- break;
- }
-
- if (!found)
- return 0;
-
- if (drv->probe(dev, id) >= 0)
- return 1;
-
- /* clean up */
- drvmap[i].dev = NULL;
- return 0;
-}
-
-static int __init
-pci_register_driver(struct pci_driver *drv)
-{
- struct pci_dev *dev;
- int count = 0, found, i;
- list_add_tail(&drv->node, &pci_drivers);
- for (dev = pci_devices; dev; dev = dev->next) {
- found = 0;
- for (i = 0; i < PCI_MAX_MAPPINGS && !found; i++)
- if (drvmap[i].dev == dev)
- found = 1;
- if (!found)
- count += pci_announce_device(drv, dev);
- }
- return count;
-}
-
-static void
-pci_unregister_driver(struct pci_driver *drv)
-{
- struct pci_dev *dev;
- int i, found;
- list_del(&drv->node);
- for (dev = pci_devices; dev; dev = dev->next) {
- found = 0;
- for (i = 0; i < PCI_MAX_MAPPINGS; i++)
- if (drvmap[i].dev == dev) {
- found = 1;
- break;
- }
- if (found) {
- if (drv->remove)
- drv->remove(dev);
- drvmap[i].dev = NULL;
- }
- }
-}
-
-static inline int pci_module_init(struct pci_driver *drv)
-{
- if (pci_register_driver(drv))
- return 0;
- return -ENODEV;
-}
-
-static struct pci_driver starfire_driver;
-
-int __init starfire_probe(struct net_device *dev)
-{
- static int __initdata probed = 0;
-
- if (probed)
- return -ENODEV;
- probed++;
-
- return pci_module_init(&starfire_driver);
-}
-
-#define init_tx_timer(dev, func, timeout)
-#define kick_tx_timer(dev, func, timeout) \
- if (netif_queue_stopped(dev)) { \
- /* If this happens network layer tells us we're broken. */ \
- if (jiffies - dev->trans_start > timeout) \
- func(dev); \
- }
-
-#define netif_start_if(dev) dev->start = 1
-#define netif_stop_if(dev) dev->start = 0
-
-#else /* LINUX_VERSION_CODE > 0x20300 */
-
-#define COMPAT_MOD_INC_USE_COUNT
-#define COMPAT_MOD_DEC_USE_COUNT
-
-#define init_tx_timer(dev, func, timeout) \
- dev->tx_timeout = func; \
- dev->watchdog_timeo = timeout;
-#define kick_tx_timer(dev, func, timeout)
-
-#define netif_start_if(dev)
-#define netif_stop_if(dev)
-
-#endif /* LINUX_VERSION_CODE > 0x20300 */
-/* end of compatibility code */
-
-
enum chip_capability_flags {CanHaveMII=1, };
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
-#define MEM_ADDR_SZ 0x80000 /* And maps in 0.5MB(!). */
#if 0
#define ADDR_64BITS 1 /* This chip uses 64 bit addresses. */
/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
const char *name;
- int io_size;
int drv_flags;
} netdrv_tbl[] __devinitdata = {
- { "Adaptec Starfire 6915", MEM_ADDR_SZ, CanHaveMII },
+ { "Adaptec Starfire 6915", CanHaveMII },
};
PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
- TxDescCtrl=0x50090,
+ GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
TxRingHiAddr=0x5009C, /* 64 bit address extension. */
TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
- TxMode=0x55000, TxGfpMem=0x58000, RxGfpMem=0x5a000,
+ TxMode=0x55000, PerfFilterTable=0x56000, HashTable=0x56100,
+ TxGfpMem=0x58000, RxGfpMem=0x5a000,
};
/* Bits in the interrupt status/mask registers. */
RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
+#ifdef ZEROCOPY
+/* Type 0 Tx descriptor. */
+/* If more fragments are needed, don't forget to change the
+ descriptor spacing as well! */
+struct starfire_tx_desc {
+ u32 status;
+ u32 nbufs;
+ u32 first_addr;
+ u16 first_len;
+ u16 total_len;
+ struct {
+ u32 addr;
+ u32 len;
+ } frag[MAX_STARFIRE_FRAGS];
+};
+#else /* not ZEROCOPY */
/* Type 1 Tx descriptor. */
struct starfire_tx_desc {
u32 status; /* Upper bits are status, lower 16 length. */
u32 first_addr;
};
+#endif /* not ZEROCOPY */
enum tx_desc_bits {
TxDescID=0xB0000000,
TxCRCEn=0x01000000, TxDescIntr=0x08000000,
struct tx_ring_info {
struct sk_buff *skb;
dma_addr_t first_mapping;
+#ifdef ZEROCOPY
+ dma_addr_t frag_mapping[MAX_STARFIRE_FRAGS];
+#endif /* ZEROCOPY */
};
-#define MII_CNT 2
+#define PHY_CNT 2
struct netdev_private {
/* Descriptor rings first for alignment. */
struct starfire_rx_desc *rx_ring;
/* The addresses of rx/tx-in-place skbuffs. */
struct rx_ring_info rx_info[RX_RING_SIZE];
struct tx_ring_info tx_info[TX_RING_SIZE];
- /* Pointers to completion queues (full pages). I should cache line pad..*/
- u8 pad0[100];
+ /* Pointers to completion queues (full pages). */
struct rx_done_desc *rx_done_q;
dma_addr_t rx_done_q_dma;
unsigned int rx_done;
struct tx_done_report *tx_done_q;
- unsigned int tx_done;
dma_addr_t tx_done_q_dma;
+ unsigned int tx_done;
struct net_device_stats stats;
- struct timer_list timer; /* Media monitoring timer. */
struct pci_dev *pci_dev;
/* Frequently used values: keep some adjacent for cache effect. */
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
- unsigned int tx_full:1; /* The Tx queue is full. */
- /* These values are keep track of the transceiver/media in use. */
- unsigned int full_duplex:1, /* Full-duplex operation requested. */
- medialock:1, /* Xcvr set to fixed speed/duplex. */
- rx_flowctrl:1,
- tx_flowctrl:1; /* Use 802.3x flow control. */
- unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int tx_full:1, /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ autoneg:1, /* Autonegotiation allowed. */
+ full_duplex:1, /* Full-duplex operation. */
+ speed100:1; /* Set if speed == 100MBit. */
+ unsigned int intr_mitigation;
u32 tx_mode;
u8 tx_threshold;
/* MII transceiver section. */
- int mii_cnt; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
- unsigned char phys[MII_CNT]; /* MII device addresses. */
+ int phy_cnt; /* MII device addresses. */
+ unsigned char phys[PHY_CNT]; /* MII device addresses. */
};
+
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
-static void check_duplex(struct net_device *dev, int startup);
-static void netdev_timer(unsigned long data);
+static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
+static void netdev_media_change(struct net_device *dev);
+
-\f
static int __devinit starfire_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
int i, irq, option, chip_idx = ent->driver_data;
struct net_device *dev;
static int card_idx = -1;
- static int printed_version = 0;
long ioaddr;
int drv_flags, io_size;
int boguscnt;
+ u8 cache;
- card_idx++;
- option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
if (!printed_version++)
- printk(KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
- version1, version2, version3);
+ printk(version);
+#endif
+
+ card_idx++;
if (pci_enable_device (pdev))
return -EIO;
- ioaddr = pci_resource_start (pdev, 0);
- io_size = pci_resource_len (pdev, 0);
- if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_MEM) == 0)) {
- printk (KERN_ERR "starfire %d: no PCI MEM resources, aborting\n", card_idx);
+ ioaddr = pci_resource_start(pdev, 0);
+ io_size = pci_resource_len(pdev, 0);
+ if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
+ printk (KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
return -ENODEV;
}
- dev = init_etherdev(NULL, sizeof(*np));
+ dev = alloc_etherdev(sizeof(*np));
if (!dev) {
- printk (KERN_ERR "starfire %d: cannot alloc etherdev, aborting\n", card_idx);
+ printk (KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
return -ENOMEM;
}
SET_MODULE_OWNER(dev);
irq = pdev->irq;
- if (request_mem_region (ioaddr, io_size, dev->name) == NULL) {
- printk (KERN_ERR "starfire %d: resource 0x%x @ 0x%lx busy, aborting\n",
- card_idx, io_size, ioaddr);
+ if (pci_request_regions (pdev, dev->name)) {
+ printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
goto err_out_free_netdev;
}
ioaddr = (long) ioremap (ioaddr, io_size);
if (!ioaddr) {
- printk (KERN_ERR "starfire %d: cannot remap 0x%x @ 0x%lx, aborting\n",
+ printk (KERN_ERR DRV_NAME " %d: cannot remap 0x%x @ 0x%lx, aborting\n",
card_idx, io_size, ioaddr);
goto err_out_free_res;
}
pci_set_master (pdev);
- printk(KERN_INFO "%s: %s at 0x%lx, ",
- dev->name, netdrv_tbl[chip_idx].name, ioaddr);
+ /* set PCI cache size */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if ((cache << 2) != SMP_CACHE_BYTES) {
+ printk(KERN_INFO " PCI cache line size set incorrectly "
+ "(%i bytes) by BIOS/FW, correcting to %i\n",
+ (cache << 2), SMP_CACHE_BYTES);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ SMP_CACHE_BYTES >> 2);
+ }
+
+#ifdef ZEROCOPY
+ /* Starfire can do SG and TCP/UDP checksumming */
+ dev->features |= NETIF_F_SG;
+#ifdef HAS_FIRMWARE
+ dev->features |= NETIF_F_IP_CSUM;
+#endif /* HAS_FIRMWARE */
+#endif /* ZEROCOPY */
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
- for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
- printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
np->pci_dev = pdev;
drv_flags = netdrv_tbl[chip_idx].drv_flags;
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
- if (option > 0) {
- if (option & 0x200)
- np->full_duplex = 1;
- np->default_port = option & 15;
- if (np->default_port)
- np->medialock = 1;
- }
+ if (option & 0x200)
+ np->full_duplex = 1;
+
if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
np->full_duplex = 1;
if (np->full_duplex)
- np->medialock = 1;
+ np->autoneg = 0;
+ else
+ np->autoneg = 1;
+ np->speed100 = 1;
/* The chip-specific entries in the device structure. */
dev->open = &netdev_open;
dev->stop = &netdev_close;
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &mii_ioctl;
+ dev->do_ioctl = &netdev_ioctl;
if (mtu)
dev->mtu = mtu;
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_cleardev;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, netdrv_tbl[chip_idx].name, ioaddr);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
if (drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
int mii_status;
- for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
- mdio_write(dev, phy, 0, 0x8000);
+ for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
+ mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
udelay(500);
boguscnt = 1000;
while (--boguscnt > 0)
- if ((mdio_read(dev, phy, 0) & 0x8000) == 0)
+ if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
break;
if (boguscnt == 0) {
printk("%s: PHY reset never completed!\n", dev->name);
continue;
}
- mii_status = mdio_read(dev, phy, 1);
- if (mii_status != 0x0000) {
+ mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0) {
np->phys[phy_idx++] = phy;
- np->advertising = mdio_read(dev, phy, 4);
+ np->advertising = mdio_read(dev, phy, MII_ADVERTISE);
printk(KERN_INFO "%s: MII PHY found at address %d, status "
"0x%4.4x advertising %4.4x.\n",
dev->name, phy, mii_status, np->advertising);
break;
}
}
- np->mii_cnt = phy_idx;
+ np->phy_cnt = phy_idx;
}
return 0;
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ iounmap((void *)ioaddr);
err_out_free_res:
- release_mem_region (ioaddr, io_size);
+ pci_release_regions (pdev);
err_out_free_netdev:
- unregister_netdev (dev);
- kfree (dev);
+ unregister_netdev(dev);
+ kfree(dev);
return -ENODEV;
}
-\f
-/* Read the MII Management Data I/O (MDIO) interfaces. */
+/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
return result & 0xffff;
}
+
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
return;
}
-\f
+
static int netdev_open(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
RxDescSpace4,
ioaddr + RxDescQCtrl);
+#ifdef ZEROCOPY
+ /* Set Tx descriptor to type 0 and spacing to 64 bytes. */
+ writel((2 << TxHiPriFIFOThreshShift) |
+ (0 << TxPadLenShift) |
+ (4 << TxDMABurstSizeShift) |
+ TxDescSpace64 | TxDescType0,
+ ioaddr + TxDescCtrl);
+#else /* not ZEROCOPY */
/* Set Tx descriptor to type 1 and padding to 0 bytes. */
writel((2 << TxHiPriFIFOThreshShift) |
(0 << TxPadLenShift) |
(4 << TxDMABurstSizeShift) |
TxDescSpaceUnlim | TxDescType1,
ioaddr + TxDescCtrl);
+#endif /* not ZEROCOPY */
#if defined(ADDR_64BITS) && defined(__alpha__)
/* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
for (i = 0; i < 16; i++) {
u16 *eaddrs = (u16 *)dev->dev_addr;
- long setup_frm = ioaddr + 0x56000 + i*16;
+ long setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
/* Initialize other registers. */
/* Configure the PCI bus bursts and FIFO thresholds. */
- np->tx_mode = 0; /* Initialized when TxMode set. */
+ np->tx_mode = 0x0C04; /* modified when link is up. */
+ writel(0x8000 | np->tx_mode, ioaddr + TxMode);
+ writel(np->tx_mode, ioaddr + TxMode);
np->tx_threshold = 4;
writel(np->tx_threshold, ioaddr + TxThreshold);
- writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
- if (dev->if_port == 0)
- dev->if_port = np->default_port;
+ interrupt_mitigation &= 0x1f;
+ np->intr_mitigation = interrupt_mitigation;
+ writel(np->intr_mitigation, ioaddr + IntrTimerCtrl);
netif_start_if(dev);
netif_start_queue(dev);
printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
set_rx_mode(dev);
- np->advertising = mdio_read(dev, np->phys[0], 4);
- check_duplex(dev, 1);
+ np->advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ check_duplex(dev);
+
+ /* Enable GPIO interrupts on link change */
+ writel(0x0f00ff00, ioaddr + GPIOCtrl);
/* Set the interrupt mask and enable PCI interrupts. */
writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
printk(KERN_DEBUG "%s: Done netdev_open().\n",
dev->name);
- /* Set the timer to check for link beat. */
- init_timer(&np->timer);
- np->timer.expires = jiffies + 3*HZ;
- np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
- add_timer(&np->timer);
-
return 0;
}
-static void check_duplex(struct net_device *dev, int startup)
+
+static void check_duplex(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
- int new_tx_mode ;
+ u16 reg0;
- new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
- | (np->rx_flowctrl ? 0x0400:0);
- if (np->medialock) {
- if (np->full_duplex)
- new_tx_mode |= 2;
- } else {
- int mii_reg5 = mdio_read(dev, np->phys[0], 5);
- int negotiated = mii_reg5 & np->advertising;
- int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
- if (duplex)
- new_tx_mode |= 2;
- if (np->full_duplex != duplex) {
- np->full_duplex = duplex;
- if (debug > 1)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
- " negotiated capability %4.4x.\n", dev->name,
- duplex ? "full" : "half", np->phys[0], negotiated);
- }
- }
- if (new_tx_mode != np->tx_mode) {
- np->tx_mode = new_tx_mode;
- writel(np->tx_mode | 0x8000, ioaddr + TxMode);
- writel(np->tx_mode, ioaddr + TxMode);
- }
-}
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, np->advertising);
+ mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ udelay(500);
+ while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET);
-static void netdev_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
- int next_tick = 60*HZ; /* Check before driver release. */
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
- if (debug > 3) {
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
- }
- check_duplex(dev, 0);
-#if ! defined(final_version)
- /* This is often falsely triggered. */
- if (readl(ioaddr + IntrStatus) & 1) {
- int new_status = readl(ioaddr + IntrStatus);
- /* Bogus hardware IRQ: Fake an interrupt handler call. */
- if (new_status & 1) {
- printk(KERN_ERR "%s: Interrupt blocked, status %8.8x/%8.8x.\n",
- dev->name, new_status, (int)readl(ioaddr + IntrStatus));
- intr_handler(dev->irq, dev, 0);
- }
+ if (np->autoneg) {
+ reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+ if (np->speed100)
+ reg0 |= BMCR_SPEED100;
+ if (np->full_duplex)
+ reg0 |= BMCR_FULLDPLX;
+ printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->full_duplex ? "full" : "half");
}
-#endif
-
- np->timer.expires = jiffies + next_tick;
- add_timer(&np->timer);
+ mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}
+
static void tx_timeout(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
#endif
/* Perhaps we should reinitialize the hardware here. */
- dev->if_port = 0;
/* Stop and restart the chip's Tx processes . */
/* Trigger an immediate transmit demand. */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_info[i].skb = NULL;
np->tx_info[i].first_mapping = 0;
+#ifdef ZEROCOPY
+ {
+ int j;
+ for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
+ np->tx_info[i].frag_mapping[j] = 0;
+ }
+#endif /* ZEROCOPY */
np->tx_ring[i].status = 0;
}
return;
}
+
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = dev->priv;
unsigned int entry;
+#ifdef ZEROCOPY
+ int i;
+#endif
kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
+#if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE)
+ {
+ int has_bad_length = 0;
+
+ if (skb_first_frag_len(skb) == 1)
+ has_bad_length = 1;
+ else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (skb_shinfo(skb)->frags[i].size == 1) {
+ has_bad_length = 1;
+ break;
+ }
+ }
+
+ if (has_bad_length)
+ skb_checksum_help(skb);
+ }
+#endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */
+
np->tx_info[entry].skb = skb;
np->tx_info[entry].first_mapping =
pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
np->tx_ring[entry].first_addr = cpu_to_le32(np->tx_info[entry].first_mapping);
+#ifdef ZEROCOPY
+ np->tx_ring[entry].first_len = cpu_to_le32(skb_first_frag_len(skb));
+ np->tx_ring[entry].total_len = cpu_to_le32(skb->len);
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
+ np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn);
+ np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1);
+#else /* not ZEROCOPY */
/* Add "| TxDescIntr" to generate Tx-done interrupts. */
np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16);
+#endif /* not ZEROCOPY */
if (entry >= TX_RING_SIZE-1) /* Wrap ring */
np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
+#ifdef ZEROCOPY
+ if (skb->ip_summed == CHECKSUM_HW)
+ np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP);
+#endif /* ZEROCOPY */
+
if (debug > 5) {
+#ifdef ZEROCOPY
+ printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n",
+ dev->name, np->cur_tx, entry,
+ le32_to_cpu(np->tx_ring[entry].status),
+ le32_to_cpu(np->tx_ring[entry].nbufs),
+ le32_to_cpu(np->tx_ring[entry].first_len),
+ le32_to_cpu(np->tx_ring[entry].total_len));
+#else /* not ZEROCOPY */
printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n",
dev->name, np->cur_tx, entry,
le32_to_cpu(np->tx_ring[entry].status));
+#endif /* not ZEROCOPY */
}
+#ifdef ZEROCOPY
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i];
+
+ /* we already have the proper value in entry */
+ np->tx_info[entry].frag_mapping[i] =
+ pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
+
+ np->tx_ring[entry].frag[i].addr = cpu_to_le32(np->tx_info[entry].frag_mapping[i]);
+ np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size);
+ if (debug > 5) {
+ printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n",
+ dev->name, np->cur_tx, i,
+ le32_to_cpu(np->tx_ring[entry].frag[i].len));
+ }
+ }
+#endif /* ZEROCOPY */
+
np->cur_tx++;
if (entry >= TX_RING_SIZE-1) /* Wrap ring */
return 0;
}
+
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
np->stats.tx_packets++;
} else if ((tx_status & 0xe0000000) == 0x80000000) {
struct sk_buff *skb;
+#ifdef ZEROCOPY
+ int i;
+#endif /* ZEROCOPY */
u16 entry = tx_status; /* Implicit truncate */
entry /= sizeof(struct starfire_tx_desc);
PCI_DMA_TODEVICE);
np->tx_info[entry].first_mapping = 0;
+#ifdef ZEROCOPY
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].frag_mapping[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].frag_mapping[i] = 0;
+ }
+#endif /* ZEROCOPY */
+
/* Scavenge the descriptor. */
dev_kfree_skb_irq(skb);
netif_wake_queue(dev);
}
+ /* Stats overflow */
+ if (intr_status & IntrStatsMax) {
+ get_stats(dev);
+ }
+
+ /* Media change interrupt. */
+ if (intr_status & IntrLinkChange)
+ netdev_media_change(dev);
+
/* Abnormal error summary/uncommon events handlers. */
if (intr_status & IntrAbnormalSummary)
netdev_error(dev, intr_status);
#endif
}
+
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len);
#endif
} else {
- char *temp;
-
pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb = np->rx_info[entry].skb;
- temp = skb_put(skb, pkt_len);
+ skb_put(skb, pkt_len);
np->rx_info[entry].skb = NULL;
np->rx_info[entry].mapping = 0;
}
writew(entry, dev->base_addr + RxDescQIdx);
}
- if (debug > 5
- || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
- printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x %d.\n",
- np->rx_done, desc_status,
- memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x.\n",
+ np->rx_done, desc_status);
/* Restart Rx engine if stopped. */
return 0;
}
-static void netdev_error(struct net_device *dev, int intr_status)
+
+static void netdev_media_change(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 reg0, reg1, reg4, reg5;
+ u32 new_tx_mode;
+
+ /* reset status first */
+ mdio_read(dev, np->phys[0], MII_BMCR);
+ mdio_read(dev, np->phys[0], MII_BMSR);
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+ reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (reg1 & BMSR_LSTATUS) {
+ /* link is up */
+ if (reg0 & BMCR_ANENABLE) {
+ /* autonegotiation is enabled */
+ reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ reg5 = mdio_read(dev, np->phys[0], MII_LPA);
+ if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
+ np->speed100 = 1;
+ np->full_duplex = 1;
+ } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
+ np->speed100 = 1;
+ np->full_duplex = 0;
+ } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
+ np->speed100 = 0;
+ np->full_duplex = 1;
+ } else {
+ np->speed100 = 0;
+ np->full_duplex = 0;
+ }
+ } else {
+ /* autonegotiation is disabled */
+ if (reg0 & BMCR_SPEED100)
+ np->speed100 = 1;
+ else
+ np->speed100 = 0;
+ if (reg0 & BMCR_FULLDPLX)
+ np->full_duplex = 1;
+ else
+ np->full_duplex = 0;
+ }
+ printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->full_duplex ? "full" : "half");
- if (intr_status & IntrLinkChange) {
- printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
- " %4.4x, partner %4.4x.\n", dev->name,
- mdio_read(dev, np->phys[0], 4),
- mdio_read(dev, np->phys[0], 5));
- check_duplex(dev, 0);
- }
- if (intr_status & IntrStatsMax) {
- get_stats(dev);
+ new_tx_mode = np->tx_mode & ~0x2; /* duplex setting */
+ if (np->full_duplex)
+ new_tx_mode |= 2;
+ if (np->tx_mode != new_tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | 0x8000, ioaddr + TxMode);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+ } else {
+ printk(KERN_DEBUG "%s: Link is down\n", dev->name);
}
+}
+
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = dev->priv;
+
/* Came close to underrunning the Tx FIFO, increase threshold. */
if (intr_status & IntrTxDataLow) {
writel(++np->tx_threshold, dev->base_addr + TxThreshold);
np->stats.tx_fifo_errors++;
}
+
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
return &np->stats;
}
+
/* The little-endian AUTODIN II ethernet CRC calculations.
A big-endian version is also available.
This is slow but compact code. Do not use this routine for bulk data,
return crc;
}
+
static void set_rx_mode(struct net_device *dev)
{
long ioaddr = dev->base_addr;
int i;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
} else if (dev->mc_count <= 15) {
- /* Use the 16 element perfect filter. */
- long filter_addr = ioaddr + 0x56000 + 1*16;
+ /* Use the 16 element perfect filter, skip first entry. */
+ long filter_addr = ioaddr + PerfFilterTable + 1 * 16;
for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
i++, mclist = mclist->next) {
u16 *eaddrs = (u16 *)mclist->dmi_addr;
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
- set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
+ int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
+
+ *fptr |= cpu_to_le32(1 << (bit_nr & 31));
}
- /* Clear the perfect filter list. */
- filter_addr = ioaddr + 0x56000 + 1*16;
+ /* Clear the perfect filter list, skip first entry. */
+ filter_addr = ioaddr + PerfFilterTable + 1 * 16;
for (i = 1; i < 16; i++) {
writew(0xffff, filter_addr); filter_addr += 4;
writew(0xffff, filter_addr); filter_addr += 4;
writew(0xffff, filter_addr); filter_addr += 8;
}
- for (filter_addr=ioaddr + 0x56100, i=0; i < 32; filter_addr+= 16, i++)
+ for (filter_addr = ioaddr + HashTable, i=0; i < 32; filter_addr+= 16, i++)
writew(mc_filter[i], filter_addr);
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
writel(rx_mode, ioaddr + RxFilterMode);
}
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
+ struct ethtool_cmd ecmd;
struct netdev_private *np = dev->priv;
- u16 *data = (u16 *)&rq->ifr_data;
+
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
+
+ switch (ecmd.cmd) {
+ case ETHTOOL_GSET:
+ ecmd.supported =
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII;
+
+ ecmd.advertising = ADVERTISED_MII;
+ if (np->advertising & ADVERTISE_10HALF)
+ ecmd.advertising |= ADVERTISED_10baseT_Half;
+ if (np->advertising & ADVERTISE_10FULL)
+ ecmd.advertising |= ADVERTISED_10baseT_Full;
+ if (np->advertising & ADVERTISE_100HALF)
+ ecmd.advertising |= ADVERTISED_100baseT_Half;
+ if (np->advertising & ADVERTISE_100FULL)
+ ecmd.advertising |= ADVERTISED_100baseT_Full;
+ if (np->autoneg) {
+ ecmd.advertising |= ADVERTISED_Autoneg;
+ ecmd.autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd.autoneg = AUTONEG_DISABLE;
+
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.phy_address = np->phys[0];
+ ecmd.speed = np->speed100 ? SPEED_100 : SPEED_10;
+ ecmd.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd.maxtxpkt = TX_RING_SIZE;
+ ecmd.maxrxpkt = np->intr_mitigation; /* not 100% accurate */
+
+
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return 0;
+
+ case ETHTOOL_SSET: {
+ u16 autoneg, speed100, full_duplex;
+
+ autoneg = (ecmd.autoneg == AUTONEG_ENABLE);
+ speed100 = (ecmd.speed == SPEED_100);
+ full_duplex = (ecmd.duplex == DUPLEX_FULL);
+
+ np->autoneg = autoneg;
+ if (speed100 != np->speed100 ||
+ full_duplex != np->full_duplex) {
+ np->speed100 = speed100;
+ np->full_duplex = full_duplex;
+ /* change advertising bits */
+ np->advertising &= ~(ADVERTISE_10HALF |
+ ADVERTISE_10FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_100FULL |
+ ADVERTISE_100BASE4);
+ if (speed100) {
+ if (full_duplex)
+ np->advertising |= ADVERTISE_100FULL;
+ else
+ np->advertising |= ADVERTISE_100HALF;
+ } else {
+ if (full_duplex)
+ np->advertising |= ADVERTISE_10FULL;
+ else
+ np->advertising |= ADVERTISE_10HALF;
+ }
+ }
+ check_duplex(dev);
+ return 0;
+ }
+
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info;
+ memset(&info, 0, sizeof(info));
+ info.cmd = ecmd.cmd;
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ *info.fw_version = 0;
+ strcpy(info.bus_info, PCI_SLOT_NAME(np->pci_dev));
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = dev->priv;
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
switch(cmd) {
- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
- data[0] = np->phys[0] & 0x1f;
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+
+ /* Legacy mii-diag interface */
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
+ data->phy_id = np->phys[0] & 0x1f;
/* Fall Through */
- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
- data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
return 0;
- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (data[0] == np->phys[0]) {
- u16 value = data[2];
- switch (data[1]) {
+ if (data->phy_id == np->phys[0]) {
+ u16 value = data->val_in;
+ switch (data->reg_num) {
case 0:
- if (value & 0x9000) /* Autonegotiation. */
- np->medialock = 0;
+ if (value & (BMCR_RESET | BMCR_ANENABLE))
+ /* Autonegotiation. */
+ np->autoneg = 1;
else {
- np->full_duplex = (value & 0x0100) ? 1 : 0;
- np->medialock = 1;
+ np->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
+ np->autoneg = 0;
}
break;
- case 4: np->advertising = value; break;
+ case 4:
+ np->advertising = value;
+ break;
}
- check_duplex(dev, 0);
+ check_duplex(dev);
}
- mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
return 0;
default:
return -EOPNOTSUPP;
netif_stop_queue(dev);
netif_stop_if(dev);
- del_timer_sync(&np->timer);
-
if (debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
dev->name, (int)readl(ioaddr + IntrStatus));
}
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = np->tx_info[i].skb;
+#ifdef ZEROCOPY
+ int j;
+#endif /* ZEROCOPY */
if (skb == NULL)
continue;
pci_unmap_single(np->pci_dev,
np->tx_info[i].first_mapping = 0;
dev_kfree_skb(skb);
np->tx_info[i].skb = NULL;
+#ifdef ZEROCOPY
+ for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
+ if (np->tx_info[i].frag_mapping[j]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].frag_mapping[j],
+ skb_shinfo(skb)->frags[j].size,
+ PCI_DMA_TODEVICE);
+ np->tx_info[i].frag_mapping[j] = 0;
+ } else
+ break;
+#endif /* ZEROCOPY */
}
COMPAT_MOD_DEC_USE_COUNT;
BUG();
np = dev->priv;
-
- unregister_netdev(dev);
- iounmap((char *)dev->base_addr);
-
- release_mem_region(pci_resource_start (pdev, 0),
- pci_resource_len (pdev, 0));
-
if (np->tx_done_q)
- pci_free_consistent(np->pci_dev, PAGE_SIZE,
+ pci_free_consistent(pdev, PAGE_SIZE,
np->tx_done_q, np->tx_done_q_dma);
if (np->rx_done_q)
- pci_free_consistent(np->pci_dev, PAGE_SIZE,
+ pci_free_consistent(pdev,
+ sizeof(struct rx_done_desc) * DONE_Q_SIZE,
np->rx_done_q, np->rx_done_q_dma);
if (np->tx_ring)
- pci_free_consistent(np->pci_dev, PAGE_SIZE,
+ pci_free_consistent(pdev, PAGE_SIZE,
np->tx_ring, np->tx_ring_dma);
if (np->rx_ring)
- pci_free_consistent(np->pci_dev, PAGE_SIZE,
+ pci_free_consistent(pdev, PAGE_SIZE,
np->rx_ring, np->rx_ring_dma);
- kfree(dev);
+ unregister_netdev(dev);
+ iounmap((char *)dev->base_addr);
+ pci_release_regions(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(dev); /* Will also free np!! */
}
static struct pci_driver starfire_driver = {
- name: "starfire",
+ name: DRV_NAME,
probe: starfire_init_one,
remove: starfire_remove_one,
id_table: starfire_pci_tbl,
static int __init starfire_init (void)
{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
return pci_module_init (&starfire_driver);
}
/*
* Local variables:
- * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
- * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c starfire.c"
+ * simple-compile-command: "gcc -DMODULE -O2 -c starfire.c"
* c-basic-offset: 8
* tab-width: 8
* End:
{
int nprinted, len, size;
struct pci_dev *dev;
- static int complained = 0;
# define MSG "\nwarning: page-size limit reached!\n"
/* reserve same for truncation warning message: */
-/* $Id: envctrl.c,v 1.9.2.2 2000/11/08 09:43:04 davem Exp $
+/* $Id: envctrl.c,v 1.9.2.3 2001/06/19 16:49:44 davem Exp $
* envctrl.c: Temperature and Fan monitoring on Machines providing it.
*
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n"));
cmd->result = DID_OK << 16;
done(cmd);
+ restore_flags(flags);
return(0);
}
* Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
+ * Shared IRQ supported added 7/7/2001 Alan Cox <alan@redhat.com>
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
/* Register the IRQ with the kernel */
retcode = request_irq( interrupt_level,
- do_fdomain_16x0_intr, 0, "fdomain", NULL);
+ do_fdomain_16x0_intr, PCI_bus?SA_SHIRQ:0, "fdomain", NULL);
if (retcode < 0) {
if (retcode == -EINVAL) {
interruptions while this routine is
running. */
- /* sti(); Yes, we really want sti() here if we want to lock up our machine */
-
+ /* Check for other IRQ sources */
+ if((inb(TMC_Status_port)&0x01)==0)
+ return;
+
+ /* It is our IRQ */
outb( 0x00, Interrupt_Cntl_port );
/* We usually have one spurious interrupt after each command. Ignore it. */
case WRITE_6:
case WRITE_BUFFER:
case MODE_SELECT:
+ case MODE_SELECT_10:
cmd->control_flags = CFLAG_WRITE;
break;
case REQUEST_SENSE:
#include <asm/pgtable.h>
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
-static int load_aout_library(int fd);
+static int load_aout_library(struct file *file);
static int aout_core_dump(long signr, struct pt_regs * regs, struct file *);
extern void dump_thread(struct pt_regs *, struct user *);
* libraries. There is no binary dependent code anywhere else.
*/
-static inline int do_load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
struct exec ex;
struct file * file;
}
}
beyond_if:
+ set_binfmt(&aout_format);
if (current->exec_domain && current->exec_domain->module)
__MOD_DEC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_DEC_USE_COUNT(current->binfmt->module);
current->exec_domain = lookup_exec_domain(current->personality);
- current->binfmt = &aout_format;
if (current->exec_domain && current->exec_domain->module)
__MOD_INC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_INC_USE_COUNT(current->binfmt->module);
set_brk(current->mm->start_brk, current->mm->brk);
return 0;
}
-
-static int
-load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_aout_library(struct file *file)
{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_aout_binary(bprm, regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
-static inline int
-do_load_aout_library(int fd)
-{
- struct file * file;
struct inode * inode;
unsigned long bss, start_addr, len;
unsigned long error;
loff_t offset = 0;
struct exec ex;
- retval = -EACCES;
- file = fget(fd);
- if (!file)
- goto out;
- if (!file->f_op)
- goto out_putf;
inode = file->f_dentry->d_inode;
retval = -ENOEXEC;
retval = 0;
out_putf:
- fput(file);
-out:
return retval;
}
-static int
-load_aout_library(int fd)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_aout_library(fd);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
-
int __init init_aout_binfmt(void)
{
return register_binfmt(&aout_format);
#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_elf_library(int fd);
+static int load_elf_library(struct file *file);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
extern void dump_thread(struct pt_regs *, struct user *);
#define INTERPRETER_ELF 2
-static inline int
-do_load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
struct file * file;
struct dentry *interpreter_dentry = NULL; /* to shut gcc up */
if (interpreter_type != INTERPRETER_AOUT)
sys_close(elf_exec_fileno);
+ set_binfmt(&elf_format);
if (current->exec_domain && current->exec_domain->module)
__MOD_DEC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_DEC_USE_COUNT(current->binfmt->module);
current->exec_domain = lookup_exec_domain(current->personality);
- current->binfmt = &elf_format;
if (current->exec_domain && current->exec_domain->module)
__MOD_INC_USE_COUNT(current->exec_domain->module);
- if (current->binfmt && current->binfmt->module)
- __MOD_INC_USE_COUNT(current->binfmt->module);
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
goto out;
}
-static int
-load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_elf_binary(bprm, regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
/* This is really simpleminded and specialized - we are loading an
a.out library that is given an ELF header. */
-static inline int
-do_load_elf_library(int fd)
+static int load_elf_library(struct file *file)
{
- struct file * file;
struct dentry * dentry;
struct inode * inode;
struct elf_phdr *elf_phdata;
struct elfhdr elf_ex;
loff_t offset = 0;
- error = -EACCES;
- file = fget(fd);
- if (!file || !file->f_op)
- goto out;
dentry = file->f_dentry;
inode = dentry->d_inode;
out_free_ph:
kfree(elf_phdata);
out_putf:
- fput(file);
-out:
return error;
}
-static int load_elf_library(int fd)
-{
- int retval;
-
- MOD_INC_USE_COUNT;
- retval = do_load_elf_library(fd);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
/*
* Note that some platforms still use traditional core dumps and not
* the ELF core dump. Each platform can select it as appropriate.
#define EM86_INTERP "/usr/bin/em86"
#define EM86_I_NAME "em86"
-static int do_load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
+static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
{
char *interp, *i_name, *i_arg;
struct dentry * dentry;
return search_binary_handler(bprm, regs);
}
-static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
-{
- int retval;
- MOD_INC_USE_COUNT;
- retval = do_load_em86(bprm,regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
struct linux_binfmt em86_format = {
module: THIS_MODULE,
load_binary: load_em86,
char binfmt_java_interpreter[65] = _PATH_JAVA;
char binfmt_java_appletviewer[65] = _PATH_APPLET;
-static int do_load_java(struct linux_binprm *bprm,struct pt_regs *regs)
+static int load_java(struct linux_binprm *bprm,struct pt_regs *regs)
{
char *i_name;
int len;
return search_binary_handler(bprm,regs);
}
-static int do_load_applet(struct linux_binprm *bprm,struct pt_regs *regs)
+static int load_applet(struct linux_binprm *bprm,struct pt_regs *regs)
{
char *i_name;
struct dentry * dentry;
return search_binary_handler(bprm,regs);
}
-static int load_java(struct linux_binprm *bprm,struct pt_regs *regs)
-{
- int retval;
- MOD_INC_USE_COUNT;
- retval = do_load_java(bprm,regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
static struct linux_binfmt java_format = {
module: THIS_MODULE,
load_binary: load_java,
};
-static int load_applet(struct linux_binprm *bprm,struct pt_regs *regs)
-{
- int retval;
- MOD_INC_USE_COUNT;
- retval = do_load_applet(bprm,regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
static struct linux_binfmt applet_format = {
module: THIS_MODULE,
load_binary: load_applet,
char *iname_addr = iname;
int retval;
- MOD_INC_USE_COUNT;
retval = -ENOEXEC;
if (!enabled)
goto _ret;
if (retval >= 0)
retval = search_binary_handler(bprm, regs);
_ret:
- MOD_DEC_USE_COUNT;
return retval;
}
#include <linux/binfmts.h>
#include <linux/init.h>
-static int do_load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
{
char *cp, *i_name, *i_name_start, *i_arg;
struct dentry * dentry;
return search_binary_handler(bprm,regs);
}
-static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
-{
- int retval;
- MOD_INC_USE_COUNT;
- retval = do_load_script(bprm,regs);
- MOD_DEC_USE_COUNT;
- return retval;
-}
-
struct linux_binfmt script_format = {
module: THIS_MODULE,
load_binary: load_script,
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
+#define __NO_VERSION__
+#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
*/
asmlinkage int sys_uselib(const char * library)
{
- int fd, retval;
+ int retval;
struct file * file;
struct linux_binfmt * fmt;
+ char * tmp = getname(library);
lock_kernel();
- fd = sys_open(library, 0, 0);
- retval = fd;
- if (fd < 0)
+ retval = PTR_ERR(tmp);
+ if (IS_ERR(tmp))
+ goto out;
+
+ file = filp_open(tmp, 0, 0);
+ putname(tmp);
+
+ retval = PTR_ERR(file);
+ if (IS_ERR(file))
goto out;
- file = fget(fd);
+
+ retval = -EINVAL;
+ if (!S_ISREG(file->f_dentry->d_inode->i_mode))
+ goto out_fput;
+
retval = -ENOEXEC;
- if (file && file->f_dentry && file->f_op && file->f_op->read) {
+ if (file->f_op && file->f_op->read) {
for (fmt = formats ; fmt ; fmt = fmt->next) {
- int (*fn)(int) = fmt->load_shlib;
+ int (*fn)(struct file *) = fmt->load_shlib;
if (!fn)
continue;
- /* N.B. Should use file instead of fd */
- retval = fn(fd);
+ retval = fn(file);
if (retval != -ENOEXEC)
break;
}
}
+out_fput:
fput(file);
- sys_close(fd);
out:
unlock_kernel();
return retval;
return retval;
}
+void set_binfmt(struct linux_binfmt *new)
+{
+ struct linux_binfmt *old = current->binfmt;
+ if (new && new->module)
+ __MOD_INC_USE_COUNT(new->module);
+ current->binfmt = new;
+ if (old && old->module)
+ __MOD_DEC_USE_COUNT(old->module);
+}
+
int do_coredump(long signr, struct pt_regs * regs)
{
struct linux_binfmt *binfmt;
#include <linux/nls.h>
#include <linux/locks.h>
#include <linux/init.h>
+#include <asm/page.h>
/* Forward declarations */
static struct inode_operations ntfs_dir_inode_operations;
NTFS_SB(vol)=sb;
ntfs_debug(DEBUG_OTHER, "Done to init volume\n");
+ /* Check the cluster size is within allowed blocksize limits. */
+ if (vol->clustersize > PAGE_SIZE) {
+ ntfs_error("Partition cluster size is not supported yet (it "
+ "is > max kernel blocksize).\n");
+ goto ntfs_read_super_unl;
+ }
+
/* Inform the kernel that a device block is a NTFS cluster */
sb->s_blocksize=vol->clustersize;
for(i=sb->s_blocksize,sb->s_blocksize_bits=0;i != 1;i>>=1)
AMD_GENERIC,
AMD_IRONGATE,
ALI_M1541,
- ALI_GENERIC
+ ALI_M1621,
+ ALI_M1631,
+ ALI_M1632,
+ ALI_M1641,
+ ALI_M1647,
+ ALI_M1651,
+ ALI_GENERIC,
+ SVWRKS_HE,
+ SVWRKS_LE,
+ SVWRKS_GENERIC
};
typedef struct _agp_version {
struct linux_binfmt * next;
struct module *module;
int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
- int (*load_shlib)(int fd);
+ int (*load_shlib)(struct file *file);
int (*core_dump)(long signr, struct pt_regs * regs, struct file *file);
unsigned long min_coredump;
};
+extern void set_binfmt(struct linux_binfmt *);
extern int do_coredump(long, struct pt_regs *);
extern int register_binfmt(struct linux_binfmt *);
extern int unregister_binfmt(struct linux_binfmt *);
#define IIOCDRVCTL _IO('I',128)
+/* cisco hdlck device private ioctls */
+#define SIOCGKEEPPERIOD (SIOCDEVPRIVATE + 0)
+#define SIOCSKEEPPERIOD (SIOCDEVPRIVATE + 1)
+#define SIOCGDEBSERINT (SIOCDEVPRIVATE + 2)
+#define SIOCSDEBSERINT (SIOCDEVPRIVATE + 3)
+
/* Packet encapsulations for net-interfaces */
#define ISDN_NET_ENCAP_ETHER 0
#define ISDN_NET_ENCAP_RAWIP 1
((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) )
/* Timer-delays and scheduling-flags */
-#define ISDN_TIMER_RES 3 /* Main Timer-Resolution */
-#define ISDN_TIMER_02SEC (HZ/(ISDN_TIMER_RES+1)/5) /* Slow-Timer1 .2 sec */
-#define ISDN_TIMER_1SEC (HZ/(ISDN_TIMER_RES+1)) /* Slow-Timer2 1 sec */
+#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */
+#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */
+#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */
#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */
#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */
#define ISDN_TIMER_MODEMREAD 1
#define ISDN_TIMER_MODEMXMIT 8
#define ISDN_TIMER_NETDIAL 16
#define ISDN_TIMER_NETHANGUP 32
-#define ISDN_TIMER_KEEPALIVE 128 /* Cisco-Keepalive */
#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */
#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \
ISDN_TIMER_MODEMXMIT)
#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \
- ISDN_TIMER_NETDIAL | ISDN_TIMER_KEEPALIVE | \
- ISDN_TIMER_CARRIER)
+ ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER)
/* Timeout-Values for isdn_net_dial() */
#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
#ifdef CONFIG_ISDN_X25
struct concap_device_ops *dops; /* callbacks used by encapsulator */
#endif
- int cisco_loop; /* Loop counter for Cisco-SLARP */
+ /* use an own struct for that in later versions */
ulong cisco_myseq; /* Local keepalive seq. for Cisco */
+ ulong cisco_mineseen; /* returned keepalive seq. from remote */
ulong cisco_yourseq; /* Remote keepalive seq. for Cisco */
+ int cisco_keepalive_period; /* keepalive period */
+ ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */
+ char cisco_line_state; /* state of line according to keepalive packets */
+ char cisco_debserint; /* debugging flag of cisco hdlc with slarp */
+ struct timer_list cisco_timer;
struct tq_struct tqueue;
} isdn_net_local;
(mod_member_present((mod), can_unload) && (mod)->can_unload \
? (mod)->can_unload() : atomic_read(&(mod)->uc.usecount))
+extern int try_inc_mod_count(struct module *mod);
/* Indirect stringification. */
#define __MODULE_STRING_1(x) #x
#ifdef CONFIG_MODULES
EXPORT_SYMBOL(get_module_symbol);
+EXPORT_SYMBOL(try_inc_mod_count);
#endif
EXPORT_SYMBOL(get_options);
EXPORT_SYMBOL(prepare_binprm);
EXPORT_SYMBOL(compute_creds);
EXPORT_SYMBOL(remove_arg_zero);
+EXPORT_SYMBOL(set_binfmt);
/* execution environment registration */
EXPORT_SYMBOL(lookup_exec_domain);
return error;
}
+static spinlock_t unload_lock = SPIN_LOCK_UNLOCKED;
+int try_inc_mod_count(struct module *mod)
+{
+ int res = 1;
+ if (mod) {
+ spin_lock(&unload_lock);
+ if (mod->flags & MOD_DELETED)
+ res = 0;
+ else
+ __MOD_INC_USE_COUNT(mod);
+ spin_unlock(&unload_lock);
+ }
+ return res;
+}
+
asmlinkage int
sys_delete_module(const char *name_user)
{
}
put_mod_name(name);
error = -EBUSY;
- if (mod->refs != NULL || __MOD_IN_USE(mod))
+ if (mod->refs != NULL)
goto out;
- free_module(mod, 0);
- error = 0;
+ spin_lock(&unload_lock);
+ if (!__MOD_IN_USE(mod)) {
+ mod->flags |= MOD_DELETED;
+ spin_unlock(&unload_lock);
+ free_module(mod, 0);
+ error = 0;
+ } else {
+ spin_unlock(&unload_lock);
+ }
goto out;
}
something_changed = 0;
for (mod = module_list; mod != &kernel_module; mod = next) {
next = mod->next;
+ spin_lock(&unload_lock);
if (mod->refs == NULL
&& (mod->flags & MOD_AUTOCLEAN)
&& (mod->flags & MOD_RUNNING)
&& !__MOD_IN_USE(mod)) {
if ((mod->flags & MOD_VISITED)
&& !(mod->flags & MOD_JUST_FREED)) {
+ spin_unlock(&unload_lock);
mod->flags &= ~MOD_VISITED;
} else {
+ mod->flags |= MOD_DELETED;
+ spin_unlock(&unload_lock);
free_module(mod, 1);
something_changed = 1;
}
+ } else {
+ spin_unlock(&unload_lock);
}
}
if (something_changed)
/* Let the module clean up. */
- mod->flags |= MOD_DELETED;
if (mod->flags & MOD_RUNNING)
{
if(mod->cleanup)
return -ENOSYS;
}
+int try_inc_mod_count(struct module *mod)
+{
+ return 1;
+}
+
#endif /* CONFIG_MODULES */
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.77.2.7 2000/10/29 11:41:15 davem Exp $
+ * Version: $Id: arp.c,v 1.77.2.8 2001/07/06 11:36:55 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
(addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
(IN_DEV_PROXY_ARP(in_dev) || pneigh_lookup(&arp_tbl, &tip, dev, 0)))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
- neigh_release(n);
+ if (n)
+ neigh_release(n);
if (skb->stamp.tv_sec == 0 ||
skb->pkt_type == PACKET_HOST ||