-ALS-007 based sound cards
+ALS-007/ALS-100/ALS-200 based sound cards
=========================
-Support for sound cards based around the Avance Logic ALS-007 chip is
-included. The ALS-007 is a single chip PnP sound solution which is mostly
-hardware compatible with the Sound Blaster 16 card, with most differences
-occurring in the use of the mixer registers. For this reason the ALS-007
-code is integrated as part of the Sound Blaster 16 driver (adding only 800
-bytes to the SB16 driver).
+Support for sound cards based around the Avance Logic
+ALS-007/ALS-100/ALS-200 chips is included. Each of these chips is a
+single-chip PnP sound solution which is mostly hardware compatible
+with the Sound Blaster 16 card, with most differences occurring in
+the use of the mixer registers. For this reason the ALS code is
+integrated as part of the Sound Blaster 16 driver (adding only 800
+bytes to the SB16 driver).
-To use an ALS-007 sound card under Linux, enable the following options in the
+To use an ALS sound card under Linux, enable the following options in the
sound configuration section of the kernel config:
- 100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support
- FM synthesizer (YM3812/OPL-3) support
-Since the ALS-007 is a PnP card, the sound driver probably should be
+Since the ALS-007/100/200 is a PnP card, the sound driver probably should be
compiled as a module, with the isapnptools used to wake up the sound card.
Set the "I/O base for SB", "Sound Blaster IRQ" and "Sound Blaster DMA" (8 bit -
either 0, 1 or 3) to the values used in your particular installation (they
Jonathan Woithe
jwoithe@physics.adelaide.edu.au
30 March 1998
+
+Modified 2000-02-26 by Dave Forrest, drf5n@virginia.edu to add ALS100/ALS200
in /etc/modules.conf of:
alias parport_lowlevel parport_pc
- alias parport_pc io=0x378 irq=none
+ options parport_pc io=0x378 irq=none
alias char-major-81 videodev
alias char-major-81-0 c-qcam
DIGI INTL. EPCA DRIVER
P: Daniel Taylor
-M: support@dgii.com
-M: danielt@dgii.com
-L: digilnux@dgii.com
+M: support@digi.com
+M: danielt@digi.com
+L: digilnux@digi.com
S: Maintained
DIGI RIGHTSWITCH NETWORK DRIVER
VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 15
-EXTRAVERSION = pre10
+EXTRAVERSION = pre11
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
return 0;
}
+extern int x86_udelay_tsc;
+
__initfunc(void time_init(void))
{
xtime.tv_sec = get_cmos_time();
if (tsc_quotient) {
fast_gettimeoffset_quotient = tsc_quotient;
use_tsc = 1;
+ /*
+ * We should be more selective here I suspect
+ * and just enable this for the newer Intel chips?
+ */
+ x86_udelay_tsc = 1;
#ifndef do_gettimeoffset
do_gettimeoffset = do_fast_gettimeoffset;
#endif
#include <linux/sched.h>
#include <linux/delay.h>
+#include <asm/delay.h>
#ifdef __SMP__
#include <asm/smp.h>
#endif
-void __delay(unsigned long loops)
+int x86_udelay_tsc;
+
+/*
+ * Do a udelay using the TSC for any CPU that happens
+ * to have one that we trust. This could be optimised to avoid
+ * the multiply per loop, but it's a delay loop so who are we kidding...
+ */
+
+static void __rdtsc_delay(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ rdtscl(bclock);
+ do
+ {
+ rdtscl(now);
+ }
+ while((now-bclock) < loops);
+}
+
+/*
+ * Non TSC based delay loop for 386, 486, MediaGX
+ */
+
+static void __loop_delay(unsigned long loops)
{
int d0;
__asm__ __volatile__(
:"0" (loops));
}
-inline void __const_udelay(unsigned long xloops)
+void __delay(unsigned long loops)
+{
+ if(x86_udelay_tsc)
+ __rdtsc_delay(loops);
+ else
+ __loop_delay(loops);
+}
+
+void __const_udelay(unsigned long xloops)
{
int d0;
__asm__("mull %0"
__delay(xloops);
}
+/*
+ * Do a udelay using the delay/jump loop. This won't work on
+ * the next Intel CPUs and isn't ideal on anything with APM.
+ */
+
+
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
+
+
ppc_md.kbd_leds = NULL;
ppc_md.kbd_init_hw = NULL;
#ifdef CONFIG_MAGIC_SYSRQ
- ppc_md.ppc_kbd_sysrq_xlate = NULL;
+ ppc_md.kbd_sysrq_xlate = NULL;
#endif
}
static __inline__ void write_user_long(unsigned long kvaddr, unsigned long val)
{
- __asm__ __volatile__("stxa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (__pa(kvaddr)), "i" (ASI_PHYS_USE_EC));
+ *(unsigned long *)kvaddr = val;
+ flush_dcache_page(kvaddr & PAGE_MASK);
}
static __inline__ void write_user_int(unsigned long kvaddr, unsigned int val)
{
- __asm__ __volatile__("stwa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (__pa(kvaddr)), "i" (ASI_PHYS_USE_EC));
+ *(unsigned int *)kvaddr = val;
+ flush_dcache_page(kvaddr & PAGE_MASK);
}
static inline unsigned long get_long(struct task_struct * tsk,
pgaddr = page + (addr & ~PAGE_MASK);
write_user_long(pgaddr, data);
-
- __asm__ __volatile__("
- membar #StoreStore
- flush %0
-" : : "r" (pgaddr & ~7) : "memory");
}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
pgaddr = page + (addr & ~PAGE_MASK);
write_user_int(pgaddr, data);
-
- __asm__ __volatile__("
- membar #StoreStore
- flush %0
-" : : "r" (pgaddr & ~7) : "memory");
}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
EXPORT_SYMBOL_PRIVATE(flushw_user);
+EXPORT_SYMBOL(flush_dcache_page);
+
EXPORT_SYMBOL(mstk48t02_regs);
EXPORT_SYMBOL(request_fast_irq);
EXPORT_SYMBOL(sparc_alloc_io);
-/* $Id: sys_sparc32.c,v 1.107.2.7 2000/01/24 11:36:50 jj Exp $
+/* $Id: sys_sparc32.c,v 1.107.2.8 2000/02/28 04:09:49 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
case NFSCTL_GETFH:
err = nfs_getfh32_trans(karg, arg32);
break;
+ case NFSCTL_LOCKD:
+ /* No arguments, no translations... */
+ err = 0;
+ break;
default:
err = -EINVAL;
break;
void __flush_dcache_range(unsigned long start, unsigned long end)
{
- unsigned long va;
- int n = 0;
-
- for (va = start; va < end; va += 32) {
- spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
- if (++n >= 512)
- break;
+ start &= PAGE_MASK;
+ end = PAGE_ALIGN(end);
+ while (start < end) {
+ flush_dcache_page(start);
+ start += PAGE_SIZE;
}
}
ba,pt %xcc, 3b
flush %g6
+ .align 64
+ .globl flush_dcache_page
+flush_dcache_page:
+ sub %o0, %g4, %o0
+ clr %o1
+ srlx %o0, 11, %o0
+ sethi %hi(1 << 14), %o2
+1: ldxa [%o1] ASI_DCACHE_TAG, %o3
+ andn %o3, 0x3, %o3
+ cmp %o0, %o3
+ bne,pt %xcc, 2f
+ nop
+ stxa %g0, [%o1] ASI_DCACHE_TAG
+2: add %o1, (1 << 5), %o1
+ cmp %o1, %o2
+ bne,pt %xcc, 1b
+ nop
+ /* The I-cache does not snoop local stores so we
+ * better flush that too.
+ */
+ ba,pt %xcc, flush_icache_page
+ sllx %o0, 11, %o0
+
#ifdef __SMP__
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.
#undef BLOCKMOVE
#define Z_WAKE
static char rcsid[] =
-"$Revision: 2.3.2.4 $$Date: 2000/01/17 09:19:40 $";
+"$Revision: 2.3.2.5 $$Date: 2000/01/19 14:35:33 $";
/*
* linux/drivers/char/cyclades.c
* void cleanup_module(void);
*
* $Log: cyclades.c,v $
+ * Revision 2.3.2.5 2000/01/19 14:35:33 ivan
+ * Fixed bug in cy_set_termios on CRTSCTS flag turnoff.
+ *
* Revision 2.3.2.4 2000/01/17 09:19:40 ivan
* Fixed SMP locking in Cyclom-Y interrupt handler.
*
tristate 'I2O support' CONFIG_I2O
-dep_tristate ' I2O PCI support' CONFIG_I2O_PCI $CONFIG_I2O
+dep_tristate ' I2O PCI support' CONFIG_I2O_PCI $CONFIG_I2O $CONFIG_PCI
dep_tristate ' I2O Block OSM' CONFIG_I2O_BLOCK $CONFIG_I2O
dep_tristate ' I2O SCSI OSM' CONFIG_I2O_SCSI $CONFIG_I2O $CONFIG_SCSI
static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n"
-"eepro100.c: $Revision: 1.18 $ 1999/12/29 Modified by Andrey V. Savochkin <saw@msu.ru>\n";
+"eepro100.c: $Revision: 1.20 $ 1999/12/29 Modified by Andrey V. Savochkin <saw@msu.ru>\n";
/* A few user-configurable values that apply to all boards.
First set is undocumented and spelled per Intel recommendations. */
CmdIntr = 0x20000000, /* Interrupt after completion. */
CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
};
-/* Clear CmdSuspend (1<<30) atomically.
- Otherwise the command status in the lower 16 bits may be reset after
- an asynchronous change. Previous driver version used separate 16 bit fields
- for commands and statuses. --SAW
+/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
+ status bits. Previous driver versions used separate 16 bit fields for
+ commands and statuses. --SAW
*/
-#ifdef __i386__
-#define speedo_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-#define speedo_clear_mask(mask, addr) \
-__asm__ __volatile__("lock; andl %0,%1" \
-: : "r" (~(mask)),"m" (speedo_fool_gcc(addr)) : "memory")
-#define clear_suspend(cmd) speedo_clear_mask(CmdSuspend, &(cmd)->cmd_status)
+#if defined(__LITTLE_ENDIAN)
+#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+#elif defined(__BIG_ENDIAN)
+#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[0] &= ~0x4000
#else
-#define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
+#error Unsupported byteorder
#endif
enum SCBCmdBits {
*/
static void
-lance_purge_tx_ring(struct device *dev)
+lance_purge_ring(struct device *dev)
{
struct lance_private *lp = (struct lance_private *)dev->priv;
int i;
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = 0;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb)
+ dev_kfree_skb(skb);
+ }
for (i = 0; i < TX_RING_SIZE; i++) {
if (lp->tx_skbuff[i]) {
dev_kfree_skb(lp->tx_skbuff[i]);
if (must_reinit ||
(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
- lance_purge_tx_ring(dev);
+ lance_purge_ring(dev);
lance_init_ring(dev, GFP_ATOMIC);
}
outw(0x0000, dev->base_addr + LANCE_ADDR);
outw(0x0004, ioaddr+LANCE_DATA);
lp->stats.tx_errors++;
#ifndef final_version
- {
+ if (lance_debug > 3) {
int i;
printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
dev->tbusy=0;
dev->trans_start = jiffies;
- return 0;
+ return 1;
}
if (lance_debug > 3) {
}
free_irq(dev->irq, dev);
- /* Free all the skbuffs in the Rx and Tx queues. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = lp->rx_skbuff[i];
- lp->rx_skbuff[i] = 0;
- lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
- if (skb)
- dev_kfree_skb(skb);
- }
- for (i = 0; i < TX_RING_SIZE; i++) {
- if (lp->tx_skbuff[i])
- dev_kfree_skb(lp->tx_skbuff[i]);
- lp->tx_skbuff[i] = 0;
- }
+ lance_purge_ring(dev);
MOD_DEC_USE_COUNT;
return 0;
* them into a new buffer??
*/
- if(len > LMC_MTU * 0.75){
+ if(len > (3*LMC_MTU)/4){
/*
* If it's a large packet don't copy it just hand it up
*/
* Al Longyear <longyear@netcom.com>
* Extensively rewritten by Paul Mackerras <paulus@cs.anu.edu.au>
*
- * ==FILEVERSION 990510==
+ * ==FILEVERSION 20000223==
*
* NOTE TO MAINTAINERS:
* If you modify this file at all, please set the number above to the
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+/* Bits in ppp->state */
+#define PUSHING 0 /* currently executing in ppp_tty_push */
+#define WAKEUP 1 /* someone else wants to also */
+#define XMITFULL 2 /* someone owns ppp->tpkt */
+#define FLUSHING 3 /* discard output */
+
+/* Non-blocking locking. */
+static inline int xmit_trylock(struct ppp *ppp)
+{
+ wmb();
+ if (test_and_set_bit(PUSHING, &ppp->state))
+ return 0;
+ return 1;
+}
+
+static inline void xmit_unlock(struct ppp *ppp)
+{
+ wmb();
+ clear_bit(PUSHING, &ppp->state);
+}
+
/*************************************************************
* LINE DISCIPLINE SUPPORT
* The following code implements the PPP line discipline
{
ppp->escape = 0;
ppp->toss = 0xE0;
- ppp->tty_pushing = 0;
+ ppp->state = 0;
memset (ppp->xmit_async_map, 0, sizeof (ppp->xmit_async_map));
ppp->xmit_async_map[0] = 0xffffffff;
CHECK_PPP(0);
- if (ppp->tpkt != NULL)
+ if (test_and_set_bit(XMITFULL, &ppp->state))
return -1;
ppp->tpkt = skb;
static int
ppp_tty_sync_push(struct ppp *ppp)
{
- int sent;
- struct tty_struct *tty = ppp2tty(ppp);
- unsigned long flags;
+ int sent, done = 0;
+ struct tty_struct *tty;
CHECK_PPP(0);
- if (ppp->tpkt == NULL)
- return 0;
-
- /* prevent reentrancy with tty_pushing flag */
- save_flags(flags);
- cli();
- if (ppp->tty_pushing) {
- /* record wakeup attempt so we don't lose */
- /* a wakeup call while doing push processing */
- ppp->woke_up=1;
- restore_flags(flags);
+ set_bit(WAKEUP, &ppp->state);
+ if (!xmit_trylock(ppp))
return 0;
- }
- ppp->tty_pushing = 1;
- restore_flags(flags);
-
- if (tty == NULL || tty->disc_data != (void *) ppp)
- goto flush;
-
- for(;;){
- ppp->woke_up=0;
-
+
+ again:
+ clear_bit(WAKEUP, &ppp->state);
+
+ if (ppp->tpkt != 0) {
/* Note: Sync driver accepts complete frame or nothing */
- tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
- sent = tty->driver.write(tty, 0, ppp->tpkt->data, ppp->tpkt->len);
- if (sent < 0) {
+ tty = ppp2tty(ppp);
+ sent = -1;
+ if (test_bit(FLUSHING, &ppp->state))
+ sent = ppp->tpkt->len;
+ else if (tty != NULL && tty->disc_data == (void *) ppp) {
+ tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ sent = tty->driver.write(tty, 0, ppp->tpkt->data, ppp->tpkt->len);
+ }
+ if (sent < 0)
/* write error (possible loss of CD) */
/* record error and discard current packet */
ppp->stats.ppp_oerrors++;
- break;
- }
- ppp->stats.ppp_obytes += sent;
- if (sent < ppp->tpkt->len) {
- /* driver unable to accept frame just yet */
- save_flags(flags);
- cli();
- if (ppp->woke_up) {
- /* wake up called while processing */
- /* try to send the frame again */
- restore_flags(flags);
- continue;
- }
- /* wait for wakeup callback to try send again */
- ppp->tty_pushing = 0;
- restore_flags(flags);
- return 0;
+ else
+ ppp->stats.ppp_obytes += sent;
+ if (sent < 0 || sent >= ppp->tpkt->len) {
+ /* driver accepted the frame or we got an error */
+ kfree_skb(ppp->tpkt);
+ ppp->tpkt = 0;
+ wmb();
+ clear_bit(XMITFULL, &ppp->state);
+ done = 1;
}
- break;
}
-flush:
- /* done with current packet (sent or discarded) */
- kfree_skb(ppp->tpkt);
- ppp->tpkt = 0;
- ppp->tty_pushing = 0;
- return 1;
+ if (ppp->tpkt == 0)
+ clear_bit(FLUSHING, &ppp->state);
+
+ xmit_unlock(ppp);
+ if (test_and_clear_bit(WAKEUP, &ppp->state))
+ if (xmit_trylock(ppp))
+ goto again;
+
+ return done;
}
/*
ppp_tty_push(ppp);
- if (ppp->tpkt != NULL)
+ if (test_and_set_bit(XMITFULL, &ppp->state))
return -1;
- ppp->tpkt = skb;
ppp->tpkt_pos = 0;
+ wmb();
+ ppp->tpkt = skb;
return ppp_tty_push(ppp);
}
ppp_tty_push(struct ppp *ppp)
{
int avail, sent, done = 0;
- struct tty_struct *tty = ppp2tty(ppp);
-
+ struct tty_struct *tty;
+
if (ppp->flags & SC_SYNC)
return ppp_tty_sync_push(ppp);
CHECK_PPP(0);
- if (ppp->tty_pushing) {
- ppp->woke_up = 1;
+
+ set_bit(WAKEUP, &ppp->state);
+ if (!xmit_trylock(ppp))
return 0;
- }
- if (tty == NULL || tty->disc_data != (void *) ppp)
- goto flush;
- while (ppp->optr < ppp->olim || ppp->tpkt != 0) {
- ppp->tty_pushing = 1;
- mb();
- ppp->woke_up = 0;
- avail = ppp->olim - ppp->optr;
- if (avail > 0) {
+
+ again:
+ clear_bit(WAKEUP, &ppp->state);
+
+ avail = ppp->olim - ppp->optr;
+ if (avail > 0) {
+ tty = ppp2tty(ppp);
+ sent = -1;
+ if (test_bit(FLUSHING, &ppp->state)) {
+ sent = avail;
+ } else if (tty != NULL && tty->disc_data == (void *) ppp) {
tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
sent = tty->driver.write(tty, 0, ppp->optr, avail);
- if (sent < 0)
- goto flush; /* error, e.g. loss of CD */
- ppp->stats.ppp_obytes += sent;
- ppp->optr += sent;
- if (sent < avail) {
+ }
+ if (sent < 0) {
+ /* error, e.g. loss of CD */
+ ppp->stats.ppp_oerrors++;
+ ppp->optr = ppp->olim;
+ if (ppp->tpkt != 0) {
+ kfree_skb(ppp->tpkt);
+ ppp->tpkt = 0;
wmb();
- ppp->tty_pushing = 0;
- mb();
- if (ppp->woke_up)
- continue;
- return done;
+ clear_bit(XMITFULL, &ppp->state);
+ done = 1;
}
+ } else {
+ ppp->stats.ppp_obytes += sent;
+ ppp->optr += sent;
}
- if (ppp->tpkt != 0)
- done = ppp_async_encode(ppp);
- wmb();
- ppp->tty_pushing = 0;
}
- return done;
-flush:
- ppp->tty_pushing = 1;
- mb();
- ppp->stats.ppp_oerrors++;
- if (ppp->tpkt != 0) {
- kfree_skb(ppp->tpkt);
- ppp->tpkt = 0;
- done = 1;
+ if (ppp->optr == ppp->olim) {
+ if (ppp->tpkt != 0) {
+ done |= ppp_async_encode(ppp);
+ goto again;
+ } else {
+ /* buffers are empty */
+ clear_bit(FLUSHING, &ppp->state);
+ }
}
- ppp->optr = ppp->olim;
- wmb();
- ppp->tty_pushing = 0;
+
+ xmit_unlock(ppp);
+ if (test_and_clear_bit(WAKEUP, &ppp->state))
+ if (xmit_trylock(ppp))
+ goto again;
+
return done;
}
kfree_skb(ppp->tpkt);
ppp->tpkt = 0;
+ wmb();
+ clear_bit(XMITFULL, &ppp->state);
return 1;
}
ppp_tty_flush_output(struct ppp *ppp)
{
struct sk_buff *skb;
- int done = 0;
+ set_bit(FLUSHING, &ppp->state);
while ((skb = skb_dequeue(&ppp->xmt_q)) != NULL)
kfree_skb(skb);
- ppp->tty_pushing = 1;
- mb();
- ppp->optr = ppp->olim;
- if (ppp->tpkt != NULL) {
- kfree_skb(ppp->tpkt);
- ppp->tpkt = 0;
- done = 1;
- }
- wmb();
- ppp->tty_pushing = 0;
- if (done)
- ppp_output_wakeup(ppp);
}
/*
}
/*
- * The dev->tbusy field acts as a lock to allow only
+ * The ppp->xmit_busy field acts as a lock to allow only
* one packet to be processed at a time. If we can't
* get the lock, try again later.
* We deliberately queue as little as possible inside
* to overwrite timers like TLAN_TIMER_ACTIVITY
* Patch from John Cagle <john.cagle@compaq.com>
* - Removed dependency of HZ being 100.
- *
+ * - Statistics is now fixed.
+ * - Minor stuff.
********************************************************************************/
u8 irq;
u8 rev;
- printk( "TLAN driver, v%d.%d, (C) 1997-8 Caldera, Inc.\n",
+ printk(KERN_INFO "TLAN driver, v%d.%d, (C) 1997-8 Caldera, Inc.\n",
TLanVersionMajor,
TLanVersionMinor
);
priv->nextDevice = TLanDevices;
TLanDevices = dev;
TLanDevicesInstalled++;
- printk("TLAN: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ printk(KERN_INFO "TLAN: %s irq=%2d io=%04x, %s, Rev. %d\n",
dev->name,
(int) dev->irq,
(int) dev->base_addr,
priv->debug = dev->mem_end;
- printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ printk(KERN_INFO "TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n",
TLanVersionMajor,
TLanVersionMinor,
dev->name,
if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) {
status = MII_GS_LINK;
- printk( "TLAN: %s: Link forced.\n", dev->name );
+ printk(KERN_INFO "TLAN: %s: Link forced.\n", dev->name );
} else {
TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
udelay( 1000 );
TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
if ( status & MII_GS_LINK ) {
- printk( "TLAN: %s: Link active.\n", dev->name );
+ printk(KERN_INFO "TLAN: %s: Link active.\n", dev->name );
TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
}
}
outl( virt_to_bus( priv->rxList ), dev->base_addr + TLAN_CH_PARM );
outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
} else {
- printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
+ printk(KERN_INFO "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
return;
}
* but the card need additional time to start AN.
* .5 sec should be plenty extra.
*/
- printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
+ printk(KERN_INFO "TLAN: %s: Starting autonegotiation.\n", dev->name );
TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_PHY_FINISH_AN );
return;
}
priv->phyNum = 0;
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (4*(HZ/1000)), TLAN_TIMER_PHY_PDOWN );
+ TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
return;
} else if ( priv->phyNum == 0 ) {
TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
/* Wait for 8 sec to give the process
* more time. Perhaps we should fail after a while.
*/
- printk( "TLAN: Giving autonegotiation more time.\n" );
+ printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n" );
TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
return;
}
- printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
+ printk(KERN_INFO "TLAN: %s: Autonegotiation complete.\n", dev->name );
TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
mode = an_adv & an_lpa & 0x03E0;
priv->phyNum = 0;
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (400*(HZ/1000)), TLAN_TIMER_PHY_PDOWN );
+ TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
return;
}
if ( priv->phyNum == 0 ) {
if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX );
- printk( "TLAN: Starting internal PHY with DUPLEX\n" );
+ printk(KERN_INFO "TLAN: Starting internal PHY with DUPLEX\n" );
} else {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
- printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ printk(KERN_INFO "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
}
}
{"YAMAHA","CDR102","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
{"YAMAHA","CRW8424S","1.0", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
{"YAMAHA","CRW6416S","1.0c", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
{"RELISYS", "Scorpio", "*", BLIST_NOLUN}, /* responds to all LUN */
/*
j->flags.cringing = 0;
ixj_ring_off(board);
} else {
- if (jiffies - j->ring_cadence_jif >= (.5 * hertz)) {
+ if (jiffies - j->ring_cadence_jif >= (hertz/2)) {
j->ring_cadence_t--;
if (j->ring_cadence_t == -1)
j->ring_cadence_t = 15;
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>
+#include <asm/pgtable.h>
#define NR_SIZES 7
static char buffersize_index[65] =
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
{
struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
- int block, nr;
+ int block, nr, need_dcache_flush;
if (!PageLocked(page))
panic("brw_page: page not locked for I/O");
return -ENOMEM;
}
nr = 0;
+ need_dcache_flush = 0;
next = bh;
do {
struct buffer_head * tmp;
ll_rw_block(READ, 1, &tmp);
wait_on_buffer(tmp);
}
- if (rw == READ)
+ if (rw == READ) {
memcpy(next->b_data, tmp->b_data, size);
- else {
+ need_dcache_flush = 1;
+ } else {
memcpy(tmp->b_data, next->b_data, size);
mark_buffer_dirty(tmp, 0);
}
set_bit(BH_Dirty, &next->b_state);
arr[nr++] = next;
} while (prev = next, (next = next->b_this_page) != NULL);
+ if (need_dcache_flush)
+ flush_dcache_page(page_address(page));
prev->b_this_page = bh;
if (nr) {
#include <asm/uaccess.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
/*
* Fill in the supplied page for mmap
struct dentry * dentry = file->f_dentry;
struct inode * inode = dentry->d_inode;
if (MSDOS_SB(inode->i_sb)->cvf_format &&
- MSDOS_SB(inode->i_sb)->cvf_format->cvf_readpage)
- return MSDOS_SB(inode->i_sb)->cvf_format->cvf_readpage(inode,page);
+ MSDOS_SB(inode->i_sb)->cvf_format->cvf_readpage) {
+ int ret = MSDOS_SB(inode->i_sb)->cvf_format->cvf_readpage(inode,page);
+
+ flush_dcache_page(page_address(page));
+ return ret;
+ }
printk("fat_readpage called with no handler (shouldn't happen)\n");
return -1;
#include <asm/segment.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
} while (count);
memset(buffer, 0, count);
+ flush_dcache_page(page_address(page));
set_bit(PG_uptodate, &page->flags);
result = 0;
memset((char *) address + result, 0, PAGE_SIZE - result);
}
nfs_refresh_inode(req->ra_inode, &req->ra_fattr);
+ flush_dcache_page(address);
set_bit(PG_uptodate, &page->flags);
succ++;
} else {
#include <linux/init.h>
#include <asm/uaccess.h>
+#include <asm/pgtable.h>
static int inline min(int a, int b)
{
if (readlen < PAGE_SIZE) {
memset((void *)(buf+readlen),0,PAGE_SIZE-readlen);
}
+ flush_dcache_page(page_address(page));
set_bit(PG_uptodate, &page->flags);
result = 0;
}
#include <asm/uaccess.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#include <linux/smbno.h>
#include <linux/smb_fs.h>
} while (count);
memset(buffer, 0, count);
+ flush_dcache_page(page_address(page));
set_bit(PG_uptodate, &page->flags);
result = 0;
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
+#include <linux/smp_lock.h>
#include <net/scm.h>
#include <net/ip.h>
return err;
}
+struct data_callback {
+ struct tq_struct cb;
+ struct sock *sk;
+};
/*
* N.B. What happens if we're in here when the socket closes??
*/
static void
-smb_data_callback(struct sock *sk, int len)
+found_data(struct sock *sk)
{
- struct socket *socket = sk->socket;
+ /*
+ * FIXME: copied from sock_def_readable, it should be a call to
+ * server->data_ready();
+ */
+ if(!sk->dead) {
+ wake_up_interruptible(sk->sleep);
+ sock_wake_async(sk->socket,1);
+ }
+}
+
+static void
+smb_data_callback(void* ptr)
+{
+ struct data_callback* job=ptr;
+ struct socket *socket = job->sk->socket;
unsigned char peek_buf[4];
int result;
mm_segment_t fs;
fs = get_fs();
set_fs(get_ds());
+ lock_kernel();
while (1)
{
result = -EIO;
- if (sk->dead)
+ if (job->sk->dead)
{
#ifdef SMBFS_PARANOIA
printk("smb_data_callback: sock dead!\n");
if (result == -EAGAIN)
break;
}
+ unlock_kernel();
set_fs(fs);
if (result != -EAGAIN)
- {
- wake_up_interruptible(sk->sleep);
+ found_data(job->sk);
+ kfree(ptr);
+}
+
+static void
+smb_data_ready(struct sock *sk, int len)
+{
+ struct data_callback* job;
+ job = kmalloc(sizeof(struct data_callback),GFP_ATOMIC);
+ if(job == 0) {
+ printk("smb_data_ready(): lost SESSION KEEPALIVE due to OOM.\n");
+ found_data(sk);
+ return;
}
+ job->cb.next = NULL;
+ job->cb.sync = 0;
+ job->cb.routine = smb_data_callback;
+ job->cb.data = job;
+ job->sk = sk;
+ queue_task(&job->cb, &tq_scheduler);
}
int
/*
* Install the callback atomically to avoid races ...
*/
- data_ready = xchg(&sk->data_ready, smb_data_callback);
- if (data_ready != smb_data_callback)
+ data_ready = xchg(&sk->data_ready, smb_data_ready);
+ if (data_ready != smb_data_ready)
{
server->data_ready = data_ready;
error = 0;
*/
data_ready = xchg(&sk->data_ready, server->data_ready);
server->data_ready = NULL;
- if (data_ready != smb_data_callback)
+ if (data_ready != smb_data_ready)
{
printk("smb_dont_catch_keepalive: "
- "sk->data_callback != smb_data_callback\n");
+ "sk->data_ready != smb_data_ready\n");
}
error = 0;
out:
printk("smb_close_socket: closing socket %p\n", server_sock(server));
#endif
#ifdef SMBFS_PARANOIA
-if (server_sock(server)->sk->data_ready == smb_data_callback)
+if (server_sock(server)->sk->data_ready == smb_data_ready)
printk("smb_close_socket: still catching keepalives!\n");
#endif
server->sock_file = NULL;
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
/*
* Use a few helper functions to hide the ugly broken ASN
extern int do_check_pgt_cache(int, int);
+#define flush_dcache_page(page) do { } while (0)
+
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
* Delay routines calling functions in arch/i386/lib/delay.c
*/
-extern void __udelay(unsigned long usecs);
extern void __const_udelay(unsigned long usecs);
+extern void __udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
+
#define udelay(n) (__builtin_constant_p(n) ? \
__const_udelay((n) * 0x10c6ul) : \
__udelay(n))
-
+
#endif /* defined(_I386_DELAY_H) */
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
/*
* TLB flushing:
}
}
+#define flush_dcache_page(page) do { } while (0)
/*
* flush all user-space atc entries.
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_page_to_ram)(unsigned long page);
#define flush_icache_range(start, end) flush_cache_all()
+#define flush_dcache_page(page) do { } while (0)
/* TLB flushing:
*
ppc_md.kbd_init_hw();
}
-#define kbd_sysrq_xlate (ppc_md.ppc_kbd_sysrq_xlate)
+#define kbd_sysrq_xlate (ppc_md.kbd_sysrq_xlate)
-extern unsigned long SYSRQ_KEY;
+#define SYSRQ_KEY 0x54
/* resource allocation */
#define kbd_request_region()
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_page_to_ram(unsigned long);
+#define flush_dcache_page(page) do { } while (0)
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(struct task_struct *tsk, unsigned long address);
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
/*
* TLB flushing:
#define flush_cache_range(mm,start,end) BTFIXUP_CALL(flush_cache_range)(mm,start,end)
#define flush_cache_page(vma,addr) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
BTFIXUPDEF_CALL(void, flush_tlb_all, void)
BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
+ __flush_dcache_range((unsigned long)pdma_base,
+ (unsigned long)pdma_base + pdma_areasize);
pdma_base = 0;
}
}
/* These operations are unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_icache_range(start, end) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
+extern void flush_dcache_page(unsigned long page);
extern void __flush_dcache_range(unsigned long start, unsigned long end);
*/
/*
- * ==FILEVERSION 990325==
+ * ==FILEVERSION 20000223==
*
* NOTE TO MAINTAINERS:
* If you modify this file at all, please set the above date.
/* Information specific to using ppp on async serial lines. */
struct tty_struct *tty; /* ptr to TTY structure */
struct tty_struct *backup_tty; /* TTY to use if tty gets closed */
+ unsigned long state; /* state flags, use atomic ops */
__u8 escape; /* 0x20 if prev char was PPP_ESC */
__u8 toss; /* toss this frame */
- volatile __u8 tty_pushing; /* internal state flag */
- volatile __u8 woke_up; /* internal state flag */
__u32 xmit_async_map[8]; /* 1 bit means that given control
character is quoted on output*/
__u32 recv_async_map; /* 1 bit means that given control
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Sat Dec 25 21:09:47 1999
+ * Modified at: Sat Feb 26 09:52:55 2000
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
+ * Copyright (c) 1997-2000 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
switch (event) {
case IAP_RECV_F_LST:
- iriap_send_ack(self);
- /*LM_Idle_request(idle); */
+ /* iriap_send_ack(self); */
+ /* LM_Idle_request(idle); */
iriap_next_call_state(self, S_WAIT_FOR_CALL);
break;
if(fa!=NULL)
{
fa->fa_fd=fd;
+ synchronize_irq();
kfree_s(fna,sizeof(struct fasync_struct));
release_sock(sock->sk);
return 0;
if (fa!=NULL)
{
*prev=fa->fa_next;
+ synchronize_irq();
kfree_s(fa,sizeof(struct fasync_struct));
}
}