S: Germany
N: Michael Hipp
-E: mhipp@student.uni-tuebingen.de
+E: hippm@informatik.uni-tuebingen.de
D: drivers for the racal ni5210 & ni6510 Ethernet-boards
S: Talstr. 1
S: D - 72072 Tuebingen
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+vmlinux: arch/i386/vmlinux.lds
+
+arch/i386/vmlinux.lds: arch/i386/vmlinux.lds.S FORCE
+ gcc -E -C -P -I$(HPATH) -imacros $(HPATH)/asm-i386/page_offset.h -Ui386 arch/i386/vmlinux.lds.S >arch/i386/vmlinux.lds
+
+FORCE: ;
+
zImage: vmlinux
@$(MAKEBOOT) zImage
mov ds,ax
mov ax,#INITSEG
mov es,ax
- mov cx,#256
+ mov cx,#128
sub si,si
sub di,di
cld
rep
- movsw
+ movsd
jmpi go,INITSEG
! ax and es already contain INITSEG
define_bool CONFIG_X86_GOOD_APIC y
fi
+choice 'Maximum Physical Memory' \
+ "1GB CONFIG_1GB \
+ 2GB CONFIG_2GB" 1GB
+
bool 'Math emulation' CONFIG_MATH_EMULATION
bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
bool 'Symmetric multi-processing support' CONFIG_SMP
CONFIG_X86_POPAD_OK=y
CONFIG_X86_TSC=y
CONFIG_X86_GOOD_APIC=y
+CONFIG_1GB=y
+# CONFIG_2GB is not set
# CONFIG_MATH_EMULATION is not set
# CONFIG_MTRR is not set
CONFIG_SMP=y
#
# CONFIG_HAMRADIO is not set
+#
+# IrDA subsystem support
+#
+# CONFIG_IRDA is not set
+
#
# ISDN subsystem
#
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_SCSI is not set
#
# Filesystems
static ssize_t proc_mca_read(struct file*, char*, size_t, loff_t *);
static struct file_operations proc_mca_operations = {
- NULL, /* array_lseek */
- proc_mca_read, /* array_read */
- NULL, /* array_write */
- NULL, /* array_readdir */
- NULL, /* array_poll */
- NULL, /* array_ioctl */
+ NULL, /* llseek */
+ proc_mca_read, /* read */
+ NULL, /* write */
+ NULL, /* readdir */
+ NULL, /* poll */
+ NULL, /* ioctl */
NULL, /* mmap */
- NULL, /* no special open code */
+ NULL, /* open */
NULL, /* flush */
- NULL, /* no special release code */
- NULL /* can't fsync */
+ NULL, /* release */
+ NULL, /* fsync */
+	NULL,			/* fasync */
+ NULL, /* check_media_change */
+ NULL, /* revalidate */
+ NULL /* lock */
};
static struct inode_operations proc_mca_inode_operations = {
- &proc_mca_operations, /* default base directory file-ops */
+ &proc_mca_operations, /* default file-ops */
NULL, /* create */
NULL, /* lookup */
NULL, /* link */
NULL, /* writepage */
NULL, /* bmap */
NULL, /* truncate */
- NULL /* permission */
+ NULL, /* permission */
+ NULL, /* smap */
+ NULL, /* updatepage */
+ NULL /* revalidate */
};
#endif
if(!MCA_bus)
return;
printk("Micro Channel bus detected.\n");
- save_flags(flags);
- cli();
/* Allocate MCA_info structure (at address divisible by 8) */
- mca_info = kmalloc(sizeof(struct MCA_info), GFP_KERNEL);
+ mca_info = (struct MCA_info *)kmalloc(sizeof(struct MCA_info), GFP_KERNEL);
if(mca_info == NULL) {
printk("Failed to allocate memory for mca_info!");
- restore_flags(flags);
return;
}
+ memset(mca_info, 0, sizeof(struct MCA_info));
+
+ save_flags(flags);
+ cli();
/* Make sure adapter setup is off */
mca_info->slot[i].dev = 0;
if(!mca_isadapter(i)) continue;
- node = kmalloc(sizeof(struct proc_dir_entry), GFP_KERNEL);
+
+ node = (struct proc_dir_entry *)kmalloc(sizeof(struct proc_dir_entry), GFP_KERNEL);
if(node == NULL) {
printk("Failed to allocate memory for MCA proc-entries!");
return;
}
+ memset(node, 0, sizeof(struct proc_dir_entry));
+
if(i < MCA_MAX_SLOT_NR) {
node->low_ino = PROC_MCA_SLOT + i;
node->namelen = sprintf(mca_info->slot[i].procname,
type = inode->i_ino;
pid = type >> 16;
type &= 0x0000ffff;
- start = 0;
+ start = NULL;
dp = (struct proc_dir_entry *) inode->u.generic_ip;
length = mca_fill((char *) page, pid, type,
&start, ppos, count);
free_page(page);
return length;
}
- if(start != 0) {
+ if(start != NULL) {
/* We have had block-adjusting processing! */
copy_to_user(buf, start, length);
*
* Force Centaur C6 processors to report MTRR capability.
* Bart Hartgers <bart@etpmod.phys.tue.nl>, May 199.
+ *
+ * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
*/
/*
NULL, NULL, NULL, NULL }},
{ X86_VENDOR_INTEL, 6,
{ "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
- NULL, "Pentium II (Deschutes)", "Celeron (Mendocino)", NULL,
+ NULL, "Pentium II (Deschutes)", "Mobile Pentium II", NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
{ X86_VENDOR_AMD, 4,
{ NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
if (c->x86_model <= 16)
p = cpu_models[i].model_names[c->x86_model];
- /* Names for the Pentium II processors */
+ /* Names for the Pentium II Celeron processors
+ detectable only by also checking the cache size */
if ((cpu_models[i].vendor == X86_VENDOR_INTEL)
- && (cpu_models[i].x86 == 6)
- && (c->x86_model == 5)
- && (c->x86_cache_size == 0)) {
- p = "Celeron (Covington)";
- }
+ && (cpu_models[i].x86 == 6)){
+ if(c->x86_model == 6 && c->x86_cache_size == 128) {
+ p = "Celeron (Mendocino)";
+ }
+ else {
+ if (c->x86_model == 5 && c->x86_cache_size == 0) {
+ p = "Celeron (Covington)";
+ }
+ }
+ }
}
}
int datapages = 0;
int initpages = 0;
unsigned long tmp;
+ unsigned long endbase;
end_mem &= PAGE_MASK;
high_memory = (void *) end_mem;
* IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
* They seem to have done something stupid with the floppy
* controller as well..
+ * The amount of available base memory is in WORD 40:13.
*/
- while (start_low_mem < 0x9f000+PAGE_OFFSET) {
+ endbase = PAGE_OFFSET + ((*(unsigned short *)__va(0x413) * 1024) & PAGE_MASK);
+ while (start_low_mem < endbase) {
clear_bit(PG_reserved, &mem_map[MAP_NR(start_low_mem)].flags);
start_low_mem += PAGE_SIZE;
}
/* ld script to make i386 Linux kernel
- * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
*/
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)
SECTIONS
{
- . = 0xC0000000 + 0x100000;
+ . = 0xC0000000 + 0x100000;
_text = .; /* Text and read-only data */
.text : {
*(.text)
--- /dev/null
+/* ld script to make i386 Linux kernel
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ . = PAGE_OFFSET_RAW + 0x100000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0x9090
+ .text.lock : { *(.text.lock) } /* out-of-line lock text */
+ .rodata : { *(.rodata) }
+ .kstrtab : { *(.kstrtab) }
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
+
+ _etext = .; /* End of text section */
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : { *(.data.init) }
+ . = ALIGN(4096);
+ __init_end = .;
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
endmenu
source drivers/char/Config.in
+source drivers/usb/Config.in
source fs/Config.in
mainmenu_option next_comment
-/* $Id: ioctl32.c,v 1.62 1999/05/01 09:17:44 davem Exp $
+/* $Id: ioctl32.c,v 1.62.2.1 1999/06/09 04:53:03 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
#include <linux/fb.h>
#include <linux/ext2_fs.h>
#include <linux/videodev.h>
+#include <linux/netdevice.h>
#include <scsi/scsi.h>
/* Ugly hack. */
__kernel_caddr_t32 ifcbuf;
};
+static int dev_ifname32(unsigned int fd, unsigned long arg)
+{
+ struct device *dev;
+ struct ifreq32 ifr32;
+ int err;
+
+ if (copy_from_user(&ifr32, (struct ifreq32 *)arg, sizeof(struct ifreq32)))
+ return -EFAULT;
+
+ dev = dev_get_by_index(ifr32.ifr_ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ err = copy_to_user((struct ifreq32 *)arg, &ifr32, sizeof(struct ifreq32));
+ return (err ? -EFAULT : 0);
+}
+
static inline int dev_ifconf(unsigned int fd, unsigned long arg)
{
struct ifconf32 ifc32;
goto out;
}
switch (cmd) {
+ case SIOCGIFNAME:
+ error = dev_ifname32(fd, arg);
+ goto out;
+
case SIOCGIFCONF:
error = dev_ifconf(fd, arg);
goto out;
+
/*
bttv - Bt848 frame grabber driver
{ 3, 4, 0, 2, 0x01e000, { 2, 0, 1, 1}, {0x01c000, 0, 0x018000, 0x014000, 0x002000, 0 }},
/* "Leadtek WinView 601", */
{ 3, 1, 0, 2, 0x8300f8, { 2, 3, 1, 1,0}, {0x4fa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007}},
+ /* AVEC Intercapture */
+ { 3, 1, 9, 2, 0, { 2, 3, 1, 1}, { 0, 0, 0, 0, 0}},
};
#define TVCARDS (sizeof(tvcards)/sizeof(tvcard))
I2CWrite(bus, I2C_TEA6300, TEA6300_SW, 0x01, 1); /* mute off input A */
}
+static void init_tea6320(struct i2c_bus *bus)
+{
+ I2CWrite(bus, I2C_TEA6300, TEA6320_V, 0x28, 1); /* master volume */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_FFL, 0x28, 1); /* volume left 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_FFR, 0x28, 1); /* volume right 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_FRL, 0x28, 1); /* volume rear left 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_FRR, 0x28, 1); /* volume rear right 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_BA, 0x11, 1); /* bass 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_TR, 0x11, 1); /* treble 0dB */
+ I2CWrite(bus, I2C_TEA6300, TEA6320_S, TEA6320_S_GMU, 1); /* mute off input A */
+}
+
static void init_tda8425(struct i2c_bus *bus)
{
I2CWrite(bus, I2C_TDA8425, TDA8425_VL, 0xFC, 1); /* volume left 0dB */
if (I2CRead(&(btv->i2c), I2C_TEA6300) >=0)
{
+ if(btv->type==BTTV_AVEC_INTERCAP)
+ {
+ printk(KERN_INFO "bttv%d: fader chip: TEA6320\n",btv->nr);
+ btv->audio_chip = TEA6320;
+ init_tea6320(&(btv->i2c));
+ } else {
printk(KERN_INFO "bttv%d: fader chip: TEA6300\n",btv->nr);
btv->audio_chip = TEA6300;
init_tea6300(&(btv->i2c));
+ }
} else
printk(KERN_INFO "bttv%d: NO fader chip: TEA6300\n",btv->nr);
case BTTV_WINVIEW_601:
strcpy(btv->video_dev.name,"BT848(Leadtek WinView 601)");
break;
+ case BTTV_AVEC_INTERCAP:
+ strcpy(btv->video_dev.name,"(AVEC Intercapture)");
+ break;
}
printk("%s\n",btv->video_dev.name);
audio(btv, AUDIO_MUTE);
#define BTTV_ZOLTRIX 0x0f
#define BTTV_PIXVIEWPLAYTV 0x10
#define BTTV_WINVIEW_601 0x11
+#define BTTV_AVEC_INTERCAP 0x12
#define AUDIO_TUNER 0x00
#define AUDIO_RADIO 0x01
#define TDA8425 0x02
#define TDA9840 0x03
#define TEA6300 0x04
+#define TEA6320 0x05
#define I2C_TSA5522 0xc2
#define I2C_TDA9840 0x84
#define I2C_HAUPEE 0xa0
#define I2C_STBEE 0xae
#define I2C_VHX 0xc0
-#define I2C_TEA6300 0x80
+#define I2C_TEA6300 0x80 /* same as TEA6320 */
#define TDA9840_SW 0x00
#define TDA9840_LVADJ 0x02
#define TEA6300_FA 0x04 /* fader control */
#define TEA6300_SW 0x05 /* mute and source switch */
+
+#define TEA6320_V 0x00
+#define TEA6320_FFR 0x01 /* volume front right */
+#define TEA6320_FFL 0x02 /* volume front left */
+#define TEA6320_FRR 0x03 /* volume rear right */
+#define TEA6320_FRL 0x04 /* volume rear left */
+#define TEA6320_BA 0x05 /* bass */
+#define TEA6320_TR 0x06 /* treble */
+#define TEA6320_S 0x07 /* switch register */
+ /* values for those registers: */
+#define TEA6320_S_SA 0x01 /* stereo A input */
+#define TEA6320_S_SB 0x02 /* stereo B */
+#define TEA6320_S_SC 0x04 /* stereo C */
+#define TEA6320_S_GMU 0x80 /* general mute */
+
+
#define PT2254_L_CHANEL 0x10
#define PT2254_R_CHANEL 0x08
#define PT2254_DBS_IN_2 0x400
/* cadet.c - A video4linux driver for the ADS Cadet AM/FM Radio Card
*
* by Fred Gleason <fredg@wava.com>
- * Version 0.3.1
+ * Version 0.3.2
*
* (Loosely) based on code for the Aztech radio card by
*
return -EINVAL;
request_region(io,2,"cadet");
- printk(KERN_INFO "ADS Cadet Radio Card at %x\n",io);
+ printk(KERN_INFO "ADS Cadet Radio Card at 0x%x\n",io);
return 0;
}
for(i=0;i<8;i++) {
io=iovals[i];
- if(check_region(io,2)) {
- return -1;
- }
- cadet_setfreq(1410);
- if(cadet_getfreq()==1410) {
- return io;
+ if(check_region(io,2)>=0) {
+ cadet_setfreq(1410);
+ if(cadet_getfreq()==1410) {
+ return io;
+ }
}
}
return -1;
// 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,0xc2,623},
16*170.00,16*450.00,0x02,0x04,0x01,0x8e,0xc2,623},
{"Temic 4036 FY5 NTSC", TEMIC, NTSC,
- 16*157.25,16*463.25,0xa0,0x90,0x30,0x8e,0xc2,732},
+ 16*157.25,16*463.25,0xa0,0x90,0x30,0x8e,0xc2,732},
+ {"Alps HSBH1", TEMIC, NTSC,
+ 16*137.25,16*385.25,0x01,0x02,0x08,0x8e,0xc2,732},
};
/* ---------------------------------------------------------------------- */
#define TUNER_TEMIC_NTSC 6
#define TUNER_TEMIC_PAL_I 7
#define TUNER_TEMIC_4036FY5_NTSC 8
+#define TUNER_ALPS_TSBH1_NTSC 9
#define NOTUNER 0
#define PAL 1
outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
+ vp->stats.tx_bytes+=skb->len;
return 0;
}
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
+ vp->stats.rx_bytes+=skb->len;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
short pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
+ vp->stats.rx_bytes+=pkt_len;
if (vortex_debug > 4)
printk("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
-/* $Id: cosa.c,v 1.21 1999/02/06 19:49:18 kas Exp $ */
+/* $Id: cosa.c,v 1.24 1999/05/28 17:28:34 kas Exp $ */
/*
* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
* The Comtrol Hostess SV11 driver by Alan Cox
* The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
*/
+/*
+ * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
+ * fixed a deadlock in cosa_sppp_open
+ */
\f
/* ---------- Headers, macros, data structures ---------- */
debug_status_out(cosa, 0);
#endif
}
+ cosa_putdata8(cosa, 0);
cosa_putdata8(cosa, status);
#ifdef DEBUG_IO
+ debug_data_cmd(cosa, 0);
debug_data_cmd(cosa, status);
#endif
}
printk(KERN_WARNING
"%s: No channel wants data in TX IRQ\n",
cosa->name);
+ put_driver_status_nolock(cosa);
clear_bit(TXBIT, &cosa->rxtx);
spin_unlock_irqrestore(&cosa->lock, flags);
return;
if (tickssofar < 5)
return 1;
if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
/* Try to restart the adaptor. */
dev->tbusy=0;
dev->trans_start = jiffies;
dep_tristate 'Winbond W83977AF (IR)' CONFIG_WINBOND_FIR $CONFIG_IRDA
dep_tristate 'Sharp UIRCC' CONFIG_SHARP_FIR $CONFIG_IRDA
dep_tristate 'Toshiba Type-O IR Port' CONFIG_TOSHIBA_FIR $CONFIG_IRDA
+dep_tristate 'SMC IrCC' CONFIG_SMC_IRCC_FIR $CONFIG_IRDA
comment 'Dongle support'
bool 'Serial dongle support' CONFIG_DONGLE
endif
endif
+ifeq ($(CONFIG_IRPORT_SIR),y)
+L_OBJS += irport.o
+else
+ ifeq ($(CONFIG_IRPORT_SIR),m)
+ M_OBJS += irport.o
+ endif
+endif
+
+ifeq ($(CONFIG_IRPORT_SIR),y)
+L_OBJS += irport.o
+else
+ ifeq ($(CONFIG_IRPORT_SIR),m)
+ M_OBJS += irport.o
+ endif
+endif
+
ifeq ($(CONFIG_NSC_FIR),y)
L_OBJS += pc87108.o
else
endif
endif
+ifeq ($(CONFIG_TOSHIBA_FIR),y)
+L_OBJS += toshoboe.o
+else
+ ifeq ($(CONFIG_TOSHIBA_FIR),m)
+ M_OBJS += toshoboe.o
+ endif
+endif
+
+ifeq ($(CONFIG_TOSHIBA_FIR),y)
+L_OBJS += toshoboe.o
+else
+ ifeq ($(CONFIG_TOSHIBA_FIR),m)
+ M_OBJS += toshoboe.o
+ endif
+endif
+
+ifeq ($(CONFIG_SMC_IRCC_FIR),y)
+L_OBJS += irport.o smc-ircc.o
+else
+ ifeq ($(CONFIG_SMC_IRCC_FIR),m)
+ M_OBJS += irport.o smc-ircc.o
+ endif
+endif
+
ifeq ($(CONFIG_ESI_DONGLE),y)
L_OBJS += esi.o
else
endif
endif
+ifeq ($(CONFIG_LITELINK_DONGLE),y)
+L_OBJS += litelink.o
+else
+ ifeq ($(CONFIG_LITELINK_DONGLE),m)
+ M_OBJS += litelink.o
+ endif
+endif
+
+ifeq ($(CONFIG_LITELINK_DONGLE),y)
+L_OBJS += litelink.o
+else
+ ifeq ($(CONFIG_LITELINK_DONGLE),m)
+ M_OBJS += litelink.o
+ endif
+endif
+
include $(TOPDIR)/Rules.make
clean:
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Oct 21 20:02:35 1998
- * Modified at: Mon May 10 15:12:54 1999
+ * Modified at: Sun May 16 14:35:11 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
#include <linux/sched.h>
#include <linux/init.h>
-#include <asm/ioctls.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irda_device.h>
#include <net/irda/dongle.h>
-static void actisys_reset(struct irda_device *dev, int unused);
+static void actisys_reset(struct irda_device *dev);
static void actisys_open(struct irda_device *idev, int type);
static void actisys_close(struct irda_device *dev);
static void actisys_change_speed( struct irda_device *dev, int baudrate);
-static void actisys_reset(struct irda_device *dev, int unused);
static void actisys_init_qos(struct irda_device *idev, struct qos_info *qos);
/* These are the baudrates supported */
* 1. Clear DTR for a few ms.
*
*/
-static void actisys_reset(struct irda_device *idev, int unused)
+static void actisys_reset(struct irda_device *idev)
{
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
* Status: Experimental.
* Author: Thomas Davis, <ratbert@radiks.net>
* Created at: Sat Feb 21 18:54:38 1998
- * Modified at: Mon May 10 15:13:12 1999
+ * Modified at: Sun May 16 14:35:21 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: esi.c
*
#include <linux/sched.h>
#include <linux/init.h>
-#include <asm/ioctls.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irda_device.h>
static void esi_open(struct irda_device *idev, int type);
static void esi_close(struct irda_device *driver);
static void esi_change_speed(struct irda_device *idev, int baud);
-static void esi_reset(struct irda_device *idev, int unused);
+static void esi_reset(struct irda_device *idev);
static void esi_qos_init(struct irda_device *idev, struct qos_info *qos);
static struct dongle dongle = {
irda_device_set_dtr_rts(idev, dtr, rts);
}
-static void esi_reset( struct irda_device *idev, int unused)
+static void esi_reset( struct irda_device *idev)
{
/* Empty */
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Feb 6 21:02:33 1999
- * Modified at: Mon May 10 16:01:33 1999
+ * Modified at: Tue Jun 1 08:47:41 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
#include <linux/sched.h>
#include <linux/init.h>
-#include <asm/ioctls.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irda_device.h>
#include <net/irda/irtty.h>
#include <net/irda/dongle.h>
-static void girbil_reset(struct irda_device *dev, int unused);
+static void girbil_reset(struct irda_device *dev);
static void girbil_open(struct irda_device *dev, int type);
static void girbil_close(struct irda_device *dev);
static void girbil_change_speed(struct irda_device *dev, int baud);
* 0. set RTS, and wait at least 5 ms
* 1. clear RTS
*/
-void girbil_reset(struct irda_device *idev, int unused)
+void girbil_reset(struct irda_device *idev)
{
__u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
/* Sleep at least 5 ms */
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(2);
+ schedule_timeout(MSECS_TO_JIFFIES(20));
/* Set DTR and clear RTS to enter command mode */
irda_device_set_dtr_rts(idev, FALSE, TRUE);
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(2);
+ schedule_timeout(MSECS_TO_JIFFIES(20));
/* Write control byte */
irda_device_raw_write(idev, &control, 1);
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(2);
+ schedule_timeout(MSECS_TO_JIFFIES(20));
/* Go back to normal mode */
irda_device_set_dtr_rts(idev, TRUE, TRUE);
+
+	/* Make sure the IrDA chip also goes to default speed */
+ if (idev->change_speed)
+ idev->change_speed(idev, 9600);
}
/*
static void girbil_init_qos(struct irda_device *idev, struct qos_info *qos)
{
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
- qos->min_turn_time.bits &= 0xfe; /* All except 0 ms */
+ qos->min_turn_time.bits &= 0x03;
}
#ifdef MODULE
/*********************************************************************
- *
+ *
* Filename: irport.c
- * Version: 0.9
- * Description: Serial driver for IrDA.
+ * Version: 1.0
+ * Description: Half duplex serial port SIR driver for IrDA.
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 3 13:49:59 1997
- * Modified at: Sat May 23 23:15:20 1998
+ * Modified at: Tue Jun 1 10:02:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: serial.c by Linus Torvalds
*
- * Copyright (c) 1997,1998 Dag Brattli <dagb@cs.uit.no>
- * All Rights Reserved.
+ * Copyright (c) 1997, 1998, 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- * NOTICE:
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
*
* This driver is ment to be a small half duplex serial driver to be
- * used for IR-chipsets that has a UART (16550) compatibility mode. If
- * your chipset is is UART only, you should probably use IrTTY instead
- * since the Linux serial driver is probably more robust and optimized.
- *
- * The functions in this file may be used by FIR drivers, but this
- * driver knows nothing about FIR drivers so don't ever insert such
- * code into this file. Instead you should code your FIR driver in a
- * separate file, and then call the functions in this file if
- * necessary. This is becase it is difficult to use the Linux serial
- * driver with a FIR driver becase they must share interrupts etc. Most
- * FIR chipsets can function in advanced SIR mode, and you should
- * probably use that mode instead of the UART compatibility mode (and
- * then just forget about this file)
+ * used for IR-chipsets that has a UART (16550) compatibility mode.
+ * Eventually it will replace irtty, because irtty has some
+ * problems that is hard to get around when we don't have control
+ * over the serial driver. This driver may also be used by FIR
+ * drivers to handle SIR mode for them.
*
********************************************************************/
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/string.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
+#include <linux/skbuff.h>
+#include <linux/serial_reg.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/serial_reg.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/spinlock.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
static int irport_net_init(struct device *dev);
static int irport_net_open(struct device *dev);
static int irport_net_close(struct device *dev);
-static void irport_wait_until_sent(struct irda_device *idev);
static int irport_is_receiving(struct irda_device *idev);
static void irport_set_dtr_rts(struct irda_device *idev, int dtr, int rts);
static int irport_raw_write(struct irda_device *idev, __u8 *buf, int len);
idev->io.io_ext = IO_EXTENT;
idev->io.fifo_size = 16;
+ idev->netdev.base_addr = iobase;
+ idev->netdev.irq = irq;
+
/* Lock the port that we need */
ret = check_region(idev->io.iobase2, idev->io.io_ext);
if (ret < 0) {
- DEBUG( 0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
- idev->io.iobase2);
- /* w83977af_cleanup( self->idev); */
+ DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
+ idev->io.iobase2);
+ /* irport_cleanup(self->idev); */
return -ENODEV;
}
request_region(idev->io.iobase2, idev->io.io_ext, idev->name);
static int irport_close(struct irda_device *idev)
{
- DEBUG(0, __FUNCTION__ "()\n");
-
ASSERT(idev != NULL, return -1;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return -1;);
- /* Release the PORT that this driver is using */
+ /* Release the IO-port that this driver is using */
DEBUG(0 , __FUNCTION__ "(), Releasing Region %03x\n",
idev->io.iobase2);
release_region(idev->io.iobase2, idev->io.io_ext);
return 0;
}
-void irport_start(int iobase)
+void irport_start(struct irda_device *idev, int iobase)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&idev->lock, flags);
+
+ irport_stop(idev, iobase);
+
/* Initialize UART */
outb(UART_LCR_WLEN8, iobase+UART_LCR); /* Reset DLAB */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
/* Turn on interrups */
- outb((UART_IER_RLSI | UART_IER_RDI), iobase+UART_IER);
+ outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, iobase+UART_IER);
+ spin_unlock_irqrestore(&idev->lock, flags);
}
-void irport_stop(int iobase)
+void irport_stop(struct irda_device *idev, int iobase)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&idev->lock, flags);
+
/* Reset UART */
outb(0, iobase+UART_MCR);
/* Turn off interrupts */
outb(0, iobase+UART_IER);
+
+ spin_unlock_irqrestore(&idev->lock, flags);
}
/*
{
DEBUG(4, __FUNCTION__ "(), iobase=%#x\n", iobase);
-
return 0;
}
/*
* Function irport_change_speed (idev, speed)
*
- * Set speed of port to specified baudrate
+ * Set speed of IrDA port to specified baudrate
*
*/
void irport_change_speed(struct irda_device *idev, int speed)
{
+ unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
int lcr; /* Line control reg */
int divisor;
- DEBUG( 0, __FUNCTION__ "(), Setting speed to: %d\n", speed);
+ DEBUG(0, __FUNCTION__ "(), Setting speed to: %d\n", speed);
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
/* Update accounting for new speed */
idev->io.baudrate = speed;
+ spin_lock_irqsave(&idev->lock, flags);
+
/* Turn off interrupts */
outb(0, iobase+UART_IER);
divisor = SPEED_MAX/speed;
- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_14;
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+ * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and
+ * almost 1,7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ if (idev->io.baudrate < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
/* IrDA ports use 8N1 */
lcr = UART_LCR_WLEN8;
outb(lcr, iobase+UART_LCR); /* Set 8N1 */
outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
- /* Turn on receive interrups */
- outb(UART_IER_RLSI|UART_IER_RDI, iobase+UART_IER);
+ /* Turn on interrups */
+ outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, iobase+UART_IER);
+
+	spin_unlock_irqrestore(&idev->lock, flags);
}
/*
{
int actual = 0;
int iobase;
+ int fcr;
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
+ DEBUG(4, __FUNCTION__ "()\n");
+
/* Finished with frame? */
if (idev->tx_buff.len > 0) {
/* Write data left in transmit buffer */
/* Schedule network layer, so we can get some more frames */
mark_bh(NET_BH);
- outb(UART_FCR_ENABLE_FIFO |
- UART_FCR_TRIGGER_14 |
- UART_FCR_CLEAR_RCVR, iobase+UART_FCR); /* Enable FIFO's */
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
+
+ if (idev->io.baudrate < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ /*
+ * Reset Rx FIFO to make sure that all reflected transmit data
+ * will be discarded
+ */
+ outb(fcr, iobase+UART_FCR);
/* Turn on receive interrupts */
outb(UART_IER_RLSI|UART_IER_RDI, iobase+UART_IER);
/*
* Function irport_write (driver)
*
- *
+ * Fill Tx FIFO with transmit data
*
*/
static int irport_write(int iobase, int fifo_size, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
- DEBUG( 0, __FUNCTION__ "(), failed, fifo not empty!\n");
+ DEBUG(0, __FUNCTION__ "(), failed, fifo not empty!\n");
return -1;
}
/* Fill FIFO with current frame */
- while (( fifo_size-- > 0) && (actual < len)) {
+ while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
- outb( buf[actual], iobase+UART_TX);
+ outb(buf[actual], iobase+UART_TX);
actual++;
}
- DEBUG(4, __FUNCTION__ "(), fifo_size %d ; %d sent of %d\n",
- fifo_size, actual, len);
-
return actual;
}
int irport_hard_xmit(struct sk_buff *skb, struct device *dev)
{
struct irda_device *idev;
+ unsigned long flags;
int actual = 0;
int iobase;
- DEBUG(5, __FUNCTION__ "(), dev=%p\n", dev);
-
ASSERT(dev != NULL, return 0;);
idev = (struct irda_device *) dev->priv;
iobase = idev->io.iobase2;
/* Lock transmit buffer */
- if (irda_lock((void *) &dev->tbusy) == FALSE)
- return -EBUSY;
+ if (irda_lock((void *) &dev->tbusy) == FALSE) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return -EBUSY;
+
+ WARNING("%s: transmit timed out\n", dev->name);
+ irport_start(idev, iobase);
+ irport_change_speed(idev, idev->io.baudrate);
+
+ dev->trans_start = jiffies;
+ }
+
+ spin_lock_irqsave(&idev->lock, flags);
/* Init tx buffer */
idev->tx_buff.data = idev->tx_buff.head;
/* Turn on transmit finished interrupt. Will fire immediately! */
outb(UART_IER_THRI, iobase+UART_IER);
+ spin_unlock_irqrestore(&idev->lock, flags);
+
dev_kfree_skb(skb);
return 0;
int iobase;
int boguscount = 0;
- if (!idev)
- return;
-
- DEBUG(4, __FUNCTION__ "()\n");
+ ASSERT(idev != NULL, return;);
iobase = idev->io.iobase2;
int boguscount = 0;
if (!idev) {
- printk(KERN_WARNING __FUNCTION__
- "() irq %d for unknown device.\n", irq);
+ WARNING(__FUNCTION__ "() irq %d for unknown device.\n", irq);
return;
}
+ spin_lock(&idev->lock);
+
idev->netdev.interrupt = 1;
iobase = idev->io.iobase2;
- iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ iir = inb(iobase+UART_IIR) & UART_IIR_ID;
while (iir) {
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
- if ((iir & UART_IIR_THRI) && (lsr & UART_LSR_THRE)) {
- /* Transmitter ready for data */
- irport_write_wakeup(idev);
- } else if ((iir & UART_IIR_RDI) && (lsr & UART_LSR_DR)) {
- /* Receive interrupt */
- irport_receive(idev);
- }
+ DEBUG(4, __FUNCTION__ "(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ iir, lsr, iobase);
+
+ switch (iir) {
+ case UART_IIR_RLSI:
+ DEBUG(0, __FUNCTION__ "(), RLSI\n");
+ break;
+ case UART_IIR_RDI:
+ if (lsr & UART_LSR_DR)
+ /* Receive interrupt */
+ irport_receive(idev);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ /* Transmitter ready for data */
+ irport_write_wakeup(idev);
+ break;
+ default:
+ DEBUG(0, __FUNCTION__ "(), unhandled IIR=%#x\n", iir);
+ break;
+ }
/* Make sure we don't stay here to long */
if (boguscount++ > 32)
iir = inb(iobase + UART_IIR) & UART_IIR_ID;
}
idev->netdev.interrupt = 0;
+
+ spin_unlock(&idev->lock);
}
static int irport_net_init(struct device *dev)
iobase = idev->io.iobase2;
if (request_irq(idev->io.irq2, irport_interrupt, 0, idev->name,
- (void *) idev)) {
+ (void *) idev))
return -EAGAIN;
- }
+
+ irport_start(idev, iobase);
+
+ MOD_INC_USE_COUNT;
/* Ready to play! */
dev->tbusy = 0;
dev->interrupt = 0;
dev->start = 1;
- MOD_INC_USE_COUNT;
-
- irport_start(iobase);
+ /* Change speed to make sure dongles follow us again */
+ if (idev->change_speed)
+ idev->change_speed(idev, 9600);
return 0;
}
iobase = idev->io.iobase2;
- irport_stop(iobase);
-
/* Stop device */
dev->tbusy = 1;
dev->start = 0;
+ irport_stop(idev, iobase);
+
free_irq(idev->io.irq2, idev);
MOD_DEC_USE_COUNT;
return 0;
}
-static void irport_wait_until_sent(struct irda_device *idev)
+/*
+ * Function irport_wait_until_sent (idev)
+ *
+ * Delay execution until finished transmitting
+ *
+ */
+void irport_wait_until_sent(struct irda_device *idev)
{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(60*HZ/1000);
+ int iobase;
+
+ iobase = idev->io.iobase2;
+
+ /* Wait until Tx FIFO is empty */
+ while (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ DEBUG(2, __FUNCTION__ "(), waiting!\n");
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(MSECS_TO_JIFFIES(60));
+ }
}
+/*
+ * Function irport_is_receiving (idev)
+ *
+ * Returns true if we are currently receiving data
+ *
+ */
static int irport_is_receiving(struct irda_device *idev)
{
return (idev->rx_buff.state != OUTSIDE_FRAME);
MODULE_PARM(io, "1-4i");
MODULE_PARM(irq, "1-4i");
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Half duplex serial driver for IrDA SIR mode");
+
/*
* Function cleanup_module (void)
*
* Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri May 7 12:50:33 1999
- * Modified at: Mon May 10 15:12:18 1999
+ * Modified at: Wed May 19 07:25:15 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <asm/ioctls.h>
-#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irda_device.h>
#include <net/irda/dongle.h>
-static void litelink_reset(struct irda_device *dev, int unused);
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 10 ms */
+
static void litelink_open(struct irda_device *idev, int type);
static void litelink_close(struct irda_device *dev);
-static void litelink_change_speed( struct irda_device *dev, int baudrate);
-static void litelink_reset(struct irda_device *dev, int unused);
+static void litelink_change_speed(struct irda_device *dev, int baudrate);
+static void litelink_reset(struct irda_device *dev);
static void litelink_init_qos(struct irda_device *idev, struct qos_info *qos);
/* These are the baudrates supported */
irda_device_set_dtr_rts(idev, TRUE, FALSE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* Go back to normal mode */
irda_device_set_dtr_rts(idev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* Cycle through avaiable baudrates until we reach the correct one */
for (i=0; i<5 && baud_rates[i] != baudrate; i++) {
irda_device_set_dtr_rts(idev, FALSE, TRUE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* Set DTR, Set RTS */
irda_device_set_dtr_rts(idev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
}
}
* called with a process context!
*
*/
-static void litelink_reset(struct irda_device *idev, int unused)
+static void litelink_reset(struct irda_device *idev)
{
- struct irtty_cb *self;
- struct tty_struct *tty;
-
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
irda_device_set_dtr_rts(idev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* Clear RTS to reset dongle */
irda_device_set_dtr_rts(idev, TRUE, FALSE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* Go back to normal mode */
irda_device_set_dtr_rts(idev, TRUE, TRUE);
/* Sleep a minimum of 15 us */
- udelay(15);
+ udelay(MIN_DELAY);
/* This dongles speed defaults to 115200 bps */
idev->qos.baud_rate.value = 115200;
* Initialize QoS capabilities
*
*/
-static void litelink_init_qos( struct irda_device *idev, struct qos_info *qos)
+static void litelink_init_qos(struct irda_device *idev, struct qos_info *qos)
{
qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
qos->min_turn_time.bits &= 0x40; /* Needs 0.01 ms */
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Nov 7 21:43:15 1998
- * Modified at: Sun May 9 12:57:46 1999
+ * Modified at: Mon May 24 15:19:21 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
#define CHIP_IO_EXTENT 8
static unsigned int io[] = { 0x2f8, ~0, ~0, ~0 };
-static unsigned int io2[] = { 0x150, 0, 0, 0};
+static unsigned int io2[] = { 0x150, 0, 0, 0 };
static unsigned int irq[] = { 3, 0, 0, 0 };
static unsigned int dma[] = { 0, 0, 0, 0 };
};
/* Some prototypes */
-static int pc87108_open( int i, unsigned int iobase, unsigned int board_addr,
- unsigned int irq, unsigned int dma);
+static int pc87108_open(int i, unsigned int iobase, unsigned int board_addr,
+ unsigned int irq, unsigned int dma);
#ifdef MODULE
-static int pc87108_close( struct irda_device *idev);
+static int pc87108_close(struct irda_device *idev);
#endif /* MODULE */
-static int pc87108_probe( int iobase, int board_addr, int irq, int dma);
-static void pc87108_pio_receive( struct irda_device *idev);
-static int pc87108_dma_receive( struct irda_device *idev);
+static int pc87108_probe(int iobase, int board_addr, int irq, int dma);
+static void pc87108_pio_receive(struct irda_device *idev);
+static int pc87108_dma_receive(struct irda_device *idev);
static int pc87108_dma_receive_complete(struct irda_device *idev, int iobase);
-static int pc87108_hard_xmit( struct sk_buff *skb, struct device *dev);
-static int pc87108_pio_write( int iobase, __u8 *buf, int len, int fifo_size);
-static void pc87108_dma_write( struct irda_device *idev, int iobase);
-static void pc87108_change_speed( struct irda_device *idev, int baud);
+static int pc87108_hard_xmit(struct sk_buff *skb, struct device *dev);
+static int pc87108_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void pc87108_dma_write(struct irda_device *idev, int iobase);
+static void pc87108_change_speed(struct irda_device *idev, int baud);
static void pc87108_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static void pc87108_wait_until_sent( struct irda_device *idev);
-static int pc87108_is_receiving( struct irda_device *idev);
-static int pc87108_read_dongle_id ( int iobase);
-static void pc87108_init_dongle_interface ( int iobase, int dongle_id);
+static void pc87108_wait_until_sent(struct irda_device *idev);
+static int pc87108_is_receiving(struct irda_device *idev);
+static int pc87108_read_dongle_id (int iobase);
+static void pc87108_init_dongle_interface (int iobase, int dongle_id);
-static int pc87108_net_init( struct device *dev);
-static int pc87108_net_open( struct device *dev);
-static int pc87108_net_close( struct device *dev);
+static int pc87108_net_init(struct device *dev);
+static int pc87108_net_open(struct device *dev);
+static int pc87108_net_close(struct device *dev);
/*
* Function pc87108_init ()
{
int i;
- for ( i=0; (io[i] < 2000) && (i < 4); i++) {
+ for (i=0; (io[i] < 2000) && (i < 4); i++) {
int ioaddr = io[i];
if (check_region(ioaddr, CHIP_IO_EXTENT) < 0)
continue;
- if (pc87108_open( i, io[i], io2[i], irq[i], dma[i]) == 0)
+ if (pc87108_open(i, io[i], io2[i], irq[i], dma[i]) == 0)
return 0;
}
return -ENODEV;
* Open driver instance
*
*/
-static int pc87108_open( int i, unsigned int iobase, unsigned int board_addr,
- unsigned int irq, unsigned int dma)
+static int pc87108_open(int i, unsigned int iobase, unsigned int board_addr,
+ unsigned int irq, unsigned int dma)
{
struct pc87108 *self;
struct irda_device *idev;
int ret;
int dongle_id;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(0, __FUNCTION__ "()\n");
- if (( dongle_id = pc87108_probe( iobase, board_addr, irq, dma)) == -1)
+ if ((dongle_id = pc87108_probe(iobase, board_addr, irq, dma)) == -1)
return -1;
/*
* Allocate new instance of the driver
*/
- self = kmalloc( sizeof(struct pc87108), GFP_KERNEL);
- if ( self == NULL) {
- printk( KERN_ERR "IrDA: Can't allocate memory for "
- "IrDA control block!\n");
+ self = kmalloc(sizeof(struct pc87108), GFP_KERNEL);
+ if (self == NULL) {
+ printk(KERN_ERR "IrDA: Can't allocate memory for "
+ "IrDA control block!\n");
return -ENOMEM;
}
- memset( self, 0, sizeof(struct pc87108));
+ memset(self, 0, sizeof(struct pc87108));
/* Need to store self somewhere */
dev_self[i] = self;
idev->io.fifo_size = 32;
/* Lock the port that we need */
- ret = check_region( idev->io.iobase, idev->io.io_ext);
- if ( ret < 0) {
- DEBUG( 0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
- idev->io.iobase);
+ ret = check_region(idev->io.iobase, idev->io.io_ext);
+ if (ret < 0) {
+ DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
+ idev->io.iobase);
/* pc87108_cleanup( self->idev); */
return -ENODEV;
}
- request_region( idev->io.iobase, idev->io.io_ext, idev->name);
+ request_region(idev->io.iobase, idev->io.io_ext, idev->name);
/* Initialize QoS for this device */
- irda_init_max_qos_capabilies( &idev->qos);
+ irda_init_max_qos_capabilies(&idev->qos);
/* The only value we must override it the baudrate */
idev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
idev->qos.min_turn_time.bits = qos_mtt_bits;
- irda_qos_bits_to_value( &idev->qos);
+ irda_qos_bits_to_value(&idev->qos);
idev->flags = IFF_FIR|IFF_MIR|IFF_SIR|IFF_DMA|IFF_PIO|IFF_DONGLE;
idev->netdev.stop = pc87108_net_close;
idev->io.dongle_id = dongle_id;
- pc87108_init_dongle_interface( iobase, dongle_id);
+ pc87108_init_dongle_interface(iobase, dongle_id);
/* Open the IrDA device */
- irda_device_open( idev, driver_name, self);
+ irda_device_open(idev, driver_name, self);
return 0;
}
DEBUG( 4, __FUNCTION__ "()\n");
- ASSERT( idev != NULL, return -1;);
- ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return -1;);
+ ASSERT(idev != NULL, return -1;);
+ ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return -1;);
iobase = idev->io.iobase;
self = (struct pc87108 *) idev->priv;
/* Release the PORT that this driver is using */
- DEBUG( 4, __FUNCTION__ "(), Releasing Region %03x\n",
- idev->io.iobase);
+ DEBUG(4, __FUNCTION__ "(), Releasing Region %03x\n", idev->io.iobase);
release_region(idev->io.iobase, idev->io.io_ext);
irda_device_close(idev);
* Returns non-negative on success.
*
*/
-static int pc87108_probe( int iobase, int board_addr, int irq, int dma)
+static int pc87108_probe(int iobase, int board_addr, int irq, int dma)
{
int version;
__u8 temp=0;
int dongle_id;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
/* Base Address and Interrupt Control Register BAIC */
outb(0, board_addr);
- switch ( iobase) {
- case 0x3E8: outb( 0x14, board_addr+1); break;
- case 0x2E8: outb( 0x15, board_addr+1); break;
- case 0x3F8: outb( 0x16, board_addr+1); break;
- case 0x2F8: outb( 0x17, board_addr+1); break;
- default: DEBUG(0, __FUNCTION__ "(), invalid base_address");
+ switch (iobase) {
+ case 0x3E8: outb(0x14, board_addr+1); break;
+ case 0x2E8: outb(0x15, board_addr+1); break;
+ case 0x3F8: outb(0x16, board_addr+1); break;
+ case 0x2F8: outb(0x17, board_addr+1); break;
+ default: ERROR(__FUNCTION__ "(), invalid base_address");
}
/* Control Signal Routing Register CSRT */
case 9: temp = 0x05; break;
case 11: temp = 0x06; break;
case 15: temp = 0x07; break;
- default: DEBUG( 0, __FUNCTION__ "(), invalid irq");
+ default: ERROR(__FUNCTION__ "(), invalid irq");
}
- outb( 1, board_addr);
-
+ outb(1, board_addr);
+
switch (dma) {
- case 0: outb( 0x08+temp, board_addr+1); break;
- case 1: outb( 0x10+temp, board_addr+1); break;
- case 3: outb( 0x18+temp, board_addr+1); break;
+ case 0: outb(0x08+temp, board_addr+1); break;
+ case 1: outb(0x10+temp, board_addr+1); break;
+ case 3: outb(0x18+temp, board_addr+1); break;
default: DEBUG( 0, __FUNCTION__ "(), invalid dma");
}
/* Mode Control Register MCTL */
- outb( 2, board_addr);
- outb( 0x03, board_addr+1);
+ outb(2, board_addr);
+ outb(0x03, board_addr+1);
/* read the Module ID */
- switch_bank( iobase, BANK3);
- version = inb( iobase+MID);
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+MID);
/* should be 0x2? */
- if (0x20 != (version & 0xf0))
- {
- DEBUG( 0, __FUNCTION__ "(), Wrong chip version");
+ if (0x20 != (version & 0xf0)) {
+ ERROR(__FUNCTION__ "(), Wrong chip version %02x\n", version);
return -1;
}
/* Switch to advanced mode */
switch_bank( iobase, BANK2);
- outb( ECR1_EXT_SL, iobase+ECR1);
- switch_bank( iobase, BANK0);
+ outb(ECR1_EXT_SL, iobase+ECR1);
+ switch_bank(iobase, BANK0);
- dongle_id = pc87108_read_dongle_id( iobase);
- DEBUG( 0, __FUNCTION__ "(), Found dongle: %s\n",
- dongle_types[ dongle_id]);
+ dongle_id = pc87108_read_dongle_id(iobase);
+ DEBUG(0, __FUNCTION__ "(), Found dongle: %s\n",
+ dongle_types[ dongle_id]);
/* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
- switch_bank( iobase, BANK0);
- outb( FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
/* Set FIFO size to 32 */
- switch_bank( iobase, BANK2);
- outb( EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
/* IRCR2: FEND_MD is set */
- switch_bank( iobase, BANK5);
- outb( 0x2a, iobase+4);
+ switch_bank(iobase, BANK5);
+ outb(0x2a, iobase+4);
/* Make sure that some defaults are OK */
- switch_bank( iobase, BANK6);
- outb( 0x20, iobase+0); /* Set 32 bits FIR CRC */
- outb( 0x0a, iobase+1); /* Set MIR pulse width */
- outb( 0x0d, iobase+2); /* Set SIR pulse width */
- outb( 0x2a, iobase+4); /* Set beginning frag, and preamble length */
+ switch_bank(iobase, BANK6);
+ outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
+ outb(0x0a, iobase+1); /* Set MIR pulse width */
+ outb(0x0d, iobase+2); /* Set SIR pulse width */
+ outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */
/* Receiver frame length */
- switch_bank( iobase, BANK4);
- outb( 2048 & 0xff, iobase+6);
- outb(( 2048 >> 8) & 0x1f, iobase+7);
+ switch_bank(iobase, BANK4);
+ outb(2048 & 0xff, iobase+6);
+ outb((2048 >> 8) & 0x1f, iobase+7);
/* Transmitter frame length */
- outb( 2048 & 0xff, iobase+4);
- outb(( 2048 >> 8) & 0x1f, iobase+5);
+ outb(2048 & 0xff, iobase+4);
+ outb((2048 >> 8) & 0x1f, iobase+5);
- DEBUG( 0, "PC87108 driver loaded. Version: 0x%02x\n", version);
+ DEBUG(0, "PC87108 driver loaded. Version: 0x%02x\n", version);
/* Enable receive interrupts */
- switch_bank( iobase, BANK0);
- outb( IER_RXHDL_IE, iobase+IER);
+ switch_bank(iobase, BANK0);
+ outb(IER_RXHDL_IE, iobase+IER);
return dongle_id;
}
bank = inb( iobase+BSR);
/* Select Bank 7 */
- switch_bank( iobase, BANK7);
+ switch_bank(iobase, BANK7);
/* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
- outb( 0x00, iobase+7);
+ outb(0x00, iobase+7);
/* ID0, 1, and 2 are pulled up/down very slowly */
udelay(50);
dongle_id = inb( iobase+4) & 0x0f;
#ifdef BROKEN_DONGLE_ID
- if ( dongle_id == 0x0a)
+ if (dongle_id == 0x0a)
dongle_id = 0x09;
#endif
-
+
/* Go back to bank 0 before returning */
- switch_bank( iobase, BANK0);
+ switch_bank(iobase, BANK0);
- DEBUG( 0, __FUNCTION__ "(), Dongle = %#x\n", dongle_id);
+ DEBUG(0, __FUNCTION__ "(), Dongle = %#x\n", dongle_id);
- outb( bank, iobase+BSR);
+ outb(bank, iobase+BSR);
return dongle_id;
}
* power-on/reset. It also needs to be used whenever you suspect that
* the dongle is changed.
*/
-static void pc87108_init_dongle_interface ( int iobase, int dongle_id)
+static void pc87108_init_dongle_interface (int iobase, int dongle_id)
{
int bank;
iobase+ECR1);
/* Enable DMA */
- switch_bank( iobase, BANK0);
- outb( inb( iobase+MCR)|MCR_DMA_EN, iobase+MCR);
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);
/* Restore bank register */
- outb( bsr, iobase+BSR);
+ outb(bsr, iobase+BSR);
}
/*
* got transfered
*
*/
-static int pc87108_pio_write( int iobase, __u8 *buf, int len, int fifo_size)
+static int pc87108_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
int actual = 0;
__u8 bank;
}
/* Fill FIFO with current frame */
- while (( fifo_size-- > 0) && (actual < len)) {
+ while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
- outb( buf[actual++], iobase+TXD);
+ outb(buf[actual++], iobase+TXD);
}
- DEBUG( 4, __FUNCTION__ "(), fifo_size %d ; %d sent of %d\n",
- fifo_size, actual, len);
+ DEBUG(4, __FUNCTION__ "(), fifo_size %d ; %d sent of %d\n",
+ fifo_size, actual, len);
/* Restore bank */
- outb( bank, iobase+BSR);
+ outb(bank, iobase+BSR);
return actual;
}
MODULE_DESCRIPTION("NSC PC87108 IrDA Device Driver");
MODULE_PARM(qos_mtt_bits, "i");
+MODULE_PARM(io, "1-4i");
+MODULE_PARM(io2, "1-4i");
+MODULE_PARM(irq, "1-4i");
/*
* Function init_module (void)
--- /dev/null
+/*********************************************************************
+ *
+ * Filename: smc-ircc.c
+ * Version: 0.1
+ * Description: Driver for the SMC Infrared Communications Controller (SMC)
+ * Status: Experimental.
+ * Author: Thomas Davis (tadavis@jps.net)
+ * Created at:
+ * Modified at: Wed May 19 15:30:08 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-1999 Thomas Davis, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * I, Thomas Davis, admit no liability nor provide warranty for any
+ * of this software. This material is provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Fujitsu Lifebook 635t
+ * Sony PCG-505TX (gets DMA wrong.)
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+
+#include <net/irda/smc-ircc.h>
+#include <net/irda/irport.h>
+
+static char *driver_name = "smc-ircc";
+
+#define CHIP_IO_EXTENT 8
+
+static unsigned int io[] = { 0x2e8, 0x140, ~0, ~0 };
+static unsigned int io2[] = { 0x2f8, 0x3e8, 0, 0};
+
+static struct ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL};
+
+/* Some prototypes */
+static int ircc_open( int i, unsigned int iobase, unsigned int board_addr);
+static int ircc_close( struct irda_device *idev);
+static int ircc_probe( int iobase, int board_addr);
+static int ircc_dma_receive( struct irda_device *idev);
+static int ircc_dma_receive_complete(struct irda_device *idev, int iobase);
+static int ircc_hard_xmit( struct sk_buff *skb, struct device *dev);
+static void ircc_dma_write( struct irda_device *idev, int iobase);
+static void ircc_change_speed( struct irda_device *idev, int baud);
+static void ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void ircc_wait_until_sent( struct irda_device *idev);
+static int ircc_is_receiving( struct irda_device *idev);
+
+static int ircc_net_init( struct device *dev);
+static int ircc_net_open( struct device *dev);
+static int ircc_net_close( struct device *dev);
+
+static int ircc_debug=3;
+static int ircc_irq=255;
+static int ircc_dma=255;
+
+/*
+ * Select one of the chip's register banks: the bank number goes into
+ * the low three bits of the master register, while the high nibble
+ * (master control bits) is preserved.
+ */
+static inline void register_bank(int port, int bank)
+{
+ outb(((inb(port+UART_MASTER) & 0xF0) | (bank & 0x07)),
+ port+UART_MASTER);
+}
+
+/* Read one byte from the chip register at port+offset */
+static inline unsigned int serial_in(int port, int offset)
+{
+ return inb(port+offset);
+}
+
+/* Write one byte to the chip register at port+offset */
+static inline void serial_out(int port, int offset, int value)
+{
+ outb(value, port+offset);
+}
+
+/*
+ * Function ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+__initfunc(int ircc_init(void))
+{
+ int i;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+ /* Scan the candidate I/O addresses; entries of ~0 terminate the
+ * list via the io[i] < 2000 test. Check the index bound BEFORE
+ * reading io[i] -- the previous operand order read one element
+ * past the end of the 4-entry io[] array on the last iteration. */
+ for ( i=0; (i < 4) && (io[i] < 2000); i++) {
+ int ioaddr = io[i];
+ if (check_region(ioaddr, CHIP_IO_EXTENT))
+ continue;
+ /* Stop at the first address that probes successfully */
+ if (ircc_open( i, io[i], io2[i]) == 0)
+ return 0;
+ }
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+
+ return -ENODEV;
+}
+
+/*
+ * Function ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+#ifdef MODULE
+static void ircc_cleanup(void)
+{
+ int i;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Close every driver instance that ircc_open() registered in
+ * dev_self[]; unused slots are still NULL from initialization. */
+ for ( i=0; i < 4; i++) {
+ if ( dev_self[i])
+ ircc_close( &(dev_self[i]->idev));
+ }
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+#endif /* MODULE */
+
+/*
+ * Function ircc_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int ircc_open( int i, unsigned int iobase, unsigned int iobase2)
+{
+ struct ircc_cb *self;
+ struct irda_device *idev;
+ int ret;
+ int config;
+
+ DEBUG( ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Probe first; config carries the chip's IRQ/DMA routing byte */
+ if ((config = ircc_probe( iobase, iobase2)) == -1) {
+ DEBUG(ircc_debug,
+ __FUNCTION__ ": addr 0x%04x - no device found!\n", iobase);
+ return -1;
+ }
+
+ /*
+ * Allocate new instance of the driver
+ */
+ self = kmalloc( sizeof(struct ircc_cb), GFP_KERNEL);
+ if ( self == NULL) {
+ printk( KERN_ERR "IrDA: Can't allocate memory for "
+ "IrDA control block!\n");
+ return -ENOMEM;
+ }
+ memset(self, 0, sizeof(struct ircc_cb));
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+
+ idev = &self->idev;
+
+ /* Initialize IO; IRQ is the high nibble of config, DMA the low */
+ idev->io.iobase = iobase;
+ idev->io.iobase2 = iobase2; /* Used by irport */
+ idev->io.irq = config >> 4 & 0x0f;
+ if (ircc_irq < 255) {
+ printk(KERN_INFO "smc: Overriding IRQ - chip says %d, using %d\n",
+ idev->io.irq, ircc_irq);
+ idev->io.irq = ircc_irq;
+ }
+ idev->io.io_ext = CHIP_IO_EXTENT;
+ idev->io.io_ext2 = 8; /* Used by irport */
+ idev->io.dma = config & 0x0f;
+ if (ircc_dma < 255) {
+ printk(KERN_INFO "smc: Overriding DMA - chip says %d, using %d\n",
+ idev->io.dma, ircc_dma);
+ idev->io.dma = ircc_dma;
+ }
+ idev->io.fifo_size = 16;
+
+ /* Lock the port that we need */
+ ret = check_region( idev->io.iobase, idev->io.io_ext);
+ if ( ret < 0) {
+ DEBUG( 0, __FUNCTION__ ": can't get iobase of 0x%03x\n",
+ idev->io.iobase);
+ /* Undo the allocation so we neither leak the control block
+ * nor leave a half-initialized entry for ircc_cleanup() to
+ * close (which would release regions we never requested). */
+ dev_self[i] = NULL;
+ kfree(self);
+ return -ENODEV;
+ }
+ ret = check_region( idev->io.iobase2, idev->io.io_ext2);
+ if ( ret < 0) {
+ DEBUG( 0, __FUNCTION__ ": can't get iobase of 0x%03x\n",
+ idev->io.iobase2);
+ /* Same rollback as above; no regions requested yet */
+ dev_self[i] = NULL;
+ kfree(self);
+ return -ENODEV;
+ }
+ request_region( idev->io.iobase, idev->io.io_ext, idev->name);
+ request_region( idev->io.iobase2, idev->io.io_ext2, idev->name);
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies( &idev->qos);
+
+#if 1
+ /* The only value we must override is the baudrate */
+ idev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+#else
+ /* The only value we must override is the baudrate */
+ idev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200;
+#endif
+
+ idev->qos.min_turn_time.bits = 0x07;
+ irda_qos_bits_to_value( &idev->qos);
+
+ idev->flags = IFF_FIR|IFF_SIR|IFF_DMA|IFF_PIO;
+
+ /* Specify which buffer allocation policy we need */
+ idev->rx_buff.flags = GFP_KERNEL | GFP_DMA;
+ idev->tx_buff.flags = GFP_KERNEL | GFP_DMA;
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ idev->rx_buff.truesize = 4000;
+ idev->tx_buff.truesize = 4000;
+
+ /* Initialize callbacks */
+ idev->change_speed = ircc_change_speed;
+ idev->wait_until_sent = ircc_wait_until_sent;
+ idev->is_receiving = ircc_is_receiving;
+
+ /* Override the network functions we need to use */
+ idev->netdev.init = ircc_net_init;
+ idev->netdev.hard_start_xmit = ircc_hard_xmit;
+ idev->netdev.open = ircc_net_open;
+ idev->netdev.stop = ircc_net_close;
+
+ /* Bring up the SIR (irport) side on the second I/O range */
+ irport_start(idev, iobase2);
+
+ /* Open the IrDA device */
+ irda_device_open( idev, driver_name, self);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+/*
+ * Function ircc_close (idev)
+ *
+ * Close driver instance
+ *
+ */
+static int ircc_close( struct irda_device *idev)
+{
+ int iobase;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( idev != NULL, return -1;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return -1;);
+
+ iobase = idev->io.iobase;
+
+ /* Stop the SIR (irport) side first */
+ irport_stop(idev, idev->io.iobase2);
+
+ /* Mask interrupts and reset the chip (bank 0) */
+ register_bank(iobase, 0);
+ serial_out(iobase, UART_IER, 0);
+ serial_out(iobase, UART_MASTER, UART_MASTER_RESET);
+
+ register_bank(iobase, 1);
+
+ /* Leave the chip configured for plain SIR mode */
+ serial_out(iobase, UART_SCE_CFGA,
+ UART_CFGA_IRDA_SIR_A | UART_CFGA_TX_POLARITY);
+ serial_out(iobase, UART_SCE_CFGB, UART_CFGB_IR);
+
+ /* Release the PORT that this driver is using */
+ DEBUG( ircc_debug,
+ __FUNCTION__ ": releasing 0x%03x\n", idev->io.iobase);
+
+ release_region( idev->io.iobase, idev->io.io_ext);
+
+ if ( idev->io.iobase2) {
+ DEBUG( ircc_debug, __FUNCTION__ ": releasing 0x%03x\n",
+ idev->io.iobase2);
+ release_region( idev->io.iobase2, idev->io.io_ext2);
+ }
+
+ irda_device_close( idev);
+
+ /* NOTE(review): the ircc_cb allocated in ircc_open() is not freed
+ * here and dev_self[] keeps pointing at it -- confirm whether
+ * irda_device_close() takes ownership, otherwise this leaks. */
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+/*
+ * Function ircc_probe (iobase, board_addr, irq, dma)
+ *
+ * Returns non-negative on success.
+ *
+ */
+static int ircc_probe( int iobase, int iobase2)
+{
+ int version = 1;
+ int low, high, chip, config, dma, irq;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Identification registers live in register bank 3 */
+ register_bank(iobase, 3);
+ high = serial_in(iobase, UART_ID_HIGH);
+ low = serial_in(iobase, UART_ID_LOW);
+ chip = serial_in(iobase, UART_CHIP_ID);
+ version = serial_in(iobase, UART_VERSION);
+ /* Interface byte: IRQ in the high nibble, DMA channel in the low */
+ config = serial_in(iobase, UART_INTERFACE);
+ irq = config >> 4 & 0x0f;
+ dma = config & 0x0f;
+
+ /* Accept only the expected SMC signature (iobase2 is unused here) */
+ if (high == 0x10 && low == 0xb8 && chip == 0xf1) {
+ DEBUG(0, "SMC IrDA Controller found; version = %d, "
+ "port 0x%04x, dma %d, interrupt %d\n",
+ version, iobase, dma, irq);
+ } else {
+ return -1;
+ }
+
+ serial_out(iobase, UART_MASTER, 0);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+
+ /* Non-negative on success: the raw interface configuration byte */
+ return config;
+}
+
+/*
+ * Function ircc_change_speed (idev, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void ircc_change_speed( struct irda_device *idev, int speed)
+{
+ struct ircc_cb *self;
+ int iobase, ir_mode, select, fast;
+
+ DEBUG(ircc_debug+1, __FUNCTION__ " -->\n");
+
+ ASSERT(idev != NULL, return;);
+ ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
+
+ self = idev->priv;
+ iobase = idev->io.iobase;
+
+ /* Update accounting for new speed */
+ idev->io.baudrate = speed;
+
+ switch ( speed) {
+ case 9600:
+ case 19200:
+ case 38400: /* was 37600: not an IrDA rate; matches IR_38400 */
+ case 57600:
+ case 115200:
+ /* SIR rates: reset the FIR block and hand over to irport */
+ DEBUG(ircc_debug+1,
+ __FUNCTION__ ": using irport to change speed to %d\n",
+ speed);
+ register_bank(iobase, 0);
+ serial_out(iobase, UART_IER, 0);
+ serial_out(iobase, UART_MASTER, UART_MASTER_RESET);
+ serial_out(iobase, UART_MASTER, UART_MASTER_INT_EN);
+ irport_start(idev, idev->io.iobase2);
+ irport_change_speed( idev, speed);
+ return;
+
+ case 576000:
+ ir_mode = UART_CFGA_IRDA_HDLC;
+ select = 0;
+ fast = 0;
+ DEBUG( ircc_debug, __FUNCTION__ ": handling baud of 576000\n");
+ break;
+ case 1152000:
+ ir_mode = UART_CFGA_IRDA_HDLC;
+ select = UART_1152;
+ fast = 0;
+ DEBUG(ircc_debug, __FUNCTION__ ": handling baud of 1152000\n");
+ break;
+ case 4000000:
+ ir_mode = UART_CFGA_IRDA_4PPM;
+ select = 0;
+ fast = UART_LCR_A_FAST;
+ DEBUG(ircc_debug, __FUNCTION__ ": handling baud of 4000000\n");
+ break;
+ default:
+ DEBUG( 0, __FUNCTION__ ": unknown baud rate of %d\n", speed);
+ return;
+ }
+
+#if 0
+ serial_out(idev->io.iobase2, 4, 0x08);
+#endif
+
+ /* FIR rates: reset chip, silence interrupts, stop the SIR side */
+ serial_out(iobase, UART_MASTER, UART_MASTER_RESET);
+
+ register_bank(iobase, 0);
+ serial_out(iobase, UART_IER, 0);
+
+ irport_stop(idev, idev->io.iobase2);
+
+ idev->netdev.tbusy = 0;
+
+ register_bank(iobase, 1);
+
+ /* Program the modulation mode chosen above, keep other CFGA bits */
+ serial_out(iobase, UART_SCE_CFGA,
+ ((serial_in(iobase, UART_SCE_CFGA) & 0x87) | ir_mode));
+
+ serial_out(iobase, UART_SCE_CFGB,
+ ((serial_in(iobase, UART_SCE_CFGB) & 0x3f) | UART_CFGB_IR));
+
+ (void) serial_in(iobase, UART_FIFO_THRESHOLD);
+ serial_out(iobase, UART_FIFO_THRESHOLD, 64);
+
+ register_bank(iobase, 4);
+
+ /* Rate select (576k vs 1152k) and CRC enable */
+ serial_out(iobase, UART_CONTROL,
+ (serial_in(iobase, UART_CONTROL) & 0x30)
+ | select | UART_CRC );
+
+ register_bank(iobase, 0);
+
+ serial_out(iobase, UART_LCR_A, fast);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+
+/*
+ * Function ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static int ircc_hard_xmit( struct sk_buff *skb, struct device *dev)
+{
+ struct irda_device *idev;
+ int iobase;
+ int mtt;
+
+ DEBUG(ircc_debug+1, __FUNCTION__ " -->\n");
+ idev = (struct irda_device *) dev->priv;
+
+ ASSERT( idev != NULL, return 0;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return 0;);
+
+ iobase = idev->io.iobase;
+
+ DEBUG(ircc_debug+1, __FUNCTION__ "(%ld), skb->len=%d\n", jiffies, (int) skb->len);
+
+ /* Use irport for SIR speeds */
+ if (idev->io.baudrate <= 115200) {
+ DEBUG(ircc_debug+1, __FUNCTION__ ": calling irport_hard_xmit\n");
+ return irport_hard_xmit(skb, dev);
+ }
+
+ DEBUG(ircc_debug, __FUNCTION__ ": using dma; len=%d\n", skb->len);
+
+ /* Lock transmit buffer */
+ if (irda_lock((void *) &dev->tbusy) == FALSE)
+ return -EBUSY;
+
+ /* NOTE(review): no check that skb->len fits tx_buff.truesize
+ * (4000 bytes) -- confirm the upper layer bounds frame size. */
+ memcpy( idev->tx_buff.head, skb->data, skb->len);
+
+ /* Make sure that the length is a multiple of 16 bits */
+ if ( skb->len & 0x01)
+ skb->len++;
+
+ idev->tx_buff.len = skb->len;
+ idev->tx_buff.data = idev->tx_buff.head;
+#if 0
+ idev->tx_buff.offset = 0;
+#endif
+
+ /* Honour the peer's minimum turnaround time before transmitting */
+ mtt = irda_get_mtt( skb);
+
+ /* Use udelay for delays less than 50 us. */
+ if (mtt)
+ udelay( mtt);
+
+ ircc_dma_write( idev, iobase);
+
+ dev_kfree_skb( skb);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+/*
+ * Function ircc_dma_xmit (idev, iobase)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void ircc_dma_write( struct irda_device *idev, int iobase)
+{
+ struct ircc_cb *self;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( idev != NULL, return;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return;);
+
+ self = idev->priv;
+ /* The iobase parameter is ignored and overwritten here */
+ iobase = idev->io.iobase;
+
+ /* Arm the DMA channel with the prepared tx buffer */
+ setup_dma( idev->io.dma, idev->tx_buff.data, idev->tx_buff.len,
+ DMA_MODE_WRITE);
+
+ idev->io.direction = IO_XMIT;
+
+ serial_out(idev->io.iobase2, 4, 0x08);
+
+ register_bank(iobase, 4);
+ serial_out(iobase, UART_CONTROL,
+ (serial_in(iobase, UART_CONTROL) & 0xF0));
+
+ serial_out(iobase, UART_BOF_COUNT_LO, 2);
+ serial_out(iobase, UART_BRICKWALL_CNT_LO, 0);
+#if 1
+ /* Program the frame length (high byte, then low byte) */
+ serial_out(iobase, UART_BRICKWALL_TX_CNT_HI, idev->tx_buff.len >> 8);
+ serial_out(iobase, UART_TX_SIZE_LO, idev->tx_buff.len & 0xff);
+#else
+ serial_out(iobase, UART_BRICKWALL_TX_CNT_HI, 0);
+ serial_out(iobase, UART_TX_SIZE_LO, 0);
+#endif
+
+ /* Enable chip-side DMA (bank 1) */
+ register_bank(iobase, 1);
+ serial_out(iobase, UART_SCE_CFGB,
+ serial_in(iobase, UART_SCE_CFGB) | UART_CFGB_DMA_ENABLE);
+
+ register_bank(iobase, 0);
+
+ /* Interrupt on active frame / end of message, then start transmit */
+ serial_out(iobase, UART_IER, UART_IER_ACTIVE_FRAME | UART_IER_EOM);
+ serial_out(iobase, UART_LCR_B,
+ UART_LCR_B_SCE_TRANSMIT|UART_LCR_B_SIP_ENABLE);
+
+ serial_out(iobase, UART_MASTER, UART_MASTER_INT_EN);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+
+/*
+ * Function ircc_dma_xmit_complete (idev)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static void ircc_dma_xmit_complete( struct irda_device *idev, int underrun)
+{
+ struct ircc_cb *self;
+ int iobase, d;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( idev != NULL, return;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return;);
+
+ /* Disable chip-side DMA now that the transfer is over (bank 1) */
+ register_bank(idev->io.iobase, 1);
+
+ serial_out(idev->io.iobase, UART_SCE_CFGB,
+ serial_in(idev->io.iobase, UART_SCE_CFGB) &
+ ~UART_CFGB_DMA_ENABLE);
+
+ d = get_dma_residue(idev->io.dma);
+
+ DEBUG(ircc_debug, __FUNCTION__ ": dma residue = %d, len=%d, sent=%d\n",
+ d, idev->tx_buff.len, idev->tx_buff.len - d);
+
+ self = idev->priv;
+
+ iobase = idev->io.iobase;
+
+ /* Check for underrun! */
+ if ( underrun) {
+ idev->stats.tx_errors++;
+ idev->stats.tx_fifo_errors++;
+ } else {
+ idev->stats.tx_packets++;
+ idev->stats.tx_bytes += idev->tx_buff.len;
+ }
+
+ /* Unlock tx_buff and request another frame */
+ idev->netdev.tbusy = 0; /* Unlock */
+ idev->media_busy = FALSE;
+
+ /* Tell the network layer, that we can accept more frames */
+ mark_bh( NET_BH);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+
+/*
+ * Function ircc_dma_receive (idev)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int ircc_dma_receive( struct irda_device *idev)
+{
+ struct ircc_cb *self;
+ int iobase;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( idev != NULL, return -1;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return -1;);
+
+ self = idev->priv;
+ iobase= idev->io.iobase;
+
+ /* NOTE(review): the DMA is programmed with the current rx_buff.data
+ * pointer, which is only reset to rx_buff.head a few lines below --
+ * confirm this ordering is intended */
+ setup_dma( idev->io.dma, idev->rx_buff.data, idev->rx_buff.truesize,
+ DMA_MODE_READ);
+
+ /* driver->media_busy = FALSE; */
+ idev->io.direction = IO_RECV;
+ idev->rx_buff.data = idev->rx_buff.head;
+#if 0
+ idev->rx_buff.offset = 0;
+#endif
+
+ /* Reset frame-size registers before arming receive */
+ register_bank(iobase, 4);
+ serial_out(iobase, UART_CONTROL,
+ (serial_in(iobase, UART_CONTROL) &0xF0));
+ serial_out(iobase, UART_BOF_COUNT_LO, 2);
+ serial_out(iobase, UART_BRICKWALL_CNT_LO, 0);
+ serial_out(iobase, UART_BRICKWALL_TX_CNT_HI, 0);
+ serial_out(iobase, UART_TX_SIZE_LO, 0);
+ serial_out(iobase, UART_RX_SIZE_HI, 0);
+ serial_out(iobase, UART_RX_SIZE_LO, 0);
+
+ register_bank(iobase, 0);
+ serial_out(iobase,
+ UART_LCR_B, UART_LCR_B_SCE_RECEIVE | UART_LCR_B_SIP_ENABLE);
+
+ /* Enable burst-mode DMA from the chip into rx_buff */
+ register_bank(iobase, 1);
+ serial_out(iobase, UART_SCE_CFGB,
+ serial_in(iobase, UART_SCE_CFGB) |
+ UART_CFGB_DMA_ENABLE | UART_CFGB_DMA_BURST);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+/*
+ * Function ircc_dma_receive_complete (idev)
+ *
+ * Finished with receiving frames
+ *
+ * Returns TRUE on success, FALSE if no skb could be allocated.
+ */
+static int ircc_dma_receive_complete( struct irda_device *idev, int iobase)
+{
+ struct sk_buff *skb;
+ struct ircc_cb *self;
+ int len, msgcnt;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ self = idev->priv;
+
+ /* msgcnt is only used for the debug printout below */
+ msgcnt = serial_in(idev->io.iobase, UART_LCR_B) & 0x08;
+
+ DEBUG(ircc_debug, __FUNCTION__ ": dma count = %d\n",
+ get_dma_residue(idev->io.dma));
+
+ /* NOTE(review): len can become <= 0 when the DMA residue is large;
+ * dev_alloc_skb()/skb_put() below are not guarded against that */
+ len = idev->rx_buff.truesize - get_dma_residue(idev->io.dma) - 4;
+
+ DEBUG(ircc_debug, __FUNCTION__ ": msgcnt = %d, len=%d\n", msgcnt, len);
+
+ skb = dev_alloc_skb( len+1);
+
+ if (skb == NULL) {
+ printk( KERN_INFO __FUNCTION__
+ ": memory squeeze, dropping frame.\n");
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve( skb, 1);
+ skb_put( skb, len);
+
+ memcpy(skb->data, idev->rx_buff.data, len);
+ /* NOTE(review): rx_bytes is not updated here although the transmit
+ * path updates tx_bytes -- confirm whether that is intentional */
+ idev->stats.rx_packets++;
+
+ /* Hand the frame up to the network layer */
+ skb->dev = &idev->netdev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx( skb);
+
+ /* Disable further DMA transfers from the chip */
+ register_bank(idev->io.iobase, 1);
+ serial_out(idev->io.iobase, UART_SCE_CFGB,
+ serial_in(idev->io.iobase, UART_SCE_CFGB) &
+ ~UART_CFGB_DMA_ENABLE);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return TRUE;
+}
+
+/*
+ * Function ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static void ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ int iobase, iir;
+
+ struct irda_device *idev = (struct irda_device *) dev_id;
+
+ DEBUG(ircc_debug+1, __FUNCTION__ " -->\n");
+
+ if (idev == NULL) {
+ printk( KERN_WARNING "%s: irq %d for unknown device.\n",
+ driver_name, irq);
+ return;
+ }
+
+ /* At SIR speeds the secondary UART port does the work */
+ if (idev->io.baudrate <= 115200) {
+ DEBUG(ircc_debug+1, __FUNCTION__
+ ": routing interrupt to irport_interrupt\n");
+ return irport_interrupt( irq, dev_id, regs);
+ }
+
+ iobase = idev->io.iobase;
+
+ idev->netdev.interrupt = 1;
+
+ /* Mask chip interrupts while handling this one */
+ serial_out(iobase, UART_MASTER, 0);
+
+ register_bank(iobase, 0);
+
+ iir = serial_in(iobase, UART_IIR);
+
+ serial_out(iobase, UART_IER, 0);
+
+ DEBUG(ircc_debug, __FUNCTION__ ": iir = 0x%02x\n", iir);
+
+ if (iir & UART_IIR_EOM) {
+ DEBUG(ircc_debug, __FUNCTION__ ": UART_IIR_EOM\n");
+ if (idev->io.direction == IO_RECV) {
+ ircc_dma_receive_complete(idev, iobase);
+ } else {
+ /* NOTE(review): the second argument of
+ * ircc_dma_xmit_complete() is an 'underrun' flag, but
+ * iobase is passed here, so any nonzero iobase counts
+ * the frame as an underrun -- looks like a bug, verify */
+ ircc_dma_xmit_complete(idev, iobase);
+ }
+ /* Re-arm receive after either direction completes */
+ ircc_dma_receive(idev);
+ }
+
+ if (iir & UART_IIR_ACTIVE_FRAME) {
+ DEBUG(ircc_debug, __FUNCTION__ ": UART_IIR_ACTIVE_FRAME\n");
+ idev->rx_buff.state = INSIDE_FRAME;
+#if 0
+ ircc_dma_receive(idev);
+#endif
+ }
+
+ if (iir & UART_IIR_RAW_MODE) {
+ DEBUG(ircc_debug, __FUNCTION__ ": IIR RAW mode interrupt.\n");
+ }
+
+ idev->netdev.interrupt = 0;
+
+ /* Re-enable the interrupts we are interested in */
+ register_bank(iobase, 0);
+ serial_out(iobase, UART_IER, UART_IER_ACTIVE_FRAME|UART_IER_EOM);
+ serial_out(iobase, UART_MASTER, UART_MASTER_INT_EN);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+
+/*
+ * Function ircc_wait_until_sent (idev)
+ *
+ * This function should put the current thread to sleep until all data
+ * have been sent, so it is safe to change the speed.
+ */
+static void ircc_wait_until_sent( struct irda_device *idev)
+{
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Just delay 60 ms -- does not actually check that transmission
+ * finished. NOTE(review): 6 jiffies is 60 ms only when HZ=100;
+ * TODO confirm */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(6);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+}
+
+/*
+ * Function ircc_is_receiving (idev)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ircc_is_receiving( struct irda_device *idev)
+{
+ int status = FALSE;
+ /* int iobase; */
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( idev != NULL, return FALSE;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return FALSE;);
+
+ /* DMA residue is only logged; the verdict below is based solely on
+ * the receive-buffer state */
+ DEBUG(ircc_debug, __FUNCTION__ ": dma count = %d\n",
+ get_dma_residue(idev->io.dma));
+
+ status = ( idev->rx_buff.state != OUTSIDE_FRAME);
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+
+ return status;
+}
+
+/*
+ * Function ircc_net_init (dev)
+ *
+ * Initialize network device (generic IrDA defaults via
+ * irda_device_setup(), nothing device-specific yet)
+ *
+ */
+static int ircc_net_init( struct device *dev)
+{
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Setup to be a normal IrDA network device driver */
+ irda_device_setup( dev);
+
+ /* Insert overrides below this line! */
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+
+/*
+ * Function ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int ircc_net_open( struct device *dev)
+{
+ struct irda_device *idev;
+ int iobase;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ ASSERT( dev != NULL, return -1;);
+ idev = (struct irda_device *) dev->priv;
+
+ ASSERT( idev != NULL, return 0;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return 0;);
+
+ iobase = idev->io.iobase;
+
+ if (request_irq( idev->io.irq, ircc_interrupt, 0, idev->name,
+ (void *) idev)) {
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(idev->io.dma, idev->name)) {
+ free_irq( idev->io.irq, idev);
+ return -EAGAIN;
+ }
+
+ /* Ready to play! */
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* turn on interrupts */
+
+ MOD_INC_USE_COUNT;
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+/*
+ * Function ircc_net_close (dev)
+ *
+ * Stop the device and release the IRQ and DMA channel acquired in
+ * ircc_net_open().
+ *
+ */
+static int ircc_net_close(struct device *dev)
+{
+ struct irda_device *idev;
+ int iobase;
+
+ DEBUG(ircc_debug, __FUNCTION__ " -->\n");
+
+ /* Stop device */
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* NOTE(review): dev is dereferenced above before these checks run */
+ ASSERT( dev != NULL, return -1;);
+ idev = (struct irda_device *) dev->priv;
+
+ ASSERT( idev != NULL, return 0;);
+ ASSERT( idev->magic == IRDA_DEVICE_MAGIC, return 0;);
+
+ iobase = idev->io.iobase;
+
+ disable_dma( idev->io.dma);
+
+ /* Disable interrupts (NOTE(review): placeholder -- nothing is
+ actually masked here) */
+
+ free_irq( idev->io.irq, idev);
+ free_dma( idev->io.dma);
+
+ MOD_DEC_USE_COUNT;
+
+ DEBUG( ircc_debug, "--> " __FUNCTION__ "\n");
+ return 0;
+}
+
+#ifdef MODULE
+
+MODULE_AUTHOR("Thomas Davis <tadavis@jps.net>");
+MODULE_DESCRIPTION("SMC IrCC controller driver");
+MODULE_PARM(ircc_debug,"1i");
+MODULE_PARM(ircc_dma, "1i");
+MODULE_PARM(ircc_irq, "1i");
+
+/*
+ * Function init_module (void)
+ *
+ * Module entry point: probe for and register the IrCC driver
+ *
+ */
+int init_module(void)
+{
+ return ircc_init();
+}
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Module exit point: unregister the driver and release its resources
+ *
+ */
+void cleanup_module(void)
+{
+ ircc_cleanup();
+}
+
+#endif
/*********************************************************************
*
* Filename: tekram.c
- * Version: 1.1
+ * Version: 1.2
* Description: Implementation of the Tekram IrMate IR-210B dongle
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Oct 21 20:02:35 1998
- * Modified at: Mon May 10 16:10:17 1999
+ * Modified at: Sun May 16 14:33:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
#include <linux/sched.h>
#include <linux/init.h>
-#include <asm/ioctls.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <net/irda/irtty.h>
#include <net/irda/dongle.h>
-static void tekram_reset(struct irda_device *dev, int unused);
+static void tekram_reset(struct irda_device *dev);
static void tekram_open(struct irda_device *dev, int type);
static void tekram_close(struct irda_device *dev);
static void tekram_change_speed(struct irda_device *dev, int baud);
#define TEKRAM_19200 0x03
#define TEKRAM_9600 0x04
-#define TEKRAM_PW 0x10 /* Pulse select bit */
+#define TEKRAM_PW 0x10 /* Pulse select bit */
static struct dongle dongle = {
TEKRAM_DONGLE,
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
-
+
switch (baud) {
default:
case 9600:
case 19200:
byte = TEKRAM_PW|TEKRAM_19200;
break;
- case 34800:
+ case 38400:
byte = TEKRAM_PW|TEKRAM_38400;
break;
case 57600:
break;
}
+ /* Need to reset the dongle and go to 9600 bps before programming */
+ tekram_reset(idev);
+
/* Set DTR, Clear RTS */
irda_device_set_dtr_rts(idev, TRUE, FALSE);
* 3. clear DTR to SPACE state, wait at least 50 us for further
* operation
*/
-void tekram_reset(struct irda_device *idev, int unused)
+void tekram_reset(struct irda_device *idev)
{
ASSERT(idev != NULL, return;);
ASSERT(idev->magic == IRDA_DEVICE_MAGIC, return;);
irda_device_set_dtr_rts(idev, TRUE, TRUE);
udelay(50);
-
- /* Finished! */
+
+ /* Make sure the IrDA chip also goes to default speed */
+ if (idev->change_speed)
+ idev->change_speed(idev, 9600);
}
/*
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Dec 26 10:59:03 1998
- * Modified at: Mon May 10 22:11:09 1999
+ * Modified at: Wed May 19 15:29:56 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
idev->netdev.open = uircc_net_open;
idev->netdev.stop = uircc_net_close;
- irport_start(iobase2);
+ irport_start(idev, iobase2);
/* Open the IrDA device */
irda_device_open(idev, driver_name, self);
/* Disable modem */
outb(0x00, iobase+UIRCC_CR10);
- irport_stop(idev->io.iobase2);
+ irport_stop(idev, idev->io.iobase2);
/* Release the PORT that this driver is using */
DEBUG(4, __FUNCTION__ "(), Releasing Region %03x\n", idev->io.iobase);
case 37600:
case 57600:
case 115200:
- irport_start(idev->io.iobase2);
+ irport_start(idev, idev->io.iobase2);
irport_change_speed(idev, speed);
/* Some magic to disable FIR and enable SIR */
DEBUG(0, __FUNCTION__ "(), handling baud of 1152000\n");
break;
case 4000000:
- irport_stop(idev->io.iobase2);
+ irport_stop(idev, idev->io.iobase2);
/* Some magic to disable SIR and enable FIR */
uircc_toshiba_cmd(&status, 0xffff, 0x001b, 0x0001);
* Status: Experimental.
* Author: Paul VanderSpek
* Created at: Wed Nov 4 11:46:16 1998
- * Modified at: Thu May 13 08:03:27 1999
+ * Modified at: Fri May 21 22:18:19 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
-static struct st_fifo_entry prev;
-
/* Some prototypes */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
unsigned int dma);
DEBUG(0, __FUNCTION__ "()\n");
- prev.status = 0;
-
for (i=0; (io[i] < 2000) && (i < 4); i++) {
int ioaddr = io[i];
if (check_region(ioaddr, CHIP_IO_EXTENT) < 0)
* same Gnu Public License that covers that work.
*
* Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
- * Copyrights (c) 1994,1995,1996 by M.Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
* [feel free to mail ....]
*
* when using as module: (no autoprobing!)
* This is an extension to the Linux operating system, and is covered by the
* same Gnu Public License that covers that work.
*
- * copyrights (c) 1994 by Michael Hipp (mhipp@student.uni-tuebingen.de)
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
*
* I have done a look in the following sources:
* crynwr-packet-driver by Russ Nelson
*
* comments/bugs/suggestions can be sent to:
* Michael Hipp
- * email: Michael.Hipp@student.uni-tuebingen.de
+ * email: hippm@informatik.uni-tuebingen.de
*
* sources:
* some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
*/
/*
+ * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
* 96.Sept.29: virt_to_bus stuff added for new memory modell
* 96.April.29: Added Harald Koenig's Patches (MH)
* 96.April.13: enhanced error handling .. more tests (MH)
p->stats.tx_errors++;
tmdp->status2 = 0;
}
- else
+ else {
+ p->stats.tx_bytes -= (short)(tmdp->blen);
p->stats.tx_packets++;
+ }
#ifdef XMT_VIA_SKB
if(p->tmd_skb[p->tmdlast]) {
eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
#endif
p->stats.rx_packets++;
+ p->stats.rx_bytes += len;
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
SCpnt->request.rq_status = RQ_SCSI_BUSY;
spin_lock_irq(&io_request_lock);
scsi_do_cmd (SCpnt, (void *) scsi_cmd,
- (void *) scsi_result,
- 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
+ (void *) NULL,
+ 0, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
spin_unlock_irq(&io_request_lock);
down (&sem);
SCpnt->request.sem = NULL;
static void sd_detach(Scsi_Device * SDp)
{
Scsi_Disk * dpnt;
- int i;
+ int i, j;
int max_p;
int start;
max_p = sd_gendisk.max_p;
start = i << sd_gendisk.minor_shift;
- for (i=max_p - 1; i >=0 ; i--) {
- int index = start+i;
+ for (j=max_p - 1; j >=0 ; j--) {
+ int index = start+j;
kdev_t devi = MKDEV_SD_PARTITION(index);
struct super_block *sb = get_super(devi);
sync_dev(devi);
SDp->attached--;
sd_template.dev_noticed--;
sd_template.nr_dev--;
- SD_GENDISK(start).nr_real--;
+ SD_GENDISK(i).nr_real--;
return;
}
return;
*
* Borrows code from st driver. Thanks to Alessandro Rubini's "dd" book.
*/
- static char * sg_version_str = "Version: 2.1.32 (990501)";
+ static char * sg_version_str = "Version: 2.1.34 (990603)";
+ static int sg_version_num = 20134; /* 2 digits for each component */
/*
- * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au)
- * - scatter list logic replaces previous large atomic SG_BIG_BUFF
- * sized allocation. See notes in <scsi/sg.h> include file.
- *
+ * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements), then do
* Should use hlcomplete but it is too "noisy" (sd uses it).
*
* - This driver obtains memory (heap) for the low-level driver to
- * transfer/dma to and from. It is obtained from up to 4 sources:
- * - 1 SG_SCATTER_SZ sized buffer on open() (per fd)
- * [could be less if SG_SCATTER_SZ bytes not available]
- * - obtain heap as required on write()s (get_free_pages)
+ * transfer/dma to and from. It is obtained from up to 3 sources:
+ * - obtain heap via get_free_pages()
* - obtain heap from the shared scsi dma pool
* - obtain heap from kernel directly (kmalloc) [last choice]
- * the 'alt_address' field in the scatter_list structure and the
+ * Each open() attempts to obtain a "reserve" buffer of
+ * SG_DEF_RESERVED_SIZE bytes (or 0 bytes if opened O_RDONLY). The
+ * amount actually obtained [which could be 0 bytes] can be found from
+ * the SG_GET_RESERVED_SIZE ioctl(). This reserved buffer size can
+ * be changed by calling the SG_SET_RESERVED_SIZE ioctl(). Since this
+ * is an ambit claim, it should be followed by a SG_GET_RESERVED_SIZE
+ * ioctl() to find out how much was actually obtained.
+ * A subsequent write() to this file descriptor will use the
+ * reserved buffer unless:
+ * - it is already in use (eg during command queuing)
+ * - or the write() needs a buffer size larger than the
+ * reserved size
+ * In these cases the write() will attempt to get the required memory
+ * for the duration of this request but, if memory is low, it may
+ * fail with ENOMEM.
+ *
+ * - The 'alt_address' field in the scatter_list structure and the
* related 'mem_src' indicate the source of the heap allocation.
*
*/
#include <scsi/sg.h>
-int sg_big_buff = SG_SCATTER_SZ; /* sg_big_buff is ro through sysctl */
+int sg_big_buff = SG_DEF_RESERVED_SIZE; /* sg_big_buff is ro through sysctl */
/* N.B. This global is here to keep existing software happy. It now holds
- the size of the "first buffer" of the most recent sucessful sg_open().
+ the size of the reserve buffer of the most recent successful sg_open().
Only available when 'sg' compiled into kernel (rather than a module).
- This should probably be deprecated (use SG_GET_RESERVED_SIZE instead). */
+ This is deprecated (use SG_GET_RESERVED_SIZE ioctl() instead). */
#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
static int sg_num_page = 0;
#endif
-#define SG_HEAP_FB 0 /* heap obtained at open() (one buffer per fd) */
#define SG_HEAP_PAGE 1 /* heap from kernel via get_free_pages() */
#define SG_HEAP_KMAL 2 /* heap from kernel via kmalloc() */
#define SG_HEAP_POOL 3 /* heap from scsi dma pool (mid-level) */
{
unsigned short use_sg; /* Number of pieces of scatter-gather */
unsigned short sglist_len; /* size of malloc'd scatter-gather list */
- unsigned bufflen; /* Size of data buffer */
+ unsigned bufflen; /* Size of (aggregate) data buffer */
unsigned b_malloc_len; /* actual len malloc'ed in buffer */
- void * buffer; /* Data buffer or scatter list (12 bytes) */
+ void * buffer; /* Data buffer or scatter list,12 bytes each*/
char mem_src; /* heap whereabouts of 'buffer' */
} Sg_scatter_hold; /* 20 bytes long on i386 */
Scsi_Cmnd * my_cmdp; /* NULL -> ready to read, else id */
struct sg_request * nextrp; /* NULL -> tail request (slist) */
struct sg_fd * parentfp; /* NULL -> not in use */
- Sg_scatter_hold data; /* hold buffers, perhaps scatter list */
- struct sg_header header; /* scsi command+info <include/sg.h> */
- char fb_used; /* 1 -> using fst_buf, normally 0 (used) */
-} Sg_request; /* around 72 bytes long on i386 */
+ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
+ struct sg_header header; /* scsi command+info, see <scsi/sg.h> */
+ char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
+} Sg_request; /* 72 bytes long on i386 */
typedef struct sg_fd /* holds the state of a file descriptor */
{
struct sg_device * parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
- char * fst_buf; /* try to grab SG_SCATTER_SZ sized buffer on open */
- int fb_size; /* actual size of allocated fst_buf */
- Sg_request * headrp; /* head of request slist, NULL->empty */
+ Sg_scatter_hold reserve; /* buffer held for this file descriptor */
+ unsigned save_scat_len; /* original length of trunc. scat. element */
+ Sg_request * headrp; /* head of request slist, NULL->empty */
struct fasync_struct * async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
- char low_dma; /* as in parent but possible overridden to 1 */
+ char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char closed; /* 1 -> fd closed but request(s) outstanding */
- char my_mem_src; /* heap whereabouts of this sg_fb object */
+ char my_mem_src; /* heap whereabouts of this Sg_fd object */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
char underrun_flag; /* 1 -> flag underruns, 0 -> don't, 2 -> test */
-} Sg_fd; /* around 1192 bytes long on i386 */
+ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
+} Sg_fd; /* 1208 bytes long on i386 */
typedef struct sg_device /* holds the state of each scsi generic device */
{
Scsi_Device * device;
- wait_queue_head_t generic_wait;/* queue open if O_EXCL on prev. open */
+ wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */
Sg_fd * headfp; /* first open fd belonging to this device */
kdev_t i_rdev; /* holds device major+minor number */
char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
- unsigned char merge_fd; /* 0->sequencing per fd (def) else fd count */
-} Sg_device; /* around 24 bytes long on i386 */
+ unsigned char merge_fd; /* 0->sequencing per fd, else fd count */
+} Sg_device; /* 24 bytes long on i386 */
static int sg_fasync(int fd, struct file * filp, int mode);
static void sg_command_done(Scsi_Cmnd * SCpnt);
-static int sg_sc_build(Sg_request * srp, int max_buff_size,
- const char * inp, int num_write_xfer);
-static int sg_sc_undo_rem(Sg_request * srp, char * outp,
- int num_read_xfer);
-static char * sg_malloc(Sg_request * srp, int size, int * retSzp,
+static int sg_start_req(Sg_request * srp, int max_buff_size,
+ const char * inp, int num_write_xfer);
+static void sg_finish_rem_req(Sg_request * srp, char * outp,
+ int num_read_xfer);
+static int sg_build_scat(Sg_scatter_hold * schp, int buff_size,
+ const Sg_fd * sfp);
+static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp,
+ int num_write_xfer);
+static void sg_remove_scat(Sg_scatter_hold * schp);
+static void sg_read_xfer(Sg_scatter_hold * schp, char * outp,
+ int num_read_xfer);
+static void sg_build_reserve(Sg_fd * sfp, int req_size);
+static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
int * mem_srcp);
-static void sg_free(Sg_request * srp, char * buff, int size, int mem_src);
+static void sg_free(char * buff, int size, int mem_src);
static char * sg_low_malloc(int rqSz, int lowDma, int mem_src,
int * retSzp);
static void sg_low_free(char * buff, int size, int mem_src);
static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id);
static Sg_request * sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, const Sg_request * srp);
-static int sg_fb_in_use(const Sg_fd * sfp);
+static int sg_res_in_use(const Sg_fd * sfp);
static void sg_clr_scpnt(Scsi_Cmnd * SCpnt);
static void sg_shorten_timeout(Scsi_Cmnd * scpnt);
static void sg_debug(const Sg_device * sdp, const Sg_fd * sfp, int part_of);
int flags = filp->f_flags;
Sg_device * sdp;
Sg_fd * sfp;
+ int res;
if ((NULL == sg_dev_arr) || (dev < 0) || (dev >= sg_template.dev_max))
return -ENXIO;
printk("sg_open: inode maj=%d, min=%d sdp maj=%d, min=%d\n",
MAJOR(inode->i_rdev), MINOR(inode->i_rdev),
MAJOR(sdp->i_rdev), MINOR(sdp->i_rdev));
+ /* If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+ * may try and take the device offline, in which case all further
+ * access to the device is prohibited. */
if(! scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
-/* if (O_RDWR != (flags & O_ACCMODE)) */
-/* return -EACCES; May just want to get to a ioctl, so remove */
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
- /* If we want exclusive access, then wait until the device is not
- * busy, and then set the flag to prevent anyone else from using it. */
+
if (flags & O_EXCL) {
if (O_RDONLY == (flags & O_ACCMODE))
return -EACCES; /* Can't lock it with read only access */
- while (sdp->headfp) {
- if (flags & O_NONBLOCK)
- return -EBUSY;
- interruptible_sleep_on(&sdp->generic_wait);
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
- sdp->exclude = 1;
+ if (sdp->headfp && (filp->f_flags & O_NONBLOCK))
+ return -EBUSY;
+ res = 0; /* following is a macro that beats race condition */
+ __wait_event_interruptible(sdp->o_excl_wait,
+ ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)),
+ res);
+ if (res)
+ return res; /* -ERESTARTSYS because signal hit process */
}
- else { /* Wait until nobody has an exclusive open on this device. */
- while (sdp->exclude) {
- if (flags & O_NONBLOCK)
- return -EBUSY;
- interruptible_sleep_on(&sdp->generic_wait);
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
+ else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
+ if (filp->f_flags & O_NONBLOCK)
+ return -EBUSY;
+ res = 0; /* following is a macro that beats race condition */
+ __wait_event_interruptible(sdp->o_excl_wait, (! sdp->exclude), res);
+ if (res)
+ return res; /* -ERESTARTSYS because signal hit process */
}
- /* OK, we should have grabbed the device. Mark the thing so
- * that other processes know that we have it, and initialize the
- * state variables to known values. */
if (! sdp->headfp) { /* no existing opens on this device */
sdp->sgdebug = 0;
sdp->sg_tablesize = sdp->device->host->sg_tablesize;
if(sg_template.module)
__MOD_DEC_USE_COUNT(sg_template.module);
sdp->exclude = 0;
- wake_up_interruptible(&sdp->generic_wait);
+ wake_up_interruptible(&sdp->o_excl_wait);
return 0;
}
-/* Read back the results of a SCSI command which was sent in a prior
- write(). */
static ssize_t sg_read(struct file * filp, char * buf,
size_t count, loff_t *ppos)
{
- int k;
+ int k, res;
Sg_device * sdp;
Sg_fd * sfp;
Sg_request * srp;
SCSI_LOG_TIMEOUT(3, printk("sg_read: dev=%d, count=%d\n",
MINOR(sdp->i_rdev), (int)count));
- /* If we are in the middle of error recovery, don't let anyone
- * else try and use this device. Also, if error recovery fails, it
- * may try and take the device offline, in which case all further
- * access to the device is prohibited. */
if(! scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
-
if (ppos != &filp->f_pos)
; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_WRITE, buf, count)))
if (sfp->force_packid && (count >= size_sg_header))
req_pack_id = shp->pack_id;
srp = sg_get_request(sfp, req_pack_id);
- while(! srp) {
+ if (! srp) { /* now wait on packet to arrive */
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&sfp->read_wait);
- if (signal_pending(current))
- return -ERESTARTSYS;
- srp = sg_get_request(sfp, req_pack_id);
+ res = 0; /* following is a macro that beats race condition */
+ __wait_event_interruptible(sfp->read_wait,
+ (srp = sg_get_request(sfp, req_pack_id)),
+ res);
+ if (res)
+ return res; /* -ERESTARTSYS because signal hit process */
}
if (2 != sfp->underrun_flag)
srp->header.pack_len = srp->header.reply_len; /* Why ????? */
if (count > srp->header.reply_len)
count = srp->header.reply_len;
if (count > size_sg_header) /* release does copy_to_user */
- sg_sc_undo_rem(srp, buf, count - size_sg_header);
+ sg_finish_rem_req(srp, buf, count - size_sg_header);
else
- sg_sc_undo_rem(srp, NULL, 0);
+ sg_finish_rem_req(srp, NULL, 0);
}
else {
count = (srp->header.result == 0) ? 0 : -EIO;
- sg_sc_undo_rem(srp, NULL, 0);
+ sg_finish_rem_req(srp, NULL, 0);
}
return count;
}
SCSI_LOG_TIMEOUT(3, printk("sg_write: dev=%d, count=%d\n",
MINOR(sdp->i_rdev), (int)count));
-/* If we are in the middle of error recovery, don't let anyone
- * else try and use this device. Also, if error recovery fails, it
- * may try and take the device offline, in which case all further
- * access to the device is prohibited. */
if(! scsi_block_when_processing_errors(sdp->device) )
return -ENXIO;
-
if (ppos != &filp->f_pos)
; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_READ, buf, count)))
return k; /* protects following copy_from_user()s + get_user()s */
-/* The minimum scsi command length is 6 bytes. If we get anything
- * less than this, it is clearly bogus. */
if (count < (size_sg_header + 6))
- return -EIO;
+ return -EIO; /* The minimum scsi command length is 6 bytes. */
srp = sg_add_request(sfp);
if (! srp) {
buf += size_sg_header;
srp->header.pack_len = count;
__get_user(opcode, buf);
- cmd_size = COMMAND_SIZE(opcode);
- if ((opcode >= 0xc0) && srp->header.twelve_byte)
- cmd_size = 12;
+ if (sfp->next_cmd_len > 0) {
+ if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
+ sfp->next_cmd_len = 0;
+ return -EDOM;
+ }
+ cmd_size = sfp->next_cmd_len;
+ sfp->next_cmd_len = 0; /* reset so only this write() effected */
+ }
+ else {
+ cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
+ if ((opcode >= 0xc0) && srp->header.twelve_byte)
+ cmd_size = 12;
+ }
SCSI_LOG_TIMEOUT(4, printk("sg_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int)opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > srp->header.reply_len) ? input_size :
srp->header.reply_len;
-/* Don't include the command header itself in the size. */
mxsize -= size_sg_header;
input_size -= size_sg_header;
-/* Verify user has actually passed enough bytes for this command. */
if (input_size < 0) {
- sg_sc_undo_rem(srp, NULL, 0);
- return -EIO;
+ sg_remove_request(sfp, srp);
+ return -EIO; /* User did not pass enough bytes for this command. */
}
-
-/* If we cannot allocate the buffer, report an error. */
- if ((k = sg_sc_build(srp, mxsize, buf + cmd_size, input_size))) {
+ if ((k = sg_start_req(srp, mxsize, buf + cmd_size, input_size))) {
SCSI_LOG_TIMEOUT(1, printk("sg_write: build err=%d\n", k));
- sg_sc_undo_rem(srp, NULL, 0);
- return k;
+ sg_finish_rem_req(srp, NULL, 0);
+ return k; /* probably out of space --> ENOMEM */
}
-
/* SCSI_LOG_TIMEOUT(7, printk("sg_write: allocating device\n")); */
-/* Grab a command pointer for the device we want to talk to. If we
- * don't want to block, just return with the appropriate message. */
if (! (SCpnt = scsi_allocate_device(NULL, sdp->device,
!(filp->f_flags & O_NONBLOCK)))) {
- sg_sc_undo_rem(srp, NULL, 0);
- return -EAGAIN;
+ sg_finish_rem_req(srp, NULL, 0);
+ return -EAGAIN; /* No available command blocks at the moment */
}
/* SCSI_LOG_TIMEOUT(7, printk("sg_write: device allocated\n")); */
-
srp->my_cmdp = SCpnt;
SCpnt->request.rq_dev = sdp->i_rdev;
SCpnt->request.rq_status = RQ_ACTIVE;
SCpnt->sense_buffer[0] = 0;
SCpnt->cmd_len = cmd_size;
- /* Now copy the SCSI command from the user's address space. */
__copy_from_user(cmnd, buf, cmd_size);
-
-/* Set the LUN field in the command structure. */
+/* Set the LUN field in the command structure, overriding user input */
cmnd[1]= (cmnd[1] & 0x1f) | (sdp->device->lun << 5);
+
/* SCSI_LOG_TIMEOUT(7, printk("sg_write: do cmd\n")); */
-/* Now pass the actual command down to the low-level driver. We
- * do not do any more here - when the interrupt arrives, we will
- * then do the post-processing. */
spin_lock_irqsave(&io_request_lock, flags);
SCpnt->use_sg = srp->data.use_sg;
SCpnt->sglist_len = srp->data.sglist_len;
srp->data.sglist_len = 0;
srp->data.bufflen = 0;
srp->data.buffer = NULL;
+/* Now send everything off to mid-level. The next time we hear about this
+ packet is when sg_command_done() is called (ie a callback). */
scsi_do_cmd(SCpnt, (void *)cmnd,
(void *)SCpnt->buffer, mxsize,
sg_command_done, sfp->timeout, SG_DEFAULT_RETRIES);
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: dev=%d, cmd=0x%x\n",
MINOR(sdp->i_rdev), (int)cmd_in));
- /* If we are in the middle of error recovery, then don't allow any
- * access to this device. Also, error recovery *may* have taken the
- * device offline, in which case all further access is prohibited. */
if(! scsi_block_when_processing_errors(sdp->device) )
return -ENXIO;
{
case SG_SET_TIMEOUT:
return get_user(sfp->timeout, (int *)arg);
- case SG_GET_TIMEOUT:
+ case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
return sfp->timeout; /* strange ..., for backward compatibility */
case SG_SET_FORCE_LOW_DMA:
result = get_user(val, (int *)arg);
if (result) return result;
if (val) {
- if ((0 == sfp->low_dma) && (0 == sg_fb_in_use(sfp))) {
- sg_low_free(sfp->fst_buf, sfp->fb_size, SG_HEAP_PAGE);
- sfp->fst_buf = sg_low_malloc(SG_SCATTER_SZ, 1,
- SG_HEAP_PAGE, &sfp->fb_size);
- }
sfp->low_dma = 1;
- if (! sfp->fst_buf)
- return -ENOMEM;
+ if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ val = (int)sfp->reserve.bufflen;
+ sg_remove_scat(&sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
}
else
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
case SG_GET_SG_TABLESIZE:
return put_user(sdp->sg_tablesize, (int *)arg);
case SG_SET_RESERVED_SIZE:
- /* currently ignored, future extension */
if (O_RDWR != (filp->f_flags & O_ACCMODE))
return -EACCES;
result = get_user(val, (int *)arg);
if (result) return result;
- /* logic should go here */
+ if (val != sfp->reserve.bufflen) {
+ if (sg_res_in_use(sfp))
+ return -EBUSY;
+ sg_remove_scat(&sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
return 0;
case SG_GET_RESERVED_SIZE:
- return put_user(sfp->fb_size, (int *)arg);
+ val = (int)sfp->reserve.bufflen;
+ return put_user(val, (int *)arg);
case SG_GET_MERGE_FD:
return put_user((int)sdp->merge_fd, (int *)arg);
case SG_SET_MERGE_FD:
return 0;
case SG_GET_UNDERRUN_FLAG:
return put_user((int)sfp->underrun_flag, (int *)arg);
+ case SG_NEXT_CMD_LEN:
+ result = get_user(val, (int *)arg);
+ if (result) return result;
+ sfp->next_cmd_len = (val > 0) ? val : 0;
+ return 0;
+ case SG_GET_VERSION_NUM:
+ return put_user(sg_version_num, (int *)arg);
case SG_EMULATED_HOST:
return put_user(sdp->device->host->hostt->emulated, (int *)arg);
case SCSI_IOCTL_SEND_COMMAND:
user already has read/write access to the generic device and so
can execute arbitrary SCSI commands. */
if (O_RDWR != (filp->f_flags & O_ACCMODE))
- return -EACCES; /* require write access since these could be
- dangerous */
+ return -EACCES; /* very dangerous things can be done here */
return scsi_ioctl_send_command(sdp->device, (void *)arg);
case SG_SET_DEBUG:
result = get_user(val, (int *)arg);
return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
default:
if (O_RDWR != (filp->f_flags & O_ACCMODE))
- return -EACCES; /* require write access since these could be
- dangerous */
+ return -EACCES; /* don't know so take safe approach */
return scsi_ioctl(sdp->device, cmd_in, (void *)arg);
}
}
}
/* This function is called by the interrupt handler when we
- * actually have a command that is complete. Change the
- * flags to indicate that we have a result. */
+ * actually have a command that is complete. */
static void sg_command_done(Scsi_Cmnd * SCpnt)
{
int dev = MINOR(SCpnt->request.rq_dev);
sg_clr_scpnt(SCpnt);
srp->my_cmdp = NULL;
- SCSI_LOG_TIMEOUT(4,
- printk("sg__done: dev=%d, scsi_stat=%d, res=0x%x\n",
+ SCSI_LOG_TIMEOUT(4, printk("sg__done: dev=%d, scsi_stat=%d, res=0x%x\n",
dev, (int)status_byte(SCpnt->result), (int)SCpnt->result));
-/* See if the command completed normally, or whether something went wrong. */
memcpy(srp->header.sense_buffer, SCpnt->sense_buffer,
sizeof(SCpnt->sense_buffer));
switch (host_byte(SCpnt->result))
- {
+ { /* This setup of 'result' is for backward compatibility and is best
+ ignored by the user who should use target, host + driver status */
case DID_OK:
- case DID_PASSTHROUGH: /* just guessing */
- case DID_SOFT_ERROR: /* just guessing */
+ case DID_PASSTHROUGH:
+ case DID_SOFT_ERROR:
srp->header.result = 0;
break;
case DID_NO_CONNECT:
srp->header.result = EIO;
break;
case DID_ERROR:
- /* There really should be DID_UNDERRUN and DID_OVERRUN error values,
- * and a means for callers of scsi_do_cmd to indicate whether an
- * underrun or overrun should signal an error. Until that can be
- * implemented, this kludge allows for returning useful error values
- * except in cases that return DID_ERROR that might be due to an
- * underrun. */
if (SCpnt->sense_buffer[0] == 0 &&
status_byte(SCpnt->result) == GOOD)
srp->header.result = 0;
/* filesystems using this device. */
sdp->device->changed = 1;
}
-
-/* Pick up error and status information */
srp->header.target_status = status_byte(SCpnt->result);
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.target_status) ||
SCSI_LOG_TIMEOUT(1,
printk("sg__done: already closed, freeing ...\n"));
/* should check if module is unloaded <<<<<<< */
- sg_sc_undo_rem(srp, NULL, 0);
+ sg_finish_rem_req(srp, NULL, 0);
if (NULL == sfp->headrp) {
SCSI_LOG_TIMEOUT(1,
printk("sg__done: already closed, final cleanup\n"));
sg_remove_sfp(sdp, sfp);
}
}
-/* Now wake up the process that is waiting for the result. */
- /* A. Rubini says this is preferable+faster than wake_up() */
+/* Now wake up any sg_read() that is waiting for this packet. */
wake_up_interruptible(&sfp->read_wait);
if ((sfp->async_qp) && (! closed))
kill_fasync(sfp->async_qp, SIGPOLL);
printk(" *** Following data belongs to invoking FD ***\n");
else if (! fp->parentdp)
printk(">> Following FD has NULL parent pointer ???\n");
- printk(" FD(%d): timeout=%d, fb_size=%d, cmd_q=%d\n",
- k, fp->timeout, fp->fb_size, (int)fp->cmd_q);
- printk(" low_dma=%d, force_packid=%d, urun_flag=%d, closed=%d\n",
- (int)fp->low_dma, (int)fp->force_packid,
- (int)fp->underrun_flag, (int)fp->closed);
+ printk(" FD(%d): timeout=%d, bufflen=%d, use_sg=%d\n",
+ k, fp->timeout, fp->reserve.bufflen, (int)fp->reserve.use_sg);
+ printk(" low_dma=%d, cmd_q=%d, s_sc_len=%d, f_packid=%d\n",
+ (int)fp->low_dma, (int)fp->cmd_q, (int)fp->save_scat_len,
+ (int)fp->force_packid);
+ printk(" urun_flag=%d, next_cmd_len=%d, closed=%d\n",
+ (int)fp->underrun_flag, (int)fp->next_cmd_len,
+ (int)fp->closed);
srp = fp->headrp;
if (NULL == srp)
printk(" No requests active\n");
while (srp) {
- if (srp->fb_used)
- printk("using 1st buff >> ");
+ if (srp->res_used)
+ printk("reserved buff >> ");
else
printk(" ");
if (srp->my_cmdp)
SCSI_LOG_TIMEOUT(3, printk("sg_attach: dev=%d \n", k));
sdp->device = scsidp;
- init_waitqueue_head(&sdp->generic_wait);
+ init_waitqueue_head(&sdp->o_excl_wait);
sdp->headfp= NULL;
sdp->exclude = 0;
sdp->merge_fd = 0; /* Cope with SG_DEF_MERGE_FD on open */
}
scsidp->attached--;
sg_template.nr_dev--;
- /*
- * avoid associated device /dev/sg? bying incremented
- * each time module is inserted/removed , <dan@lectra.fr>
- */
+/* avoid associated device /dev/sg? being incremented
+ * each time module is inserted/removed , <dan@lectra.fr> */
sg_template.dev_noticed--;
return;
}
#endif
}
-static int sg_sc_build(Sg_request * srp, int max_buff_size,
- const char * inp, int num_write_xfer)
+static int sg_start_req(Sg_request * srp, int max_buff_size,
+ const char * inp, int num_write_xfer)
+{
+ int res;
+ Sg_fd * sfp = srp->parentfp;
+ Sg_scatter_hold * req_schp = &srp->data;
+ Sg_scatter_hold * rsv_schp = &sfp->reserve;
+
+ SCSI_LOG_TIMEOUT(4, printk("sg_start_req: max_buff_size=%d\n",
+ max_buff_size));
+ if ((! sg_res_in_use(sfp)) && (max_buff_size <= rsv_schp->bufflen)) {
+ sg_link_reserve(sfp, srp, max_buff_size);
+ sg_write_xfer(req_schp, inp, num_write_xfer);
+ }
+ else {
+ res = sg_build_scat(req_schp, max_buff_size, sfp);
+ if (res) {
+ sg_remove_scat(req_schp);
+ return res;
+ }
+ sg_write_xfer(req_schp, inp, num_write_xfer);
+ }
+ return 0;
+}
+
+static void sg_finish_rem_req(Sg_request * srp, char * outp,
+ int num_read_xfer)
+{
+ Sg_fd * sfp = srp->parentfp;
+ Sg_scatter_hold * req_schp = &srp->data;
+
+ SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n",
+ (int)srp->res_used));
+ if (num_read_xfer > 0)
+ sg_read_xfer(req_schp, outp, num_read_xfer);
+ if (srp->res_used)
+ sg_unlink_reserve(sfp, srp);
+ else
+ sg_remove_scat(req_schp);
+ sg_remove_request(sfp, srp);
+}
+
+static int sg_build_scat(Sg_scatter_hold * schp, int buff_size,
+ const Sg_fd * sfp)
{
int ret_sz, mem_src;
- int blk_size = max_buff_size;
+ int blk_size = buff_size;
char * p = NULL;
- if ((blk_size < 0) || (! srp))
+ if ((blk_size < 0) || (! sfp))
return -EFAULT;
-
- SCSI_LOG_TIMEOUT(4, printk("sg_sc_build: m_b_s=%d, num_write_xfer=%d\n",
- max_buff_size, num_write_xfer));
if (0 == blk_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
- SCSI_LOG_TIMEOUT(5, printk("sg_sc_build: blk_size=%d\n", blk_size));
-
+ SCSI_LOG_TIMEOUT(4, printk("sg_build_scat: buff_size=%d, blk_size=%d\n",
+ buff_size, blk_size));
if (blk_size <= SG_SCATTER_SZ) {
- mem_src = SG_HEAP_FB;
- p = sg_malloc(srp, blk_size, &ret_sz, &mem_src);
+ mem_src = SG_HEAP_PAGE;
+ p = sg_malloc(sfp, blk_size, &ret_sz, &mem_src);
if (! p)
return -ENOMEM;
if (blk_size == ret_sz) { /* got it on the first attempt */
- srp->data.buffer = p;
- srp->data.bufflen = blk_size;
- srp->data.mem_src = mem_src;
- srp->data.b_malloc_len = blk_size;
- if (inp && (num_write_xfer > 0))
- __copy_from_user(srp->data.buffer, inp, num_write_xfer);
+ schp->use_sg = 0;
+ schp->buffer = p;
+ schp->bufflen = blk_size;
+ schp->mem_src = mem_src;
+ schp->b_malloc_len = blk_size;
return 0;
}
}
else {
mem_src = SG_HEAP_PAGE;
- p = sg_malloc(srp, SG_SCATTER_SZ, &ret_sz, &mem_src);
+ p = sg_malloc(sfp, SG_SCATTER_SZ, &ret_sz, &mem_src);
if (! p)
return -ENOMEM;
}
int k, rem_sz, num, nxt;
int sc_bufflen = PAGE_SIZE;
int mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1;
- int sg_tablesize = srp->parentfp->parentdp->sg_tablesize;
+ int sg_tablesize = sfp->parentdp->sg_tablesize;
int first = 1;
k = SG_HEAP_KMAL; /* want to protect mem_src, use k as scratch */
- srp->data.buffer = (struct scatterlist *)sg_malloc(srp,
+ schp->buffer = (struct scatterlist *)sg_malloc(sfp,
sc_bufflen, &num, &k);
- srp->data.mem_src = (char)k;
+ schp->mem_src = (char)k;
/* N.B. ret_sz and mem_src carried into this block ... */
- if (! srp->data.buffer)
+ if (! schp->buffer)
return -ENOMEM;
else if (num != sc_bufflen) {
sc_bufflen = num;
mx_sc_elems = (sc_bufflen / sizeof(struct scatterlist)) - 1;
}
- srp->data.sglist_len = sc_bufflen;
- memset(srp->data.buffer, 0, sc_bufflen);
- for (k = 0, sclp = srp->data.buffer, rem_sz = blk_size, nxt =0;
+ schp->sglist_len = sc_bufflen;
+ memset(schp->buffer, 0, sc_bufflen);
+ for (k = 0, sclp = schp->buffer, rem_sz = blk_size, nxt =0;
(k < sg_tablesize) && (rem_sz > 0) && (k < mx_sc_elems);
++k, rem_sz -= ret_sz, ++sclp) {
if (first)
else {
num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
mem_src = SG_HEAP_PAGE;
- p = sg_malloc(srp, num, &ret_sz, &mem_src);
+ p = sg_malloc(sfp, num, &ret_sz, &mem_src);
if (! p)
break;
}
sclp->length = ret_sz;
sclp->alt_address = (char *)(long)mem_src;
- if(inp && (num_write_xfer > 0)) {
- num = (ret_sz > num_write_xfer) ? num_write_xfer : ret_sz;
- __copy_from_user(sclp->address, inp, num);
- num_write_xfer -= num;
- inp += num;
- }
SCSI_LOG_TIMEOUT(5,
- printk("sg_sc_build: k=%d, a=0x%p, len=%d, ms=%d\n",
+ printk("sg_build_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
k, sclp->address, ret_sz, mem_src));
} /* end of for loop */
- srp->data.use_sg = k;
+ schp->use_sg = k;
SCSI_LOG_TIMEOUT(5,
- printk("sg_sc_build: use_sg=%d, rem_sz=%d\n", k, rem_sz));
- srp->data.bufflen = blk_size;
+ printk("sg_build_scat: use_sg=%d, rem_sz=%d\n", k, rem_sz));
+ schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
}
return 0;
}
-static int sg_sc_undo_rem(Sg_request * srp, char * outp,
- int num_read_xfer)
+static void sg_write_xfer(Sg_scatter_hold * schp, const char * inp,
+ int num_write_xfer)
{
- if (! srp)
- return -EFAULT;
- SCSI_LOG_TIMEOUT(4, printk("sg_sc_undo_rem: num_read_xfer=%d\n",
- num_read_xfer));
- if (! outp)
- num_read_xfer = 0;
- if(srp->data.use_sg) {
- int k, num, mem_src;
- struct scatterlist * sclp = (struct scatterlist *)srp->data.buffer;
-
- for (k = 0; (k < srp->data.use_sg) && sclp->address; ++k, ++sclp) {
- if (num_read_xfer > 0) {
- num = (int)sclp->length;
- if (num > num_read_xfer) {
- __copy_to_user(outp, sclp->address, num_read_xfer);
- outp += num_read_xfer;
- num_read_xfer = 0;
- }
- else {
- __copy_to_user(outp, sclp->address, num);
- outp += num;
- num_read_xfer -= num;
- }
+ SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_write_xfer=%d, use_sg=%d\n",
+ num_write_xfer, schp->use_sg));
+ if ((! inp) || (num_write_xfer <= 0))
+ return;
+ if (schp->use_sg > 0) {
+ int k, num;
+ struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
+
+ for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
+ num = (int)sclp->length;
+ if (num > num_write_xfer) {
+ __copy_from_user(sclp->address, inp, num_write_xfer);
+ break;
}
+ else {
+ __copy_from_user(sclp->address, inp, num);
+ num_write_xfer -= num;
+ if (num_write_xfer <= 0)
+ break;
+ inp += num;
+ }
+ }
+ }
+ else
+ __copy_from_user(schp->buffer, inp, num_write_xfer);
+}
+
+static void sg_remove_scat(Sg_scatter_hold * schp)
+{
+ SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: use_sg=%d\n", schp->use_sg));
+ if(schp->use_sg > 0) {
+ int k, mem_src;
+ struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
+
+ for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
mem_src = (int)(long)sclp->alt_address;
SCSI_LOG_TIMEOUT(5,
- printk("sg_sc_undo_rem: k=%d, a=0x%p, len=%d, ms=%d\n",
+ printk("sg_remove_scat: k=%d, a=0x%p, len=%d, ms=%d\n",
k, sclp->address, sclp->length, mem_src));
- sg_free(srp, sclp->address, sclp->length, mem_src);
+ sg_free(sclp->address, sclp->length, mem_src);
+ sclp->address = NULL;
+ sclp->length = 0;
}
- sg_free(srp, srp->data.buffer, srp->data.sglist_len,
- srp->data.mem_src);
+ sg_free(schp->buffer, schp->sglist_len, schp->mem_src);
+ }
+ else if (schp->buffer)
+ sg_free(schp->buffer, schp->b_malloc_len, schp->mem_src);
+ schp->buffer = NULL;
+ schp->bufflen = 0;
+ schp->use_sg = 0;
+ schp->sglist_len = 0;
+}
+
+static void sg_read_xfer(Sg_scatter_hold * schp, char * outp,
+ int num_read_xfer)
+{
+ SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_read_xfer=%d\n",
+ num_read_xfer));
+ if ((! outp) || (num_read_xfer <= 0))
+ return;
+ if(schp->use_sg > 0) {
+ int k, num;
+ struct scatterlist * sclp = (struct scatterlist *)schp->buffer;
+
+ for (k = 0; (k < schp->use_sg) && sclp->address; ++k, ++sclp) {
+ num = (int)sclp->length;
+ if (num > num_read_xfer) {
+ __copy_to_user(outp, sclp->address, num_read_xfer);
+ break;
+ }
+ else {
+ __copy_to_user(outp, sclp->address, num);
+ num_read_xfer -= num;
+ if (num_read_xfer <= 0)
+ break;
+ outp += num;
+ }
+ }
+ }
+ else
+ __copy_to_user(outp, schp->buffer, num_read_xfer);
+}
+
+static void sg_build_reserve(Sg_fd * sfp, int req_size)
+{
+ Sg_scatter_hold * schp = &sfp->reserve;
+
+ SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
+ do {
+ if (req_size < PAGE_SIZE)
+ req_size = PAGE_SIZE;
+ if (0 == sg_build_scat(schp, req_size, sfp))
+ return;
+ else
+ sg_remove_scat(schp);
+ req_size >>= 1; /* divide by 2 */
+ } while (req_size > (PAGE_SIZE / 2));
+}
+
+static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
+{
+ Sg_scatter_hold * req_schp = &srp->data;
+ Sg_scatter_hold * rsv_schp = &sfp->reserve;
+
+ SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
+ if (rsv_schp->use_sg > 0) {
+ int k, num;
+ int rem = size;
+ struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;
+
+ for (k = 0; k < rsv_schp->use_sg; ++k, ++sclp) {
+ num = (int)sclp->length;
+ if (rem <= num) {
+ sfp->save_scat_len = num;
+ sclp->length = (unsigned)rem;
+ break;
+ }
+ else
+ rem -= num;
+ }
+ if (k < rsv_schp->use_sg) {
+ req_schp->use_sg = k + 1; /* adjust scatter list length */
+ req_schp->bufflen = size;
+ req_schp->sglist_len = rsv_schp->sglist_len;
+ req_schp->buffer = rsv_schp->buffer;
+ req_schp->mem_src = rsv_schp->mem_src;
+ req_schp->b_malloc_len = rsv_schp->b_malloc_len;
+ }
+ else
+ SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}
else {
- if (num_read_xfer > 0)
- __copy_to_user(outp, srp->data.buffer, num_read_xfer);
- sg_free(srp, srp->data.buffer, srp->data.b_malloc_len,
- srp->data.mem_src);
+ req_schp->use_sg = 0;
+ req_schp->bufflen = size;
+ req_schp->buffer = rsv_schp->buffer;
+ req_schp->mem_src = rsv_schp->mem_src;
+ req_schp->use_sg = rsv_schp->use_sg;
+ req_schp->b_malloc_len = rsv_schp->b_malloc_len;
}
- if (0 == sg_remove_request(srp->parentfp, srp)) {
- SCSI_LOG_TIMEOUT(1, printk("sg_sc_undo_rem: srp=0x%p not found\n",
- srp));
+ srp->res_used = 1;
+}
+
+static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
+{
+ Sg_scatter_hold * req_schp = &srp->data;
+ Sg_scatter_hold * rsv_schp = &sfp->reserve;
+
+ SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->use_sg=%d\n",
+ (int)req_schp->use_sg));
+ if (rsv_schp->use_sg > 0) {
+ struct scatterlist * sclp = (struct scatterlist *)rsv_schp->buffer;
+
+ if (sfp->save_scat_len > 0)
+ (sclp + (req_schp->use_sg - 1))->length =
+ (unsigned)sfp->save_scat_len;
+ else
+ SCSI_LOG_TIMEOUT(1, printk(
+ "sg_unlink_reserve: BAD save_scat_len\n"));
}
- return 0;
+ req_schp->use_sg = 0;
+ req_schp->bufflen = 0;
+ req_schp->buffer = NULL;
+ req_schp->sglist_len = 0;
+ sfp->save_scat_len = 0;
+ srp->res_used = 0;
}
static Sg_request * sg_get_request(const Sg_fd * sfp, int pack_id)
if (resp) {
resp->parentfp = sfp;
resp->nextrp = NULL;
- resp->fb_used = 0;
+ resp->res_used = 0;
memset(&resp->data, 0, sizeof(Sg_scatter_hold));
memset(&resp->header, 0, sizeof(struct sg_header));
resp->my_cmdp = NULL;
sdp->device->host->unchecked_isa_dma : 1;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->underrun_flag = SG_DEF_UNDERRUN_FLAG;
- if (get_reserved)
- sfp->fst_buf = sg_low_malloc(SG_SCATTER_SZ, sfp->low_dma,
- SG_HEAP_PAGE, &sfp->fb_size);
- else
- sfp->fst_buf = NULL;
- if (! sfp->fst_buf)
- sfp->fb_size = 0;
sfp->parentdp = sdp;
if (! sdp->headfp)
sdp->headfp = sfp;
pfp = pfp->nextfp;
pfp->nextfp = sfp;
}
- sg_big_buff = sfp->fb_size; /* show sysctl most recent "fb" size */
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p, m_s=%d\n",
sfp, (int)sfp->my_mem_src));
- SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: fb_sz=%d, fst_buf=0x%p\n",
- sfp->fb_size, sfp->fst_buf));
+ if (get_reserved) {
+ sg_build_reserve(sfp, SG_DEF_RESERVED_SIZE);
+ sg_big_buff = sfp->reserve.bufflen; /* sysctl shows most recent size */
+ SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, use_sg=%d\n",
+ sfp->reserve.bufflen, sfp->reserve.use_sg));
+ }
return sfp;
}
while (srp) {
tsrp = srp->nextrp;
if (! srp->my_cmdp)
- sg_sc_undo_rem(srp, NULL, 0);
+ sg_finish_rem_req(srp, NULL, 0);
else
++dirty;
srp = tsrp;
prev_fp = fp;
}
}
-SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: fb_sz=%d, fst_buf=0x%p\n",
- sfp->fb_size, sfp->fst_buf));
- sg_low_free(sfp->fst_buf, sfp->fb_size, SG_HEAP_PAGE);
+ if (sfp->reserve.bufflen > 0) {
+SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: bufflen=%d, use_sg=%d\n",
+ (int)sfp->reserve.bufflen, (int)sfp->reserve.use_sg));
+ sg_remove_scat(&sfp->reserve);
+ }
sfp->parentdp = NULL;
- sfp->fst_buf = NULL;
- sfp->fb_size = 0;
SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: sfp=0x%p\n", sfp));
sg_low_free((char *)sfp, sizeof(Sg_fd), sfp->my_mem_src);
res = 1;
return res;
}
-static int sg_fb_in_use(const Sg_fd * sfp)
+static int sg_res_in_use(const Sg_fd * sfp)
{
const Sg_request * srp = sfp->headrp;
while (srp) {
- if (srp->fb_used)
+ if (srp->res_used)
return 1;
srp = srp->nextrp;
}
return resp;
}
-static char * sg_malloc(Sg_request * srp, int size, int * retSzp,
+static char * sg_malloc(const Sg_fd * sfp, int size, int * retSzp,
int * mem_srcp)
{
char * resp = NULL;
if (size <= 0)
;
else {
- Sg_fd * sfp = srp->parentfp;
int low_dma = sfp->low_dma;
int l_ms = -1; /* invalid value */
switch (*mem_srcp)
{
case SG_HEAP_PAGE:
- case SG_HEAP_FB:
l_ms = (size < PAGE_SIZE) ? SG_HEAP_POOL : SG_HEAP_PAGE;
resp = sg_low_malloc(size, low_dma, l_ms, 0);
if (resp)
break;
- if ((size <= sfp->fb_size) && (0 == sg_fb_in_use(sfp))) {
- SCSI_LOG_TIMEOUT(6,
- printk("sg_malloc: scsi_malloc failed, get fst_buf\n"));
- resp = sfp->fst_buf;
- srp->fb_used = 1;
- l_ms = SG_HEAP_FB;
- break;
- }
resp = sg_low_malloc(size, low_dma, l_ms, &size);
if (! resp) {
l_ms = (SG_HEAP_POOL == l_ms) ? SG_HEAP_PAGE : SG_HEAP_POOL;
mem_src, buff, size);
}
-static void sg_free(Sg_request * srp, char * buff, int size, int mem_src)
+static void sg_free(char * buff, int size, int mem_src)
{
- Sg_fd * sfp = srp->parentfp;
-
SCSI_LOG_TIMEOUT(6,
printk("sg_free: buff=0x%p, size=%d\n", buff, size));
- if ((! sfp) || (! buff) || (size <= 0))
+ if ((! buff) || (size <= 0))
;
- else if (sfp->fst_buf == buff) {
- srp->fb_used = 0;
- SCSI_LOG_TIMEOUT(6, printk("sg_free: left cause fst_buf\n"));
- }
else
sg_low_free(buff, size, mem_src);
}
Johannes Erdfelt <jerdfelt@sventech.com>
ham <ham@unsuave.com>
Bradley M Keryan <keryan@andrew.cmu.edu>
+ Paul Mackerras <paulus@cs.anu.edu.au>
Vojtech Pavlik <vojtech@twilight.ucw.cz>
Gregory P. Smith <greg@electricrain.com>
Linus Torvalds <torvalds@transmeta.com>
dep_tristate 'USB keyboard support' CONFIG_USB_KBD $CONFIG_USB
dep_tristate 'USB audio parsing support' CONFIG_USB_AUDIO $CONFIG_USB
dep_tristate 'USB Abstract Control Model support' CONFIG_USB_ACM $CONFIG_USB
- dep_tristate 'Preliminary USB Printer support' CONFIG_USB_PRINTER $CONFIG_USB
+ dep_tristate 'USB Printer support' CONFIG_USB_PRINTER $CONFIG_USB
+ dep_tristate 'USB SCSI Support' CONFIG_USB_SCSI $CONFIG_USB
+ if [ "$CONFIG_USB_SCSI" != "n" ]; then
+ dep_tristate ' USB SCSI verbose debug' CONFIG_USB_SCSI_DEBUG $CONFIG_USB_SCSI
+ fi
fi
endmenu
MIX_OBJS += cpia.o
endif
+ifeq ($(CONFIG_USB_SCSI),y)
+ L_OBJS += usb_scsi.o
+ ifeq ($(CONFIG_USB_SCSI_DEBUG),y)
+ L_OBJS += usb_scsi_debug.o
+ endif
+endif
+
include $(TOPDIR)/Rules.make
keymap.o: keymap.c
keymap.c: maps/serial.map maps/usb.map maps/fixup.map
./mkmap > $@
+keymap-mac.o: keymap-mac.c
+keymap-mac.c: maps/mac.map maps/usb.map
+ ./mkmap.adb > $@
+
+ifneq ($(CONFIG_MAC_KEYBOARD),y)
usb-keyboard.o: keymap.o keyboard.o
$(LD) $(LD_RFLAG) -r -o $@ keymap.o keyboard.o
+else
+usb-keyboard.o: keymap-mac.o keyboard.o
+ $(LD) $(LD_RFLAG) -r -o $@ keymap-mac.o keyboard.o
+endif
usb-uhci.o: uhci.o uhci-debug.o
$(LD) $(LD_RFLAG) -r -o $@ uhci.o uhci-debug.o
+June 08, 1999 01:23:34
+
+Paul Mackerras went through the OHCI (& USB code) to fix most of the
+endianness issues so that the code now works on Linux-PPC. He also
+simplified add_td_to_ed to be simpler & atomic to the hardware.
+
May 16, 1999 16:20:54
EDs are now allocated dynamically from their device's pool. Root hub
return;
}
- portstatus = *((unsigned short *)buf + 0);
- portchange = *((unsigned short *)buf + 1);
+ portstatus = le16_to_cpup((unsigned short *)buf + 0);
+ portchange = le16_to_cpup((unsigned short *)buf + 1);
if ((!(portstatus & USB_PORT_STAT_CONNECTION)) &&
(!(portstatus & USB_PORT_STAT_ENABLE))) {
continue;
}
- portstatus = *((unsigned short *)buf + 0);
- portchange = *((unsigned short *)buf + 1);
+ portstatus = le16_to_cpup((unsigned short *)buf + 0);
+ portchange = le16_to_cpup((unsigned short *)buf + 1);
if (portchange & USB_PORT_STAT_C_CONNECTION) {
printk("hub: port %d connection change\n", i + 1);
int scancode = (int) usb_kbd_map[key];
if(scancode)
{
+#ifndef CONFIG_MAC_KEYBOARD
if(scancode & PCKBD_NEEDS_E0)
{
handle_scancode(0xe0, 1);
}
+#endif /* CONFIG_MAC_KEYBOARD */
handle_scancode((scancode & ~PCKBD_NEEDS_E0), down);
}
}
struct usb_endpoint_descriptor *endpoint;
struct usb_keyboard *kbd;
+ if (dev->descriptor.bNumConfigurations < 1)
+ return -1;
+
interface = &dev->config[0].altsetting[0].interface[0];
endpoint = &interface->endpoint[0];
--- /dev/null
+unsigned char usb_kbd_map[256] =
+{
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x0b, 0x08, 0x02,
+ 0x0e, 0x03, 0x05, 0x04, 0x22, 0x26, 0x28, 0x25,
+
+ 0x2e, 0x2d, 0x1f, 0x23, 0x0c, 0x0f, 0x01, 0x11,
+ 0x20, 0x09, 0x0d, 0x07, 0x10, 0x06, 0x12, 0x13,
+
+ 0x14, 0x15, 0x17, 0x16, 0x1a, 0x1c, 0x19, 0x1d,
+ 0x24, 0x35, 0x33, 0x30, 0x31, 0x1b, 0x18, 0x21,
+
+ 0x1e, 0x2a, 0x00, 0x29, 0x27, 0x32, 0x2b, 0x2f,
+ 0x2c, 0x39, 0x7a, 0x78, 0x63, 0x76, 0x60, 0x61,
+
+ 0x62, 0x64, 0x65, 0x6d, 0x67, 0x6f, 0x69, 0x6b,
+ 0x71, 0x72, 0x73, 0x74, 0x75, 0x77, 0x79, 0x3c,
+
+ 0x3b, 0x3d, 0x3e, 0x47, 0x4b, 0x43, 0x4e, 0x45,
+ 0x4c, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+
+ 0x5b, 0x5c, 0x52, 0x41, 0x00, 0x00, 0x00, 0x00,
+ 0x69, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x36, 0x38, 0x3a, 0x37, 0x7d, 0x7b, 0x7c, 0x37,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
--- /dev/null
+# Kernel keymap for Macintoshes. This uses 7 modifier combinations.
+keymaps 0-2,4-5,8,12
+#
+# Fixups:
+keycode 0x69 = Print_Screen
+keycode 0x6b = F14
+keycode 0x37 = Window_R
+#
+#keycode 0x00 = a
+# hack!
+keycode 0x80 = a
+ altgr keycode 0x00 = Hex_A
+keycode 0x01 = s
+keycode 0x02 = d
+ altgr keycode 0x02 = Hex_D
+keycode 0x03 = f
+ altgr keycode 0x03 = Hex_F
+keycode 0x04 = h
+keycode 0x05 = g
+keycode 0x06 = z
+keycode 0x07 = x
+keycode 0x08 = c
+ altgr keycode 0x08 = Hex_C
+keycode 0x09 = v
+keycode 0x0a =
+keycode 0x0b = b
+ altgr keycode 0x0b = Hex_B
+keycode 0x0c = q
+keycode 0x0d = w
+keycode 0x0e = e
+ altgr keycode 0x0e = Hex_E
+keycode 0x0f = r
+keycode 0x10 = y
+keycode 0x11 = t
+keycode 0x12 = one exclam
+ alt keycode 0x12 = Meta_one
+keycode 0x13 = two at at
+ control keycode 0x13 = nul
+ shift control keycode 0x13 = nul
+ alt keycode 0x13 = Meta_two
+keycode 0x14 = three numbersign
+ control keycode 0x14 = Escape
+ alt keycode 0x14 = Meta_three
+keycode 0x15 = four dollar dollar
+ control keycode 0x15 = Control_backslash
+ alt keycode 0x15 = Meta_four
+keycode 0x16 = six asciicircum
+ control keycode 0x16 = Control_asciicircum
+ alt keycode 0x16 = Meta_six
+keycode 0x17 = five percent
+ control keycode 0x17 = Control_bracketright
+ alt keycode 0x17 = Meta_five
+keycode 0x18 = equal plus
+ alt keycode 0x18 = Meta_equal
+keycode 0x19 = nine parenleft bracketright
+ alt keycode 0x19 = Meta_nine
+keycode 0x1a = seven ampersand braceleft
+ control keycode 0x1a = Control_underscore
+ alt keycode 0x1a = Meta_seven
+keycode 0x1b = minus underscore backslash
+ control keycode 0x1b = Control_underscore
+ shift control keycode 0x1b = Control_underscore
+ alt keycode 0x1b = Meta_minus
+keycode 0x1c = eight asterisk bracketleft
+ control keycode 0x1c = Delete
+ alt keycode 0x1c = Meta_eight
+keycode 0x1d = zero parenright braceright
+ alt keycode 0x1d = Meta_zero
+keycode 0x1e = bracketright braceright asciitilde
+ control keycode 0x1e = Control_bracketright
+ alt keycode 0x1e = Meta_bracketright
+keycode 0x1f = o
+keycode 0x20 = u
+keycode 0x21 = bracketleft braceleft
+ control keycode 0x21 = Escape
+ alt keycode 0x21 = Meta_bracketleft
+keycode 0x22 = i
+keycode 0x23 = p
+keycode 0x24 = Return
+ alt keycode 0x24 = Meta_Control_m
+keycode 0x25 = l
+keycode 0x26 = j
+keycode 0x27 = apostrophe quotedbl
+ control keycode 0x27 = Control_g
+ alt keycode 0x27 = Meta_apostrophe
+keycode 0x28 = k
+keycode 0x29 = semicolon colon
+ alt keycode 0x29 = Meta_semicolon
+keycode 0x2a = backslash bar
+ control keycode 0x2a = Control_backslash
+ alt keycode 0x2a = Meta_backslash
+keycode 0x2b = comma less
+ alt keycode 0x2b = Meta_comma
+keycode 0x2c = slash question
+ control keycode 0x2c = Delete
+ alt keycode 0x2c = Meta_slash
+keycode 0x2d = n
+keycode 0x2e = m
+keycode 0x2f = period greater
+ control keycode 0x2f = Compose
+ alt keycode 0x2f = Meta_period
+keycode 0x30 = Tab Tab
+ alt keycode 0x30 = Meta_Tab
+keycode 0x31 = space space
+ control keycode 0x31 = nul
+ alt keycode 0x31 = Meta_space
+keycode 0x32 = grave asciitilde
+ control keycode 0x32 = nul
+ alt keycode 0x32 = Meta_grave
+keycode 0x33 = Delete Delete
+ control keycode 0x33 = BackSpace
+ alt keycode 0x33 = Meta_Delete
+keycode 0x34 =
+keycode 0x35 = Escape Escape
+ alt keycode 0x35 = Meta_Escape
+keycode 0x36 = Control
+keycode 0x37 = Window
+keycode 0x38 = Shift
+keycode 0x39 = Caps_Lock
+keycode 0x3a = Alt
+keycode 0x3b = Left
+ alt keycode 0x3b = Decr_Console
+keycode 0x3c = Right
+ alt keycode 0x3c = Incr_Console
+keycode 0x3d = Down
+keycode 0x3e = Up
+keycode 0x3f =
+keycode 0x40 =
+keycode 0x41 = KP_Period
+keycode 0x42 =
+keycode 0x43 = KP_Multiply
+keycode 0x44 =
+keycode 0x45 = KP_Add
+keycode 0x46 =
+keycode 0x47 = Num_Lock
+# shift keycode 0x47 = Bare_Num_Lock
+keycode 0x48 =
+keycode 0x49 =
+keycode 0x4a =
+keycode 0x4b = KP_Divide
+keycode 0x4c = KP_Enter
+keycode 0x4d =
+keycode 0x4e = KP_Subtract
+keycode 0x4f =
+keycode 0x50 =
+keycode 0x51 =
+#keycode 0x51 = KP_Equals
+keycode 0x52 = KP_0
+ alt keycode 0x52 = Ascii_0
+ altgr keycode 0x52 = Hex_0
+keycode 0x53 = KP_1
+ alt keycode 0x53 = Ascii_1
+ altgr keycode 0x53 = Hex_1
+keycode 0x54 = KP_2
+ alt keycode 0x54 = Ascii_2
+ altgr keycode 0x54 = Hex_2
+keycode 0x55 = KP_3
+ alt keycode 0x55 = Ascii_3
+ altgr keycode 0x55 = Hex_3
+keycode 0x56 = KP_4
+ alt keycode 0x56 = Ascii_4
+ altgr keycode 0x56 = Hex_4
+keycode 0x57 = KP_5
+ alt keycode 0x57 = Ascii_5
+ altgr keycode 0x57 = Hex_5
+keycode 0x58 = KP_6
+ alt keycode 0x58 = Ascii_6
+ altgr keycode 0x58 = Hex_6
+keycode 0x59 = KP_7
+ alt keycode 0x59 = Ascii_7
+ altgr keycode 0x59 = Hex_7
+keycode 0x5b = KP_8
+ alt keycode 0x5b = Ascii_8
+ altgr keycode 0x5b = Hex_8
+keycode 0x5c = KP_9
+ alt keycode 0x5c = Ascii_9
+ altgr keycode 0x5c = Hex_9
+keycode 0x5d =
+keycode 0x5e =
+keycode 0x5f =
+keycode 0x60 = F5 F15 Console_17
+ control keycode 0x60 = F5
+ alt keycode 0x60 = Console_5
+ control alt keycode 0x60 = Console_5
+keycode 0x61 = F6 F16 Console_18
+ control keycode 0x61 = F6
+ alt keycode 0x61 = Console_6
+ control alt keycode 0x61 = Console_6
+keycode 0x62 = F7 F17 Console_19
+ control keycode 0x62 = F7
+ alt keycode 0x62 = Console_7
+ control alt keycode 0x62 = Console_7
+keycode 0x63 = F3 F13 Console_15
+ control keycode 0x63 = F3
+ alt keycode 0x63 = Console_3
+ control alt keycode 0x63 = Console_3
+keycode 0x64 = F8 F18 Console_20
+ control keycode 0x64 = F8
+ alt keycode 0x64 = Console_8
+ control alt keycode 0x64 = Console_8
+keycode 0x65 = F9 F19 Console_21
+ control keycode 0x65 = F9
+ alt keycode 0x65 = Console_9
+ control alt keycode 0x65 = Console_9
+keycode 0x66 =
+keycode 0x67 = F11 F11 Console_23
+ control keycode 0x67 = F11
+ alt keycode 0x67 = Console_11
+ control alt keycode 0x67 = Console_11
+keycode 0x68 =
+keycode 0x69 = F13
+keycode 0x6a =
+keycode 0x6b = Scroll_Lock Show_Memory Show_Registers
+ control keycode 0x6b = Show_State
+ alt keycode 0x6b = Scroll_Lock
+keycode 0x6c =
+keycode 0x6d = F10 F20 Console_22
+ control keycode 0x6d = F10
+ alt keycode 0x6d = Console_10
+ control alt keycode 0x6d = Console_10
+keycode 0x6e =
+keycode 0x6f = F12 F12 Console_24
+ control keycode 0x6f = F12
+ alt keycode 0x6f = Console_12
+ control alt keycode 0x6f = Console_12
+keycode 0x70 =
+keycode 0x71 = Pause
+keycode 0x72 = Insert
+keycode 0x73 = Home
+keycode 0x74 = Prior
+ shift keycode 0x74 = Scroll_Backward
+keycode 0x75 = Remove
+keycode 0x76 = F4 F14 Console_16
+ control keycode 0x76 = F4
+ alt keycode 0x76 = Console_4
+ control alt keycode 0x76 = Console_4
+keycode 0x77 = End
+keycode 0x78 = F2 F12 Console_14
+ control keycode 0x78 = F2
+ alt keycode 0x78 = Console_2
+ control alt keycode 0x78 = Console_2
+keycode 0x79 = Next
+ shift keycode 0x79 = Scroll_Forward
+keycode 0x7a = F1 F11 Console_13
+ control keycode 0x7a = F1
+ alt keycode 0x7a = Console_1
+ control alt keycode 0x7a = Console_1
+keycode 0x7b = Shift_R
+keycode 0x7c = Alt_R
+keycode 0x7d = Control_R
+keycode 0x7e =
+keycode 0x7f =
+#keycode 0x7f = Power
+ control shift keycode 0x7f = Boot
+string F1 = "\033[[A"
+string F2 = "\033[[B"
+string F3 = "\033[[C"
+string F4 = "\033[[D"
+string F5 = "\033[[E"
+string F6 = "\033[17~"
+string F7 = "\033[18~"
+string F8 = "\033[19~"
+string F9 = "\033[20~"
+string F10 = "\033[21~"
+string F11 = "\033[23~"
+string F12 = "\033[24~"
+string F13 = "\033[25~"
+string F14 = "\033[26~"
+string F15 = "\033[28~"
+string F16 = "\033[29~"
+string F17 = "\033[31~"
+string F18 = "\033[32~"
+string F19 = "\033[33~"
+string F20 = "\033[34~"
+string Find = "\033[1~"
+string Insert = "\033[2~"
+string Remove = "\033[3~"
+string Select = "\033[4~"
+string Prior = "\033[5~"
+string Next = "\033[6~"
+string Macro = "\033[M"
+string Pause = "\033[P"
+compose '`' 'A' to 'À'
+compose '`' 'a' to 'à'
+compose '\'' 'A' to 'Á'
+compose '\'' 'a' to 'á'
+compose '^' 'A' to 'Â'
+compose '^' 'a' to 'â'
+compose '~' 'A' to 'Ã'
+compose '~' 'a' to 'ã'
+compose '"' 'A' to 'Ä'
+compose '"' 'a' to 'ä'
+compose 'O' 'A' to 'Å'
+compose 'o' 'a' to 'å'
+compose '0' 'A' to 'Å'
+compose '0' 'a' to 'å'
+compose 'A' 'A' to 'Å'
+compose 'a' 'a' to 'å'
+compose 'A' 'E' to 'Æ'
+compose 'a' 'e' to 'æ'
+compose ',' 'C' to 'Ç'
+compose ',' 'c' to 'ç'
+compose '`' 'E' to 'È'
+compose '`' 'e' to 'è'
+compose '\'' 'E' to 'É'
+compose '\'' 'e' to 'é'
+compose '^' 'E' to 'Ê'
+compose '^' 'e' to 'ê'
+compose '"' 'E' to 'Ë'
+compose '"' 'e' to 'ë'
+compose '`' 'I' to 'Ì'
+compose '`' 'i' to 'ì'
+compose '\'' 'I' to 'Í'
+compose '\'' 'i' to 'í'
+compose '^' 'I' to 'Î'
+compose '^' 'i' to 'î'
+compose '"' 'I' to 'Ï'
+compose '"' 'i' to 'ï'
+compose '-' 'D' to 'Ð'
+compose '-' 'd' to 'ð'
+compose '~' 'N' to 'Ñ'
+compose '~' 'n' to 'ñ'
+compose '`' 'O' to 'Ò'
+compose '`' 'o' to 'ò'
+compose '\'' 'O' to 'Ó'
+compose '\'' 'o' to 'ó'
+compose '^' 'O' to 'Ô'
+compose '^' 'o' to 'ô'
+compose '~' 'O' to 'Õ'
+compose '~' 'o' to 'õ'
+compose '"' 'O' to 'Ö'
+compose '"' 'o' to 'ö'
+compose '/' 'O' to 'Ø'
+compose '/' 'o' to 'ø'
+compose '`' 'U' to 'Ù'
+compose '`' 'u' to 'ù'
+compose '\'' 'U' to 'Ú'
+compose '\'' 'u' to 'ú'
+compose '^' 'U' to 'Û'
+compose '^' 'u' to 'û'
+compose '"' 'U' to 'Ü'
+compose '"' 'u' to 'ü'
+compose '\'' 'Y' to 'Ý'
+compose '\'' 'y' to 'ý'
+compose 'T' 'H' to 'Þ'
+compose 't' 'h' to 'þ'
+compose 's' 's' to 'ß'
+compose '"' 'y' to 'ÿ'
+compose 's' 'z' to 'ß'
+compose 'i' 'j' to 'ÿ'
--- /dev/null
+#!/usr/bin/perl
+
+($ME = $0) =~ s|.*/||;
+
+$file = "maps/mac.map";
+$line = 1;
+open(PC, $file) || die("$!");
+while(<PC>)
+{
+ if(/^\s*keycode\s+(\d+|0x[0-9a-fA-F]+)\s*=\s*(\S+)/)
+ {
+ my($idx) = $1;
+ my($sym) = $2;
+ if ($idx =~ "0x.*") {
+ $idx = hex($idx);
+ } else {
+ $idx = int($idx);
+ }
+ if(defined($map{uc($sym)}))
+ {
+ # print STDERR "$file:$line: warning: `$sym' redefined\n";
+ }
+ $map{uc($sym)} = $idx;
+ }
+ $line++;
+}
+close(PC);
+
+# $file = "maps/fixup.map";
+# $line = 1;
+# open(FIXUP, $file) || die("$!");
+# while(<FIXUP>)
+# {
+# if(/^\s*keycode\s+(\d+)\s*=\s*/)
+# {
+# my($idx) = int($1);
+# for $sym (split(/\s+/, $'))
+# {
+# $map{uc($sym)} = $idx;
+# }
+# }
+# $line++;
+# }
+# close(FIXUP);
+
+$file = "maps/usb.map";
+$line = 1;
+open(USB, $file) || die("$!");
+while(<USB>)
+{
+ if(/^\s*keycode\s+(\d+)\s*=\s*/)
+ {
+ my($idx) = int($1);
+ for $sym (split(/\s+/, $'))
+ {
+ my($val) = $map{uc($sym)};
+ $map[$idx] = $val;
+ if(!defined($val))
+ {
+ print STDERR "$file:$line: warning: `$sym' undefined\n";
+ }
+ else
+ {
+ last;
+ }
+ }
+ }
+ $line++;
+}
+close(USB);
+
+print "unsigned char usb_kbd_map[256] = \n{\n";
+for($x = 0; $x < 32; $x++)
+{
+ if($x && !($x % 2))
+ {
+ print "\n";
+ }
+ print " ";
+ for($y = 0; $y < 8; $y++)
+ {
+ my($idx) = $x * 8 + $y;
+ print sprintf(" 0x%02x,",
+ int(defined($map[$idx]) ? $map[$idx]:0));
+ }
+ print "\n";
+}
+print "};\n";
void show_ohci_ed(struct ohci_ed *ed)
{
- int stat = ed->status;
+ int stat = le32_to_cpup(&ed->status);
int skip = (stat & OHCI_ED_SKIP);
int mps = (stat & OHCI_ED_MPS) >> 16;
int isoc = (stat & OHCI_ED_F_ISOC);
int dir = (stat & OHCI_ED_D);
int endnum = (stat & OHCI_ED_EN) >> 7;
int funcaddr = (stat & OHCI_ED_FA);
- int halted = (ed->_head_td & 1);
- int toggle = (ed->_head_td & 2) >> 1;
+ int halted = (le32_to_cpup(&ed->_head_td) & 1);
+ int toggle = (le32_to_cpup(&ed->_head_td) & 2) >> 1;
printk(KERN_DEBUG " ohci ED:\n");
printk(KERN_DEBUG " status = 0x%x\n", stat);
endnum,
funcaddr,
(stat & ED_ALLOCATED) ? " Allocated" : "");
- printk(KERN_DEBUG " tail_td = 0x%x\n", ed->tail_td);
+ printk(KERN_DEBUG " tail_td = 0x%x\n", ed_tail_td(ed));
printk(KERN_DEBUG " head_td = 0x%x\n", ed_head_td(ed));
- printk(KERN_DEBUG " next_ed = 0x%x\n", ed->next_ed);
+ printk(KERN_DEBUG " next_ed = 0x%x\n", le32_to_cpup(&ed->next_ed));
} /* show_ohci_ed() */
void show_ohci_td(struct ohci_td *td)
{
- int td_round = td->info & OHCI_TD_ROUND;
- int td_dir = td->info & OHCI_TD_D;
- int td_int_delay = (td->info & OHCI_TD_IOC_DELAY) >> 21;
- int td_toggle = (td->info & OHCI_TD_DT) >> 24;
+ int info = le32_to_cpup(&td->info);
+ int td_round = info & OHCI_TD_ROUND;
+ int td_dir = info & OHCI_TD_D;
+ int td_int_delay = (info & OHCI_TD_IOC_DELAY) >> 21;
+ int td_toggle = (info & OHCI_TD_DT) >> 24;
int td_errcnt = td_errorcount(*td);
- int td_cc = OHCI_TD_CC_GET(td->info);
+ int td_cc = OHCI_TD_CC_GET(info);
printk(KERN_DEBUG " ohci TD hardware fields:\n");
- printk(KERN_DEBUG " info = 0x%x\n", td->info);
+ printk(KERN_DEBUG " info = 0x%x\n", info);
printk(KERN_DEBUG " %s%s%s%d %s %s%d\n",
td_round ? "Rounding " : "",
(td_dir == OHCI_TD_D_IN) ? "Input " :
printk(KERN_DEBUG " %s\n", td_allocated(*td) ? "Allocated" : "Free");
- printk(KERN_DEBUG " cur_buf = 0x%x\n", td->cur_buf);
- printk(KERN_DEBUG " next_td = 0x%x\n", td->next_td);
- printk(KERN_DEBUG " buf_end = 0x%x\n", td->buf_end);
+ printk(KERN_DEBUG " cur_buf = 0x%x\n", le32_to_cpup(&td->cur_buf));
+ printk(KERN_DEBUG " next_td = 0x%x\n", le32_to_cpup(&td->next_td));
+ printk(KERN_DEBUG " buf_end = 0x%x\n", le32_to_cpup(&td->buf_end));
printk(KERN_DEBUG " ohci TD driver fields:\n");
printk(KERN_DEBUG " data = %p\n", td->data);
printk(KERN_DEBUG " dev_id = %p\n", td->dev_id);
printk(KERN_DEBUG " ohci_hcca\n");
for (idx=0; idx<NUM_INTS; idx++) {
- printk(KERN_DEBUG " int_table[%2d] == %p\n", idx, hcca->int_table +idx);
+ printk(KERN_DEBUG " int_table[%2d] == %x\n", idx,
+ le32_to_cpup(hcca->int_table + idx));
}
- printk(KERN_DEBUG " frame_no == %d\n", hcca->frame_no);
- printk(KERN_DEBUG " donehead == 0x%08x\n", hcca->donehead);
+ printk(KERN_DEBUG " frame_no == %d\n",
+ le16_to_cpup(&hcca->frame_no));
+ printk(KERN_DEBUG " donehead == 0x%08x\n",
+ le32_to_cpup(&hcca->donehead));
} /* show_ohci_hcca() */
static spinlock_t ohci_edtd_lock = SPIN_LOCK_UNLOCKED;
+#define FIELDS_OF_ED(e) le32_to_cpup(&e->status), le32_to_cpup(&e->tail_td), \
+ le32_to_cpup(&e->_head_td), le32_to_cpup(&e->next_ed)
+#define FIELDS_OF_TD(t) le32_to_cpup(&t->info), le32_to_cpup(&t->cur_buf), \
+ le32_to_cpup(&t->next_td), le32_to_cpup(&t->buf_end)
+
+static const char *cc_names[16] = {
+ "no error",
+ "CRC error",
+ "bit stuff error",
+ "data toggle mismatch",
+ "stall",
+ "device not responding",
+ "PID check failed",
+ "unexpected PID",
+ "data overrun",
+ "data underrun",
+ "reserved (10)",
+ "reserved (11)",
+ "buffer overrun",
+ "buffer underrun",
+ "not accessed (14)",
+ "not accessed"
+};
+
/*
- * Add a TD to the end of the TD list on a given ED. This function
- * does NOT advance the ED's tail_td pointer beyond the given TD. To
- * add multiple TDs, call this function once for each TD. Do not
- * "simply" update tail_td yourself... This function does more than
- * that.
- *
- * If this ED is on the controller, you MUST set its SKIP flag before
- * calling this function.
+ * Add a chain of TDs to the end of the TD list on a given ED.
+ *
+ * This function uses the first TD of the chain as the new dummy TD
+ * for the ED, and uses the old dummy TD instead of the first TD
+ * of the chain. The reason for this is that this makes it possible
+ * to update the TD chain without needing any locking between the
+ * CPU and the OHCI controller.
*
- * Important! This function needs locking and atomicity as it works
- * in parallel with the HC's DMA. Locking ohci_edtd_lock while using
- * the function is a must.
+ * The return value is the pointer to the new first TD (the old
+ * dummy TD).
+ *
+ * Important! This function is not re-entrant w.r.t. each ED.
+ * Locking ohci_edtd_lock while using the function is a must
+ * if there is any possibility of another CPU or an interrupt routine
+ * calling this function with the same ED.
*
* This function can be called by the interrupt handler.
*/
-static void ohci_add_td_to_ed(struct ohci_td *td, struct ohci_ed *ed)
+static struct ohci_td *ohci_add_td_to_ed(struct ohci_td *td,
+ struct ohci_td *last_td, struct ohci_ed *ed)
{
- struct ohci_td *dummy_td, *prev_td;
+ struct ohci_td *t, *dummy_td;
+ u32 new_dummy;
if (ed->tail_td == 0) {
printk("eek! an ED without a dummy_td\n");
+ return td;
}
- /* The ED's tail_td is constant, always pointing to the
- * dummy_td. The reason the ED never processes the dummy is
- * that it stops processing TDs as soon as head_td == tail_td.
- * When it advances to this last dummy TD it conveniently stops. */
- dummy_td = bus_to_virt(ed->tail_td);
+ /* Get a pointer to the current dummy TD. */
+ dummy_td = bus_to_virt(ed_tail_td(ed));
- /* Dummy's data pointer is used to point to the previous TD */
- if (ed_head_td(ed) != ed->tail_td) {
- prev_td = (struct ohci_td *) dummy_td->data;
- } else {
- /* if the ED is empty, previous is meaningless */
- /* We'll be inserting into the head of the list */
- prev_td = NULL;
+ for (t = td; ; t = bus_to_virt(le32_to_cpup(&t->next_td))) {
+ t->ed = ed;
+ if (t == last_td)
+ break;
}
- /* Store the new back pointer and set up this TD's next */
- dummy_td->data = td;
- td->next_td = ed->tail_td;
+ /* Make the last TD point back to the first, since it
+ * will become the new dummy TD. */
+ new_dummy = cpu_to_le32(virt_to_bus(td));
+ last_td->next_td = new_dummy;
- /* Store the TD pointer back to the ED */
- td->ed = ed;
+ /* Copy the contents of the first TD into the dummy */
+ *dummy_td = *td;
- if (!prev_td) { /* No previous TD? then insert us at the head */
- if (ed_head_td(ed) != ed->tail_td)
- printk(KERN_DEBUG "Suspicious ED...\n");
- set_ed_head_td(ed, virt_to_bus(td)); /* put it on the ED */
- } else {
- /* add the TD to the end */
- prev_td->next_td = virt_to_bus(td);
- }
+ /* Turn the first TD into a dummy */
+ make_dumb_td(td);
+
+ /* Set the HC's tail pointer to the new dummy */
+ ed->tail_td = new_dummy;
+
+ return dummy_td; /* replacement head of chain */
} /* ohci_add_td_to_ed() */
/* if the list is not empty, insert this ED at the front */
/* XXX should they go on the end? */
- if (listhead) {
- ed->next_ed = listhead;
- }
+ ed->next_ed = cpu_to_le32(listhead);
/* update the hardware listhead pointer */
writel(virt_to_bus(ed), hw_listhead_p);
* Insert this ED at the front of the list.
*/
ed->next_ed = int_ed->next_ed;
- int_ed->next_ed = virt_to_bus(ed);
+ int_ed->next_ed = cpu_to_le32(virt_to_bus(ed));
spin_unlock_irqrestore(&ohci_edtd_lock, flags);
*/
void ohci_wait_for_ed_safe(struct ohci_regs *regs, struct ohci_ed *ed, int ed_type)
{
- __u32 hw_listcurrent;
+ __u32 *hw_listcurrent;
/* tell the controller to skip this ED */
- ed->status |= OHCI_ED_SKIP;
+ ed->status |= cpu_to_le32(OHCI_ED_SKIP);
switch (ed_type) {
case HCD_ED_CONTROL:
- hw_listcurrent = readl(regs->ed_controlcurrent);
+ hw_listcurrent = &regs->ed_controlcurrent;
break;
case HCD_ED_BULK:
- hw_listcurrent = readl(regs->ed_bulkcurrent);
+ hw_listcurrent = &regs->ed_bulkcurrent;
break;
case HCD_ED_ISOC:
case HCD_ED_INT:
- hw_listcurrent = readl(regs->ed_periodcurrent);
+ hw_listcurrent = &regs->ed_periodcurrent;
break;
default:
return;
* If the HC is processing this ED we need to wait until the
* at least the next frame.
*/
- if (virt_to_bus(ed) == hw_listcurrent) {
+ if (virt_to_bus(ed) == readl(hw_listcurrent)) {
DECLARE_WAITQUEUE(wait, current);
#ifdef OHCI_DEBUG
- printk("Waiting a frame for OHC to finish with ED %p\n", ed);
+ printk("Waiting a frame for OHC to finish with ED %p [%x %x %x %x]\n", ed, FIELDS_OF_ED(ed));
#endif
add_wait_queue(&start_of_frame_wakeup, &wait);
/* walk the list and unlink the ED if found */
do {
prev = cur;
- cur = bus_to_virt(cur->next_ed);
+ cur = bus_to_virt(le32_to_cpup(&cur->next_ed));
if (virt_to_bus(cur) == bus_ed) {
/* unlink from the list */
return;
/* set the "skip me bit" in this ED */
- ed->status |= OHCI_ED_SKIP;
+ ed->status |= cpu_to_le32(OHCI_ED_SKIP);
/* XXX Assuming this list will never be circular */
/* FIXME: collapse this into a nice simple loop :) */
if (head_td->next_td != 0) {
prev_td = head_td;
- cur_td = bus_to_virt(head_td->next_td);
+ cur_td = bus_to_virt(le32_to_cpup(&head_td->next_td));
for (;;) {
if (td == cur_td) {
/* remove it */
if (cur_td->next_td == 0)
break;
prev_td = cur_td;
- cur_td = bus_to_virt(cur_td->next_td);
+ cur_td = bus_to_virt(le32_to_cpup(&cur_td->next_td));
}
}
}
ohci_free_td(td);
/* unset the "skip me bit" in this ED */
- ed->status &= ~OHCI_ED_SKIP;
+ ed->status &= cpu_to_le32(~OHCI_ED_SKIP);
spin_unlock_irqrestore(&ohci_edtd_lock, flags);
} /* ohci_remove_td_from_ed() */
/* zero out the TD */
memset(new_td, 0, sizeof(*new_td));
/* mark the new TDs as unaccessed */
- new_td->info = OHCI_TD_CC_NEW;
+ new_td->info = cpu_to_le32(OHCI_TD_CC_NEW);
/* mark it as allocated */
allocate_td(new_td);
return new_td;
/* zero out the ED */
memset(new_ed, 0, sizeof(*new_ed));
/* all new EDs start with the SKIP bit set */
- new_ed->status |= OHCI_ED_SKIP;
+ new_ed->status |= cpu_to_le32(OHCI_ED_SKIP);
/* mark it as allocated */
allocate_ed(new_ed);
return new_ed;
if (!ed)
return;
- if (ed->tail_td == 0) {
- printk("yikes! an ED without a dummy_td\n");
- } else
- ohci_free_td((struct ohci_td *)bus_to_virt(ed->tail_td));
+ if (ed_head_td(ed) != 0) {
+ struct ohci_td *td, *tail_td, *next_td;
+
+ td = bus_to_virt(ed_head_td(ed));
+ tail_td = bus_to_virt(ed_tail_td(ed));
+ for (;;) {
+ next_td = bus_to_virt(le32_to_cpup(&td->next_td));
+ ohci_free_td(td);
+ if (td == tail_td)
+ break;
+ td = next_td;
+ }
+ }
- ed->status &= ~(__u32)ED_ALLOCATED;
+ ed->status &= cpu_to_le32(~(__u32)ED_ALLOCATED);
} /* ohci_free_ed() */
inline struct ohci_td *ohci_fill_new_td(struct ohci_td *td, int dir, int toggle, __u32 flags, void *data, __u32 len, void *dev_id, usb_device_irq completed)
{
/* hardware fields */
- td->info = OHCI_TD_CC_NEW |
- (dir & OHCI_TD_D) |
- (toggle & OHCI_TD_DT) |
- flags;
- td->cur_buf = (data == NULL) ? 0 : virt_to_bus(data);
- td->buf_end = (len == 0) ? 0 : td->cur_buf + len - 1;
+ td->info = cpu_to_le32(OHCI_TD_CC_NEW |
+ (dir & OHCI_TD_D) |
+ (toggle & OHCI_TD_DT) |
+ flags);
+ td->cur_buf = (data == NULL) ? 0 : cpu_to_le32(virt_to_bus(data));
+ td->buf_end = (len == 0) ? 0 :
+ cpu_to_le32(le32_to_cpup(&td->cur_buf) + len - 1);
/* driver fields */
td->data = data;
* not be any!). This assumes that the ED is Allocated and will
* force the Allocated bit on.
*/
-struct ohci_ed *ohci_fill_ed(struct ohci_device *dev, struct ohci_ed *ed, int maxpacketsize, int lowspeed, int endp_id, int isoc_tds)
+struct ohci_ed *ohci_fill_ed(struct ohci_device *dev, struct ohci_ed *ed,
+ int maxpacketsize, int lowspeed, int endp_id,
+ int isoc_tds)
{
struct ohci_td *dummy_td;
- if (ed_head_td(ed) != ed->tail_td)
+ if (ed_head_td(ed) != ed_tail_td(ed))
printk("Reusing a non-empty ED %p!\n", ed);
if (!ed->tail_td) {
return NULL; /* no dummy available! */
}
make_dumb_td(dummy_td); /* flag it as a dummy */
- ed->tail_td = virt_to_bus(dummy_td);
+ ed->tail_td = cpu_to_le32(virt_to_bus(dummy_td));
} else {
- dummy_td = bus_to_virt(ed->tail_td);
+ dummy_td = bus_to_virt(ed_tail_td(ed));
if (!td_dummy(*dummy_td))
printk("ED %p's dummy %p is screwy\n", ed, dummy_td);
}
/* set the head TD to the dummy and clear the Carry & Halted bits */
ed->_head_td = ed->tail_td;
- ed->status = \
+ ed->status = cpu_to_le32(
ed_set_maxpacket(maxpacketsize) |
ed_set_speed(lowspeed) |
(endp_id & 0x7ff) |
- ((isoc_tds == 0) ? OHCI_ED_F_NORM : OHCI_ED_F_ISOC);
+ ((isoc_tds == 0) ? OHCI_ED_F_NORM : OHCI_ED_F_ISOC));
allocate_ed(ed);
ed->next_ed = 0;
struct ohci_device *dev = usb_to_ohci(usb);
struct ohci_td *td;
struct ohci_ed *interrupt_ed; /* endpoint descriptor for this irq */
+ int maxps = usb_maxpacket(usb, pipe);
/* Get an ED and TD */
interrupt_ed = ohci_get_free_ed(dev);
* Set the max packet size, device speed, endpoint number, usb
* device number (function address), and type of TD.
*/
- ohci_fill_ed(dev, interrupt_ed, usb_maxpacket(usb,pipe), usb_pipeslow(pipe),
- usb_pipe_endpdev(pipe), 0 /* normal TDs */);
+ ohci_fill_ed(dev, interrupt_ed, maxps, usb_pipeslow(pipe),
+ usb_pipe_endpdev(pipe), 0 /* normal TDs */);
/* Fill in the TD */
+ if (maxps > sizeof(dev->data))
+ maxps = sizeof(dev->data);
ohci_fill_new_td(td, td_set_dir_out(usb_pipeout(pipe)),
TOGGLE_AUTO,
OHCI_TD_ROUND,
- &dev->data, DATA_BUF_LEN,
+ dev->data, maxps,
dev_id, handler);
/*
* TODO: be aware of how the OHCI controller deals with DMA
/*
* Put the TD onto our ED and make sure its ready to run
*/
- ohci_add_td_to_ed(td, interrupt_ed);
- interrupt_ed->status &= ~OHCI_ED_SKIP;
+ td = ohci_add_td_to_ed(td, td, interrupt_ed);
+ interrupt_ed->status &= cpu_to_le32(~OHCI_ED_SKIP);
ohci_unhalt_ed(interrupt_ed);
- /* Linus did this. see asm/system.h; scary concept... I don't
- * know if its needed here or not but it won't hurt. */
+ /* Make sure all the stores above get done before
+ * the store which tells the OHCI about the new ed. */
wmb();
/* Assimilate the new ED into the collective */
*
* This function can NOT be called from an interrupt.
*/
-static int ohci_control_msg(struct usb_device *usb, unsigned int pipe, void *cmd, void *data, int len)
+static int ohci_control_msg(struct usb_device *usb, unsigned int pipe,
+ devrequest *cmd, void *data, int len)
{
struct ohci_device *dev = usb_to_ohci(usb);
struct ohci_ed *control_ed = ohci_get_free_ed(dev);
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int completion_status = -1;
+ devrequest our_cmd;
+
+ /* byte-swap fields of cmd if necessary */
+ our_cmd = *cmd;
+ cpu_to_le16s(&our_cmd.value);
+ cpu_to_le16s(&our_cmd.index);
+ cpu_to_le16s(&our_cmd.length);
-#ifdef OHCI_DEBUG
+#ifdef OHCI_DEBUG
+ if (MegaDebug)
printk(KERN_DEBUG "ohci_control_msg %p (ohci_dev: %p) pipe %x, cmd %p, data %p, len %d\n", usb, dev, pipe, cmd, data, len);
#endif
if (!control_ed) {
* uses a DATA0 packet.
*
* The setup packet contains a devrequest (usb.h) which
- * will always be 8 bytes long. FIXME: the cmd parameter
- * should be a pointer to one of these instead of a void* !!!
+ * will always be 8 bytes long.
*/
ohci_fill_new_td(setup_td, OHCI_TD_D_SETUP, TOGGLE_DATA0,
OHCI_TD_IOC_OFF,
- cmd, 8, /* cmd is always 8 bytes long */
+ &our_cmd, 8, /* cmd is always 8 bytes long */
NULL, NULL);
/* allocate the next TD */
}
/* link to the next TD */
- setup_td->next_td = virt_to_bus(data_td);
+ setup_td->next_td = cpu_to_le32(virt_to_bus(data_td));
if (len > 0) {
return -1;
}
- data_td->next_td = virt_to_bus(status_td);
+ data_td->next_td = cpu_to_le32(virt_to_bus(status_td));
} else {
status_td = data_td; /* no data_td, use it for status */
}
* Add the chain of 2-3 control TDs to the control ED's TD list
*/
spin_lock_irqsave(&ohci_edtd_lock, flags);
- control_ed->status |= OHCI_ED_SKIP;
- ohci_add_td_to_ed(setup_td, control_ed);
- if (data_td != status_td)
- ohci_add_td_to_ed(data_td, control_ed);
- ohci_add_td_to_ed(status_td, control_ed);
- control_ed->status &= ~OHCI_ED_SKIP;
- ohci_unhalt_ed(control_ed);
+ setup_td = ohci_add_td_to_ed(setup_td, status_td, control_ed);
spin_unlock_irqrestore(&ohci_edtd_lock, flags);
#ifdef OHCI_DEBUG
}
#endif
- /* clean up */
- ohci_free_td(setup_td);
- if (data_td != status_td)
- ohci_free_td(data_td);
- ohci_free_td(status_td);
/* remove the control ED from the HC */
ohci_remove_control_ed(dev->ohci, control_ed);
ohci_free_ed(control_ed); /* return it to the pool */
-#if 0
- printk(KERN_DEBUG "leaving ohci_control_msg\n");
+#ifdef OHCI_DEBUG
+ if (completion_status != 0) {
+ printk(KERN_ERR "ohci_control_msg: %s on cmd %x %x %x %x %x\n",
+ cc_names[completion_status & 0xf], cmd->requesttype,
+ cmd->request, cmd->value, cmd->index, cmd->length);
+ } else if (!usb_pipeout(pipe)) {
+ unsigned char *q = data;
+ int i;
+ printk(KERN_DEBUG "ctrl msg %x %x %x %x %x returned:",
+ cmd->requesttype, cmd->request, cmd->value, cmd->index,
+ cmd->length);
+ for (i = 0; i < len; ++i) {
+ if (i % 16 == 0)
+ printk("\n" KERN_DEBUG);
+ printk(" %x", q[i]);
+ }
+ printk("\n");
+ }
#endif
return completion_status;
} /* ohci_control_msg() */
/* Initialize all EDs in a new device with the skip flag so that
* they are ignored by the controller until set otherwise. */
for (idx = 0; idx < NUM_EDS; ++idx) {
- dev->ed[idx].status |= OHCI_ED_SKIP;
+ dev->ed[idx].status = cpu_to_le32(OHCI_ED_SKIP);
}
/*
*/
static void ohci_root_hub_events(struct ohci *ohci)
{
- int num = 0;
+ int num = 0;
struct ohci_device *root_hub=usb_to_ohci(ohci->bus->root_hub);
int maxport = root_hub->usb->maxchild;
if (!waitqueue_active(&ohci_configure))
return;
- do {
- __u32 *portstatus_p = &ohci->regs->roothub.portstatus[num];
- if (readl(portstatus_p) & PORT_CSC) {
- if (waitqueue_active(&ohci_configure))
- wake_up(&ohci_configure);
- return;
- }
- } while (++num < maxport);
+ do {
+ __u32 *portstatus_p = &ohci->regs->roothub.portstatus[num];
+ if (readl(portstatus_p) & PORT_CSC) {
+ if (waitqueue_active(&ohci_configure))
+ wake_up(&ohci_configure);
+ return;
+ }
+ } while (++num < maxport);
} /* ohci_root_hub_events() */
struct ohci_hcca *hcca = root_hub->hcca;
struct ohci_td *td_list = NULL;
struct ohci_td *td_rev = NULL;
-
- td_list_hc = hcca->donehead & 0xfffffff0;
+
+ td_list_hc = le32_to_cpup(&hcca->donehead) & 0xfffffff0;
hcca->donehead = 0;
while(td_list_hc) {
td_list = (struct ohci_td *) bus_to_virt(td_list_hc);
td_list->next_dl_td = td_rev;
-
td_rev = td_list;
- td_list_hc = td_list->next_td & 0xfffffff0;
+ td_list_hc = le32_to_cpup(&td_list->next_td) & 0xfffffff0;
}
return td_list;
while (td != NULL) {
struct ohci_td *next_td = td->next_dl_td;
+ int cc = OHCI_TD_CC_GET(le32_to_cpup(&td->info));
if (td_dummy(*td))
printk("yikes! reaping a dummy TD\n");
/* FIXME: munge td->info into a future standard status format */
+
+ if (cc != 0 && ohci_ed_halted(td->ed) && td->completed == 0) {
+ /*
+ * There was an error on this TD and the ED
+ * is halted, and this was not the last TD
+ * of the transaction, so there will be TDs
+ * to clean off the ED.
+ * (We assume that a TD with a non-NULL completed
+ * field is the last one of a transaction.
+ * Ultimately we should have a flag in the TD
+ * to say that it is the last one.)
+ */
+ struct ohci_ed *ed = td->ed;
+ struct ohci_td *tail_td = bus_to_virt(ed_tail_td(ed));
+ struct ohci_td *ntd;
+
+ ohci_free_td(td);
+ td = ntd = bus_to_virt(ed_head_td(ed));
+ while (td != tail_td) {
+ ntd = bus_to_virt(le32_to_cpup(&td->next_td));
+ if (td->completed != 0)
+ break;
+ ohci_free_td(td);
+ td = ntd;
+ }
+ /* Set the ED head past the ones we cleaned
+ off, and clear the halted flag */
+ set_ed_head_td(ed, virt_to_bus(ntd));
+ ohci_unhalt_ed(ed);
+ /* If we didn't find a TD with a completion
+ routine, give up */
+ if (td == tail_td) {
+ td = next_td;
+ continue;
+ }
+ }
+
/* Check if TD should be re-queued */
if ((td->completed != NULL) &&
- (td->completed(OHCI_TD_CC_GET(td->info), td->data, td->dev_id)))
- {
+ (td->completed(cc, td->data, td->dev_id))) {
/* Mark the TD as active again:
* Set the not accessed condition code
* Reset the Error count
- * [FIXME: report errors to the device's driver]
*/
- td->info |= OHCI_TD_CC_NEW;
+ td->info |= cpu_to_le32(OHCI_TD_CC_NEW);
clear_td_errorcount(td);
+ /* reset the toggle field to TOGGLE_AUTO (0) */
+ td->info &= cpu_to_le32(~OHCI_TD_DT);
/* point it back to the start of the data buffer */
- td->cur_buf = virt_to_bus(td->data);
+ td->cur_buf = cpu_to_le32(virt_to_bus(td->data));
/* insert it back on its ED */
- ohci_add_td_to_ed(td, td->ed);
+ ohci_add_td_to_ed(td, td, td->ed);
} else {
/* return it to the pool of free TDs */
ohci_free_td(td);
/* make context = the interrupt status bits that we care about */
if (hcca->donehead != 0) {
context = OHCI_INTR_WDH; /* hcca donehead needs processing */
- if (hcca->donehead & 1) {
+ if (hcca->donehead & cpu_to_le32(1)) {
context |= status; /* other status change to check */
}
} else {
}
}
- /* Disable HC interrupts */
+ /* Disable HC interrupts */ /* why? - paulus */
 writel(OHCI_INTR_MIE, &regs->intrdisable);
/* Process the done list */
ohci_reap_donelist(ohci);
/* reset the done queue and tell the controller */
- hcca->donehead = 0;
+ hcca->donehead = 0; /* XXX already done in ohci_reverse_donelist */
 writel(OHCI_INTR_WDH, &regs->intrstatus);
context &= ~OHCI_INTR_WDH; /* mark this as checked */
* page as that's guaranteed to have a nice boundary.
*/
dev->hcca = (struct ohci_hcca *) __get_free_page(GFP_KERNEL);
-
+ memset(dev->hcca, 0, sizeof(struct ohci_hcca));
+
/* Tell the controller where the HCCA is */
writel(virt_to_bus(dev->hcca), &ohci->regs->hcca);
* Initialize the ED polling "tree" (for simplicity's sake in
* this driver many nodes in the tree will be identical)
*/
- dev->ed[ED_INT_32].next_ed = virt_to_bus(&dev->ed[ED_INT_16]);
- dev->ed[ED_INT_16].next_ed = virt_to_bus(&dev->ed[ED_INT_8]);
- dev->ed[ED_INT_8].next_ed = virt_to_bus(&dev->ed[ED_INT_4]);
- dev->ed[ED_INT_4].next_ed = virt_to_bus(&dev->ed[ED_INT_2]);
- dev->ed[ED_INT_2].next_ed = virt_to_bus(&dev->ed[ED_INT_1]);
+ dev->ed[ED_INT_32].next_ed = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_16]));
+ dev->ed[ED_INT_16].next_ed = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_8]));
+ dev->ed[ED_INT_8].next_ed = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_4]));
+ dev->ed[ED_INT_4].next_ed = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_2]));
+ dev->ed[ED_INT_2].next_ed = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_1]));
/*
* Initialize the polling table to call interrupts at the
* placeholders. They have their SKIP bit set and are used as
* list heads to insert real EDs onto.
*/
- dev->hcca->int_table[0] = virt_to_bus(&dev->ed[ED_INT_1]);
+ dev->hcca->int_table[0] = cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_1]));
for (i = 1; i < NUM_INTS; i++) {
if (i & 16)
dev->hcca->int_table[i] =
- virt_to_bus(&dev->ed[ED_INT_32]);
+ cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_32]));
if (i & 8)
dev->hcca->int_table[i] =
- virt_to_bus(&dev->ed[ED_INT_16]);
+ cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_16]));
if (i & 4)
dev->hcca->int_table[i] =
- virt_to_bus(&dev->ed[ED_INT_8]);
+ cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_8]));
if (i & 2)
dev->hcca->int_table[i] =
- virt_to_bus(&dev->ed[ED_INT_4]);
+ cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_4]));
if (i & 1)
dev->hcca->int_table[i] =
- virt_to_bus(&dev->ed[ED_INT_2]);
+ cpu_to_le32(virt_to_bus(&dev->ed[ED_INT_2]));
}
/*
#if 0
printk(KERN_DEBUG "leaving alloc_ohci %p\n", ohci);
#endif
+printk("alloc_ohci done\n");
return ohci;
} /* alloc_ohci() */
#define TOGGLE_DATA1 (3 << 24) /* force Data1 */
#define td_force_toggle(b) (((b) | 2) << 24)
#define OHCI_TD_ERRCNT (3 << 26) /* error count */
-#define td_errorcount(td) (((td).info >> 26) & 3)
-#define clear_td_errorcount(td) ((td)->info &= ~(__u32)OHCI_TD_ERRCNT)
+#define td_errorcount(td) ((le32_to_cpup(&(td).info) >> 26) & 3)
+#define clear_td_errorcount(td) ((td)->info &= cpu_to_le32(~(__u32)OHCI_TD_ERRCNT))
#define OHCI_TD_CC (0xf << 28) /* condition code */
#define OHCI_TD_CC_GET(td_i) (((td_i) >> 28) & 0xf)
#define OHCI_TD_CC_NEW (OHCI_TD_CC) /* set this on all unaccessed TDs! */
-#define td_cc_notaccessed(td) (((td).info >> 29) == 7)
-#define td_cc_accessed(td) (((td).info >> 29) != 7)
-#define td_cc_noerror(td) ((((td).info) & OHCI_TD_CC) == 0)
+#define td_cc_notaccessed(td) ((le32_to_cpup(&(td).info) >> 29) == 7)
+#define td_cc_accessed(td) ((le32_to_cpup(&(td).info) >> 29) != 7)
+#define td_cc_noerror(td) (((le32_to_cpup(&(td).info)) & OHCI_TD_CC) == 0)
#define td_active(td) (!td_cc_noerror((td)) && (td_errorcount((td)) < 3))
#define td_done(td) (td_cc_noerror((td)) || (td_errorcount((td)) == 3))
} __attribute((aligned(16)));
/* get the head_td */
-#define ed_head_td(ed) ((ed)->_head_td & 0xfffffff0)
+#define ed_head_td(ed) (le32_to_cpup(&(ed)->_head_td) & 0xfffffff0)
+#define ed_tail_td(ed) (le32_to_cpup(&(ed)->tail_td))
/* save the carry & halted flag while setting the head_td */
-#define set_ed_head_td(ed, td) ((ed)->_head_td = (td) | ((ed)->_head_td & 3))
+#define set_ed_head_td(ed, td) ((ed)->_head_td = cpu_to_le32((td)) \
+ | ((ed)->_head_td & cpu_to_le32(3)))
/* Control the ED's halted flag */
-#define ohci_halt_ed(ed) ((ed)->_head_td |= 1)
-#define ohci_unhalt_ed(ed) ((ed)->_head_td &= ~(__u32)1)
-#define ohci_ed_halted(ed) ((ed)->_head_td & 1)
+#define ohci_halt_ed(ed) ((ed)->_head_td |= cpu_to_le32(1))
+#define ohci_unhalt_ed(ed) ((ed)->_head_td &= cpu_to_le32(~(__u32)1))
+#define ohci_ed_halted(ed) ((ed)->_head_td & cpu_to_le32(1))
#define OHCI_ED_SKIP (1 << 14)
#define OHCI_ED_MPS (0x7ff << 16)
* driver or not. If the bit is set, it is being used.
*/
#define ED_ALLOCATED (1 << 31)
-#define ed_allocated(ed) ((ed).status & ED_ALLOCATED)
-#define allocate_ed(ed) ((ed)->status |= ED_ALLOCATED)
+#define ed_allocated(ed) (le32_to_cpup(&(ed).status) & ED_ALLOCATED)
+#define allocate_ed(ed) ((ed)->status |= cpu_to_le32(ED_ALLOCATED))
/*
* The HCCA (Host Controller Communications Area) is a 256 byte
}
result = p->pusb_dev->bus->op->bulk_msg(p->pusb_dev,
usb_sndbulkpipe(p->pusb_dev, 1), obuf, thistime, &partial);
- if (result & 0x08) { /* NAK - so hold for a while */
- obuf += partial;
- thistime -= partial;
+ if (result == USB_ST_TIMEOUT) { /* NAK - so hold for a while */
if(!maxretry--)
return -ETIME;
interruptible_sleep_on_timeout(&p->wait_q, NAK_TIMEOUT);
continue;
+ } else if (!result & partial) {
+ obuf += partial;
+ thistime -= partial;
} else
break;
};
usb_rcvbulkpipe(p->pusb_dev, 2), buf, this_read, &partial);
/* unlike writes, we don't retry a NAK, just stop now */
- if (result & 0x08)
+ if (!result & partial)
count = this_read = partial;
else if (result)
return -EIO;
static int apm_resume = 0;
#endif
+static int uhci_debug = 1;
+
#define compile_assert(x) do { switch (0) { case 1: case !(x): } } while (0)
static DECLARE_WAIT_QUEUE_HEAD(uhci_configure);
+/*
+ * Map status to standard result codes
+ */
+static int uhci_map_status(int status, int dir_out)
+{
+ if (!status)
+ return USB_ST_NOERROR;
+ if (status & 0x02) /* Bitstuff error*/
+ return USB_ST_BITSTUFF;
+ if (status & 0x04) { /* CRC/Timeout */
+ if (dir_out)
+ return USB_ST_TIMEOUT;
+ else
+ return USB_ST_CRC;
+ }
+ if (status & 0x08) /* NAK */
+ return USB_ST_TIMEOUT;
+ if (status & 0x10) /* Babble */
+ return USB_ST_STALL;
+ if (status & 0x20) /* Buffer error */
+ return USB_ST_BUFFERUNDERRUN;
+ if (status & 0x40) /* Stalled */
+ return USB_ST_STALL;
+ if (status & 0x80) /* Active */
+ return USB_ST_NOERROR;
+ return USB_ST_INTERNALERROR;
+}
/*
* Return the result of a TD..
*/
unsigned int status;
struct uhci_td *tmp = td->first;
+ if(rval)
+ *rval = 0;
+
/* locate the first failing td, if any */
do {
status = (tmp->status >> 16) & 0xff;
- if (status)
+ if (status) {
+ /* must reset the toggle on first error */
+ if (uhci_debug) {
+ printk("Set toggle from %x rval %d\n", (unsigned int)tmp, rval ? *rval : 0);
+ }
+ usb_settoggle(dev->usb, usb_pipeendpoint(tmp->info), (tmp->info >> 19) & 1);
break;
+ } else {
+ if(rval)
+ *rval += (tmp->status & 0x3ff) + 1;
+ }
if ((tmp->link & 1) || (tmp->link & 2))
break;
tmp = bus_to_virt(tmp->link & ~0xF);
} while (1);
- if(rval)
- *rval = 0;
+
+ if (!status)
+ return USB_ST_NOERROR;
+
/* Some debugging code */
- if (status && (!usb_pipeendpoint(tmp->info) || !(status & 0x08)) ) {
+ if (uhci_debug /* && (!usb_pipeendpoint(tmp->info) || !(status & 0x08))*/ ) {
int i = 10;
tmp = td->first;
break;
} while (1);
}
- if (usb_pipeendpoint(tmp->info) && (status & 0x08)) {
-// printk("uhci_td_result() - NAK\n");
- /* find total length xferred and reset toggle on failed packets */
- tmp = td->first;
- do {
- /* sum up packets that did not fail */
- if(rval && !((tmp->status >> 16) & 0xff))
- *rval += (tmp->status & 0x3ff) + 1;
-
- /*
- * Note - only the first to fail will be marked NAK
- */
- if (tmp->status & 0xFF0000) {
- /* must reset the toggle on any error */
- usb_settoggle(dev->usb, usb_pipeendpoint(tmp->info), (tmp->info >> 19) & 1);
- break;
- }
- if ((tmp->link & 1) || (tmp->link & 2))
- break;
- tmp = bus_to_virt(tmp->link & ~0xF);
- } while (1);
-#if 0
- if (rval) {
- printk("uhci_td_result returning partial count %d\n", *rval);
- }
-#endif
+ if (status & 0x40) {
+ /* endpoint has stalled - mark it halted */
+
+ usb_endpoint_halt(dev->usb, usb_pipeendpoint(tmp->info));
+ return USB_ST_STALL;
+
+ }
+
+ if (status == 0x80) {
+ /* still active */
+ if (!rval)
+ return USB_ST_DATAUNDERRUN;
}
- return status;
+ return uhci_map_status(status, usb_pipeout(tmp->info));
}
/*
for (; (inuse = test_and_set_bit(0, &td->inuse)) != 0 && td < &dev->td[UHCI_MAXTD]; td++)
;
- if (!inuse)
+ if (!inuse) {
+ td->inuse = 1;
return(td);
+ }
printk("ran out of td's for dev %p\n", dev);
return(NULL);
td->link = 1;
td->status = status; /* In */
- td->info = destination | (7 << 21); /* 8 bytes of data */
+ td->info = destination | (7 << 21) | (usb_gettoggle(usb_dev, usb_pipeendpoint(pipe)) << 19); /* 8 bytes of data */
td->buffer = virt_to_bus(dev->data);
td->first = td;
td->qh = interrupt_qh;
- interrupt_qh->skel = &root_hub->skel_int8_qh;
+ td->dev = usb_dev;
+
+ /* if period 0, insert into fast q */
+
+ if (period == 0) {
+ td->inuse |= 2;
+ interrupt_qh->skel = &root_hub->skel_int2_qh;
+ } else
+ interrupt_qh->skel = &root_hub->skel_int8_qh;
uhci_add_irq_list(dev->uhci, td, handler, dev_id);
uhci_insert_td_in_qh(interrupt_qh, td);
/* Add it into the skeleton */
- uhci_insert_qh(&root_hub->skel_int8_qh, interrupt_qh);
+ uhci_insert_qh(interrupt_qh->skel, interrupt_qh);
return 0;
}
+/*
+ * Remove running irq td from queues
+ */
+
+static int uhci_remove_irq(struct usb_device *usb_dev, unsigned int pipe, usb_device_irq handler, int period, void *dev_id)
+{
+ struct uhci_device *dev = usb_to_uhci(usb_dev);
+ struct uhci_device *root_hub=usb_to_uhci(dev->uhci->bus->root_hub);
+ struct uhci_td *td;
+ struct uhci_qh *interrupt_qh;
+ unsigned long flags;
+ struct list_head *head = &dev->uhci->interrupt_list;
+ struct list_head *tmp;
+
+ spin_lock_irqsave(&irqlist_lock, flags);
+
+ /* find the TD in the interrupt list */
+
+ tmp = head->next;
+ while (tmp != head) {
+ td = list_entry(tmp, struct uhci_td, irq_list);
+ if (td->dev_id == dev_id && td->completed == handler) {
+
+ /* found the right one - let's remove it */
+
+ /* notify removal */
+
+ td->completed(USB_ST_REMOVED, NULL, td->dev_id);
+
+ /* this is DANGEROUS - not sure whether this is right */
+
+ list_del(&td->irq_list);
+ uhci_remove_td(td);
+ interrupt_qh = td->qh;
+ uhci_remove_qh(interrupt_qh->skel, interrupt_qh);
+ uhci_td_deallocate(td);
+ uhci_qh_deallocate(interrupt_qh);
+ spin_unlock_irqrestore(&irqlist_lock, flags);
+ return USB_ST_NOERROR;
+ }
+ }
+ spin_unlock_irqrestore(&irqlist_lock, flags);
+ return USB_ST_INTERNALERROR;
+}
/*
* Isochronous thread operations
*/
* information, that's just ridiculously high. Most
* control messages have just a few bytes of data.
*/
-static int uhci_control_msg(struct usb_device *usb_dev, unsigned int pipe, void *cmd, void *data, int len)
+static int uhci_control_msg(struct usb_device *usb_dev, unsigned int pipe,
+ devrequest *cmd, void *data, int len)
{
struct uhci_device *dev = usb_to_uhci(usb_dev);
struct uhci_td *first, *td, *prevtd;
} while (1);
}
- if (ret) {
- __u8 *p = cmd;
+ if (uhci_debug && ret) {
+ __u8 *p = (__u8 *) cmd;
printk("Failed cmd - %02X %02X %02X %02X %02X %02X %02X %02X\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
int ret;
int maxsze = usb_maxpacket(usb_dev, pipe);
+ if (usb_endpoint_halted(usb_dev, usb_pipeendpoint(pipe)) &&
+ usb_clear_halt(usb_dev, usb_pipeendpoint(pipe) | (pipe & 0x80)))
+ return USB_ST_STALL;
+
if (len > maxsze * 31)
printk("Warning, too much data for a bulk packet, crashing (%d/%d)\n", len, maxsze);
I FORGOT WHAT IT EXACTLY DOES
*/
if (usb_pipeout(pipe)) {
- destination = (pipe & 0x000Aff00) | 0xE1;
+ destination = (pipe & 0x0007ff00) | 0xE1;
}
else {
- destination = (pipe & 0x000Aff00) | 0x69;
+ destination = (pipe & 0x0007ff00) | 0x69;
}
/* Status: slow/fast, Active, Short Packet Detect Three Errors */
while (len > 0) {
/* Build the TD for control status */
int pktsze = len;
+
if (pktsze > maxsze)
pktsze = maxsze;
/* Alternate Data0/1 (start with Data0) */
usb_dotoggle(usb_dev, usb_pipeendpoint(pipe));
}
- prevtd->link = 1; /* Terminate */
- prevtd->status = status | (1 << 24); /* IOC */
- prevtd->first = first;
- uhci_td_deallocate(td);
+ td->link = 1; /* Terminate */
+ td->status |= (1 << 24); /* IOC */
/* CHANGE DIRECTION HERE! SAVE IT SOMEWHERE IN THE ENDPOINT!!! */
for (i = 0; i < UHCI_MAXTD; ++i) {
struct uhci_td *td = dev->td + i;
- if (td->inuse) {
+ if (td->inuse & 1) {
uhci_remove_td(td);
/* And remove it from the irq list, if it's active */
for (i = 0; i < UHCI_MAXQH; ++i) {
struct uhci_qh *qh = dev->qh + i;
- if (qh->inuse)
+ if (qh->inuse & 1)
uhci_remove_qh(qh->skel, qh);
}
uhci_control_msg,
uhci_bulk_msg,
uhci_request_irq,
+ uhci_remove_irq,
};
/*
/* remove from IRQ list */
__list_del(tmp->prev, next);
INIT_LIST_HEAD(tmp);
- if (td->completed(td->status, bus_to_virt(td->buffer), td->dev_id)) {
+ if (td->completed(uhci_map_status((td->status & 0xff)>> 16, 0),
+ bus_to_virt(td->buffer), td->dev_id)) {
list_add(&td->irq_list, &uhci->interrupt_list);
if (!(td->status & (1 << 25))) {
struct uhci_qh *interrupt_qh = td->qh;
- td->info ^= 1 << 19; /* toggle between data0 and data1 */
+ usb_dotoggle(td->dev, usb_pipeendpoint(td->info));
+ td->info |= 1 << 19; /* toggle between data0 and data1 */
td->status = (td->status & 0x2f000000) | (1 << 23) | (1 << 24); /* active */
/* Remove then readd? Is that necessary */
uhci_remove_td(td);
uhci_insert_td_in_qh(interrupt_qh, td);
}
+ } else if (td->inuse & 2) {
+ struct uhci_qh *interrupt_qh = td->qh;
+ /* marked for removal */
+ td->inuse &= ~2;
+ usb_dotoggle(td->dev, usb_pipeendpoint(td->info));
+ uhci_remove_qh(interrupt_qh->skel, interrupt_qh);
+ uhci_qh_deallocate(interrupt_qh);
+ uhci_td_deallocate(td);
}
/* If completed wants to not reactivate, then it's */
/* responsible for free'ing the TD's and QH's */
printk("uhci_control_thread at %p\n", &uhci_control_thread);
exit_mm(current);
exit_files(current);
- exit_fs(current);
+ //exit_fs(current);
strcpy(current->comm, "uhci-control");
if(signr == SIGUSR1) {
printk("UHCI queue dump:\n");
show_queues(uhci);
+ } else if (signr == SIGUSR2) {
+ printk("UHCI debug toggle\n");
+ uhci_debug = !uhci_debug;
} else {
break;
}
usb_device_irq completed; /* Completion handler routine */
unsigned int *backptr; /* Where to remove this from.. */
void *dev_id;
- int inuse; /* Inuse? */
+ int inuse; /* Inuse? (b0) Remove (b1)*/
struct uhci_qh *qh;
struct uhci_td *first;
+ struct usb_device *dev; /* the owning device */
} __attribute__((aligned(32)));
struct uhci_iso_td {
# ifdef CONFIG_USB_HUB
usb_hub_init();
# endif
+# ifdef CONFIG_USB_SCSI
+ usb_scsi_init();
+# endif
#endif
return 0;
}
{
/* Add it to the list of buses */
list_add(&new_bus->bus_list, &usb_bus_list);
- printk("New bus registered");
+ printk("New bus registered\n");
}
void usb_deregister_bus(struct usb_bus *bus)
return;
}
for (i=0;i<USB_MAXCHILDREN;i++)
- if ((dev->children[i]!=NULL)&&
- (dev->children[i]->driver==NULL))
+ if (dev->children[i]!=NULL)
usb_check_support(dev->children[i]);
/*now we check this device*/
- usb_device_descriptor(dev);
+ if (dev->driver==NULL)
+ usb_device_descriptor(dev);
}
/*
* This entrypoint gets called for each new device.
if (len < descindex)
return -1;
- n_desc = *(unsigned short *)ptr;
- n_len = n_desc & 0xff;
+ n_desc = le16_to_cpup((unsigned short *)ptr);
+ n_len = ptr[0];
if (n_desc == ((desctype << 8) + descindex))
break;
if (n_len < 2 || n_len > len)
{
- printk("Short descriptor. (%d, %d)\n", len, n_len);
+ int i;
+ printk("Short descriptor. (%d, %d):\n", len, n_len);
+ for (i = 0; i < len; ++i)
+ printk(" %d: %x\n", i, ptr[i]);
return -1;
}
if (parsed < 0)
return parsed;
memcpy(endpoint, ptr + parsed, ptr[parsed]);
+ le16_to_cpus(&endpoint->wMaxPacketSize);
parsed += ptr[parsed];
- len -= ptr[parsed];
+ len -= parsed;
while((i = usb_check_descriptor(ptr+parsed, len, 0x25))>=0)
{
memcpy(config, ptr + parsed, *ptr);
len -= *ptr;
parsed += *ptr;
+ le16_to_cpus(&config->wTotalLength);
if (config->MaxPower == 200) {
printk("bNumInterfaces kludge\n");
int usb_get_string(struct usb_device *dev, unsigned short langid, unsigned char index, void *buf, int size)
{
devrequest dr;
- int i = 5;
- int result;
dr.requesttype = 0x80;
dr.request = USB_REQ_GET_DESCRIPTOR;
dr.index = langid;
dr.length = size;
- while (i--) {
- if (!(result = dev->bus->op->control_msg(dev, usb_rcvctrlpipe(dev,0), &dr, buf, size)))
- break;
- }
- return result;
+ return dev->bus->op->control_msg(dev, usb_rcvctrlpipe(dev,0), &dr, buf, size);
}
int usb_get_device_descriptor(struct usb_device *dev)
{
return usb_get_descriptor(dev, USB_DT_DEVICE, 0, &dev->descriptor, sizeof(dev->descriptor));
+ le16_to_cpus(&dev->descriptor.bcdUSB);
+ le16_to_cpus(&dev->descriptor.idVendor);
+ le16_to_cpus(&dev->descriptor.idProduct);
+ le16_to_cpus(&dev->descriptor.bcdDevice);
}
int usb_get_hub_descriptor(struct usb_device *dev, void *data, int size)
}
}
+/*
+ * Clear a halted endpoint via a CLEAR_FEATURE control request, then
+ * (paranoia) re-read the endpoint status to verify the halt really
+ * cleared.  On success the endpoint is marked running and its data
+ * toggle is reset to DATA0, as the USB spec requires after a clear.
+ * Returns 0 on success, the control_msg error code on failure, or 1
+ * if the endpoint reports it is still halted.
+ */
+int usb_clear_halt(struct usb_device *dev, int endp)
+{
+ devrequest dr;
+ int result;
+ __u16 status;
+
+ //if (!usb_endpoint_halted(dev, endp))
+ // return 0;
+
+ dr.requesttype = USB_RT_ENDPOINT;
+ dr.request = USB_REQ_CLEAR_FEATURE;
+ dr.value = 0;
+ dr.index = endp;
+ dr.length = 0;
+
+ result = dev->bus->op->control_msg(dev, usb_sndctrlpipe(dev,0), &dr, NULL, 0);
+
+ /* don't clear our bookkeeping if the request failed */
+ if (result) {
+ return result;
+ }
+
+#if 1 /* lets be really tough: verify with GET_STATUS */
+ dr.requesttype = 0x80 | USB_RT_ENDPOINT;
+ dr.request = USB_REQ_GET_STATUS;
+ dr.length = 2;
+ status = 0xffff;
+
+ result = dev->bus->op->control_msg(dev, usb_rcvctrlpipe(dev,0), &dr, &status, 2);
+
+ if (result) {
+ return result;
+ }
+ if (status & 1) {
+ return 1; /* still halted */
+ }
+#endif
+ usb_endpoint_running(dev, endp & 0x0f);
+
+ /* toggle is reset on clear */
+
+ usb_settoggle(dev, endp & 0x0f, 0);
+
+ return 0;
+}
+
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
devrequest dr;
return -1;
/* Get the full buffer */
- size = *(unsigned short *)(bufptr+2);
+ size = le16_to_cpup((unsigned short *)(bufptr+2));
if (bufptr+size > buffer+sizeof(buffer)) {
printk(KERN_INFO "usb: truncated DT_CONFIG (want %d).\n", size);
size = buffer+sizeof(buffer)-bufptr;
usb_get_string(dev, 0, 0, buffer, sd->bLength))
return -1;
/* we are going to assume that the first ID is good */
- langid = sd->wData[0];
+ langid = le16_to_cpup(&sd->wData[0]);
/* whip through and find total length and max index */
for (maxindex = 1, totalchars = 0; maxindex<=USB_MAXSTRINGS; maxindex++) {
continue;
dev->stringindex[i] = string;
for (j=0; j < (bLengths[i] - 2)/2; j++) {
- *string++ = sd->wData[j];
+ *string++ = le16_to_cpup(&sd->wData[j]);
}
*string++ = '\0';
}
#define USB_RT_HIDD (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
+/*
+ * Status codes
+ */
+#define USB_ST_NOERROR 0x0
+#define USB_ST_CRC 0x1
+#define USB_ST_BITSTUFF 0x2
+#define USB_ST_DTMISMATCH 0x3
+#define USB_ST_STALL 0x4
+#define USB_ST_TIMEOUT 0x5
+#define USB_ST_PIDCHECK 0x6
+#define USB_ST_PIDUNDEF 0x7
+#define USB_ST_DATAOVERRUN 0x8
+#define USB_ST_DATAUNDERRUN 0x9
+#define USB_ST_RESERVED1 0xA
+#define USB_ST_RESERVED2 0xB
+#define USB_ST_BUFFEROVERRUN 0xC
+#define USB_ST_BUFFERUNDERRUN 0xD
+#define USB_ST_RESERVED3 0xE
+#define USB_ST_RESERVED4 0xF
+
+/* internal errors */
+#define USB_ST_REMOVED 0x100
+#define USB_ST_INTERNALERROR -1
+
/*
* USB device number allocation bitmap. There's one bitmap
* per USB tree.
__u8 bLength;
__u8 bDescriptorType;
__u8 bNbrPorts;
- __u16 wHubCharacteristics;
+ __u8 wHubCharacteristics[2]; /* __u16 but not aligned! */
__u8 bPwrOn2PwrGood;
__u8 bHubContrCurrent;
/* DeviceRemovable and PortPwrCtrlMask want to be variable-length
struct usb_operations {
struct usb_device *(*allocate)(struct usb_device *);
int (*deallocate)(struct usb_device *);
- int (*control_msg)(struct usb_device *, unsigned int, void *, void *, int);
+ int (*control_msg)(struct usb_device *, unsigned int, devrequest *, void *, int);
int (*bulk_msg)(struct usb_device *, unsigned int, void *, int,unsigned long *);
int (*request_irq)(struct usb_device *, unsigned int, usb_device_irq, int, void *);
+ int (*remove_irq)(struct usb_device *, unsigned int, usb_device_irq, int, void *);
};
/*
int devnum; /* Device number on USB bus */
int slow; /* Slow device? */
int maxpacketsize; /* Maximum packet size */
- __u16 toggle; /* one bit for each endpoint */
+ int toggle; /* one bit for each endpoint */
+ int halted; /* endpoint halts */
struct usb_config_descriptor *actconfig;/* the active configuration */
int epmaxpacket[16]; /* endpoint specific maximums */
int ifnum; /* active interface number */
/* The D0/D1 toggle bits */
#define usb_gettoggle(dev, ep) (((dev)->toggle >> ep) & 1)
#define usb_dotoggle(dev, ep) ((dev)->toggle ^= (1 << ep))
-#define usb_settoggle(dev, ep, bit) ((dev)->toggle = ((dev)->toggle & (0xfffe << ep)) | (bit << ep))
+#define usb_settoggle(dev, ep, bit) ((dev)->toggle = ((dev)->toggle & ~(1 << ep)) | ((bit) << ep))
+
+/* Endpoint halt */
+#define usb_endpoint_halt(dev, ep) ((dev)->halted |= (1 << (ep)))
+#define usb_endpoint_running(dev, ep) ((dev)->halted &= ~(1 << (ep)))
+#define usb_endpoint_halted(dev, ep) ((dev)->halted & (1 << (ep)))
static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint)
{
int usb_set_idle(struct usb_device *dev, int duration, int report_id);
int usb_set_configuration(struct usb_device *dev, int configuration);
int usb_get_report(struct usb_device *dev);
+int usb_clear_halt(struct usb_device *dev, int endp);
+static inline char * usb_string(struct usb_device* dev, int index)
+{
+ if (index <= dev->maxstring && dev->stringindex && dev->stringindex[index])
+ return dev->stringindex[index];
+ else
+ return NULL;
+}
/*
* Debugging helpers..
--- /dev/null
+
+/* Driver for USB scsi like devices
+ *
+ * (C) Michael Gee (michael@linuxspecific.com) 1999
+ *
+ * This driver is schizoid - it makes a USB device appear as both a SCSI device
+ * and a character device. The latter is only available if the device has an
+ * interrupt endpoint, and is used specifically to receive interrupt events.
+ *
+ * In order to support various 'strange' devices, this module supports plug in
+ * device specific filter modules, which can do their own thing when required.
+ *
+ * Further reference.
+ * This driver is based on the 'USB Mass Storage Class' document. This
+ * describes in detail the transformation of SCSI command blocks to the
+ * equivalent USB control and data transfer required.
+ * It is important to note that in a number of cases this class exhibits
+ * class-specific exemptions from the USB specification. Notably the
+ * usage of NAK, STALL and ACK differs from the norm, in that they are
+ * used to communicate wait, failed and OK on SCSI commands.
+ * Also, for certain devices, the interrupt endpoint is used to convey
+ * status of a command.
+ *
+ * Basically, this stuff is WEIRD!!
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+
+#include <asm/spinlock.h>
+#include <linux/smp_lock.h>
+
+#include <linux/blk.h>
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include "../scsi/sd.h"
+
+#include "usb.h"
+#include "usb_scsi.h"
+
+/* direction table (what a pain) */
+
+unsigned char us_direction[256/8] = {
+
+#include "usb_scsi_dt.c"
+
+};
+
+/*
+ * Per device data
+ */
+
+static int my_host_number;
+
+int usbscsi_debug = 1;
+
+/*
+ * Per-device driver state, kept on the singly-linked us_list.
+ * One instance per attached USB mass-storage device; also reachable
+ * from the SCSI layer via host->hostdata[0].
+ */
+struct us_data {
+ struct us_data *next; /* next device */
+ struct usb_device *pusb_dev;
+ struct usb_scsi_filter *filter; /* filter driver */
+ void *fdata; /* filter data */
+ unsigned int flags; /* from filter initially*/
+ __u8 ep_in; /* in endpoint */
+ __u8 ep_out; /* out ....... */
+ __u8 ep_int; /* interrupt . */
+ __u8 subclass; /* as in overview */
+ __u8 protocol; /* .............. */
+ int (*pop)(Scsi_Cmnd *); /* protocol specific do cmd */
+ GUID(guid); /* unique dev id */
+ struct Scsi_Host *host; /* our dummy host data */
+ Scsi_Host_Template *htmplt; /* own host template */
+ int host_number; /* to find us */
+ int host_no; /* allocated by scsi */
+ int fixedlength; /* expand commands */
+ Scsi_Cmnd *srb; /* current srb */
+ int action; /* what to do */
+ wait_queue_head_t waitq; /* thread waits */
+ wait_queue_head_t ip_waitq; /* for CBI interrupts */
+ __u16 ip_data; /* interrupt data */
+ int ip_wanted; /* needed */
+ int pid; /* control thread */
+ struct semaphore *notify; /* wait for thread to begin */
+};
+
+/*
+ * kernel thread actions
+ */
+
+#define US_ACT_COMMAND 1
+#define US_ACT_ABORT 2
+#define US_ACT_DEVICE_RESET 3
+#define US_ACT_BUS_RESET 4
+#define US_ACT_HOST_RESET 5
+
+static struct proc_dir_entry proc_usb_scsi =
+{
+ PROC_SCSI_USB_SCSI,
+ 0,
+ NULL,
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ 2
+};
+
+static struct us_data *us_list;
+
+static struct usb_scsi_filter *filters;
+
+static int scsi_probe(struct usb_device *dev);
+static void scsi_disconnect(struct usb_device *dev);
+static struct usb_driver scsi_driver = {
+ "usb_scsi",
+ scsi_probe,
+ scsi_disconnect,
+ { NULL, NULL }
+};
+
+/* Data handling, using SG if required */
+
+/*
+ * Transfer one contiguous buffer over a bulk pipe, in chunks of up to
+ * 16 max-size packets.  NAK timeouts are retried (up to 100 times),
+ * resuming from the partial count already transferred; a short but
+ * successful transfer is reported as USB_ST_DATAUNDERRUN.
+ * Returns 0 on success or the bulk_msg error code.
+ */
+static int us_one_transfer(struct us_data *us, int pipe, char *buf, int length)
+{
+ int max_size = usb_maxpacket(us->pusb_dev, pipe) * 16;
+ int this_xfer;
+ int result;
+ unsigned long partial;
+ int maxtry = 100;
+ while (length) {
+ this_xfer = length > max_size ? max_size : length;
+ length -= this_xfer;
+ do {
+ US_DEBUGP("Bulk xfer %x(%d)\n", (unsigned int)buf, this_xfer);
+ result = us->pusb_dev->bus->op->bulk_msg(us->pusb_dev, pipe, buf,
+ this_xfer, &partial);
+
+ /* we want to retry if the device reported NAK */
+ if (result == USB_ST_TIMEOUT) {
+ if (!maxtry--)
+ break;
+ this_xfer -= partial;
+ buf += partial;
+ } else if (!result && partial != this_xfer) {
+ /* short data - assume end */
+ result = USB_ST_DATAUNDERRUN;
+ break;
+ } else
+ break;
+ } while ( this_xfer );
+ if (result)
+ return result;
+ buf += this_xfer;
+ }
+ return 0;
+
+}
+/*
+ * Move the command's data phase over the appropriate bulk pipe,
+ * honouring scatter-gather if the SCSI layer supplied one.  dir_in
+ * selects the in (non-zero) or out (zero) endpoint.  Returns the
+ * result of the first failing us_one_transfer, or 0.
+ */
+static int us_transfer(Scsi_Cmnd *srb, int dir_in)
+{
+ struct us_data *us = (struct us_data *)srb->host_scribble;
+ int i;
+ int result = -1;
+
+ if (srb->use_sg) {
+ struct scatterlist *sg = (struct scatterlist *) srb->request_buffer;
+
+ for (i = 0; i < srb->use_sg; i++) {
+ result = us_one_transfer(us, dir_in ? usb_rcvbulkpipe(us->pusb_dev, us->ep_in) :
+ usb_sndbulkpipe(us->pusb_dev, us->ep_out),
+ sg[i].address, sg[i].length);
+ if (result)
+ break;
+ }
+ return result;
+ }
+ else
+ return us_one_transfer(us, dir_in ? usb_rcvbulkpipe(us->pusb_dev, us->ep_in) :
+ usb_sndbulkpipe(us->pusb_dev, us->ep_out),
+ srb->request_buffer, srb->request_bufflen);
+}
+
+/*
+ * Compute the expected data-phase length for a command: zero for the
+ * listed no-data commands, the sum of the scatter-gather segment
+ * lengths if SG is in use, otherwise the flat request_bufflen.
+ */
+static unsigned int us_transfer_length(Scsi_Cmnd *srb)
+{
+ int i;
+ unsigned int total = 0;
+
+ /* always zero for some commands */
+ switch (srb->cmnd[0]) {
+ case SEEK_6:
+ case SEEK_10:
+ case REZERO_UNIT:
+ case ALLOW_MEDIUM_REMOVAL:
+ case START_STOP:
+ case TEST_UNIT_READY:
+ return 0;
+
+ default:
+ break;
+ }
+
+ if (srb->use_sg) {
+ struct scatterlist *sg = (struct scatterlist *) srb->request_buffer;
+
+ for (i = 0; i < srb->use_sg; i++) {
+ total += sg[i].length;
+ }
+ return total;
+ }
+ else
+ return srb->request_bufflen;
+
+}
+
+/*
+ * Completion handler for the CBI status interrupt.  Stashes the two
+ * status bytes in us->ip_data, clears ip_wanted (unless the transfer
+ * was torn down with USB_ST_REMOVED) and wakes the thread sleeping on
+ * ip_waitq.  Returning 0 tells the HCD not to re-arm the interrupt.
+ */
+static int pop_CBI_irq(int state, void *buffer, void *dev_id)
+{
+ struct us_data *us = (struct us_data *)dev_id;
+
+ if (state != USB_ST_REMOVED) {
+ us->ip_data = *(__u16 *)buffer;
+ us->ip_wanted = 0;
+ }
+ wake_up(&us->ip_waitq);
+
+ /* we don't want another interrupt */
+
+ return 0;
+}
+/*
+ * Send the SCSI CDB to a Control/Bulk(/Interrupt) device via the
+ * class-specific ADSC control request.  When US_FL_FIXED_COMMAND is
+ * set the CDB is padded/translated to the device's fixed length
+ * (6-byte READ/WRITE/MODE commands become their 10-byte forms).  For
+ * UFI devices a failing TEST_UNIT_READY triggers one START_STOP
+ * (start) retry, per the UFI spec.  Returns the control_msg result.
+ */
+static int pop_CB_command(Scsi_Cmnd *srb)
+{
+ struct us_data *us = (struct us_data *)srb->host_scribble;
+ devrequest dr;
+ unsigned char cmd[16];
+ int result;
+ int retry = 1;
+ int done_start = 0;
+
+ while (retry--) {
+ dr.requesttype = USB_TYPE_CLASS | USB_RT_INTERFACE;
+ dr.request = US_CBI_ADSC;
+ dr.value = 0;
+ dr.index = us->pusb_dev->ifnum;
+ dr.length = srb->cmd_len;
+
+ if (us->flags & US_FL_FIXED_COMMAND) {
+ dr.length = us->fixedlength;
+ memset(cmd, 0, us->fixedlength);
+
+ /* fix some commands */
+
+ switch (srb->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ /* expand 6-byte to 10-byte form (opcode | 0x20) */
+ cmd[0] = srb->cmnd[0] | 0x20;
+ cmd[1] = srb->cmnd[1] & 0xE0;
+ cmd[2] = 0;
+ cmd[3] = srb->cmnd[1] & 0x1F;
+ cmd[4] = srb->cmnd[2];
+ cmd[5] = srb->cmnd[3];
+ cmd[8] = srb->cmnd[4];
+ break;
+
+ case MODE_SENSE:
+ case MODE_SELECT:
+ /* expand to the 10-byte form (opcode | 0x40) */
+ cmd[0] = srb->cmnd[0] | 0x40;
+ cmd[1] = srb->cmnd[1];
+ cmd[2] = srb->cmnd[2];
+ cmd[8] = srb->cmnd[4];
+ break;
+
+ default:
+ memcpy(cmd, srb->cmnd, srb->cmd_len);
+ break;
+ }
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev,
+ usb_sndctrlpipe(us->pusb_dev,0),
+ &dr, cmd, us->fixedlength);
+ if (!done_start && us->subclass == US_SC_UFI && cmd[0] == TEST_UNIT_READY && result) {
+ /* as per spec try a start command, wait and retry */
+
+ done_start++;
+ cmd[0] = START_STOP;
+ cmd[4] = 1; /* start */
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev,
+ usb_sndctrlpipe(us->pusb_dev,0),
+ &dr, cmd, us->fixedlength);
+ wait_ms(100);
+ retry++;
+ continue;
+ }
+ } else
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev,
+ usb_sndctrlpipe(us->pusb_dev,0),
+ &dr, srb->cmnd, srb->cmd_len);
+ if (result != USB_ST_STALL && result != USB_ST_TIMEOUT)
+ return result;
+ }
+ return result;
+}
+
+/* Protocol command handlers */
+
+/*
+ * Protocol handler for Control/Bulk and Control/Bulk/Interrupt
+ * devices: send the command via pop_CB_command, run the data phase,
+ * then (CBI only) collect completion status from the interrupt
+ * endpoint.  Returns a SCSI result code (DID_* << 16, optionally
+ * OR'd with 2 = CHECK_CONDITION to request sense).
+ */
+static int pop_CBI(Scsi_Cmnd *srb)
+{
+ struct us_data *us = (struct us_data *)srb->host_scribble;
+ int result;
+
+ /* run the command */
+
+ if ((result = pop_CB_command(srb))) {
+ US_DEBUGP("CBI command %x\n", result);
+ if (result == USB_ST_STALL || result == USB_ST_TIMEOUT)
+ return (DID_OK << 16) | 2;
+ return DID_ABORT << 16;
+ }
+
+ /* transfer the data */
+
+ if (us_transfer_length(srb)) {
+ result = us_transfer(srb, US_DIRECTION(srb->cmnd[0]));
+ if (result && result != USB_ST_DATAUNDERRUN) {
+ US_DEBUGP("CBI transfer %x\n", result);
+ return DID_ABORT << 16;
+ }
+ }
+
+ /* get status */
+
+ if (us->protocol == US_PR_CBI) {
+ /* get from interrupt pipe */
+
+ /* add interrupt transfer, marked for removal */
+ us->ip_wanted = 1;
+ /* NOTE(review): builds the pipe with usb_rcvctrlpipe even
+ * though ep_int is an interrupt endpoint - confirm this is
+ * what the HCD's request_irq expects */
+ result = us->pusb_dev->bus->op->request_irq(us->pusb_dev,
+ usb_rcvctrlpipe(us->pusb_dev, us->ep_int),
+ pop_CBI_irq, 0, (void *)us);
+ if (result) {
+ US_DEBUGP("No interrupt for CBI %x\n", result);
+ return DID_ABORT << 16;
+ }
+ sleep_on(&us->ip_waitq);
+ if (us->ip_wanted) {
+ US_DEBUGP("Did not get interrupt on CBI\n");
+ us->ip_wanted = 0;
+ return DID_ABORT << 16;
+ }
+
+ US_DEBUGP("Got interrupt data %x\n", us->ip_data);
+
+ /* sort out what it means */
+
+ if (us->subclass == US_SC_UFI) {
+ /* gives us asc and ascq, as per request sense */
+
+ if (srb->cmnd[0] == REQUEST_SENSE ||
+ srb->cmnd[0] == INQUIRY)
+ return DID_OK << 16;
+ else
+ return (DID_OK << 16) + ((us->ip_data & 0xff) ? 2 : 0);
+ }
+ if (us->ip_data & 0xff) {
+ US_DEBUGP("Bad CBI interrupt data %x\n", us->ip_data);
+ return DID_ABORT << 16;
+ }
+ return (DID_OK << 16) + ((us->ip_data & 0x300) ? 2 : 0);
+ } else {
+ /* get from where? */
+ }
+ return DID_ERROR << 16;
+}
+
+/*
+ * Bulk-only mass-storage reset: try a soft reset first, fall back to
+ * a hard reset if that fails, then clear any halt on both bulk
+ * endpoints.  Returns the last control_msg result.
+ */
+static int pop_Bulk_reset(struct us_data *us)
+{
+ devrequest dr;
+ int result;
+
+ dr.requesttype = USB_TYPE_CLASS | USB_RT_INTERFACE;
+ dr.request = US_BULK_RESET;
+ dr.value = US_BULK_RESET_SOFT;
+ dr.index = 0;
+ dr.length = 0;
+
+ US_DEBUGP("Bulk soft reset\n");
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev, usb_sndctrlpipe(us->pusb_dev,0), &dr, NULL, 0);
+ if (result) {
+ US_DEBUGP("Bulk soft reset failed %d\n", result);
+ dr.value = US_BULK_RESET_HARD;
+ result = us->pusb_dev->bus->op->control_msg(us->pusb_dev, usb_sndctrlpipe(us->pusb_dev,0), &dr, NULL, 0);
+ if (result)
+ US_DEBUGP("Bulk hard reset failed %d\n", result);
+ }
+ usb_clear_halt(us->pusb_dev, us->ep_in | 0x80);
+ usb_clear_halt(us->pusb_dev, us->ep_out);
+ return result;
+}
+/*
+ * The bulk only protocol handler.
+ * Uses the in and out endpoints to transfer commands and data (nasty)
+ *
+ * Wraps the CDB in a CBW, sends it on the out endpoint, runs the data
+ * phase, then reads and validates the CSW (retrying up to 3 times on
+ * STALL/NAK).  Returns a SCSI result code (DID_* << 16, optionally
+ * OR'd with 2 = CHECK_CONDITION to request sense).
+ */
+static int pop_Bulk(Scsi_Cmnd *srb)
+{
+ struct us_data *us = (struct us_data *)srb->host_scribble;
+ struct bulk_cb_wrap bcb;
+ struct bulk_cs_wrap bcs;
+ int result;
+ unsigned long partial;
+ int stall;
+
+ /* set up the command wrapper */
+
+ bcb.Signature = US_BULK_CB_SIGN;
+ bcb.DataTransferLength = us_transfer_length(srb);
+ bcb.Flags = US_DIRECTION(srb->cmnd[0]) << 7;
+ bcb.Tag = srb->serial_number;
+ bcb.Lun = 0;
+ memset(bcb.CDB, 0, sizeof(bcb.CDB));
+ memcpy(bcb.CDB, srb->cmnd, srb->cmd_len);
+ if (us->flags & US_FL_FIXED_COMMAND) {
+ bcb.Length = us->fixedlength;
+ } else {
+ bcb.Length = srb->cmd_len;
+ }
+
+ /* send it to out endpoint */
+
+ US_DEBUGP("Bulk command S %x T %x L %d F %d CL %d\n", bcb.Signature,
+ bcb.Tag, bcb.DataTransferLength, bcb.Flags, bcb.Length);
+ result = us->pusb_dev->bus->op->bulk_msg(us->pusb_dev,
+ usb_sndbulkpipe(us->pusb_dev, us->ep_out), &bcb,
+ US_BULK_CB_WRAP_LEN, &partial);
+ if (result) {
+ US_DEBUGP("Bulk command result %x\n", result);
+ return DID_ABORT << 16;
+ }
+
+ //return DID_BAD_TARGET << 16;
+ /* send/receive data */
+
+ if (bcb.DataTransferLength) {
+ result = us_transfer(srb, bcb.Flags);
+ if (result && result != USB_ST_DATAUNDERRUN && result != USB_ST_STALL) {
+ US_DEBUGP("Bulk transfer result %x\n", result);
+ return DID_ABORT << 16;
+ }
+ }
+
+ /* get status */
+
+
+ stall = 0;
+ do {
+ //usb_settoggle(us->pusb_dev, us->ep_in, 0); /* AAARgh!! */
+ US_DEBUGP("Toggle is %d\n", usb_gettoggle(us->pusb_dev, us->ep_in));
+ result = us->pusb_dev->bus->op->bulk_msg(us->pusb_dev,
+ usb_rcvbulkpipe(us->pusb_dev, us->ep_in), &bcs,
+ US_BULK_CS_WRAP_LEN, &partial);
+ if (result == USB_ST_STALL || result == USB_ST_TIMEOUT)
+ stall++;
+ else
+ break;
+ } while ( stall < 3);
+ if (result && result != USB_ST_DATAUNDERRUN) {
+ US_DEBUGP("Bulk status result = %x\n", result);
+ return DID_ABORT << 16;
+ }
+
+ /* check bulk status */
+
+ US_DEBUGP("Bulk status S %x T %x R %d V %x\n", bcs.Signature, bcs.Tag,
+ bcs.Residue, bcs.Status);
+ if (bcs.Signature != US_BULK_CS_SIGN || bcs.Tag != bcb.Tag ||
+ bcs.Status > US_BULK_STAT_PHASE) {
+ US_DEBUGP("Bulk logical error\n");
+ return DID_ABORT << 16;
+ }
+ switch (bcs.Status) {
+ case US_BULK_STAT_OK:
+ return DID_OK << 16;
+
+ case US_BULK_STAT_FAIL:
+ /* check for underrun - don't report */
+ if (bcs.Residue)
+ return DID_OK << 16;
+ //pop_Bulk_reset(us);
+ break;
+
+ case US_BULK_STAT_PHASE:
+ return DID_ERROR << 16;
+ }
+ return (DID_OK << 16) | 2; /* check sense required */
+
+}
+
+/* Host functions */
+
+/* detect adapter (always true ) */
+/*
+ * SCSI-layer detect callback.  Builds a per-host /proc entry name,
+ * duplicates the proc_dir_entry and registers the dummy host; the
+ * us_data pointer was smuggled in through sht->proc_dir by scsi_probe.
+ * Returns 1 on success (one host found), 0 on failure.
+ */
+static int us_detect(struct SHT *sht)
+{
+ /* FIXME - not nice at all, but how else ? */
+ struct us_data *us = (struct us_data *)sht->proc_dir;
+ char name[32];
+
+ sprintf(name, "usbscsi%d", us->host_number);
+ proc_usb_scsi.namelen = strlen(name);
+ proc_usb_scsi.name = kmalloc(proc_usb_scsi.namelen+1, GFP_KERNEL);
+ if (!proc_usb_scsi.name)
+ return 0;
+ strcpy((char *)proc_usb_scsi.name, name);
+ sht->proc_dir = kmalloc(sizeof(*sht->proc_dir), GFP_KERNEL);
+ if (!sht->proc_dir) {
+ kfree(proc_usb_scsi.name);
+ return 0;
+ }
+ *sht->proc_dir = proc_usb_scsi;
+ sht->name = proc_usb_scsi.name;
+ /* sizeof(us) = pointer size: only hostdata[0] is used below,
+ * so pointer-sized extra space suffices */
+ us->host = scsi_register(sht, sizeof(us));
+ if (us->host) {
+ us->host->hostdata[0] = (unsigned long)us;
+ us->host_no = us->host->host_no;
+ return 1;
+ }
+ kfree(proc_usb_scsi.name);
+ kfree(sht->proc_dir);
+ return 0;
+}
+
+/* release - must be here to stop scsi
+ * from trying to release IRQ etc.
+ * Kill off our data
+ */
+/*
+ * SCSI-layer release callback: tear down the filter, deregister from
+ * the USB core and unlink this us_data from us_list.  Must exist so
+ * the SCSI layer does not try to free an IRQ we never requested.
+ */
+static int us_release(struct Scsi_Host *psh)
+{
+ struct us_data *us = (struct us_data *)psh->hostdata[0];
+ struct us_data *prev = (struct us_data *)&us_list;
+
+ if (us->filter)
+ us->filter->release(us->fdata);
+ if (us->pusb_dev)
+ usb_deregister(&scsi_driver);
+
+ /* FIXME - leaves hanging host template copy */
+ /* (because scsi layer uses it after removal !!!) */
+ while(prev->next != us)
+ prev = prev->next;
+ prev->next = us->next;
+ return 0;
+}
+
+/* run command */
+/* Synchronous command entry point - unsupported; everything must go
+ * through us_queuecommand.  Always fails with DID_BAD_TARGET. */
+static int us_command( Scsi_Cmnd *srb )
+{
+ US_DEBUGP("Bad use of us_command\n");
+
+ return DID_BAD_TARGET << 16;
+}
+
+/* run command */
+/*
+ * Queue a SCSI command: record the srb and its done callback in
+ * us_data and wake the per-device control thread, which performs the
+ * actual USB transfers and calls scsi_done.  Always returns 0.
+ */
+static int us_queuecommand( Scsi_Cmnd *srb , void (*done)(Scsi_Cmnd *))
+{
+ struct us_data *us = (struct us_data *)srb->host->hostdata[0];
+
+ US_DEBUGP("Command wakeup\n");
+ srb->host_scribble = (unsigned char *)us;
+ us->srb = srb;
+ srb->scsi_done = done;
+ us->action = US_ACT_COMMAND;
+
+ /* wake up the process task */
+
+ wake_up_interruptible(&us->waitq);
+
+ return 0;
+}
+
+/* Error-handling callbacks - all stubs for now; returning 0 tells the
+ * SCSI midlayer the operation did nothing. */
+
+static int us_abort( Scsi_Cmnd *srb )
+{
+ return 0;
+}
+
+static int us_device_reset( Scsi_Cmnd *srb )
+{
+ return 0;
+}
+
+static int us_host_reset( Scsi_Cmnd *srb )
+{
+ return 0;
+}
+
+static int us_bus_reset( Scsi_Cmnd *srb )
+{
+ return 0;
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
+
+/*
+ * /proc/scsi read handler: locate the us_data for hostno and emit a
+ * short description (vendor/product, GUID, transport style).  Writes
+ * (inout != 0) are accepted and ignored.  Returns bytes available at
+ * offset, per the usual SCSI proc_info contract.
+ */
+int usb_scsi_proc_info (char *buffer, char **start, off_t offset, int length, int hostno, int inout)
+{
+ struct us_data *us = us_list;
+ char *pos = buffer;
+ char *vendor;
+ char *product;
+ char *style = "";
+
+ /* find our data from hostno */
+
+ while (us) {
+ if (us->host_no == hostno)
+ break;
+ us = us->next;
+ }
+
+ if (!us)
+ return -ESRCH;
+
+ /* null on outward */
+
+ if (inout)
+ return length;
+
+ if (!(vendor = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iManufacturer)))
+ vendor = "?";
+ if (!(product = usb_string(us->pusb_dev, us->pusb_dev->descriptor.iProduct)))
+ product = "?";
+
+ switch (us->protocol) {
+ case US_PR_CB:
+ style = "Control/Bulk";
+ break;
+
+ case US_PR_CBI:
+ style = "Control/Bulk/Interrupt";
+ break;
+
+ case US_PR_ZIP:
+ style = "Bulk only";
+ break;
+
+ }
+ SPRINTF ("Host scsi%d: usb-scsi\n", hostno);
+ SPRINTF ("Device: %s %s - GUID " GUID_FORMAT "\n", vendor, product, GUID_ARGS(us->guid) );
+ SPRINTF ("Style: %s\n", style);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return (0);
+ else if ((pos - buffer - offset) < length)
+ return (pos - buffer - offset);
+ else
+ return (length);
+}
+
+/*
+ * this defines our 'host'
+ *
+ * Template copied per device by us_detect; name and proc_dir are
+ * filled in at probe/detect time (proc_dir doubles as the us_data
+ * back-pointer before registration - see us_detect's FIXME).
+ */
+
+static Scsi_Host_Template my_host_template = {
+ NULL, /* next */
+ NULL, /* module */
+ NULL, /* proc_dir */
+ usb_scsi_proc_info,
+ NULL, /* name - points to unique */
+ us_detect,
+ us_release,
+ NULL, /* info */
+ NULL, /* ioctl */
+ us_command,
+ us_queuecommand,
+ NULL, /* eh_strategy */
+ us_abort,
+ us_device_reset,
+ us_bus_reset,
+ us_host_reset,
+ NULL, /* abort */
+ NULL, /* reset */
+ NULL, /* slave_attach */
+ NULL, /* bios_param */
+ 1, /* can_queue */
+ -1, /* this_id */
+ SG_ALL, /* sg_tablesize */
+ 1, /* cmd_per_lun */
+ 0, /* present */
+ FALSE, /* unchecked_isa_dma */
+ FALSE, /* use_clustering */
+ TRUE, /* use_new_eh_code */
+ TRUE /* emulated */
+};
+
+/*
+ * Per-device kernel thread: sleeps on us->waitq until us_queuecommand
+ * (or an eh entry point) posts an action, runs the command through the
+ * filter or the protocol handler, and completes it via scsi_done.
+ * SIGUSR2 toggles debugging; any other signal terminates the thread.
+ *
+ * Fixes a mis-encoding in the original patch: "&current" had been
+ * mangled into the HTML entity sequence "¤t" in the sigmask
+ * handling below, which would not even compile.
+ */
+static int usbscsi_control_thread(void * __us)
+{
+ struct us_data *us = (struct us_data *)__us;
+ int action;
+
+ lock_kernel();
+
+ /*
+ * This thread doesn't need any user-level access,
+ * so get rid of all our resources..
+ */
+ exit_mm(current);
+ exit_files(current);
+ //exit_fs(current);
+
+ sprintf(current->comm, "usbscsi%d", us->host_no);
+
+ unlock_kernel();
+
+ up(us->notify);
+
+ for(;;) {
+ siginfo_t info;
+ int unsigned long signr;
+
+ interruptible_sleep_on(&us->waitq);
+
+ action = us->action;
+ us->action = 0;
+
+ switch (action) {
+ case US_ACT_COMMAND :
+ if (!us->pusb_dev || us->srb->target || us->srb->lun) {
+ /* bad device */
+ US_DEBUGP( "Bad device number (%d/%d) or dev %x\n", us->srb->target, us->srb->lun, (unsigned int)us->pusb_dev);
+ us->srb->result = DID_BAD_TARGET << 16;
+ } else {
+ US_DEBUG(us_show_command(us->srb));
+ if (us->filter && us->filter->command)
+ us->srb->result = us->filter->command(us->fdata, us->srb);
+ else
+ us->srb->result = us->pop(us->srb);
+ }
+ us->srb->scsi_done(us->srb);
+ break;
+
+ case US_ACT_ABORT :
+ break;
+
+ case US_ACT_DEVICE_RESET :
+ break;
+
+ case US_ACT_BUS_RESET :
+ break;
+
+ case US_ACT_HOST_RESET :
+ break;
+
+ }
+
+ if(signal_pending(current)) {
+ /* sending SIGUSR1 makes us print out some info */
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (signr == SIGUSR2) {
+ printk("USBSCSI debug toggle\n");
+ usbscsi_debug = !usbscsi_debug;
+ } else {
+ break;
+ }
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ printk("usbscsi_control_thread exiting\n");
+
+ return 0;
+}
+
+static int scsi_probe(struct usb_device *dev)
+{
+ struct usb_interface_descriptor *interface;
+ int i;
+ char *mf; /* manufacturer */
+ char *prod; /* product */
+ char *serial; /* serial number */
+ struct us_data *ss = NULL;
+ struct usb_scsi_filter *filter = filters;
+ void *fdata = NULL;
+ unsigned int flags = 0;
+ GUID(guid);
+ struct us_data *prev;
+ Scsi_Host_Template *htmplt;
+ int protocol = 0;
+ int subclass = 0;
+
+ GUID_CLEAR(guid);
+ mf = usb_string(dev, dev->descriptor.iManufacturer);
+ prod = usb_string(dev, dev->descriptor.iProduct);
+ serial = usb_string(dev, dev->descriptor.iSerialNumber);
+
+ /* probe with filters first */
+
+ if (mf && prod) {
+ while (filter) {
+ if ((fdata = filter->probe(dev, mf, prod, serial)) != NULL) {
+ flags = filter->flags;
+ printk(KERN_INFO "USB Scsi filter %s\n", filter->name);
+ break;
+ }
+ filter = filter->next;
+ }
+ }
+
+ /* generic devices next */
+
+ if (fdata == NULL) {
+
+ /* some exceptions */
+ if (dev->descriptor.idVendor == 0x04e6 &&
+ dev->descriptor.idProduct == 0x0001) {
+ /* shuttle E-USB */
+ protocol = US_PR_ZIP;
+ subclass = US_SC_8070; /* an assumption */
+ } else if (dev->descriptor.bDeviceClass != 0 ||
+ dev->config->altsetting->interface->bInterfaceClass != 8 ||
+ dev->config->altsetting->interface->bInterfaceSubClass < US_SC_MIN ||
+ dev->config->altsetting->interface->bInterfaceSubClass > US_SC_MAX) {
+ return -1;
+ }
+
+ /* now check if we have seen it before */
+
+ if (dev->descriptor.iSerialNumber &&
+ usb_string(dev, dev->descriptor.iSerialNumber) ) {
+ make_guid(guid, dev->descriptor.idVendor, dev->descriptor.idProduct,
+ usb_string(dev, dev->descriptor.iSerialNumber));
+ for (ss = us_list; ss; ss = ss->next) {
+ if (GUID_EQUAL(guid, ss->guid)) {
+ US_DEBUGP("Found existing GUID " GUID_FORMAT "\n", GUID_ARGS(guid));
+ break;
+ }
+ }
+ }
+ }
+
+ if (!ss) {
+ if ((ss = (struct us_data *)kmalloc(sizeof(*ss), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING USB_SCSI "Out of memory\n");
+ if (filter)
+ filter->release(fdata);
+ return -1;
+ }
+ memset(ss, 0, sizeof(struct us_data));
+ }
+
+ interface = dev->config->altsetting->interface;
+ ss->filter = filter;
+ ss->fdata = fdata;
+ ss->flags = flags;
+ if (subclass) {
+ ss->subclass = subclass;
+ ss->protocol = protocol;
+ } else {
+ ss->subclass = interface->bInterfaceSubClass;
+ ss->protocol = interface->bInterfaceProtocol;
+ }
+
+ /* set the protocol op */
+
+ US_DEBUGP("Protocol ");
+ switch (ss->protocol) {
+ case US_PR_CB:
+ US_DEBUGPX("Control/Bulk\n");
+ ss->pop = pop_CBI;
+ break;
+
+ case US_PR_CBI:
+ US_DEBUGPX("Control/Bulk/Interrupt\n");
+ ss->pop = pop_CBI;
+ break;
+
+ default:
+ US_DEBUGPX("Bulk\n");
+ ss->pop = pop_Bulk;
+ break;
+ }
+
+ /*
+ * we are expecting a minimum of 2 endpoints - in and out (bulk)
+ * an optional interrupt is OK (necessary for CBI protocol)
+ * we will ignore any others
+ */
+
+ for (i = 0; i < interface->bNumEndpoints; i++) {
+ if (interface->endpoint[i].bmAttributes == 0x02) {
+ if (interface->endpoint[i].bEndpointAddress & 0x80)
+ ss->ep_in = interface->endpoint[i].bEndpointAddress & 0x0f;
+ else
+ ss->ep_out = interface->endpoint[i].bEndpointAddress & 0x0f;
+ } else if (interface->endpoint[i].bmAttributes == 0x03) {
+ ss->ep_int = interface->endpoint[i].bEndpointAddress & 0x0f;
+ }
+ }
+ US_DEBUGP("Endpoints In %d Out %d Int %d\n", ss->ep_in, ss->ep_out, ss->ep_int);
+
+ /* exit if strange looking */
+
+ if (usb_set_configuration(dev, dev->config[0].bConfigurationValue) ||
+ !ss->ep_in || !ss->ep_out || (ss->protocol == US_PR_CBI && ss->ep_int == 0)) {
+ US_DEBUGP("Problems with device\n");
+ if (ss->host) {
+ scsi_unregister_module(MODULE_SCSI_HA, ss->htmplt);
+ kfree(ss->htmplt->name);
+ kfree(ss->htmplt);
+ }
+ if (filter)
+ filter->release(fdata);
+ kfree(ss);
+ return -1; /* no endpoints */
+ }
+
+ if (dev->config[0].iConfiguration && usb_string(dev, dev->config[0].iConfiguration))
+ US_DEBUGP("Configuration %s\n", usb_string(dev, dev->config[0].iConfiguration));
+ if (interface->iInterface && usb_string(dev, interface->iInterface))
+ US_DEBUGP("Interface %s\n", usb_string(dev, interface->iInterface));
+
+ ss->pusb_dev = dev;
+
+ /* Now generate a scsi host definition, and register with scsi above us */
+
+ if (!ss->host) {
+
+ /* make unique id if possible */
+
+ if (dev->descriptor.iSerialNumber &&
+ usb_string(dev, dev->descriptor.iSerialNumber) ) {
+ make_guid(ss->guid, dev->descriptor.idVendor, dev->descriptor.idProduct,
+ usb_string(dev, dev->descriptor.iSerialNumber));
+ }
+
+ US_DEBUGP("New GUID " GUID_FORMAT "\n", GUID_ARGS(guid));
+
+ /* set class specific stuff */
+
+ US_DEBUGP("SubClass ");
+ switch (ss->subclass) {
+ case US_SC_RBC:
+ US_DEBUGPX("Reduced Block Commands\n");
+ break;
+ case US_SC_8020:
+ US_DEBUGPX("8020\n");
+ break;
+ case US_SC_QIC:
+ US_DEBUGPX("QIC157\n");
+ break;
+ case US_SC_8070:
+ US_DEBUGPX("8070\n");
+ ss->flags |= US_FL_FIXED_COMMAND;
+ ss->fixedlength = 12;
+ break;
+ case US_SC_SCSI:
+ US_DEBUGPX("Transparent SCSI\n");
+ break;
+ case US_SC_UFI:
+ US_DEBUGPX("UFI\n");
+ ss->flags |= US_FL_FIXED_COMMAND;
+ ss->fixedlength = 12;
+ break;
+
+ default:
+ break;
+ }
+
+ /* create unique host template */
+
+ if ((htmplt = (Scsi_Host_Template *)kmalloc(sizeof(*ss->htmplt), GFP_KERNEL)) == NULL ) {
+ printk(KERN_WARNING USB_SCSI "Out of memory\n");
+ if (filter)
+ filter->release(fdata);
+ kfree(ss);
+ return -1;
+ }
+ memcpy(htmplt, &my_host_template, sizeof(my_host_template));
+ ss->host_number = my_host_number++;
+
+
+ (struct us_data *)htmplt->proc_dir = ss;
+ if (ss->protocol == US_PR_CBI)
+ init_waitqueue_head(&ss->ip_waitq);
+
+ /* start up our thread */
+
+ {
+ DECLARE_MUTEX_LOCKED(sem);
+
+ init_waitqueue_head(&ss->waitq);
+
+ ss->notify = &sem;
+ ss->pid = kernel_thread(usbscsi_control_thread, ss,
+ CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ if (ss->pid < 0) {
+ printk(KERN_WARNING USB_SCSI "Unable to start control thread\n");
+ kfree(htmplt);
+ if (filter)
+ filter->release(fdata);
+ kfree(ss);
+ return -1;
+ }
+
+ /* wait for it to start */
+
+ down(&sem);
+ }
+
+ /* now register - our detect function will be called */
+
+ scsi_register_module(MODULE_SCSI_HA, htmplt);
+
+ /* put us in the list */
+
+ prev = (struct us_data *)&us_list;
+ while (prev->next)
+ prev = prev->next;
+ prev->next = ss;
+
+ }
+
+
+ printk(KERN_INFO "USB SCSI device found at address %d\n", dev->devnum);
+
+ dev->private = ss;
+ return 0;
+}
+
+static void scsi_disconnect(struct usb_device *dev)
+{
+ struct us_data *ss = dev->private;
+
+ if (!ss)
+ return;
+ if (ss->filter)
+ ss->filter->release(ss->fdata);
+ ss->pusb_dev = NULL;
+ dev->private = NULL; /* just in case */
+ MOD_DEC_USE_COUNT;
+}
+
+int usb_scsi_init(void)
+{
+
+ MOD_INC_USE_COUNT;
+#ifdef CONFIG_USB_HP4100
+ hp4100_init();
+#endif
+#ifdef CONFIG_USB_ZIP
+ usb_zip_init();
+#endif
+ usb_register(&scsi_driver);
+ printk(KERN_INFO "USB SCSI support registered.\n");
+ return 0;
+}
+
+
+int usb_scsi_register(struct usb_scsi_filter *filter)
+{
+ struct usb_scsi_filter *prev = (struct usb_scsi_filter *)&filters;
+
+ while (prev->next)
+ prev = prev->next;
+ prev->next = filter;
+ return 0;
+}
+
+void usb_scsi_deregister(struct usb_scsi_filter *filter)
+{
+ struct usb_scsi_filter *prev = (struct usb_scsi_filter *)&filters;
+
+ while (prev->next && prev->next != filter)
+ prev = prev->next;
+ if (prev->next)
+ prev->next = filter->next;
+}
+
+#ifdef MODULE
+int init_module(void)
+{
+
+ return usb_scsi_init();
+}
+
+void cleanup_module(void)
+{
+ unsigned int offset;
+
+ usb_deregister(&scsi_driver);
+}
+#endif
--- /dev/null
+/* Driver for USB scsi - include file
+ *
+ * (C) Michael Gee (michael@linuxspecific.com) 1999
+ *
+ * This driver is schizoid - it makes a USB scanner appear as both a SCSI device
+ * and a character device. The latter is only available if the device has an
+ * interrupt endpoint, and is used specifically to receive interrupt events.
+ *
+ * In order to support various 'strange' scanners, this module supports plug in
+ * device specific filter modules, which can do their own thing when required.
+ *
+ */
+
+#define USB_SCSI "usbscsi: "
+
+extern int usbscsi_debug;
+
+#ifdef CONFIG_USB_SCSI_DEBUG
+void us_show_command(Scsi_Cmnd *srb);
+#define US_DEBUGP(x...) { if(usbscsi_debug) printk( KERN_DEBUG USB_SCSI ## x ); }
+#define US_DEBUGPX(x...) { if(usbscsi_debug) printk( ## x ); }
+#define US_DEBUG(x) { if(usbscsi_debug) x; }
+#else
+#define US_DEBUGP(x...)
+#define US_DEBUGPX(x...)
+#define US_DEBUG(x)
+#endif
+
+/* bit set if input */
+extern unsigned char us_direction[256/8];
+#define US_DIRECTION(x) ((us_direction[x>>3] >> (x & 7)) & 1)
+
+/* Sub Classes */
+
+#define US_SC_RBC 1 /* Typically, flash devices */
+#define US_SC_8020 2 /* CD-ROM */
+#define US_SC_QIC 3 /* QIC-157 Tapes */
+#define US_SC_UFI 4 /* Floppy */
+#define US_SC_8070 5 /* Removable media */
+#define US_SC_SCSI 6 /* Transparent */
+#define US_SC_MIN US_SC_RBC
+#define US_SC_MAX US_SC_SCSI
+
+/* Protocols */
+
+#define US_PR_CB 1 /* Control/Bulk w/o interrupt */
+#define US_PR_CBI 0 /* Control/Bulk/Interrupt */
+#define US_PR_ZIP 0x50 /* bulk only */
+/* #define US_PR_BULK ?? */
+
+/*
+ * Bulk only data structures (Zip 100, for example)
+ */
+
+struct bulk_cb_wrap {
+ __u32 Signature; /* contains 'USBC' */
+ __u32 Tag; /* unique per command id */
+ __u32 DataTransferLength; /* size of data */
+ __u8 Flags; /* direction in bit 0 */
+ __u8 Lun; /* LUN normally 0 */
+ __u8 Length; /* length of the CDB */
+ __u8 CDB[16]; /* max command */
+};
+
+#define US_BULK_CB_WRAP_LEN 31
+#define US_BULK_CB_SIGN 0x43425355
+#define US_BULK_FLAG_IN 1
+#define US_BULK_FLAG_OUT 0
+
+struct bulk_cs_wrap {
+ __u32 Signature; /* should = 'USBS' */
+ __u32 Tag; /* same as original command */
+ __u32 Residue; /* amount not transferred */
+ __u8 Status; /* see below */
+ __u8 Filler[18];
+};
+
+#define US_BULK_CS_WRAP_LEN 31
+#define US_BULK_CS_SIGN 0x53425355
+#define US_BULK_STAT_OK 0
+#define US_BULK_STAT_FAIL 1
+#define US_BULK_STAT_PHASE 2
+
+#define US_BULK_RESET 0xff
+#define US_BULK_RESET_SOFT 1
+#define US_BULK_RESET_HARD 0
+
+/*
+ * CBI style
+ */
+
+#define US_CBI_ADSC 0
+
+/*
+ * Filter device definitions
+ */
+struct usb_scsi_filter {
+
+ struct usb_scsi_filter * next; /* usb_scsi driver only */
+ char *name; /* not really required */
+
+ unsigned int flags; /* Filter flags */
+ void * (* probe) (struct usb_device *, char *, char *, char *); /* probe device */
+ void (* release)(void *); /* device gone */
+ int (* command)(void *, Scsi_Cmnd *); /* all commands */
+};
+
+#define GUID(x) __u32 x[3]
+#define GUID_EQUAL(x, y) (x[0] == y[0] && x[1] == y[1] && x[2] == y[2])
+#define GUID_CLEAR(x) x[0] = x[1] = x[2] = 0;
+#define GUID_NONE(x) (!x[0] && !x[1] && !x[2])
+#define GUID_FORMAT "%08x%08x%08x"
+#define GUID_ARGS(x) x[0], x[1], x[2]
+
+static inline void make_guid( __u32 *pg, __u16 vendor, __u16 product, char *serial)
+{
+ pg[0] = (vendor << 16) | product;
+ pg[1] = pg[2] = 0;
+ while (*serial) {
+ pg[1] <<= 4;
+ pg[1] |= pg[2] >> 28;
+ pg[2] <<= 4;
+ if (*serial >= 'a')
+ *serial -= 'a' - 'A';
+ pg[2] |= (*serial <= '9' && *serial >= '0') ? *serial - '0'
+ : *serial - 'A' + 10;
+ serial++;
+ }
+}
+
+/* Flag definitions */
+#define US_FL_IP_STATUS 0x00000001 /* status uses interrupt */
+#define US_FL_FIXED_COMMAND 0x00000002 /* expand commands to fixed size */
+
+/*
+ * Called by filters to register/unregister the mini driver
+ *
+ * WARNING - the supplied probe function may be called before exiting this fn
+ */
+int usb_scsi_register(struct usb_scsi_filter *);
+void usb_scsi_deregister(struct usb_scsi_filter *);
+
+#ifdef CONFIG_USB_HP4100
+int hp4100_init(void);
+#endif
--- /dev/null
+
+/* Driver for USB scsi like devices
+ *
+ * (C) Michael Gee (michael@linuxspecific.com) 1999
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+
+#include <asm/spinlock.h>
+
+#include <linux/blk.h>
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include "../scsi/sd.h"
+
+#include "usb.h"
+#include "usb_scsi.h"
+
+void us_show_command(Scsi_Cmnd *srb)
+{
+ char *what;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break;
+ case REZERO_UNIT: what = "REZERO_UNIT"; break;
+ case REQUEST_SENSE: what = "REQUEST_SENSE"; break;
+ case FORMAT_UNIT: what = "FORMAT_UNIT"; break;
+ case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break;
+ case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break;
+ case READ_6: what = "READ_6"; break;
+ case WRITE_6: what = "WRITE_6"; break;
+ case SEEK_6: what = "SEEK_6"; break;
+ case READ_REVERSE: what = "READ_REVERSE"; break;
+ case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break;
+ case SPACE: what = "SPACE"; break;
+ case INQUIRY: what = "INQUIRY"; break;
+ case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
+ case MODE_SELECT: what = "MODE_SELECT"; break;
+ case RESERVE: what = "RESERVE"; break;
+ case RELEASE: what = "RELEASE"; break;
+ case COPY: what = "COPY"; break;
+ case ERASE: what = "ERASE"; break;
+ case MODE_SENSE: what = "MODE_SENSE"; break;
+ case START_STOP: what = "START_STOP"; break;
+ case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break;
+ case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break;
+ case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break;
+ case SET_WINDOW: what = "SET_WINDOW"; break;
+ case READ_CAPACITY: what = "READ_CAPACITY"; break;
+ case READ_10: what = "READ_10"; break;
+ case WRITE_10: what = "WRITE_10"; break;
+ case SEEK_10: what = "SEEK_10"; break;
+ case WRITE_VERIFY: what = "WRITE_VERIFY"; break;
+ case VERIFY: what = "VERIFY"; break;
+ case SEARCH_HIGH: what = "SEARCH_HIGH"; break;
+ case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break;
+ case SEARCH_LOW: what = "SEARCH_LOW"; break;
+ case SET_LIMITS: what = "SET_LIMITS"; break;
+ case READ_POSITION: what = "READ_POSITION"; break;
+ case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break;
+ case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break;
+ case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break;
+ case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break;
+ case COMPARE: what = "COMPARE"; break;
+ case COPY_VERIFY: what = "COPY_VERIFY"; break;
+ case WRITE_BUFFER: what = "WRITE_BUFFER"; break;
+ case READ_BUFFER: what = "READ_BUFFER"; break;
+ case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break;
+ case READ_LONG: what = "READ_LONG"; break;
+ case WRITE_LONG: what = "WRITE_LONG"; break;
+ case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break;
+ case WRITE_SAME: what = "WRITE_SAME"; break;
+ case READ_TOC: what = "READ_TOC"; break;
+ case LOG_SELECT: what = "LOG_SELECT"; break;
+ case LOG_SENSE: what = "LOG_SENSE"; break;
+ case MODE_SELECT_10: what = "MODE_SELECT_10"; break;
+ case MODE_SENSE_10: what = "MODE_SENSE_10"; break;
+ case MOVE_MEDIUM: what = "MOVE_MEDIUM"; break;
+ case READ_12: what = "READ_12"; break;
+ case WRITE_12: what = "WRITE_12"; break;
+ case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break;
+ case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break;
+ case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break;
+ case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break;
+ case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break;
+ case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break;
+ case WRITE_LONG_2: what = "WRITE_LONG_2"; break;
+ default: what = "??"; break;
+ }
+ printk(KERN_DEBUG USB_SCSI "Command %s (%d bytes)\n", what, srb->cmd_len);
+ printk(KERN_DEBUG USB_SCSI " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ srb->cmnd[0], srb->cmnd[1], srb->cmnd[2], srb->cmnd[3], srb->cmnd[4], srb->cmnd[5],
+ srb->cmnd[6], srb->cmnd[7], srb->cmnd[8], srb->cmnd[9]);
+}
--- /dev/null
+0x28, 0x81, 0x14, 0x14, 0x20, 0x01, 0x90, 0x77,
+0x0C, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
*/
sb->s_op = &adfs_sops;
sb->u.adfs_sb.s_root = adfs_inode_generate(dr->root, 0);
- sb->s_root = d_alloc_root(iget(sb, sb->u.adfs_sb.s_root), NULL);
+ sb->s_root = d_alloc_root(iget(sb, sb->u.adfs_sb.s_root));
if (!sb->s_root) {
for (i = 0; i < sb->u.adfs_sb.s_map_size; i++)
return sb;
error_free_bh:
- if (bh)
- brelse(bh);
+ brelse(bh);
error_unlock:
unlock_super(sb);
error_dec_use:
root_inode = iget(s,root_block);
if (!root_inode)
goto out_no_root;
- s->s_root = d_alloc_root(root_inode, NULL);
+ s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_no_root;
s->s_root->d_op = &affs_dentry_operations;
* Get the root inode and dentry, but defer checking for errors.
*/
root_inode = iget(s, AUTOFS_ROOT_INO);
- root = d_alloc_root(root_inode, NULL);
+ root = d_alloc_root(root_inode);
pipe = NULL;
/*
/* The dummy values in this structure are left in there for compatibility
* with old programs that play with the /proc entries.
*/
-union bdflush_param{
+union bdflush_param {
struct {
int nfract; /* Percentage of buffer cache dirty to
activate bdflush */
printk("coda_read_super: rootinode is %ld dev %d\n",
root->i_ino, root->i_dev);
sbi->sbi_root = root;
- sb->s_root = d_alloc_root(root, NULL);
+ sb->s_root = d_alloc_root(root);
unlock_super(sb);
EXIT;
return sb;
sb->s_dev = 0;
coda_cache_clear_all(sb);
sb_info = coda_sbp(sb);
- sb_info->sbi_vcomm->vc_inuse = 0;
+/* sb_info->sbi_vcomm->vc_inuse = 0; You can not do this: psdev_release would see usagecount == 0 and would refuse to decrease MOD_USE_COUNT --pavel */
coda_super_info.sbi_sb = NULL;
printk("Coda: Bye bye.\n");
memset(sb_info, 0, sizeof(* sb_info));
entry->d_inode = inode;
}
-struct dentry * d_alloc_root(struct inode * root_inode, struct dentry *old_root)
+struct dentry * d_alloc_root(struct inode * root_inode)
{
struct dentry *res = NULL;
* Get the root inode and dentry, but defer checking for errors.
*/
root_inode = iget(s, 1); /* inode 1 == root directory */
- root = d_alloc_root(root_inode, NULL);
+ root = d_alloc_root(root_inode);
/*
* Check whether somebody else completed the super block.
}
s->s_op = &efs_superblock_operations;
s->s_dev = dev;
- s->s_root = d_alloc_root(iget(s, EFS_ROOTINODE), NULL);
+ s->s_root = d_alloc_root(iget(s, EFS_ROOTINODE));
unlock_super(s);
if (!(s->s_root)) {
*/
sb->s_dev = dev;
sb->s_op = &ext2_sops;
- sb->s_root = d_alloc_root(iget(sb, EXT2_ROOT_INO), NULL);
+ sb->s_root = d_alloc_root(iget(sb, EXT2_ROOT_INO));
if (!sb->s_root) {
sb->s_dev = 0;
for (i = 0; i < db_count; i++)
}
link = bh->b_data;
}
- UPDATE_ATIME(inode);
base = lookup_dentry(link, base, follow);
if (bh)
brelse(bh);
root_inode->i_ino = MSDOS_ROOT_INO;
fat_read_root(root_inode);
insert_inode_hash(root_inode);
- sb->s_root = d_alloc_root(root_inode, NULL);
+ sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
if(i>=0) {
if (!root_inode)
goto bail_no_root;
- s->s_root = d_alloc_root(root_inode, NULL);
+ s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto bail_no_root;
brelse(bh0);
hpfs_lock_iget(s, 1);
- s->s_root = d_alloc_root(iget(s, s->s_hpfs_root), NULL);
+ s->s_root = d_alloc_root(iget(s, s->s_hpfs_root));
hpfs_unlock_iget(s);
unlock_super(s);
if (!s->s_root || !s->s_root->d_inode) {
if (!inode->i_op)
goto out_bad_root;
/* get the root dentry */
- s->s_root = d_alloc_root(inode, NULL);
+ s->s_root = d_alloc_root(inode);
if (!(s->s_root))
goto out_no_root;
if (!ino || ino > inode->i_sb->u.minix_sb.s_ninodes) {
printk("Bad inode number on dev %s: %d is out of range\n",
kdevname(inode->i_dev), ino);
- return 0;
+ return NULL;
}
block = (2 + inode->i_sb->u.minix_sb.s_imap_blocks +
inode->i_sb->u.minix_sb.s_zmap_blocks +
bh = bread(inode->i_dev, block, BLOCK_SIZE);
if (!bh) {
printk("unable to read i-node block\n");
- return 0;
+ return NULL;
}
raw_inode = ((struct minix_inode *)bh->b_data +
(ino - 1) % MINIX_INODES_PER_BLOCK);
if (!ino || ino > inode->i_sb->u.minix_sb.s_ninodes) {
printk("Bad inode number on dev %s: %d is out of range\n",
kdevname(inode->i_dev), ino);
- return 0;
+ return NULL;
}
block = (2 + inode->i_sb->u.minix_sb.s_imap_blocks +
inode->i_sb->u.minix_sb.s_zmap_blocks +
bh = bread(inode->i_dev, block, BLOCK_SIZE);
if (!bh) {
printk("unable to read i-node block\n");
- return 0;
+ return NULL;
}
raw_inode = ((struct minix2_inode *) bh->b_data +
(ino - 1) % MINIX2_INODES_PER_BLOCK);
if (errmsg)
goto out_bad_root;
- s->s_root = d_alloc_root(root_inode, NULL);
+ s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_iput;
dir->i_size = block*bh->b_size + offset;
mark_inode_dirty(dir);
}
- if (de->inode) {
- if (namecompare(namelen, info->s_namelen, name, de->name)) {
- brelse(bh);
- return -EEXIST;
- }
- } else {
+ if (!de->inode) {
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
for (i = 0; i < info->s_namelen ; i++)
if (!root_inode)
goto out_no_root;
DPRINTK(KERN_DEBUG "ncp_read_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
- server->root_dentry = sb->s_root = d_alloc_root(root_inode, NULL);
+ server->root_dentry = sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
server->root_dentry->d_op = &ncp_dentry_operations;
* Following Linus comments on my original hack, this version
* depends only on the dcache stuff and doesn't touch the inode
* layer (iput() and friends).
+ * 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
*/
+#define NFS_NEED_XDR_TYPES
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/malloc.h>
#include <linux/mm.h>
-#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs.h>
+#include <linux/pagemap.h>
#include <asm/segment.h> /* for fs functions */
#define NFS_PARANOIA 1
/* #define NFS_DEBUG_VERBOSE 1 */
-/*
- * Head for a dircache entry. Currently still very simple; when
- * the cache grows larger, we will need a LRU list.
- */
-struct nfs_dirent {
- dev_t dev; /* device number */
- ino_t ino; /* inode number */
- u32 cookie; /* cookie of first entry */
- unsigned short valid : 1, /* data is valid */
- locked : 1; /* entry locked */
- unsigned int size; /* # of entries */
- unsigned long age; /* last used */
- unsigned long mtime; /* last attr stamp */
- wait_queue_head_t wait;
- __u32 * entry; /* three __u32's per entry */
-};
-
static int nfs_safe_remove(struct dentry *);
static ssize_t nfs_dir_read(struct file *, char *, size_t, loff_t *);
return -EISDIR;
}
-static struct nfs_dirent dircache[NFS_MAX_DIRCACHE];
-
-/*
- * We need to do caching of directory entries to prevent an
- * incredible amount of RPC traffic. Only the most recent open
- * directory is cached. This seems sufficient for most purposes.
- * Technically, we ought to flush the cache on close but this is
- * not a problem in practice.
+/* Each readdir response is composed of entries which look
+ * like the following, as per the NFSv2 RFC:
+ *
+ * __u32 not_end zero if end of response
+ * __u32 file ID opaque ino_t
+ * __u32 namelen size of name string
+ * VAR name string the string, padded to modulo 4 bytes
+ * __u32 cookie opaque ID of next entry
*
- * XXX: Do proper directory caching by stuffing data into the
- * page cache (may require some fiddling for rsize < PAGE_SIZE).
+ * When you hit not_end being zero, the next __u32 is non-zero if
+ * this is the end of the complete set of readdir entires for this
+ * directory. This can be used, for example, to initiate pre-fetch.
+ *
+ * In order to know what to ask the server for, we only need to know
+ * the final cookie of the previous page, and offset zero has cookie
+ * zero, so we cache cookie to page offset translations in chunks.
*/
+#define COOKIES_PER_CHUNK (8 - ((sizeof(void *) / sizeof(__u32))))
+struct nfs_cookie_table {
+ struct nfs_cookie_table *next;
+ __u32 cookies[COOKIES_PER_CHUNK];
+};
+static kmem_cache_t *nfs_cookie_cachep;
-static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+/* Since a cookie of zero is declared special by the NFS
+ * protocol, we easily can tell if a cookie in an existing
+ * table chunk is valid or not.
+ *
+ * NOTE: The cookies are indexed off-by-one because zero
+ * needs no entry.
+ */
+static __inline__ __u32 *find_cookie(struct inode *inode, unsigned long off)
{
- struct dentry *dentry = filp->f_dentry;
- struct inode *inode = dentry->d_inode;
- static DECLARE_WAIT_QUEUE_HEAD(readdir_wait);
- wait_queue_head_t *waitp = NULL;
- struct nfs_dirent *cache, *free;
- unsigned long age, dead;
- u32 cookie;
- int ismydir, result;
- int i, j, index = 0;
- __u32 *entry;
- char *name, *start;
-
- dfprintk(VFS, "NFS: nfs_readdir(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
-
- result = nfs_revalidate_inode(NFS_DSERVER(dentry), dentry);
- if (result < 0)
- goto out;
-
- /*
- * Try to find the entry in the cache
- */
-again:
- if (waitp) {
- interruptible_sleep_on(waitp);
- if (signal_pending(current))
- return -ERESTARTSYS;
- waitp = NULL;
+ static __u32 cookie_zero = 0;
+ struct nfs_cookie_table *p;
+ __u32 *ret;
+
+ if (!off)
+ return &cookie_zero;
+ off -= 1;
+ p = NFS_COOKIES(inode);
+ while(off >= COOKIES_PER_CHUNK && p) {
+ off -= COOKIES_PER_CHUNK;
+ p = p->next;
+ }
+ ret = NULL;
+ if (p) {
+ ret = &p->cookies[off];
+ if (!*ret)
+ ret = NULL;
}
+ return ret;
+}
- cookie = filp->f_pos;
- entry = NULL;
- free = NULL;
- age = ~(unsigned long) 0;
- dead = jiffies - NFS_ATTRTIMEO(inode);
+/* Now we cache directories properly, by stuffing the dirent
+ * data directly in the page cache.
+ *
+ * Inode invalidation due to refresh etc. takes care of
+ * _everything_, no sloppy entry flushing logic, no extraneous
+ * copying, network direct to page cache, the way it was meant
+ * to be.
+ *
+ * NOTE: Dirent information verification is done always by the
+ * page-in of the RPC reply, nowhere else, this simplifies
+ * things substantially.
+ */
+#define NFS_NAMELEN_ALIGN(__len) ((((__len)+3)>>2)<<2)
+static u32 find_midpoint(__u32 *p, u32 doff)
+{
+ u32 walk = doff & PAGE_MASK;
- for (i = 0, cache = dircache; i < NFS_MAX_DIRCACHE; i++, cache++) {
- /*
- dprintk("NFS: dircache[%d] valid %d locked %d\n",
- i, cache->valid, cache->locked);
- */
- ismydir = (cache->dev == inode->i_dev
- && cache->ino == inode->i_ino);
- if (cache->locked) {
- if (!ismydir || cache->cookie != cookie)
- continue;
- dfprintk(DIRCACHE, "NFS: waiting on dircache entry\n");
- waitp = &cache->wait;
- goto again;
- }
+ while(*p++ != 0) {
+ __u32 skip;
- if (ismydir && cache->mtime != inode->i_mtime)
- cache->valid = 0;
+ p++; /* skip fileid */
- if (!cache->valid || cache->age < dead) {
- free = cache;
- age = 0;
- } else if (cache->age < age) {
- free = cache;
- age = cache->age;
- }
+ /* Skip len, name, and cookie. */
+ skip = NFS_NAMELEN_ALIGN(*p++);
+ p += (skip >> 2) + 1;
+ walk += skip + (4 * sizeof(__u32));
+ if (walk >= doff)
+ break;
+ }
+ return walk;
+}
- if (!ismydir || !cache->valid)
- continue;
+static int create_cookie(__u32 cookie, unsigned long off, struct inode *inode)
+{
+ struct nfs_cookie_table **cpp;
- if (cache->cookie == cookie && cache->size > 0) {
- entry = cache->entry + (index = 0);
- cache->locked = 1;
- break;
- }
- for (j = 0; j < cache->size; j++) {
- __u32 *this_ent = cache->entry + j*3;
-
- if (*(this_ent+1) != cookie)
- continue;
- if (j < cache->size - 1) {
- index = j + 1;
- entry = this_ent + 3;
- } else if (*(this_ent+2) & (1 << 15)) {
- /* eof */
- return 0;
+ cpp = (struct nfs_cookie_table **) &NFS_COOKIES(inode);
+ while (off >= COOKIES_PER_CHUNK && *cpp) {
+ off -= COOKIES_PER_CHUNK;
+ cpp = &(*cpp)->next;
+ }
+ if (*cpp) {
+ (*cpp)->cookies[off] = cookie;
+ } else {
+ struct nfs_cookie_table *new;
+ int i;
+
+ new = kmem_cache_alloc(nfs_cookie_cachep, SLAB_ATOMIC);
+ if(!new)
+ return -1;
+ *cpp = new;
+ new->next = NULL;
+ for(i = 0; i < COOKIES_PER_CHUNK; i++) {
+ if (i == off) {
+ new->cookies[i] = cookie;
+ } else {
+ new->cookies[i] = 0;
}
- break;
- }
- if (entry) {
- dfprintk(DIRCACHE, "NFS: found dircache entry %d\n",
- (int)(cache - dircache));
- cache->locked = 1;
- break;
}
}
+ return 0;
+}
- /*
- * Okay, entry not present in cache, or locked and inaccessible.
- * Set up the cache entry and attempt a READDIR call.
- */
- if (entry == NULL) {
- if ((cache = free) == NULL) {
- dfprintk(DIRCACHE, "NFS: dircache contention\n");
- waitp = &readdir_wait;
- goto again;
- }
- dfprintk(DIRCACHE, "NFS: using free dircache entry %d\n",
- (int)(free - dircache));
- cache->cookie = cookie;
- cache->locked = 1;
- cache->valid = 0;
- cache->dev = inode->i_dev;
- cache->ino = inode->i_ino;
- init_waitqueue_head(&cache->wait);
- if (!cache->entry) {
- result = -ENOMEM;
- cache->entry = (__u32 *) get_free_page(GFP_KERNEL);
- if (!cache->entry)
- goto done;
+static struct page *try_to_get_dirent_page(struct file *, unsigned long, int);
+
+/* Recover from a revalidation flush. The case here is that
+ * the inode for the directory got invalidated somehow, and
+ * all of our cached information is lost. In order to get
+ * a correct cookie for the current readdir request from the
+ * user, we must (re-)fetch older readdir page cache entries.
+ */
+static int refetch_to_readdir_off(struct file *file, struct inode *inode, u32 off)
+{
+ u32 cur_off, goal_off = off & PAGE_MASK;
+
+again:
+ cur_off = 0;
+ while (cur_off < goal_off) {
+ struct page *page;
+
+ page = find_page(inode, cur_off);
+ if (page) {
+ if (PageLocked(page))
+ __wait_on_page(page);
+ if (!PageUptodate(page))
+ return -1;
+ } else {
+ page = try_to_get_dirent_page(file, cur_off, 0);
+ if (!page) {
+ if (!cur_off)
+ return -1;
+
+ /* Someone touched the dir on us. */
+ goto again;
+ }
+ page_cache_release(page);
}
- result = nfs_proc_readdir(NFS_SERVER(inode), NFS_FH(dentry),
- cookie, PAGE_SIZE, cache->entry);
- if (result <= 0)
- goto done;
- cache->size = result;
- cache->valid = 1;
- entry = cache->entry + (index = 0);
+ cur_off += PAGE_SIZE;
}
- cache->mtime = inode->i_mtime;
- cache->age = jiffies;
- /*
- * Yowza! We have a cache entry...
- */
- start = (char *) cache->entry;
- while (index < cache->size) {
- __u32 fileid = *entry++;
- __u32 nextpos = *entry++; /* cookie */
- __u32 length = *entry++;
+ return 0;
+}
- /*
- * Unpack the eof flag, offset, and length
- */
- result = length & (1 << 15); /* eof flag */
- name = start + ((length >> 16) & 0xFFFF);
- length &= 0x7FFF;
- /*
- dprintk("NFS: filldir(%p, %.*s, %d, %d, %x, eof %x)\n", entry,
- (int) length, name, length,
- (unsigned int) filp->f_pos,
- fileid, result);
- */
+static struct page *try_to_get_dirent_page(struct file *file, unsigned long offset, int refetch_ok)
+{
+ struct nfs_readdirargs rd_args;
+ struct nfs_readdirres rd_res;
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct page *page, **hash;
+ unsigned long page_cache;
+ __u32 *cookiep;
- if (filldir(dirent, name, length, cookie, fileid) < 0)
- break;
- cookie = nextpos;
- index++;
- }
- filp->f_pos = cookie;
- result = 0;
+ page = NULL;
+ page_cache = page_cache_alloc();
+ if (!page_cache)
+ goto out;
- /* XXX: May want to kick async readdir-ahead here. Not too hard
- * to do. */
+ while ((cookiep = find_cookie(inode, offset)) == NULL) {
+ if (!refetch_ok ||
+ refetch_to_readdir_off(file, inode, file->f_pos))
+ goto out;
+ }
-done:
- dfprintk(DIRCACHE, "NFS: nfs_readdir complete\n");
- cache->locked = 0;
- wake_up(&cache->wait);
- wake_up(&readdir_wait);
+ hash = page_hash(inode, offset);
+ page = __find_page(inode, offset, *hash);
+ if (page) {
+ page_cache_free(page_cache);
+ goto out;
+ }
+ page = page_cache_entry(page_cache);
+ atomic_inc(&page->count);
+ page->flags = ((page->flags &
+ ~((1 << PG_uptodate) | (1 << PG_error))) |
+ ((1 << PG_referenced) | (1 << PG_locked)));
+ page->offset = offset;
+ add_page_to_inode_queue(inode, page);
+ __add_page_to_hash_queue(page, hash);
+
+ rd_args.fh = NFS_FH(dentry);
+ rd_res.buffer = (char *)page_cache;
+ rd_res.bufsiz = PAGE_CACHE_SIZE;
+ rd_res.cookie = *cookiep;
+ do {
+ rd_args.buffer = rd_res.buffer;
+ rd_args.bufsiz = rd_res.bufsiz;
+ rd_args.cookie = rd_res.cookie;
+ if (rpc_call(NFS_CLIENT(inode),
+ NFSPROC_READDIR, &rd_args, &rd_res, 0) < 0)
+ goto error;
+ } while(rd_res.bufsiz > 0);
+
+ if (rd_res.bufsiz < 0)
+ NFS_DIREOF(inode) =
+ (offset << PAGE_CACHE_SHIFT) + -(rd_res.bufsiz);
+ else if (create_cookie(rd_res.cookie, offset, inode))
+ goto error;
+
+ set_bit(PG_uptodate, &page->flags);
+unlock_out:
+ clear_bit(PG_locked, &page->flags);
+ wake_up(&page->wait);
out:
- return result;
+ return page;
+
+error:
+ set_bit(PG_error, &page->flags);
+ goto unlock_out;
}
-/*
- * Invalidate dircache entries for an inode.
- */
-void
-nfs_invalidate_dircache(struct inode *inode)
+static __inline__ u32 nfs_do_filldir(__u32 *p, u32 doff,
+ void *dirent, filldir_t filldir)
{
- struct nfs_dirent *cache = dircache;
- dev_t dev = inode->i_dev;
- ino_t ino = inode->i_ino;
- int i;
-
- dfprintk(DIRCACHE, "NFS: invalidate dircache for %x/%ld\n", dev, (long)ino);
- for (i = NFS_MAX_DIRCACHE; i--; cache++) {
- if (cache->ino != ino)
- continue;
- if (cache->dev != dev)
- continue;
- if (cache->locked) {
- printk("NFS: cache locked for %s/%ld\n",
- kdevname(dev), (long) ino);
- continue;
- }
- cache->valid = 0; /* brute force */
+ u32 end;
+
+ if (doff & ~PAGE_CACHE_MASK) {
+ doff = find_midpoint(p, doff);
+ p += (doff & ~PAGE_CACHE_MASK) >> 2;
}
+ while((end = *p++) != 0) {
+ __u32 fileid = *p++;
+ __u32 len = *p++;
+ __u32 skip = NFS_NAMELEN_ALIGN(len);
+ char *name = (char *) p;
+
+ /* Skip the cookie. */
+ p = ((__u32 *) (name + skip)) + 1;
+ if (filldir(dirent, name, len, doff, fileid) < 0)
+ goto out;
+ doff += (skip + (4 * sizeof(__u32)));
+ }
+ if (!*p)
+ doff = PAGE_CACHE_ALIGN(doff);
+out:
+ return doff;
}
-/*
- * Invalidate the dircache for a super block (or all caches),
- * and release the cache memory.
+/* The file offset position is represented in pure bytes, to
+ * make the page cache interface straight forward.
+ *
+ * However, some way is needed to make the connection between the
+ * opaque NFS directory entry cookies and our offsets, so a per-inode
+ * cookie cache table is used.
*/
-void
-nfs_invalidate_dircache_sb(struct super_block *sb)
+static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
- struct nfs_dirent *cache = dircache;
- int i;
-
- for (i = NFS_MAX_DIRCACHE; i--; cache++) {
- if (sb && sb->s_dev != cache->dev)
- continue;
- if (cache->locked) {
- printk("NFS: cache locked at umount %s\n",
- (cache->entry ? "(lost a page!)" : ""));
- continue;
- }
- cache->valid = 0; /* brute force */
- if (cache->entry) {
- free_page((unsigned long) cache->entry);
- cache->entry = NULL;
- }
- }
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct page *page, **hash;
+ unsigned long offset;
+ int res;
+
+ res = nfs_revalidate_inode(NFS_DSERVER(dentry), dentry);
+ if (res < 0)
+ return res;
+
+ if (NFS_DIREOF(inode) && filp->f_pos >= NFS_DIREOF(inode))
+ return 0;
+
+ offset = filp->f_pos >> PAGE_CACHE_SHIFT;
+ hash = page_hash(inode, offset);
+ page = __find_page(inode, offset, *hash);
+ if (!page)
+ goto no_dirent_page;
+ if (PageLocked(page))
+ goto dirent_locked_wait;
+ if (!PageUptodate(page))
+ goto dirent_read_error;
+success:
+ filp->f_pos = nfs_do_filldir((__u32 *) page_address(page),
+ filp->f_pos, dirent, filldir);
+ page_cache_release(page);
+ return 0;
+
+no_dirent_page:
+ page = try_to_get_dirent_page(filp, offset, 1);
+ if (!page)
+ goto no_page;
+
+dirent_locked_wait:
+ wait_on_page(page);
+ if (PageUptodate(page))
+ goto success;
+dirent_read_error:
+ page_cache_release(page);
+no_page:
+ return -EIO;
}
-/*
- * Free directory cache memory
- * Called from cleanup_module
+/* Invalidate directory cookie caches and EOF marker
+ * for an inode.
*/
-void
-nfs_free_dircache(void)
+__inline__ void nfs_invalidate_dircache(struct inode *inode)
{
- dfprintk(DIRCACHE, "NFS: freeing dircache\n");
- nfs_invalidate_dircache_sb(NULL);
+ struct nfs_cookie_table *p = NFS_COOKIES(inode);
+
+ if (p != NULL) {
+ NFS_COOKIES(inode) = NULL;
+ do { struct nfs_cookie_table *next = p->next;
+ kmem_cache_free(nfs_cookie_cachep, p);
+ p = next;
+ } while (p != NULL);
+ }
+ NFS_DIREOF(inode) = 0;
}
/*
out_valid:
return 1;
out_bad:
- if (dentry->d_parent->d_inode)
+ /* Purge readdir caches. */
+ if (dentry->d_parent->d_inode) {
+ invalidate_inode_pages(dentry->d_parent->d_inode);
nfs_invalidate_dircache(dentry->d_parent->d_inode);
- if (inode && S_ISDIR(inode->i_mode))
+ }
+ if (inode && S_ISDIR(inode->i_mode)) {
+ invalidate_inode_pages(inode);
nfs_invalidate_dircache(inode);
+ }
return 0;
}
#endif
}
+static kmem_cache_t *nfs_fh_cachep;
+
+__inline__ struct nfs_fh *nfs_fh_alloc(void)
+{
+ return kmem_cache_alloc(nfs_fh_cachep, SLAB_KERNEL);
+}
+
+__inline__ void nfs_fh_free(struct nfs_fh *p)
+{
+ kmem_cache_free(nfs_fh_cachep, p);
+}
+
/*
* Called when the dentry is being freed to release private memory.
*/
static void nfs_dentry_release(struct dentry *dentry)
{
if (dentry->d_fsdata)
- kfree(dentry->d_fsdata);
+ nfs_fh_free(dentry->d_fsdata);
}
struct dentry_operations nfs_dentry_operations = {
error = -ENOMEM;
if (!dentry->d_fsdata) {
- dentry->d_fsdata = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+ dentry->d_fsdata = nfs_fh_alloc();
if (!dentry->d_fsdata)
goto out;
}
/*
* Invalidate the dir cache before the operation to avoid a race.
*/
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_create(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name, &sattr, &fhandle, &fattr);
sattr.size = rdev; /* get out your barf bag */
sattr.atime.seconds = sattr.mtime.seconds = (unsigned) -1;
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_create(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name, &sattr, &fhandle, &fattr);
* depending on potentially bogus information.
*/
d_drop(dentry);
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_mkdir(NFS_DSERVER(dentry), NFS_FH(dentry->d_parent),
dentry->d_name.name, &sattr, &fhandle, &fattr);
dentry->d_inode->i_count, dentry->d_inode->i_nlink);
#endif
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_rmdir(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name);
goto out;
} while(sdentry->d_inode != NULL); /* need negative lookup */
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_rename(NFS_SERVER(dir),
NFS_FH(dentry->d_parent), dentry->d_name.name,
inode->i_nlink --;
d_delete(dentry);
}
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_remove(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name);
* can't instantiate the new inode.
*/
d_drop(dentry);
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_symlink(NFS_SERVER(dir), NFS_FH(dentry->d_parent),
dentry->d_name.name, symname, &sattr);
* we can't use the existing dentry.
*/
d_drop(dentry);
+ invalidate_inode_pages(dir);
nfs_invalidate_dircache(dir);
error = nfs_proc_link(NFS_DSERVER(old_dentry), NFS_FH(old_dentry),
NFS_FH(dentry->d_parent), dentry->d_name.name);
d_delete(new_dentry);
}
+ invalidate_inode_pages(new_dir);
nfs_invalidate_dircache(new_dir);
+ invalidate_inode_pages(old_dir);
nfs_invalidate_dircache(old_dir);
error = nfs_proc_rename(NFS_DSERVER(old_dentry),
NFS_FH(old_dentry->d_parent), old_dentry->d_name.name,
return error;
}
+int nfs_init_fhcache(void)
+{
+ nfs_fh_cachep = kmem_cache_create("nfs_fh",
+ sizeof(struct nfs_fh),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (nfs_fh_cachep == NULL)
+ return -ENOMEM;
+
+ nfs_cookie_cachep = kmem_cache_create("nfs_dcookie",
+ sizeof(struct nfs_cookie_table),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (nfs_cookie_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
/*
* Local variables:
* version-control: t
if (!(server->flags & NFS_MOUNT_NONLM))
lockd_down(); /* release rpc.lockd */
rpciod_down(); /* release rpciod */
- /*
- * Invalidate the dircache for this superblock.
- */
- nfs_invalidate_dircache_sb(sb);
kfree(server->hostname);
return bsize;
}
+extern struct nfs_fh *nfs_fh_alloc(void);
+extern void nfs_fh_free(struct nfs_fh *p);
+
/*
* The way this works is that the mount process passes a structure
* in the data argument which contains the server's IP address
* Keep the super block locked while we try to get
* the root fh attributes.
*/
- root_fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+ root_fh = nfs_fh_alloc();
if (!root_fh)
goto out_no_fh;
*root_fh = data->root;
root_inode = __nfs_fhget(sb, &fattr);
if (!root_inode)
goto out_no_root;
- sb->s_root = d_alloc_root(root_inode, NULL);
+ sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
sb->s_root->d_op = &nfs_dentry_operations;
out_no_fattr:
printk("nfs_read_super: get root fattr failed\n");
out_free_fh:
- kfree(root_fh);
+ nfs_fh_free(root_fh);
out_no_fh:
rpciod_down();
goto out_shutdown;
NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode);
NFS_CACHEINV(inode);
+ invalidate_inode_pages(inode);
if (S_ISDIR(inode->i_mode))
nfs_invalidate_dircache(inode);
- else
- invalidate_inode_pages(inode);
}
/*
inode->i_size = fattr->size;
inode->i_mtime = fattr->mtime.seconds;
NFS_OLDMTIME(inode) = fattr->mtime.seconds;
+ NFS_COOKIES(inode) = NULL;
+ NFS_WRITEBACK(inode) = NULL;
}
nfs_refresh_inode(inode, fattr);
}
NULL
};
+extern int nfs_init_fhcache(void);
+extern int nfs_init_wreqcache(void);
+
/*
* Initialize NFS
*/
int
init_nfs_fs(void)
{
+ int err;
+
+ err = nfs_init_fhcache();
+ if (err)
+ return err;
+
+ err = nfs_init_wreqcache();
+ if (err)
+ return err;
+
#ifdef CONFIG_PROC_FS
rpc_register_sysctl();
rpc_proc_init();
rpc_proc_unregister("nfs");
#endif
unregister_filesystem(&nfs_fs_type);
- nfs_free_dircache();
}
#endif
#define NFS_linkargs_sz NFS_fhandle_sz+NFS_diropargs_sz
#define NFS_symlinkargs_sz NFS_diropargs_sz+NFS_path_sz+NFS_sattr_sz
#define NFS_readdirargs_sz NFS_fhandle_sz+2
+#define NFS_readlinkargs_sz NFS_fhandle_sz
#define NFS_dec_void_sz 0
#define NFS_attrstat_sz 1+NFS_fattr_sz
#define NFS_diropres_sz 1+NFS_fhandle_sz+NFS_fattr_sz
-#define NFS_readlinkres_sz 1+NFS_path_sz
+#define NFS_readlinkres_sz 1
#define NFS_readres_sz 1+NFS_fattr_sz+1
#define NFS_stat_sz 1
#define NFS_readdirres_sz 1
*p++ = htonl(args->count);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-#if 1
/* set up reply iovec */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
buflen = req->rq_rvec[0].iov_len;
req->rq_rvec[2].iov_len = buflen - replen;
req->rq_rlen = args->count + buflen;
req->rq_rnr = 3;
-#else
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
- req->rq_rvec[0].iov_len = replen;
-#endif
return 0;
}
{
struct rpc_task *task = req->rq_task;
struct rpc_auth *auth = task->tk_auth;
- u32 bufsiz = args->bufsiz;
+ int bufsiz = args->bufsiz;
int replen;
- /*
- * Some servers (e.g. HP OS 9.5) seem to expect the buffer size
+ p = xdr_encode_fhandle(p, args->fh);
+ *p++ = htonl(args->cookie);
+
+ /* Some servers (e.g. HP OS 9.5) seem to expect the buffer size
* to be in longwords ... check whether to convert the size.
*/
if (task->tk_client->cl_flags & NFS_CLNTF_BUFSIZE)
- bufsiz = bufsiz >> 2;
+ *p++ = htonl(bufsiz >> 2);
+ else
+ *p++ = htonl(bufsiz);
- p = xdr_encode_fhandle(p, args->fh);
- *p++ = htonl(args->cookie);
- *p++ = htonl(bufsiz); /* see above */
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
/* set up reply iovec */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
- /*
- dprintk("RPC: readdirargs: slack is 4 * (%d + %d + %d) = %d\n",
- RPC_REPHDRSIZE, auth->au_rslack, NFS_readdirres_sz, replen);
- */
req->rq_rvec[0].iov_len = replen;
req->rq_rvec[1].iov_base = args->buffer;
- req->rq_rvec[1].iov_len = args->bufsiz;
- req->rq_rlen = replen + args->bufsiz;
+ req->rq_rvec[1].iov_len = bufsiz;
+ req->rq_rlen = replen + bufsiz;
req->rq_rnr = 2;
- /*
- dprintk("RPC: readdirargs set up reply vec:\n");
- dprintk(" rvec[0] = %p/%d\n",
- req->rq_rvec[0].iov_base,
- req->rq_rvec[0].iov_len);
- dprintk(" rvec[1] = %p/%d\n",
- req->rq_rvec[1].iov_base,
- req->rq_rvec[1].iov_len);
- */
-
return 0;
}
/*
- * Decode the result of a readdir call. We decode the result in place
- * to avoid a malloc of NFS_MAXNAMLEN+1 for each file name.
- * After decoding, the layout in memory looks like this:
- * entry1 entry2 ... entryN <space> stringN ... string2 string1
- * Each entry consists of three __u32 values, the same space as NFS uses.
- * Note that the strings are not null-terminated so that the entire number
- * of entries returned by the server should fit into the buffer.
+ * Decode the result of a readdir call.
*/
+#define NFS_DIRENT_MAXLEN (5 * sizeof(u32) + (NFS_MAXNAMLEN + 1))
static int
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs_readdirres *res)
{
struct iovec *iov = req->rq_rvec;
int status, nr;
- char *string, *start;
- u32 *end, *entry, len, fileid, cookie;
+ u32 *end;
+ u32 last_cookie = res->cookie;
- if ((status = ntohl(*p++)))
- return -nfs_stat_to_errno(status);
+ status = ntohl(*p++);
+ if (status) {
+ nr = -nfs_stat_to_errno(status);
+ goto error;
+ }
if ((void *) p != ((u8 *) iov->iov_base+iov->iov_len)) {
/* Unexpected reply header size. Punt. */
printk("NFS: Odd RPC header size in readdirres reply\n");
- return -errno_NFSERR_IO;
+ nr = -errno_NFSERR_IO;
+ goto error;
}
- /* Get start and end address of XDR data */
+ /* Get start and end address of XDR readdir response. */
p = (u32 *) iov[1].iov_base;
end = (u32 *) ((u8 *) p + iov[1].iov_len);
-
- /* Get start and end of dirent buffer */
- entry = (u32 *) res->buffer;
- start = (char *) res->buffer;
- string = (char *) res->buffer + res->bufsiz;
for (nr = 0; *p++; nr++) {
- fileid = ntohl(*p++);
+ __u32 len;
+
+ /* Convert fileid. */
+ *p = ntohl(*p);
+ p++;
+
+ /* Convert and capture len */
+ len = *p = ntohl(*p);
+ p++;
- len = ntohl(*p++);
- /*
- * Check whether the server has exceeded our reply buffer,
- * and set a flag to convert the size to longwords.
- */
if ((p + QUADLEN(len) + 3) > end) {
struct rpc_clnt *clnt = req->rq_task->tk_client;
- printk(KERN_WARNING
- "NFS: server %s, readdir reply truncated\n",
- clnt->cl_server);
- printk(KERN_WARNING "NFS: nr=%d, slots=%d, len=%d\n",
- nr, (end - p), len);
+
clnt->cl_flags |= NFS_CLNTF_BUFSIZE;
+ p -= 2;
+ p[-1] = 0;
+ p[0] = 0;
break;
}
if (len > NFS_MAXNAMLEN) {
- printk("NFS: giant filename in readdir (len %x)!\n",
- len);
- return -errno_NFSERR_IO;
+ nr = -errno_NFSERR_IO;
+ goto error;
}
- string -= len;
- if ((void *) (entry+3) > (void *) string) {
- /*
- * This error is impossible as long as the temp
- * buffer is no larger than the user buffer. The
- * current packing algorithm uses the same amount
- * of space in the user buffer as in the XDR data,
- * so it's guaranteed to fit.
- */
- printk("NFS: incorrect buffer size in %s!\n",
- __FUNCTION__);
- break;
- }
-
- memmove(string, p, len);
p += QUADLEN(len);
- cookie = ntohl(*p++);
- /*
- * To make everything fit, we encode the length, offset,
- * and eof flag into 32 bits. This works for filenames
- * up to 32K and PAGE_SIZE up to 64K.
- */
- status = !p[0] && p[1] ? (1 << 15) : 0; /* eof flag */
- *entry++ = fileid;
- *entry++ = cookie;
- *entry++ = ((string - start) << 16) | status | (len & 0x7FFF);
+
+ /* Convert and capture cookie. */
+ last_cookie = *p = ntohl(*p);
+ p++;
}
-#ifdef NFS_PARANOIA
-printk("nfs_xdr_readdirres: %d entries, ent sp=%d, str sp=%d\n",
-nr, ((char *) entry - start), (start + res->bufsiz - string));
-#endif
+ p -= 1;
+ status = ((end - p) << 2);
+ if (!p[1] && (status >= NFS_DIRENT_MAXLEN)) {
+ status = ((__u8 *)p - (__u8 *)iov[1].iov_base);
+ res->buffer += status;
+ res->bufsiz -= status;
+ } else if (p[1]) {
+ status = (int)((long)p & ~PAGE_CACHE_MASK);
+ res->bufsiz = -status;
+ } else {
+ res->bufsiz = 0;
+ }
+ res->cookie = last_cookie;
+ return nr;
+
+error:
+ res->bufsiz = 0;
return nr;
}
return 0;
}
+/*
+ * Encode arguments to readlink call
+ */
+static int nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args)
+{
+ struct rpc_task *task = req->rq_task;
+ struct rpc_auth *auth = task->tk_auth;
+ int bufsiz = NFS_MAXPATHLEN;
+ int replen;
+
+ p = xdr_encode_fhandle(p, args->fh);
+ req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
+ req->rq_rvec[0].iov_len = replen;
+ req->rq_rvec[1].iov_base = (void *) args->buffer;
+ req->rq_rvec[1].iov_len = bufsiz;
+ req->rq_rlen = replen + bufsiz;
+ req->rq_rnr = 2;
+
+ return 0;
+}
+
/*
* Decode READLINK reply
*/
static int
-nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_readlinkres *res)
+nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
{
- int status;
+ struct iovec *iov = req->rq_rvec;
+ int status, len;
+ char *name;
- if ((status = ntohl(*p++)))
+ /* Verify OK status. */
+ if ((status = ntohl(*p++)) != 0)
return -nfs_stat_to_errno(status);
- xdr_decode_string2(p, res->string, res->lenp, res->maxlen);
- /* Caller takes over the buffer here to avoid extra copy */
- res->buffer = req->rq_task->tk_buffer;
- req->rq_task->tk_buffer = NULL;
+ /* Verify OK response length. */
+ if ((__u8 *)p != ((u8 *) iov->iov_base + iov->iov_len))
+ return -errno_NFSERR_IO;
+
+ /* Convert and verify that string length is in range. */
+ p = iov[1].iov_base;
+ len = *p = ntohl(*p);
+ p++;
+ if (len > iov[1].iov_len)
+ return -errno_NFSERR_IO;
+
+ /* NULL terminate the string we got. */
+ name = (char *) p;
+ name[len] = 0;
+
return 0;
}
PROC(setattr, sattrargs, attrstat),
PROC(root, enc_void, dec_void),
PROC(lookup, diropargs, diropres),
- PROC(readlink, fhandle, readlinkres),
+ PROC(readlink, readlinkargs, readlinkres),
PROC(read, readargs, readres),
PROC(writecache, enc_void, dec_void),
PROC(write, writeargs, attrstat),
return status;
}
-int
-nfs_proc_readlink(struct nfs_server *server, struct nfs_fh *fhandle,
- void **p0, char **string, unsigned int *len,
- unsigned int maxlen)
-{
- struct nfs_readlinkres res = { string, len, maxlen, NULL };
- int status;
-
- dprintk("NFS call readlink\n");
- status = rpc_call(server->client, NFSPROC_READLINK, fhandle, &res, 0);
- dprintk("NFS reply readlink: %d\n", status);
- if (!status)
- *p0 = res.buffer;
- else if (res.buffer)
- kfree(res.buffer);
- return status;
-}
-
int
nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle, int swap,
unsigned long offset, unsigned int count,
return status;
}
-/*
- * The READDIR implementation is somewhat hackish - we pass a temporary
- * buffer to the encode function, which installs it in the receive
- * iovec. The dirent buffer itself is passed in the result struct.
- */
-int
-nfs_proc_readdir(struct nfs_server *server, struct nfs_fh *fhandle,
- u32 cookie, unsigned int size, __u32 *entry)
-{
- struct nfs_readdirargs arg;
- struct nfs_readdirres res;
- void * buffer;
- unsigned int buf_size = PAGE_SIZE;
- int status;
-
- /* First get a temp buffer for the readdir reply */
- /* N.B. does this really need to be cleared? */
- status = -ENOMEM;
- buffer = (void *) get_free_page(GFP_KERNEL);
- if (!buffer)
- goto out;
-
- /*
- * Calculate the effective size the buffer. To make sure
- * that the returned data will fit into the user's buffer,
- * we decrease the buffer size as necessary.
- *
- * Note: NFS returns three __u32 values for each entry,
- * and we assume that the data is packed into the user
- * buffer with the same efficiency.
- */
- if (size < buf_size)
- buf_size = size;
- if (server->rsize < buf_size)
- buf_size = server->rsize;
-#if 0
-printk("nfs_proc_readdir: user size=%d, rsize=%d, buf_size=%d\n",
-size, server->rsize, buf_size);
-#endif
-
- arg.fh = fhandle;
- arg.cookie = cookie;
- arg.buffer = buffer;
- arg.bufsiz = buf_size;
- res.buffer = entry;
- res.bufsiz = size;
-
- dprintk("NFS call readdir %d\n", cookie);
- status = rpc_call(server->client, NFSPROC_READDIR, &arg, &res, 0);
- dprintk("NFS reply readdir: %d\n", status);
- free_page((unsigned long) buffer);
-out:
- return status;
-}
-
int
nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
*
* Optimization changes Copyright (C) 1994 Florian La Roche
*
+ * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
+ *
* nfs symlink handling code
*/
+#define NFS_NEED_XDR_TYPES
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
+#include <linux/nfs.h>
+#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/malloc.h>
NULL /* permission */
};
-static int nfs_readlink(struct dentry *dentry, char *buffer, int buflen)
+/* Symlink caching in the page cache is even simpler
+ * and more straightforward than readdir caching.
+ */
+static struct page *try_to_get_symlink_page(struct dentry *dentry, struct inode *inode)
{
- int error;
- unsigned int len;
- char *res;
- void *mem;
-
- dfprintk(VFS, "nfs: readlink(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
-
- error = nfs_proc_readlink(NFS_DSERVER(dentry), NFS_FH(dentry),
- &mem, &res, &len, NFS_MAXPATHLEN);
- if (! error) {
- if (len > buflen)
- len = buflen;
- copy_to_user(buffer, res, len);
- error = len;
- kfree(mem);
+ struct nfs_readlinkargs rl_args;
+ struct page *page, **hash;
+ unsigned long page_cache;
+
+ page = NULL;
+ page_cache = page_cache_alloc();
+ if (!page_cache)
+ goto out;
+
+ hash = page_hash(inode, 0);
+ page = __find_page(inode, 0, *hash);
+ if (page) {
+ page_cache_free(page_cache);
+ goto out;
}
- return error;
+
+ page = page_cache_entry(page_cache);
+ atomic_inc(&page->count);
+ page->flags = ((page->flags &
+ ~((1 << PG_uptodate) | (1 << PG_error))) |
+ ((1 << PG_referenced) | (1 << PG_locked)));
+ page->offset = 0;
+ add_page_to_inode_queue(inode, page);
+ __add_page_to_hash_queue(page, hash);
+
+ /* We place the length at the beginning of the page,
+ * in host byte order, followed by the string. The
+ * XDR response verification will NULL terminate it.
+ */
+ rl_args.fh = NFS_FH(dentry);
+ rl_args.buffer = (const void *)page_cache;
+ if (rpc_call(NFS_CLIENT(inode), NFSPROC_READLINK,
+ &rl_args, NULL, 0) < 0)
+ goto error;
+ set_bit(PG_uptodate, &page->flags);
+unlock_out:
+ clear_bit(PG_locked, &page->flags);
+ wake_up(&page->wait);
+out:
+ return page;
+
+error:
+ set_bit(PG_error, &page->flags);
+ goto unlock_out;
+}
+
+static int nfs_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ struct page *page, **hash;
+ u32 *p, len;
+
+ /* Caller revalidated the directory inode already. */
+ hash = page_hash(inode, 0);
+ page = __find_page(inode, 0, *hash);
+ if (!page)
+ goto no_readlink_page;
+ if (PageLocked(page))
+ goto readlink_locked_wait;
+ if (!PageUptodate(page))
+ goto readlink_read_error;
+success:
+ p = (u32 *) page_address(page);
+ len = *p++;
+ if (len > buflen)
+ len = buflen;
+ copy_to_user(buffer, p, len);
+ page_cache_release(page);
+ return len;
+
+no_readlink_page:
+ page = try_to_get_symlink_page(dentry, inode);
+ if (!page)
+ goto no_page;
+readlink_locked_wait:
+ wait_on_page(page);
+ if (PageUptodate(page))
+ goto success;
+readlink_read_error:
+ page_cache_release(page);
+no_page:
+ return -EIO;
}
static struct dentry *
-nfs_follow_link(struct dentry * dentry, struct dentry *base, unsigned int follow)
+nfs_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
{
- int error;
- unsigned int len;
- char *res;
- void *mem;
- char *path;
struct dentry *result;
+ struct inode *inode = dentry->d_inode;
+ struct page *page, **hash;
+ u32 *p;
- dfprintk(VFS, "nfs: follow_link(%s/%s)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
-
- error = nfs_proc_readlink(NFS_DSERVER(dentry), NFS_FH(dentry),
- &mem, &res, &len, NFS_MAXPATHLEN);
- result = ERR_PTR(error);
- if (error)
- goto out_dput;
-
- result = ERR_PTR(-ENOMEM);
- path = kmalloc(len + 1, GFP_KERNEL);
- if (!path)
- goto out_mem;
- memcpy(path, res, len);
- path[len] = 0;
- kfree(mem);
-
- result = lookup_dentry(path, base, follow);
- kfree(path);
-out:
+ /* Caller revalidated the directory inode already. */
+ hash = page_hash(inode, 0);
+ page = __find_page(inode, 0, *hash);
+ if (!page)
+ goto no_followlink_page;
+ if (PageLocked(page))
+ goto followlink_locked_wait;
+ if (!PageUptodate(page))
+ goto followlink_read_error;
+success:
+ p = (u32 *) page_address(page);
+ result = lookup_dentry((char *) (p + 1), base, follow);
+ page_cache_release(page);
return result;
-out_mem:
- kfree(mem);
-out_dput:
- dput(base);
- goto out;
+no_followlink_page:
+ page = try_to_get_symlink_page(dentry, inode);
+ if (!page)
+ goto no_page;
+followlink_locked_wait:
+ wait_on_page(page);
+ if (PageUptodate(page))
+ goto success;
+followlink_read_error:
+ page_cache_release(page);
+no_page:
+ return ERR_PTR(-EIO);
}
return 1;
}
+static kmem_cache_t *nfs_wreq_cachep;
+
+int nfs_init_wreqcache(void)
+{
+ nfs_wreq_cachep = kmem_cache_create("nfs_wreq",
+ sizeof(struct nfs_wreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (nfs_wreq_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
static inline void
free_write_request(struct nfs_wreq * req)
{
if (!--req->wb_count)
- kfree(req);
+ kmem_cache_free(nfs_wreq_cachep, req);
}
/*
page->offset + offset, bytes);
/* FIXME: Enforce hard limit on number of concurrent writes? */
- wreq = (struct nfs_wreq *) kmalloc(sizeof(*wreq), GFP_KERNEL);
+ wreq = kmem_cache_alloc(nfs_wreq_cachep, SLAB_KERNEL);
if (!wreq)
goto out_fail;
memset(wreq, 0, sizeof(*wreq));
out_req:
rpc_release_task(task);
- kfree(wreq);
+ kmem_cache_free(nfs_wreq_cachep, wreq);
out_fail:
return NULL;
}
}
} while (NULL != (exp = exp->ex_next));
} while (nfsd_parentdev(&xdev));
- if (xdentry == xdentry->d_parent) {
+ if (IS_ROOT(xdentry))
break;
- }
} while ((xdentry = xdentry->d_parent));
exp = NULL;
out:
#endif
goto out;
}
- if (ndentry == ndentry->d_parent)
+ if (IS_ROOT(ndentry))
break;
}
} while (NULL != (exp = exp->ex_next));
dir = iget(sb, dirino);
if (!dir)
goto out_root;
- dentry = d_alloc_root(dir, NULL);
+ dentry = d_alloc_root(dir);
if (!dentry)
goto out_iput;
* Add the parent to the dir cache before releasing the dentry,
* and check whether to save a copy of the dentry's path.
*/
- if (dentry != dentry->d_parent) {
+ if (!IS_ROOT(dentry)) {
struct dentry *parent = dget(dentry->d_parent);
if (add_to_fhcache(parent, NFSD_DIR_CACHE))
nfsd_nr_verified++;
error = nfserr_stale;
dprintk("fh_verify: no root_squashed access.\n");
}
- } while ((tdentry != tdentry->d_parent));
+ } while (!IS_ROOT(tdentry));
if (exp->ex_dentry != tdentry) {
error = nfserr_stale;
printk("nfsd Security: %s/%s bad export.\n",
ntfs_debug(DEBUG_OTHER, "Getting RootDir\n");
/* Get the root directory */
- if(!(sb->s_root=d_alloc_root(iget(sb,FILE_ROOT),NULL))){
+ if(!(sb->s_root=d_alloc_root(iget(sb,FILE_ROOT)))){
ntfs_error("Could not get root dir inode\n");
goto ntfs_read_super_mft;
}
j = error;
error = -ENOMEM;
- f1->f_dentry = f2->f_dentry = dget(d_alloc_root(inode, NULL));
+ f1->f_dentry = f2->f_dentry = dget(d_alloc_root(inode));
if (!f1->f_dentry)
goto close_f12_inode_i_j;
root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
if (!root_inode)
goto out_no_root;
- s->s_root = d_alloc_root(root_inode, NULL);
+ s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_no_root;
parse_options(data, &root_inode->i_uid, &root_inode->i_gid);
/* Check for special dentries.. */
pattern = NULL;
inode = dentry->d_inode;
- if (inode && dentry->d_parent == dentry) {
+ if (inode && IS_ROOT(dentry)) {
if (S_ISSOCK(inode->i_mode))
pattern = "socket:[%lu]";
if (S_ISFIFO(inode->i_mode))
s->u.qnx4_sb.sb_buf = bh;
s->u.qnx4_sb.sb = (struct qnx4_super_block *) bh->b_data;
s->s_root =
- d_alloc_root(iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK), NULL);
+ d_alloc_root(iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK));
if (s->s_root == NULL) {
printk("qnx4: get inode failed\n");
goto out;
brelse(bh);
s->s_op = &romfs_ops;
- s->s_root = d_alloc_root(iget(s, sz), NULL);
+ s->s_root = d_alloc_root(iget(s, sz));
if (!s->s_root)
goto outnobh;
for (;;)
{
dentry->d_time = jiffies;
- if (dentry == dentry->d_parent)
+ if (IS_ROOT(dentry))
break;
dentry = dentry->d_parent;
}
if (!root_inode)
goto out_no_root;
- sb->s_root = d_alloc_root(root_inode, NULL);
+ sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
goto out_no_root;
int register_filesystem(struct file_system_type * fs)
{
- struct file_system_type ** tmp;
-
- if (!fs)
- return -EINVAL;
- if (fs->next)
- return -EBUSY;
- tmp = &file_systems;
- while (*tmp) {
- if (strcmp((*tmp)->name, fs->name) == 0)
- return -EBUSY;
- tmp = &(*tmp)->next;
- }
- *tmp = fs;
- return 0;
+ struct file_system_type ** tmp;
+
+ if (!fs)
+ return -EINVAL;
+ if (fs->next)
+ return -EBUSY;
+ tmp = &file_systems;
+ while (*tmp) {
+ if (strcmp((*tmp)->name, fs->name) == 0)
+ return -EBUSY;
+ tmp = &(*tmp)->next;
+ }
+ *tmp = fs;
+ return 0;
}
#ifdef CONFIG_MODULES
sb->s_dev = dev;
sb->s_op = &sysv_sops;
root_inode = iget(sb,SYSV_ROOT_INO);
- sb->s_root = d_alloc_root(root_inode, NULL);
+ sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root) {
printk("SysV FS: get root inode failed\n");
sysv_put_super(sb);
sb->u.ufs_sb.s_flags = flags;
sb->u.ufs_sb.s_swab = swab;
- sb->s_root = d_alloc_root(iget(sb, UFS_ROOTINO), NULL);
+ sb->s_root = d_alloc_root(iget(sb, UFS_ROOTINO));
/*
while (dentry && count < 10) {
check_dent_int (dentry, count++);
- if (dentry == dentry->d_parent) {
+ if (IS_ROOT(dentry)) {
printk (KERN_DEBUG "*** end checking dentry (root reached ok)\n");
break;
}
__initfunc(static void check_cyrix_coma(void))
{
if (boot_cpu_data.coma_bug) {
- unsigned char ccr1;
+ unsigned char ccr3, tmp;
cli();
- ccr1 = getCx86 (CX86_CCR1);
- setCx86 (CX86_CCR1, ccr1 | 0x10);
+ ccr3 = getCx86(CX86_CCR3);
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ tmp = getCx86(0x31);
+ setCx86(0x31, tmp | 0xf8);
+ tmp = getCx86(0x32);
+ setCx86(0x32, tmp | 0x7f);
+ setCx86(0x33, 0);
+ tmp = getCx86(0x3c);
+ setCx86(0x3c, tmp | 0x87);
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
sti();
printk("Cyrix processor with \"coma bug\" found, workaround enabled\n");
}
*
* which has the same constant encoded..
*/
-#define __PAGE_OFFSET (0xC0000000)
+
+#include <asm/page_offset.h>
+
+#define __PAGE_OFFSET (PAGE_OFFSET_RAW)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
--- /dev/null
+#include <linux/config.h>
+#ifdef CONFIG_1GB
+#define PAGE_OFFSET_RAW 0xC0000000
+#elif defined(CONFIG_2GB)
+#define PAGE_OFFSET_RAW 0x80000000
+#elif defined(CONFIG_3GB)
+#define PAGE_OFFSET_RAW 0x40000000
+#endif
* with heavy changes by Linus Torvalds
*/
-#define D_MAXLEN 1024
-
#define IS_ROOT(x) ((x) == (x)->d_parent)
/*
extern void d_delete(struct dentry *);
/* allocate/de-allocate */
-extern struct dentry * d_alloc(struct dentry * parent, const struct qstr *name);
+extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern void prune_dcache(int);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void free_inode_memory(int); /* defined in fs/inode.c */
/* only used at mount-time */
-extern struct dentry * d_alloc_root(struct inode * root_inode, struct dentry * old_root);
+extern struct dentry * d_alloc_root(struct inode *);
/* test whether root is busy without destroying dcache */
extern int is_root_busy(struct dentry *);
/*
* This adds the entry to the hash queues.
*/
-extern void d_rehash(struct dentry * entry);
+extern void d_rehash(struct dentry *);
/*
* This adds the entry to the hash queues and initializes "d_inode".
* The entry was actually filled in earlier during "d_alloc()"
}
/* used for rename() and baskets */
-extern void d_move(struct dentry * entry, struct dentry * newdentry);
+extern void d_move(struct dentry *, struct dentry *);
/* appendix may either be NULL or be used for transname suffixes */
-extern struct dentry * d_lookup(struct dentry * dir, struct qstr * name);
+extern struct dentry * d_lookup(struct dentry *, struct qstr *);
/* validate "insecure" dentry pointer */
-extern int d_validate(struct dentry *dentry, struct dentry *dparent,
- unsigned int hash, unsigned int len);
+extern int d_validate(struct dentry *, struct dentry *, unsigned int, unsigned int);
/* write full pathname into buffer and return start of pathname */
-extern char * d_path(struct dentry * entry, char * buf, int buflen);
+extern char * d_path(struct dentry *, char *, int);
/* Allocation counts.. */
static __inline__ struct dentry * dget(struct dentry *dentry)
/*
* Install a file pointer in the fd array.
*/
-extern inline void fd_install(unsigned int fd, struct file *file)
+extern inline void fd_install(unsigned int fd, struct file * file)
{
current->files->fd[fd] = file;
}
* I suspect there are many other similar "optimizations" across the
* kernel...
*/
-extern void fput(struct file *file);
-extern void put_filp(struct file *file);
+extern void fput(struct file *);
+extern void put_filp(struct file *);
-#endif
+#endif /* __LINUX_FILE_H */
/*
* Flags that can be altered by MS_REMOUNT
*/
-#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME|MS_NODIRATIME)
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|\
+ MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME|MS_NODIRATIME)
/*
* Magic mount flag number. Has to be or-ed to the flag values.
#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
/*
- * Note that read-only etc flags are inode-specific: setting some file-system
+ * Note that nosuid etc flags are inode-specific: setting some file-system
* flags just means all the inodes inherit those flags by default. It might be
* possible to override it selectively if you really wanted to with some
* ioctl() that is not currently implemented.
#include <asm/semaphore.h>
#include <asm/byteorder.h>
-#include <asm/bitops.h>
-extern void update_atime (struct inode *inode);
+extern void update_atime (struct inode *);
#define UPDATE_ATIME(inode) update_atime (inode)
extern void buffer_init(unsigned long);
};
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
-void init_buffer(struct buffer_head *bh, kdev_t dev, int block,
- bh_end_io_t *handler, void *dev_id);
+void init_buffer(struct buffer_head *, kdev_t, int, bh_end_io_t *, void *);
static inline int buffer_uptodate(struct buffer_head * bh)
{
#include <linux/fcntl.h>
-extern int fcntl_getlk(unsigned int fd, struct flock *l);
-extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern int fcntl_getlk(unsigned int, struct flock *);
+extern int fcntl_setlk(unsigned int, unsigned int, struct flock *);
/* fs/locks.c */
-extern void locks_remove_posix(struct file *, fl_owner_t id);
+extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern struct file_lock *posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, unsigned int);
extern void posix_block_lock(struct file_lock *, struct file_lock *);
extern void posix_unblock_lock(struct file_lock *);
-#include <linux/stat.h>
-
struct fasync_struct {
int magic;
int fa_fd;
#define FLOCK_VERIFY_READ 1
#define FLOCK_VERIFY_WRITE 2
-extern int locks_mandatory_locked(struct inode *inode);
-extern int locks_mandatory_area(int read_write, struct inode *inode,
- struct file *filp, loff_t offset,
- size_t count);
+extern int locks_mandatory_locked(struct inode *);
+extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
extern inline int locks_verify_locked(struct inode *inode)
{
extern struct file *filp_open(const char *, int, int);
extern int filp_close(struct file *, fl_owner_t id);
-extern char * getname(const char * filename);
+extern char * getname(const char *);
#define __getname() ((char *) __get_free_page(GFP_KERNEL))
#define putname(name) free_page((unsigned long)(name))
-extern void kill_fasync(struct fasync_struct *fa, int sig);
+extern void kill_fasync(struct fasync_struct *, int);
extern int register_blkdev(unsigned int, const char *, struct file_operations *);
-extern int unregister_blkdev(unsigned int major, const char * name);
-extern int blkdev_open(struct inode * inode, struct file * filp);
-extern int blkdev_release (struct inode * inode);
+extern int unregister_blkdev(unsigned int, const char *);
+extern int blkdev_open(struct inode *, struct file *);
+extern int blkdev_release (struct inode *);
extern struct file_operations def_blk_fops;
extern struct inode_operations blkdev_inode_operations;
/* fs/devices.c */
extern int register_chrdev(unsigned int, const char *, struct file_operations *);
-extern int unregister_chrdev(unsigned int major, const char * name);
-extern int chrdev_open(struct inode * inode, struct file * filp);
+extern int unregister_chrdev(unsigned int, const char *);
+extern int chrdev_open(struct inode *, struct file *);
extern struct file_operations def_chr_fops;
extern struct inode_operations chrdev_inode_operations;
-extern char * bdevname(kdev_t dev);
-extern char * cdevname(kdev_t dev);
-extern char * kdevname(kdev_t dev);
+extern char * bdevname(kdev_t);
+extern char * cdevname(kdev_t);
+extern char * kdevname(kdev_t);
extern void init_special_inode(struct inode *, umode_t, int);
-extern void init_fifo(struct inode * inode);
+extern void init_fifo(struct inode *);
extern struct inode_operations fifo_inode_operations;
/* Invalid inode operations -- fs/bad_inode.c */
-extern void make_bad_inode(struct inode * inode);
-extern int is_bad_inode(struct inode * inode);
+extern void make_bad_inode(struct inode *);
+extern int is_bad_inode(struct inode *);
extern struct file_operations connecting_fifo_fops;
extern struct file_operations read_fifo_fops;
extern struct file_operations write_pipe_fops;
extern struct file_operations rdwr_pipe_fops;
-extern struct file_system_type *get_fs_type(const char *name);
+extern struct file_system_type *get_fs_type(const char *);
extern int fs_may_remount_ro(struct super_block *);
-extern int fs_may_mount(kdev_t dev);
+extern int fs_may_mount(kdev_t);
extern struct file *inuse_filps;
-extern void refile_buffer(struct buffer_head * buf);
-extern void set_writetime(struct buffer_head * buf, int flag);
+extern void refile_buffer(struct buffer_head *);
+extern void set_writetime(struct buffer_head *, int);
extern int try_to_free_buffers(struct page *);
extern int nr_buffers;
#define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */
#define NR_LIST 3
-void mark_buffer_uptodate(struct buffer_head * bh, int on);
+void mark_buffer_uptodate(struct buffer_head *, int);
extern inline void mark_buffer_clean(struct buffer_head * bh)
{
}
}
-extern int check_disk_change(kdev_t dev);
-extern int invalidate_inodes(struct super_block * sb);
+extern int check_disk_change(kdev_t);
+extern int invalidate_inodes(struct super_block *);
extern void invalidate_inode_pages(struct inode *);
-extern void invalidate_buffers(kdev_t dev);
-extern int floppy_is_wp(int minor);
-extern void sync_inodes(kdev_t dev);
-extern void write_inode_now(struct inode *inode);
-extern void sync_dev(kdev_t dev);
-extern int fsync_dev(kdev_t dev);
-extern void sync_supers(kdev_t dev);
-extern int bmap(struct inode * inode,int block);
+extern void invalidate_buffers(kdev_t);
+extern int floppy_is_wp(int);
+extern void sync_inodes(kdev_t);
+extern void write_inode_now(struct inode *);
+extern void sync_dev(kdev_t);
+extern int fsync_dev(kdev_t);
+extern void sync_supers(kdev_t);
+extern int bmap(struct inode *, int);
extern int notify_change(struct dentry *, struct iattr *);
-extern int permission(struct inode * inode,int mask);
-extern int get_write_access(struct inode *inode);
-extern void put_write_access(struct inode *inode);
-extern struct dentry * open_namei(const char * pathname, int flag, int mode);
-extern struct dentry * do_mknod(const char * filename, int mode, dev_t dev);
+extern int permission(struct inode *, int);
+extern int get_write_access(struct inode *);
+extern void put_write_access(struct inode *);
+extern struct dentry * open_namei(const char *, int, int);
+extern struct dentry * do_mknod(const char *, int, dev_t);
extern int do_pipe(int *);
/* fs/dcache.c -- generic fs support functions */
#define lnamei(pathname) __namei(pathname, 0)
extern void iput(struct inode *);
-extern struct inode * igrab(struct inode *inode);
+extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern struct inode * iget(struct super_block *, unsigned long);
extern void clear_inode(struct inode *);
extern struct file * get_empty_filp(void);
extern struct buffer_head * get_hash_table(kdev_t, int, int);
extern struct buffer_head * getblk(kdev_t, int, int);
-extern struct buffer_head * find_buffer(kdev_t dev, int block, int size);
+extern struct buffer_head * find_buffer(kdev_t, int, int);
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int is_read_only(kdev_t);
extern void __brelse(struct buffer_head *);
if (buf)
__brelse(buf);
}
-extern void __bforget(struct buffer_head *buf);
+extern void __bforget(struct buffer_head *);
extern inline void bforget(struct buffer_head *buf)
{
if (buf)
__bforget(buf);
}
-extern void set_blocksize(kdev_t dev, int size);
-extern unsigned int get_hardblocksize(kdev_t dev);
-extern struct buffer_head * bread(kdev_t dev, int block, int size);
-extern struct buffer_head * breada(kdev_t dev,int block, int size,
- unsigned int pos, unsigned int filesize);
+extern void set_blocksize(kdev_t, int);
+extern unsigned int get_hardblocksize(kdev_t);
+extern struct buffer_head * bread(kdev_t, int, int);
+extern struct buffer_head * breada(kdev_t, int, int, unsigned int, unsigned int);
extern int brw_page(int, struct page *, kdev_t, int [], int, int);
extern int generic_readpage(struct file *, struct page *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
-extern ssize_t generic_file_write(struct file *, const char*, size_t, loff_t *, writepage_t);
+extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *, writepage_t);
-extern struct super_block *get_super(kdev_t dev);
-extern void put_super(kdev_t dev);
-unsigned long generate_cluster(kdev_t dev, int b[], int size);
-unsigned long generate_cluster_swab32(kdev_t dev, int b[], int size);
+extern struct super_block *get_super(kdev_t);
+extern void put_super(kdev_t);
+unsigned long generate_cluster(kdev_t, int b[], int);
+unsigned long generate_cluster_swab32(kdev_t, int b[], int);
extern kdev_t ROOT_DEV;
extern void show_buffers(void);
#ifdef CONFIG_BLK_DEV_INITRD
extern kdev_t real_root_dev;
-extern int change_root(kdev_t new_root_dev,const char *put_old);
+extern int change_root(kdev_t, const char *);
#endif
extern ssize_t char_read(struct file *, char *, size_t, loff_t *);
extern ssize_t char_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t block_write(struct file *, const char *, size_t, loff_t *);
-extern int block_fsync(struct file *, struct dentry *dir);
-extern int file_fsync(struct file *, struct dentry *dir);
+extern int block_fsync(struct file *, struct dentry *);
+extern int file_fsync(struct file *, struct dentry *);
extern int inode_change_ok(struct inode *, struct iattr *);
extern void inode_setattr(struct inode *, struct iattr *);
#endif /* __KERNEL__ */
-#endif
+#endif /* _LINUX_FS_H */
const char * name;
};
+struct nfs_readlinkargs {
+ struct nfs_fh * fh;
+ const void * buffer;
+};
+
struct nfs_readargs {
struct nfs_fh * fh;
__u32 offset;
struct nfs_fh * fh;
__u32 cookie;
void * buffer;
- unsigned int bufsiz;
+ int bufsiz;
};
struct nfs_diropok {
unsigned int count;
};
-struct nfs_readlinkres {
- char ** string;
- unsigned int * lenp;
- unsigned int maxlen;
- void * buffer;
-};
-
struct nfs_readdirres {
void * buffer;
- unsigned int bufsiz;
+ int bufsiz;
+ u32 cookie;
};
#endif /* NFS_NEED_XDR_TYPES */
#define NFS_FLAGS(inode) ((inode)->u.nfs_i.flags)
#define NFS_REVALIDATING(inode) (NFS_FLAGS(inode) & NFS_INO_REVALIDATE)
#define NFS_WRITEBACK(inode) ((inode)->u.nfs_i.writeback)
+#define NFS_COOKIES(inode) ((inode)->u.nfs_i.cookies)
+#define NFS_DIREOF(inode) ((inode)->u.nfs_i.direof)
/*
* These are the default flags for swap requests
extern int nfs_proc_lookup(struct nfs_server *server, struct nfs_fh *dir,
const char *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr);
-extern int nfs_proc_readlink(struct nfs_server *server, struct nfs_fh *fhandle,
- void **p0, char **string, unsigned int *len,
- unsigned int maxlen);
extern int nfs_proc_read(struct nfs_server *server, struct nfs_fh *fhandle,
int swap, unsigned long offset, unsigned int count,
void *buffer, struct nfs_fattr *fattr);
struct nfs_fh *fhandle, struct nfs_fattr *fattr);
extern int nfs_proc_rmdir(struct nfs_server *server, struct nfs_fh *dir,
const char *name);
-extern int nfs_proc_readdir(struct nfs_server *server, struct nfs_fh *fhandle,
- u32 cookie, unsigned int size, __u32 *entry);
extern int nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *res);
*/
extern struct inode_operations nfs_dir_inode_operations;
extern struct dentry_operations nfs_dentry_operations;
-extern void nfs_free_dircache(void);
extern void nfs_invalidate_dircache(struct inode *);
-extern void nfs_invalidate_dircache_sb(struct super_block *);
/*
* linux/fs/nfs/symlink.c
* pages.
*/
struct nfs_wreq * writeback;
+
+ /* Readdir caching information. */
+ void *cookies;
+ u32 direof;
};
/*
#define PAGE_CACHE_SHIFT PAGE_SHIFT
#define PAGE_CACHE_SIZE PAGE_SIZE
#define PAGE_CACHE_MASK PAGE_MASK
+#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
#define page_cache_alloc() __get_free_page(GFP_USER)
#define page_cache_free(x) free_page(x)
PROC_SCSI_INIA100,
PROC_SCSI_FCAL,
PROC_SCSI_I2O,
+ PROC_SCSI_USB_SCSI,
PROC_SCSI_SCSI_DEBUG,
PROC_SCSI_NOT_PRESENT,
PROC_SCSI_FILE, /* I'm assuming here that we */
#define AVL_MIN_MAP_COUNT 32
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct vm_area_struct *mmap_avl; /* tree of VMAs */
- struct vm_area_struct *mmap_cache; /* last find_vma result */
+ struct vm_area_struct * mmap; /* list of VMAs */
+ struct vm_area_struct * mmap_avl; /* tree of VMAs */
+ struct vm_area_struct * mmap_cache; /* last find_vma result */
pgd_t * pgd;
atomic_t count;
int map_count; /* number of VMAs */
}
/* per-UID process charging. */
-extern int alloc_uid(struct task_struct *p);
-void free_uid(struct task_struct *p);
+extern int alloc_uid(struct task_struct *);
+void free_uid(struct task_struct *);
#include <asm/current.h>
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE)
-extern int in_group_p(gid_t grp);
+extern int in_group_p(gid_t);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
-extern int dequeue_signal(sigset_t *block, siginfo_t *);
-extern int send_sig_info(int, struct siginfo *info, struct task_struct *);
-extern int force_sig_info(int, struct siginfo *info, struct task_struct *);
-extern int kill_pg_info(int, struct siginfo *info, pid_t);
-extern int kill_sl_info(int, struct siginfo *info, pid_t);
-extern int kill_proc_info(int, struct siginfo *info, pid_t);
-extern int kill_something_info(int, struct siginfo *info, int);
-extern void notify_parent(struct task_struct * tsk, int);
-extern void force_sig(int sig, struct task_struct * p);
-extern int send_sig(int sig, struct task_struct * p, int priv);
+extern int dequeue_signal(sigset_t *, siginfo_t *);
+extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int kill_pg_info(int, struct siginfo *, pid_t);
+extern int kill_sl_info(int, struct siginfo *, pid_t);
+extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern int kill_something_info(int, struct siginfo *, int);
+extern void notify_parent(struct task_struct *, int);
+extern void force_sig(int, struct task_struct *);
+extern int send_sig(int, struct task_struct *, int);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
-extern int do_sigaction(int sig, const struct k_sigaction *act,
- struct k_sigaction *oact);
-extern int do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp);
+extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
+extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
extern inline int signal_pending(struct task_struct *p)
{
: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
-extern int request_irq(unsigned int irq,
+extern int request_irq(unsigned int,
void (*handler)(int, void *, struct pt_regs *),
- unsigned long flags,
- const char *device,
- void *dev_id);
-extern void free_irq(unsigned int irq, void *dev_id);
+ unsigned long, const char *, void *);
+extern void free_irq(unsigned int, void *);
/*
* This has now become a routine instead of a macro, it sets a flag if
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Oct 21 22:47:12 1998
- * Modified at: Mon May 10 14:51:06 1999
+ * Modified at: Sun May 16 13:40:03 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
ACTISYS_PLUS_DONGLE,
GIRBIL_DONGLE,
LITELINK_DONGLE,
-} DONGLE_T;
+} IRDA_DONGLE;
struct irda_device;
struct dongle {
- DONGLE_T type;
+ IRDA_DONGLE type;
void (*open)(struct irda_device *, int type);
void (*close)(struct irda_device *);
- void (*reset)( struct irda_device *, int unused);
+ void (*reset)( struct irda_device *);
void (*change_speed)( struct irda_device *, int baudrate);
void (*qos_init)( struct irda_device *, struct qos_info *);
};
#define IRCOMM_MAGIC 0x434f4d4d
#define COMM_INIT_CTRL_PARAM 3 /* length of initial control parameters */
-#define COMM_HEADER 1 /* length of clen field */
-#define COMM_HEADER_SIZE (TTP_MAX_HEADER+COMM_HEADER)
-#define COMM_DEFAULT_DATA_SIZE 64
+#define COMM_HEADER_SIZE 1 /* length of clen field */
+#define COMM_MAX_HEADER_SIZE (TTP_MAX_HEADER+COMM_HEADER_SIZE)
+#define COMM_DEFAULT_SDU_SIZE (64 - COMM_HEADER_SIZE)
#define IRCOMM_MAX_CONNECTION 1 /* Don't change for now */
int null_modem_mode; /* switch for null modem emulation */
int ttp_stop;
- int max_txbuff_size;
- __u32 max_sdu_size;
+ __u32 tx_max_sdu_size;
+ __u32 rx_max_sdu_size;
__u8 max_header_size;
__u32 daddr; /* Device address of the peer device */
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Apr 14 12:41:42 1998
- * Modified at: Mon May 10 15:46:02 1999
+ * Modified at: Wed May 19 08:44:48 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
struct dongle *dongle; /* Dongle driver */
- /* spinlock_t lock; */ /* For serializing operations */
+ spinlock_t lock; /* For serializing operations */
/* Media busy stuff */
int media_busy;
struct timer_list media_busy_timer;
/* Callbacks for driver specific implementation */
- void (*change_speed)(struct irda_device *driver, int baud);
+ void (*change_speed)(struct irda_device *idev, int baud);
int (*is_receiving)(struct irda_device *); /* receiving? */
void (*set_dtr_rts)(struct irda_device *idev, int dtr, int rts);
int (*raw_write)(struct irda_device *idev, __u8 *buf, int len);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun May 9 11:45:33 1999
+ * Modified at: Mon May 31 13:54:20 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
int unicast_open;
int broadcast_open;
+ int tx_busy;
+ struct sk_buff_head txq; /* Transmit control queue */
+
struct timer_list kick_timer;
};
struct device dev; /* Ethernet device structure*/
struct enet_statistics stats;
- __u32 saddr; /* Source devcie address */
+ __u32 saddr; /* Source device address */
__u32 daddr; /* Destination device address */
int netdev_registered;
int notify_irmanager;
void irlan_open_data_tsap(struct irlan_cb *self);
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self);
+
void irlan_get_provider_info(struct irlan_cb *self);
void irlan_get_unicast_addr(struct irlan_cb *self);
void irlan_get_media_char(struct irlan_cb *self);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 15 08:36:58 1998
- * Modified at: Thu Apr 22 14:09:37 1999
+ * Modified at: Fri May 14 23:29:00 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
int irlan_eth_xmit(struct sk_buff *skb, struct device *dev);
void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow);
+void irlan_eth_send_gratuitous_arp(struct device *dev);
void irlan_eth_set_multicast_list( struct device *dev);
struct enet_statistics *irlan_eth_get_stats(struct device *dev);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 3 13:49:59 1997
- * Modified at: Mon May 10 22:12:56 1999
+ * Modified at: Wed May 19 15:31:16 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1998-1999 Dag Brattli <dagb@cs.uit.no>
#define FRAME_MAX_SIZE 2048
-void irport_start(int iobase);
-void irport_stop(int iobase);
+void irport_start(struct irda_device *idev, int iobase);
+void irport_stop(struct irda_device *idev, int iobase);
int irport_probe(int iobase);
void irport_change_speed(struct irda_device *idev, int speed);
void irport_interrupt(int irq, void *dev_id, struct pt_regs *regs);
int irport_hard_xmit(struct sk_buff *skb, struct device *dev);
+void irport_wait_until_sent(struct irda_device *idev);
#endif
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Jun 9 13:26:50 1998
- * Modified at: Thu Feb 25 20:34:21 1999
+ * Modified at: Tue May 25 07:54:41 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (C) 1998, Aage Kvalnes <aage@cs.uit.no>
#include <linux/types.h>
#include <asm/spinlock.h>
-/* #include <net/irda/irda.h> */
-
#ifndef QUEUE_H
#define QUEUE_H
struct sk_buff_head rxbuff;
struct ircomm_cb *comm; /* ircomm instance */
+ __u32 tx_max_sdu_size;
+ __u32 max_header_size;
/*
* These members are used for compatibility with usual serial device.
* See linux/serial.h
wait_queue_head_t delta_msr_wait;
wait_queue_head_t tx_wait;
- struct timer_list timer;
+ struct timer_list tx_timer;
+ struct timer_list rx_timer;
long pgrp;
long session;
--- /dev/null
+/*********************************************************************
+ *
+ * Filename: smc.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Thomas Davis (tadavis@jps.net)
+ *
+ * Copyright (c) 1998, 1999 Thomas Davis (tadavis@jps.net)
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * I, Thomas Davis, admit no liability nor provide warranty for any
+ * of this software. This material is provided "AS-IS" and at no charge.
+ *
+ * Definitions for the SMC IrCC controller.
+ *
+ ********************************************************************/
+
+#ifndef SMC_IRCC_H
+#define SMC_IRCC_H
+
+#define UART_MASTER 0x07
+#define UART_MASTER_POWERDOWN 1<<7
+#define UART_MASTER_RESET 1<<6
+#define UART_MASTER_INT_EN 1<<5
+#define UART_MASTER_ERROR_RESET 1<<4
+
+/* Register block 0 */
+
+#define UART_IIR 0x01
+#define UART_IER 0x02
+#define UART_LSR 0x03
+#define UART_LCR_A 0x04
+#define UART_LCR_B 0x05
+#define UART_BSR 0x06
+
+#define UART_IIR_ACTIVE_FRAME 1<<7
+#define UART_IIR_EOM 1<<6
+#define UART_IIR_RAW_MODE 1<<5
+#define UART_IIR_FIFO 1<<4
+
+#define UART_IER_ACTIVE_FRAME 1<<7
+#define UART_IER_EOM 1<<6
+#define UART_IER_RAW_MODE 1<<5
+#define UART_IER_FIFO 1<<4
+
+#define UART_LSR_UNDERRUN 1<<7
+#define UART_LSR_OVERRUN 1<<6
+#define UART_LSR_FRAME_ERROR 1<<5
+#define UART_LSR_SIZE_ERROR 1<<4
+#define UART_LSR_CRC_ERROR 1<<3
+#define UART_LSR_FRAME_ABORT 1<<2
+
+#define UART_LCR_A_FIFO_RESET 1<<7
+#define UART_LCR_A_FAST 1<<6
+#define UART_LCR_A_GP_DATA 1<<5
+#define UART_LCR_A_RAW_TX 1<<4
+#define UART_LCR_A_RAW_RX 1<<3
+#define UART_LCR_A_ABORT 1<<2
+#define UART_LCR_A_DATA_DONE 1<<1
+
+#define UART_LCR_B_SCE_DISABLED 0x00<<6
+#define UART_LCR_B_SCE_TRANSMIT 0x01<<6
+#define UART_LCR_B_SCE_RECEIVE 0x02<<6
+#define UART_LCR_B_SCE_UNDEFINED 0x03<<6
+#define UART_LCR_B_SIP_ENABLE 1<<5
+#define UART_LCR_B_BRICK_WALL 1<<4
+
+#define UART_BSR_NOT_EMPTY 1<<7
+#define UART_BSR_FIFO_FULL 1<<6
+#define UART_BSR_TIMEOUT 1<<5
+
+/* Register block 1 */
+
+#define UART_SCE_CFGA 0x00
+#define UART_SCE_CFGB 0x01
+#define UART_FIFO_THRESHOLD 0x02
+
+#define UART_CFGA_AUX_IR 0x01<<7
+#define UART_CFGA_HALF_DUPLEX 0x01<<2
+#define UART_CFGA_TX_POLARITY 0x01<<1
+#define UART_CFGA_RX_POLARITY 0x01
+
+#define UART_CFGA_COM 0x00<<3
+#define UART_CFGA_IRDA_SIR_A 0x01<<3
+#define UART_CFGA_ASK_SIR 0x02<<3
+#define UART_CFGA_IRDA_SIR_B 0x03<<3
+#define UART_CFGA_IRDA_HDLC 0x04<<3
+#define UART_CFGA_IRDA_4PPM 0x05<<3
+#define UART_CFGA_CONSUMER 0x06<<3
+#define UART_CFGA_RAW_IR 0x07<<3
+#define UART_CFGA_OTHER 0x08<<3
+
+#define UART_IR_HDLC 0x04
+#define UART_IR_4PPM 0x01
+#define UART_IR_CONSUMER 0x02
+
+#define UART_CFGB_LOOPBACK 0x01<<5
+#define UART_CFGB_LPBCK_TX_CRC 0x01<<4
+#define UART_CFGB_NOWAIT 0x01<<3
+#define UART_CFGB_STRING_MOVE 0x01<<2
+#define UART_CFGB_DMA_BURST 0x01<<1
+#define UART_CFGB_DMA_ENABLE 0x01
+
+#define UART_CFGB_COM 0x00<<6
+#define UART_CFGB_IR 0x01<<6
+#define UART_CFGB_AUX 0x02<<6
+#define UART_CFGB_INACTIVE 0x03<<6
+
+/* Register block 2 - Consumer IR - not used */
+
+/* Register block 3 - Identification Registers! */
+
+#define UART_ID_HIGH 0x00 /* 0x10 */
+#define UART_ID_LOW 0x01 /* 0xB8 */
+#define UART_CHIP_ID 0x02 /* 0xF1 */
+#define UART_VERSION 0x03 /* 0x01 */
+#define UART_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
+
+/* Register block 4 - IrDA */
+#define UART_CONTROL 0x00
+#define UART_BOF_COUNT_LO 0x01
+#define UART_BRICKWALL_CNT_LO 0x02
+#define UART_BRICKWALL_TX_CNT_HI 0x03
+#define UART_TX_SIZE_LO 0x04
+#define UART_RX_SIZE_HI 0x05
+#define UART_RX_SIZE_LO 0x06
+
+#define UART_1152 0x01<<7
+#define UART_CRC 0x01<<6
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[10];
+ int head;
+ int tail;
+ int len;
+};
+
+/* Private data for each instance */
+struct ircc_cb {
+ struct st_fifo st_fifo;
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ struct irda_device idev;
+};
+
+#endif
+++ /dev/null
-#if 0
-static char *rcsid = "$Id: smc_ircc.h,v 1.5 1998/07/27 01:25:29 ratbert Exp $";
-#endif
-
-#ifndef SMC_IRCC_H
-#define SMC_IRCC_H
-
-#define FIR_XMIT 1
-#define FIR_RECEIVE 2
-#define SIR_XMIT 3
-#define SIR_RECEIVE 4
-
-#define MASTER 0x07
-#define MASTER_POWERDOWN 1<<7
-#define MASTER_RESET 1<<6
-#define MASTER_INT_EN 1<<5
-#define MASTER_ERROR_RESET 1<<4
-
-/* Register block 0 */
-
-#define IIR 0x01
-#define IER 0x02
-#define LSR 0x03
-#define LCR_A 0x04
-#define LCR_B 0x05
-#define BSR 0x06
-
-#define IIR_ACTIVE_FRAME 1<<7
-#define IIR_EOM 1<<6
-#define IIR_RAW_MODE 1<<5
-#define IIR_FIFO 1<<4
-
-#define IER_ACTIVE_FRAME 1<<7
-#define IER_EOM 1<<6
-#define IER_RAW_MODE 1<<5
-#define IER_FIFO 1<<4
-
-#define LSR_UNDER_RUN 1<<7
-#define LSR_OVER_RUN 1<<6
-#define LSR_FRAME_ERROR 1<<5
-#define LSR_SIZE_ERROR 1<<4
-#define LSR_CRC_ERROR 1<<3
-#define LSR_FRAME_ABORT 1<<2
-
-#define LCR_A_FIFO_RESET 1<<7
-#define LCR_A_FAST 1<<6
-#define LCR_A_GP_DATA 1<<5
-#define LCR_A_RAW_TX 1<<4
-#define LCR_A_RAW_RX 1<<3
-#define LCR_A_ABORT 1<<2
-#define LCR_A_DATA_DONE 1<<1
-
-#define LCR_B_SCE_MODE_DISABLED 0x00<<6
-#define LCR_B_SCE_MODE_TRANSMIT 0x01<<6
-#define LCR_B_SCE_MODE_RECEIVE 0x02<<6
-#define LCR_B_SCE_MODE_UNDEFINED 0x03<<6
-#define LCR_B_SIP_ENABLE 1<<5
-#define LCR_B_BRICK_WALL 1<<4
-
-#define BSR_NOT_EMPTY 1<<7
-#define BSR_FIFO_FULL 1<<6
-#define BSR_TIMEOUT 1<<5
-
-/* Register block 1 */
-
-#define SCE_CFG_A 0x00
-#define SCE_CFG_B 0x01
-#define FIFO_THRESHOLD 0x02
-
-#define CFG_A_AUX_IR 0x01<<7
-#define CFG_A_HALF_DUPLEX 0x01<<2
-#define CFG_A_TX_POLARITY 0x01<<1
-#define CFG_A_RX_POLARITY 0x01
-
-#define CFG_A_COM 0x00<<3
-#define CFG_A_IRDA_SIR_A 0x01<<3
-#define CFG_A_ASK_SIR 0x02<<3
-#define CFG_A_IRDA_SIR_B 0x03<<3
-#define CFG_A_IRDA_HDLC 0x04<<3
-#define CFG_A_IRDA_4PPM 0x05<<3
-#define CFG_A_CONSUMER 0x06<<3
-#define CFG_A_RAW_IR 0x07<<3
-#define CFG_A_OTHER 0x08<<3
-
-#define IR_HDLC 0x04
-#define IR_4PPM 0x01
-#define IR_CONSUMER 0x02
-
-#define CFG_B_LOOPBACK 0x01<<5
-#define CFG_B_LPBCK_TX_CRC 0x01<<4
-#define CFG_B_NOWAIT 0x01<<3
-#define CFB_B_STRING_MOVE 0x01<<2
-#define CFG_B_DMA_BURST 0x01<<1
-#define CFG_B_DMA_ENABLE 0x01
-
-#define CFG_B_MUX_COM 0x00<<6
-#define CFG_B_MUX_IR 0x01<<6
-#define CFG_B_MUX_AUX 0x02<<6
-#define CFG_B_INACTIVE 0x03<<6
-
-/* Register block 2 - Consumer IR - not used */
-
-/* Register block 3 - Identification Registers! */
-
-#define SMSC_ID_HIGH 0x00 /* 0x10 */
-#define SMSC_ID_LOW 0x01 /* 0xB8 */
-#define CHIP_ID 0x02 /* 0xF1 */
-#define VERSION_NUMBER 0x03 /* 0x01 */
-#define HOST_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
-
-/* Register block 4 - IrDA */
-#define IR_CONTROL 0x00
-#define BOF_COUNT_LO 0x01
-#define BRICK_WALL_CNT_LO 0x02
-#define BRICK_TX_CNT_HI 0x03
-#define TX_DATA_SIZE_LO 0x04
-#define RX_DATA_SIZE_HI 0x05
-#define RX_DATA_SIZE_LO 0x06
-
-#define SELECT_1152 0x01<<7
-#define CRC_SELECT 0x01<<6
-
-#endif
extern unsigned short udp_good_socknum(void);
-#define UDP_NO_CHECK 0
+/* Note: this must match 'valbool' in sock_setsockopt */
+#define UDP_CSUM_NOXMIT 1
+/* Used by SunRPC/xprt layer. */
+#define UDP_CSUM_NORCV 2
+
+/* Default, as per the RFC, is to always do csums. */
+#define UDP_CSUM_DEFAULT 0
extern struct proto udp_prot;
* Copyright (C) 1998, 1999 Douglas Gilbert
- Version: 2.1.32 (990501)
- This version for later 2.1.x series and 2.2.x kernels
+ Version: 2.1.34 (990603)
+ This version for later 2.1.x and 2.2.x series kernels
D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au)
+ Changes since 2.1.33 (990521)
+ - implement SG_SET_RESERVED_SIZE and associated memory re-org.
+ - add SG_NEXT_CMD_LEN to override SCSI command lengths
+ - add SG_GET_VERSION_NUM to get version expressed as an integer
+ Changes since 2.1.32 (990501)
+ - fix race condition in sg_read() and sg_open()
Changes since 2.1.31 (990327)
- add ioctls SG_GET_UNDERRUN_FLAG and _SET_. Change the default
to _not_ flag underruns (affects aic7xxx driver)
Changes since 2.1.30 (990320)
- memory tweaks: change flags on kmalloc (GFP_KERNEL to GFP_ATOMIC)
- increase max allowable mid-level pool usage
- Changes since 2.1.21 (990315)
- - skipped to 2.1.30 indicating interface change (revert to 2.1.9)
- - remove attempt to accomodate cdrecord 1.8, will fix app
- - keep SG_?ET_RESERVED_SIZE naming for clarity
- Changes since 2.1.20 (990313)
- - ommission: left out logic for SG_?ET_ALT_INTERFACE, now added
- Changes since 2.1.9 (990309)
- - skipped to version 2.1.20 to indicate some interface changes
- - incorporate sg changes to make cdrecord 1.8 work (had its
- own patches that were different from the original)
- - change SG_?ET_BUFF_SIZE to SG_?ET_RESERVED_SIZE for clarity
- Changes since 2.1.8 (990303)
- - debug ">9" option dumps debug for _all_ active sg devices
- - increase allowable dma pool usage + increase minimum threshhold
- - pad out sg_scsi_id structure
- Changes since 2.1.7 (990227)
- - command queuing now "non-default" [back. compat. with cdparanoia]
- - Tighten access on some ioctls
New features and changes:
- the SCSI target, host and driver status are returned
in unused fields of sg_header (maintaining its original size).
- asynchronous notification support added (SIGPOLL, SIGIO) for
- read()s ( write()s should never block).
- - pack_id logic added so read() can be made to wait for a specific
- pack_id.
+ read()s (write()s should never block).
+ - pack_id logic added so read() can wait for a specific pack_id.
- uses memory > ISA_DMA_THRESHOLD if adapter allows it (e.g. a
pci scsi adapter).
- this driver no longer uses a single SG_BIG_BUFF sized buffer
- obtained at driver/module init time. Rather it obtains a
- SG_SCATTER_SZ buffer when a fd is open()ed and frees it at
- the corresponding release() (ie pr fd). Hence open() can return
- ENOMEM! If write() request > SG_SCATTER_SZ bytes for data then
- it can fail with ENOMEM as well (if so, scale back).
+ obtained at driver/module init time. Rather it tries to obtain a
+ SG_DEF_RESERVED_SIZE buffer when a fd is open()ed and frees it
+ at the corresponding release() (ie per fd). Actually the "buffer"
+ may be a collection of buffers if scatter-gather is being used.
+ - add SG_SET_RESERVED_SIZE ioctl allowing the user to request a
+ large buffer for duration of current file descriptor's lifetime.
+ - SG_GET_RESERVED_SIZE ioctl can be used to find out how much
+ actually has been reserved.
+ - add SG_NEXT_CMD_LEN ioctl to override SCSI command length on
+ the next write() to this file descriptor.
+ - SG_GET_RESERVED_SIZE's presence as a symbol can be used for
+ compile time identification of the version 2 sg driver.
+ However, it is recommended that run time identification based on
+ calling the ioctl of the same name is a more flexible and
+ safer approach.
- adds several ioctl calls, see ioctl section below.
- - SG_SCATTER_SZ's presence indicates this version of "sg" driver.
Good documentation on the original "sg" device interface and usage can be
- found in the Linux HOWTO document: "SCSI Programming HOWTO" by Heiko
- Eissfeldt; last updated 7 May 1996. I will add more info on using the
- extensions in this driver as required. A quick summary:
+ found in the Linux HOWTO document: "SCSI Programming HOWTO" (version 0.5)
+ by Heiko Eissfeldt; last updated 7 May 1996. Here is a quick summary of
+ sg basics:
An SG device is accessed by writing SCSI commands plus any associated
outgoing data to it; the resulting status codes and any incoming data
are then obtained by a read call. The device can be opened O_NONBLOCK
The given SCSI command has its LUN field overwritten internally by the
value associated with the device that has been opened.
- Memory (RAM) is used within this driver for direct memory access (DMA)
- in transferring data to and from the SCSI device. The dreaded ENOMEM
- seems to be more prevalent under early 2.2.x kernels than under the
- 2.0.x kernel series. For a given (large) transfer the memory obtained by
- this driver must be contiguous or scatter-gather must be used (if
- supported by the adapter). [Furthermore, ISA SCSI adapters can only use
- memory below the 16MB level on a i386.]
- This driver tries hard to find some suitable memory before admitting
- defeat and returning ENOMEM. All is not lost if application writers
- then back off the amount they are requesting. The value returned by
- the SG_GET_RESERVED_SIZE ioctl is guaranteed to be available (one
- per fd). This driver does the following:
- - attempts to reserve a SG_SCATTER_SZ sized buffer on open(). The
- actual amount reserved is given by the SG_GET_RESERVED_SIZE ioctl().
- - each write() needs to reserve a DMA buffer of the size of the
- data buffer indicated (excluding sg_header and command overhead).
- This buffer, depending on its size, adapter type (ISA or not) and
- the amount of memory available will be obtained from the kernel
- directly (get_free_pages or kmalloc) or the from the scsi mid-level
- dma pool (taking care not to exhaust it).
- If the buffer requested is > SG_SCATTER_SZ or memory is tight then
- scatter-gather will be used if supported by the adapter.
- - write() will also attempt to use the buffer reserved on open()
- if it is large enough.
- The above strategy ensures that a write() can always depend on a buffer
- of the size indicated by the SG_GET_RESERVED_SIZE ioctl() (which could be
- 0, but at least the app knows things are tight in advance).
- Hence application writers can adopt quite aggressive strategies (e.g.
- requesting 512KB) and scale them back in the face of ENOMEM errors.
- N.B. Queuing up commands also ties up kernel memory.
-
- More documentation can be found at www.torque.net/sg
+ This device currently uses "indirect IO" in the sense that data is
+ DMAed into kernel buffers from the hardware and afterwards is
+ transferred into the user space (or vice versa if you are writing).
+ Transfer speeds of up to 20 to 30MBytes/sec have been measured using
+ indirect IO. For faster throughputs "direct IO" which cuts out the
+ double handling of data is required. This will also need a new interface.
+
+ Grabbing memory for those kernel buffers used in this driver for DMA may
+ cause the dreaded ENOMEM error. This error seems to be more prevalent
+ under early 2.2.x kernels than under the 2.0.x kernel series. For a given
+ (large) transfer the memory obtained by this driver must be contiguous or
+ scatter-gather must be used (if supported by the adapter). [Furthermore,
+ ISA SCSI adapters can only use memory below the 16MB level on a i386.]
+
+ When a "sg" device is open()ed O_RDWR then this driver will attempt to
+ reserve a buffer of SG_DEF_RESERVED_SIZE that will be used by subsequent
+ write()s on this file descriptor as long as:
+ - it is not already in use (eg when command queuing is in use)
+ - the write() does not call for a buffer size larger than the
+ reserved size.
+ In these cases the write() will attempt to find the memory it needs for
+ DMA buffers dynamically and in the worst case will fail with ENOMEM.
+ The amount of memory actually reserved depends on various dynamic factors
+ and can be checked with the SG_GET_RESERVED_SIZE ioctl(). [In a very
+ tight memory situation it may yield 0!] The size of the reserved buffer
+ can be changed with the SG_SET_RESERVED_SIZE ioctl(). It should be
+ followed with a call to the SG_GET_RESERVED_SIZE ioctl() to find out how
+ much was actually reserved.
+
+ More documentation plus test and utility programs can be found at
+ http://www.torque.net/sg
*/
#define SG_MAX_SENSE 16 /* too little, unlikely to change in 2.2.x */
int pack_len; /* [o] reply_len (ie useless), ignored as input */
int reply_len; /* [i] max length of expected reply (inc. sg_header) */
int pack_id; /* [io] id number of packet (use ints >= 0) */
- int result; /* [o] 0==ok, else (+ve) Unix errno code (e.g. EIO) */
+ int result; /* [o] 0==ok, else (+ve) Unix errno (best ignored) */
unsigned int twelve_byte:1;
/* [i] Force 12 byte command length for group 6 & 7 commands */
unsigned int target_status:5; /* [o] scsi status from target */
int unused3;
} Sg_scsi_id;
-/* ioctls ( _GET_s yield result via 'int *' 3rd argument unless
- otherwise indicated */
-#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies, 10ms on i386 */
+/* IOCTLs: ( _GET_s yield result via 'int *' 3rd argument unless
+ otherwise indicated) */
+#define SG_SET_TIMEOUT 0x2201 /* unit: jiffies (10ms on i386) */
#define SG_GET_TIMEOUT 0x2202 /* yield timeout as _return_ value */
#define SG_EMULATED_HOST 0x2203 /* true for emulated host adapter (ATAPI) */
#define SG_SET_TRANSFORM 0x2204
#define SG_GET_TRANSFORM 0x2205
-#define SG_SET_RESERVED_SIZE 0x2275 /* currently ignored, future addition */
-/* Following yields buffer reserved by open(): 0 <= x <= SG_SCATTER_SZ */
-#define SG_GET_RESERVED_SIZE 0x2272
+#define SG_SET_RESERVED_SIZE 0x2275 /* request a new reserved buffer size */
+#define SG_GET_RESERVED_SIZE 0x2272 /* actual size of reserved buffer */
/* The following ioctl takes a 'Sg_scsi_id *' object as its 3rd argument. */
-#define SG_GET_SCSI_ID 0x2276 /* Yields fd's bus,chan,dev,lun+type */
+#define SG_GET_SCSI_ID 0x2276 /* Yields fd's bus, chan, dev, lun + type */
/* SCSI id information can also be obtained from SCSI_IOCTL_GET_IDLUN */
-/* Override adapter setting and always DMA using low memory ( <16MB on i386).
- Default is 0 (off - use adapter setting) */
+/* Override host setting and always DMA using low memory ( <16MB on i386) */
#define SG_SET_FORCE_LOW_DMA 0x2279 /* 0-> use adapter setting, 1-> force */
#define SG_GET_LOW_DMA 0x227a /* 0-> use all ram for dma; 1-> low dma ram */
/* When SG_SET_FORCE_PACK_ID set to 1, pack_id is input to read() which
will attempt to read that pack_id or block (or return EAGAIN). If
pack_id is -1 then read oldest waiting. When ...FORCE_PACK_ID set to 0
- (default) then pack_id ignored by read() and oldest readable fetched. */
+ then pack_id ignored by read() and oldest readable fetched. */
#define SG_SET_FORCE_PACK_ID 0x227b
#define SG_GET_PACK_ID 0x227c /* Yields oldest readable pack_id (or -1) */
/* Yields max scatter gather tablesize allowed by current host adapter */
#define SG_GET_SG_TABLESIZE 0x227F /* 0 implies can't do scatter gather */
-/* Control whether sequencing per file descriptor (default) or per device */
-#define SG_GET_MERGE_FD 0x2274 /* 0-> per fd (default), 1-> per device */
+/* Control whether sequencing per file descriptor or per device */
+#define SG_GET_MERGE_FD 0x2274 /* 0-> per fd, 1-> per device */
#define SG_SET_MERGE_FD 0x2273 /* Attempt to change sequencing state,
- if more than 1 fd open on device, will fail with EBUSY */
+ if more than current fd open on device, will fail with EBUSY */
/* Get/set command queuing state per fd (default is SG_DEF_COMMAND_Q) */
#define SG_GET_COMMAND_Q 0x2270 /* Yields 0 (queuing off) or 1 (on) */
#define SG_SET_COMMAND_Q 0x2271 /* Change queuing state with 0 or 1 */
-/* Get/set whether DMA underrun will cause an error (DID_ERROR) [this only
- currently applies to the [much-used] aic7xxx driver) */
+/* Get/set whether DMA underrun will cause an error (DID_ERROR). This only
+ currently applies to the [much-used] aic7xxx driver. */
#define SG_GET_UNDERRUN_FLAG 0x2280 /* Yields 0 (don't flag) or 1 (flag) */
#define SG_SET_UNDERRUN_FLAG 0x2281 /* Change flag underrun state */
+#define SG_GET_VERSION_NUM 0x2282 /* Example: version 2.1.34 yields 20134 */
+#define SG_NEXT_CMD_LEN 0x2283 /* override SCSI command length with given
+ number on the next write() on this file descriptor */
+
+
+#define SG_SCATTER_SZ (8 * 4096) /* PAGE_SIZE not available to user */
+/* Largest size (in bytes) a single scatter-gather list element can have.
+ The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on
+ i386]. The minimum value is PAGE_SIZE. If scatter-gather not supported
+ by adapter then this value is the largest data block that can be
+ read/written by a single scsi command. The user can find the value of
+ PAGE_SIZE by calling getpagesize() defined in unistd.h . */
#define SG_DEFAULT_TIMEOUT (60*HZ) /* HZ == 'jiffies in 1 second' */
#define SG_DEFAULT_RETRIES 1
-/* Default modes, commented if they differ from original sg driver */
+/* Defaults, commented if they differ from original sg driver */
#define SG_DEF_COMMAND_Q 0
#define SG_DEF_MERGE_FD 0 /* was 1 -> per device sequencing */
#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
#define SG_DEF_FORCE_PACK_ID 0
#define SG_DEF_UNDERRUN_FLAG 0
+#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ
/* maximum outstanding requests, write() yields EDOM if exceeded */
#define SG_MAX_QUEUE 16
-#define SG_SCATTER_SZ (8 * 4096) /* PAGE_SIZE not available to user */
-/* Largest size (in bytes) a single scatter-gather list element can have.
- The value must be a power of 2 and <= (PAGE_SIZE * 32) [131072 bytes on
- i386]. The minimum value is PAGE_SIZE. If scatter-gather not supported
- by adapter then this value is the largest data block that can be
- read/written by a single scsi command. Max number of scatter-gather
- list elements seems to be limited to 255. */
-
-#define SG_BIG_BUFF SG_SCATTER_SZ /* for backward compatibility */
-/* #define SG_BIG_BUFF (SG_SCATTER_SZ * 8) */ /* =256KB, if you want */
+#define SG_BIG_BUFF SG_DEF_RESERVED_SIZE /* for backward compatibility */
#endif
/* Program loader interfaces */
EXPORT_SYMBOL(setup_arg_pages);
-EXPORT_SYMBOL(copy_strings);
+EXPORT_SYMBOL(copy_strings_kernel);
EXPORT_SYMBOL(do_execve);
EXPORT_SYMBOL(flush_old_exec);
EXPORT_SYMBOL(open_dentry);
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
if (nr_queued_signals < max_queued_signals) {
q = (struct signal_queue *)
- kmem_cache_alloc(signal_queue_cachep, GFP_KERNEL);
+ kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
}
if (q) {
if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
sigdelset(&t->blocked, sig);
+ recalc_sigpending(t);
spin_unlock_irqrestore(&t->sigmask_lock, flags);
return send_sig_info(sig, info, t);
return -EINVAL;
if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
+ if (new_rlim.rlim_cur < 0 || new_rlim.rlim_max < 0)
+ return -EINVAL;
old_rlim = current->rlim + resource;
if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
(new_rlim.rlim_max > old_rlim->rlim_max)) &&
return error;
}
-/*
- * sanity-check function..
- */
-static void put_page(pte_t * page_table, pte_t pte)
-{
- if (!pte_none(*page_table)) {
- free_page_and_swap_cache(pte_page(pte));
- return;
- }
-/* no need for flush_tlb */
- set_pte(page_table, pte);
-}
-
/*
* This routine is used to map in a page into an address space: needed by
* execve() for the initial stack and environment pages.
* and potentially makes it more efficient.
*/
static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
- unsigned long address, pte_t *page_table)
+ unsigned long address, pte_t *page_table, pte_t pte)
{
- pte_t pte;
unsigned long old_page, new_page;
struct page * page_map;
- pte = *page_table;
new_page = __get_free_page(GFP_USER);
- /* Did someone else copy this page for us while we slept? */
+ /* Did swap_out() unmap the protected page while we slept? */
if (pte_val(*page_table) != pte_val(pte))
goto end_wp_page;
- if (!pte_present(pte))
- goto end_wp_page;
- if (pte_write(pte))
- goto end_wp_page;
old_page = pte_page(pte);
if (MAP_NR(old_page) >= max_mapnr)
goto bad_wp_page;
delete_from_swap_cache(page_map);
/* FallThrough */
case 1:
- /* We can release the kernel lock now.. */
- unlock_kernel();
-
flush_cache_page(vma, address);
set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
flush_tlb_page(vma, address);
end_wp_page:
+ /*
+ * We can release the kernel lock now.. Now swap_out will see
+ * a dirty page and so won't get confused and flush_tlb_page
+ * won't SMP race. -Andrea
+ */
+ unlock_kernel();
+
if (new_page)
free_page(new_page);
return 1;
}
- unlock_kernel();
if (!new_page)
- return 0;
+ goto no_new_page;
- if (PageReserved(mem_map + MAP_NR(old_page)))
+ if (PageReserved(page_map))
++vma->vm_mm->rss;
copy_cow_page(old_page,new_page);
flush_page_to_ram(old_page);
flush_page_to_ram(new_page);
flush_cache_page(vma, address);
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
- free_page(old_page);
flush_tlb_page(vma, address);
+ unlock_kernel();
+ __free_page(page_map);
return 1;
bad_wp_page:
printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
send_sig(SIGKILL, tsk, 1);
+no_new_page:
+ unlock_kernel();
if (new_page)
free_page(new_page);
return 0;
tsk->min_flt++;
flush_page_to_ram(page);
}
- put_page(page_table, entry);
+ set_pte(page_table, entry);
return 1;
}
} else if (atomic_read(&mem_map[MAP_NR(page)].count) > 1 &&
!(vma->vm_flags & VM_SHARED))
entry = pte_wrprotect(entry);
- put_page(page_table, entry);
+ set_pte(page_table, entry);
/* no need to invalidate: a not-present page shouldn't be cached */
return 1;
}
flush_tlb_page(vma, address);
if (write_access) {
if (!pte_write(entry))
- return do_wp_page(tsk, vma, address, pte);
+ return do_wp_page(tsk, vma, address, pte, entry);
entry = pte_mkdirty(entry);
set_pte(pte, entry);
* Swap reorganised 29.12.95, Stephen Tweedie
*/
+#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
} else if (S_ISREG(swap_dentry->d_inode->i_mode)) {
error = -EBUSY;
for (i = 0 ; i < nr_swapfiles ; i++) {
- if (i == type)
+ if (i == type || !swap_info[i].swap_file)
continue;
if (swap_dentry->d_inode == swap_info[i].swap_file->d_inode)
goto bad_swap;
if (protocol && protocol != IPPROTO_UDP)
goto free_and_noproto;
protocol = IPPROTO_UDP;
- sk->no_check = UDP_NO_CHECK;
+ sk->no_check = UDP_CSUM_DEFAULT;
sk->ip_pmtudisc = IP_PMTUDISC_DONT;
prot=&udp_prot;
sock->ops = &inet_dgram_ops;
/* 4.1.3.4. It's configurable by the application via setsockopt() */
/* (MAY) and it defaults to on (MUST). */
- err = ip_build_xmit(sk,sk->no_check ? udp_getfrag_nosum : udp_getfrag,
+ err = ip_build_xmit(sk,
+ (sk->no_check == UDP_CSUM_NOXMIT ?
+ udp_getfrag_nosum :
+ udp_getfrag),
&ufh, ulen, &ipc, rt, msg->msg_flags);
out:
}
#endif
+/*
+ * Verify (or prepare deferred verification of) the UDP checksum of 'skb'.
+ *
+ * skb  - received buffer; skb->ip_summed says how much the hardware did
+ * uh   - UDP header;  ulen - UDP length (header + data)
+ * saddr/daddr - IP addresses for the pseudo-header
+ * full_csum_deferred - zero: verify the full checksum right here (HW csum
+ *        or software csum_partial over the datagram); non-zero: verify
+ *        only what is cheap now and leave skb->ip_summed/skb->csum set up
+ *        so the copy-to-user path can finish verification later (this is
+ *        the CONFIG_UDP_DELAY_CSUM path at the call sites).
+ *
+ * Returns 0 if the packet is acceptable so far, -1 on checksum failure.
+ */
+static int udp_checksum_verify(struct sk_buff *skb, struct udphdr *uh,
+ unsigned short ulen, u32 saddr, u32 daddr,
+ int full_csum_deferred)
+{
+ if (!full_csum_deferred) {
+ if (uh->check) {
+ if (skb->ip_summed == CHECKSUM_HW &&
+ udp_check(uh, ulen, saddr, daddr, skb->csum))
+ return -1;
+ if (skb->ip_summed == CHECKSUM_NONE &&
+ udp_check(uh, ulen, saddr, daddr,
+ csum_partial((char *)uh, ulen, 0)))
+ return -1;
+ }
+ } else {
+ /* A zero checksum field means the sender computed none (legal
+ * for UDP), so nothing needs checking later. */
+ if (uh->check == 0)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else if (skb->ip_summed == CHECKSUM_HW) {
+ if (udp_check(uh, ulen, saddr, daddr, skb->csum))
+ return -1;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ /* Seed skb->csum with the pseudo-header sum so the
+ * deferred copy-and-checksum can fold the rest in. */
+ skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+ }
+ return 0;
+}
+
/*
* All we need to do is get the socket, and then do a checksum.
*/
}
skb_trim(skb, ulen);
-#ifndef CONFIG_UDP_DELAY_CSUM
- if (uh->check &&
- (((skb->ip_summed==CHECKSUM_HW)&&udp_check(uh,ulen,saddr,daddr,skb->csum)) ||
- ((skb->ip_summed==CHECKSUM_NONE) &&
- (udp_check(uh,ulen,saddr,daddr, csum_partial((char*)uh, ulen, 0))))))
- goto csum_error;
+ if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) {
+ int defer;
+
+#ifdef CONFIG_UDP_DELAY_CSUM
+ defer = 1;
#else
- if (uh->check==0)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else if (skb->ip_summed==CHECKSUM_HW) {
- if (udp_check(uh,ulen,saddr,daddr,skb->csum))
- goto csum_error;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+ defer = 0;
#endif
-
- if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+ if (udp_checksum_verify(skb, uh, ulen, saddr, daddr, defer))
+ goto csum_error;
return udp_v4_mcast_deliver(skb, uh, saddr, daddr);
+ }
#ifdef CONFIG_IP_TRANSPARENT_PROXY
if (IPCB(skb)->redirport)
kfree_skb(skb);
return(0);
}
+ if (udp_checksum_verify(skb, uh, ulen, saddr, daddr,
+#ifdef CONFIG_UDP_DELAY_CSUM
+ 1
+#else
+ (sk->no_check & UDP_CSUM_NORCV) != 0
+#endif
+ ))
+ goto csum_error;
+
udp_deliver(sk, skb);
return 0;
if (protocol && protocol != IPPROTO_UDP)
goto free_and_noproto;
protocol = IPPROTO_UDP;
- sk->no_check = UDP_NO_CHECK;
+ sk->no_check = UDP_CSUM_DEFAULT;
prot=&udpv6_prot;
sock->ops = &inet6_dgram_ops;
} else if(sock->type == SOCK_RAW) {
# IrDA protocol configuration
#
-if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- if [ "$CONFIG_NET" != "n" ] ; then
+if [ "$CONFIG_NET" != "n" ] ; then
- mainmenu_option next_comment
- comment 'IrDA subsystem support'
- dep_tristate 'IrDA subsystem support' CONFIG_IRDA $CONFIG_EXPERIMENTAL $CONFIG_NET
+ mainmenu_option next_comment
+ comment 'IrDA subsystem support'
+ dep_tristate 'IrDA subsystem support' CONFIG_IRDA $CONFIG_NET
- if [ "$CONFIG_IRDA" != "n" ] ; then
- comment 'IrDA protocols'
- source net/irda/irlan/Config.in
- source net/irda/ircomm/Config.in
- source net/irda/irlpt/Config.in
+ if [ "$CONFIG_IRDA" != "n" ] ; then
+ comment 'IrDA protocols'
+ source net/irda/irlan/Config.in
+ source net/irda/ircomm/Config.in
+ source net/irda/irlpt/Config.in
- bool 'IrDA protocol options' CONFIG_IRDA_OPTIONS
- if [ "$CONFIG_IRDA_OPTIONS" != "n" ] ; then
- comment ' IrDA options'
- bool ' Cache last LSAP' CONFIG_IRDA_CACHE_LAST_LSAP
- bool ' Fast RRs' CONFIG_IRDA_FAST_RR
- bool ' Debug information' CONFIG_IRDA_DEBUG
- fi
+ bool 'IrDA protocol options' CONFIG_IRDA_OPTIONS
+ if [ "$CONFIG_IRDA_OPTIONS" != "n" ] ; then
+ comment ' IrDA options'
+ bool ' Cache last LSAP' CONFIG_IRDA_CACHE_LAST_LSAP
+ bool ' Fast RRs' CONFIG_IRDA_FAST_RR
+ bool ' Debug information' CONFIG_IRDA_DEBUG
fi
+ fi
- if [ "$CONFIG_IRDA" != "n" ] ; then
- source net/irda/compressors/Config.in
- source drivers/net/irda/Config.in
- fi
- endmenu
-
+ if [ "$CONFIG_IRDA" != "n" ] ; then
+ source net/irda/compressors/Config.in
+ source drivers/net/irda/Config.in
fi
+ endmenu
fi
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 31 10:12:43 1998
- * Modified at: Tue May 11 12:42:26 1999
+ * Modified at: Wed May 19 16:12:06 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc.
*
else
self->max_data_size = max_sdu_size;
- DEBUG(0, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+ DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
else
self->max_data_size = max_sdu_size;
- DEBUG(0, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
+ DEBUG(1, __FUNCTION__ "(), max_data_size=%d\n", self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
switch (flow) {
case FLOW_STOP:
- DEBUG( 0, __FUNCTION__ "(), IrTTP wants us to slow down\n");
+ DEBUG(1, __FUNCTION__ "(), IrTTP wants us to slow down\n");
self->tx_flow = flow;
break;
case FLOW_START:
self->tx_flow = flow;
- DEBUG(0, __FUNCTION__ "(), IrTTP wants us to start again\n");
+ DEBUG(1, __FUNCTION__ "(), IrTTP wants us to start again\n");
wake_up_interruptible(sk->sleep);
break;
default:
sock_init_data(sock, sk);
- sock->ops = &irda_stream_ops;
+ if (sock->type == SOCK_STREAM)
+ sock->ops = &irda_stream_ops;
+ else
+ sock->ops = &irda_dgram_ops;
+
sk->protocol = protocol;
/* Register as a client with IrLMP */
{
struct sock *sk = sock->sk;
- DEBUG(0, __FUNCTION__ "(), cmd=%#x\n", cmd);
+ DEBUG(4, __FUNCTION__ "(), cmd=%#x\n", cmd);
switch (cmd) {
case TIOCOUTQ: {
return -EINVAL;
default:
- DEBUG(0, __FUNCTION__ "(), doing device ioctl!\n");
+ DEBUG(1, __FUNCTION__ "(), doing device ioctl!\n");
return dev_ioctl(cmd, (void *) arg);
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Apr 6 15:33:50 1999
- * Modified at: Sun May 9 22:40:43 1999
+ * Modified at: Fri May 28 20:46:38 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
discovery = (discovery_t *) hashbin_get_first(cachelog);
while ( discovery != NULL) {
- len += sprintf( buf+len, " name: %s,",
- discovery->info);
+ len += sprintf(buf+len, "name: %s,", discovery->info);
- len += sprintf( buf+len, " hint: ");
+ len += sprintf(buf+len, " hint: 0x%02x%02x",
+ discovery->hints.byte[0],
+ discovery->hints.byte[1]);
+#if 0
if ( discovery->hints.byte[0] & HINT_PNP)
len += sprintf( buf+len, "PnP Compatible ");
if ( discovery->hints.byte[0] & HINT_PDA)
len += sprintf( buf+len, "IrCOMM ");
if ( discovery->hints.byte[1] & HINT_OBEX)
len += sprintf( buf+len, "IrOBEX ");
-
+#endif
len += sprintf(buf+len, ", saddr: 0x%08x",
discovery->saddr);
len += sprintf(buf+len, ", daddr: 0x%08x\n",
discovery->daddr);
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
discovery = (discovery_t *) hashbin_get_next(cachelog);
}
#include <net/irda/ircomm_common.h>
-static char *revision_date = "Sun Apr 18 00:40:19 1999";
+static char *revision_date = "Tue May 18 03:11:39 1999";
static void ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event,
ircomm[i]->enq_char = 0x05;
ircomm[i]->ack_char = 0x06;
- ircomm[i]->max_txbuff_size = COMM_DEFAULT_DATA_SIZE; /* 64 */
- ircomm[i]->max_sdu_size = SAR_DISABLE;
- ircomm[i]->ctrl_skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ ircomm[i]->max_header_size = COMM_MAX_HEADER_SIZE;
+ ircomm[i]->tx_max_sdu_size = COMM_DEFAULT_SDU_SIZE;
+ ircomm[i]->rx_max_sdu_size = SAR_DISABLE;
+ ircomm[i]->ctrl_skb = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE
+ + COMM_MAX_HEADER_SIZE);
if (ircomm[i]->ctrl_skb == NULL){
DEBUG(0,"ircomm:init_module:alloc_skb failed!\n");
return -ENOMEM;
}
- skb_reserve(ircomm[i]->ctrl_skb,COMM_HEADER_SIZE);
+ skb_reserve(ircomm[i]->ctrl_skb,COMM_MAX_HEADER_SIZE);
}
DEBUG(0,__FUNCTION__"(): got connected!\n");
if (max_sdu_size == SAR_DISABLE)
- self->max_txbuff_size = qos->data_size.value - max_header_size;
+ self->tx_max_sdu_size =(qos->data_size.value - max_header_size
+ - COMM_HEADER_SIZE);
else {
- ASSERT(max_sdu_size >= COMM_DEFAULT_DATA_SIZE, return;);
- self->max_txbuff_size = max_sdu_size; /* use fragmentation */
+ ASSERT(max_sdu_size >= COMM_DEFAULT_SDU_SIZE, return;);
+ /* use fragmentation */
+ self->tx_max_sdu_size = max_sdu_size - COMM_HEADER_SIZE;
}
self->qos = qos;
- self->max_header_size = max_header_size;
+ self->max_header_size = max_header_size + COMM_HEADER_SIZE;
self->null_modem_mode = 0; /* disable null modem emulation */
ircomm_do_event(self, TTP_CONNECT_CONFIRM, skb);
DEBUG(0,__FUNCTION__"()\n");
if (max_sdu_size == SAR_DISABLE)
- self->max_txbuff_size = qos->data_size.value - max_header_size;
+ self->tx_max_sdu_size =(qos->data_size.value - max_header_size
+ - COMM_HEADER_SIZE);
else
- self->max_txbuff_size = max_sdu_size;
+ self->tx_max_sdu_size = max_sdu_size - COMM_HEADER_SIZE;
self->qos = qos;
- self->max_header_size = max_header_size;
+ self->max_header_size = max_header_size + COMM_HEADER_SIZE;
ircomm_do_event( self, TTP_CONNECT_INDICATION, skb);
irttp_connect_request(self->tsap, self->dlsap,
self->saddr, self->daddr,
- NULL, self->max_sdu_size, userdata);
+ NULL, self->rx_max_sdu_size, userdata);
break;
default:
if (self->notify.connect_indication)
self->notify.connect_indication(self->notify.instance, self,
- qos, 0, 0, skb);
+ qos, self->tx_max_sdu_size,
+ self->max_header_size, skb);
}
#if 0
/* give a connect_confirm to the client */
if( self->notify.connect_confirm )
self->notify.connect_confirm(self->notify.instance,
- self, NULL, SAR_DISABLE, 0, skb);
+ self, NULL, self->tx_max_sdu_size,
+ self->max_header_size, skb);
}
static void issue_connect_response(struct ircomm_cb *self,
DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW is not implemented yet\n");
/* irlmp_connect_rsp(); */
} else
- irttp_connect_response(self->tsap, self->max_sdu_size, skb);
+ irttp_connect_response(self->tsap, self->rx_max_sdu_size, skb);
}
static void issue_disconnect_request(struct ircomm_cb *self,
hints = irlmp_service_to_hint(S_COMM);
- DEBUG(0,__FUNCTION__"():start discovering..\n");
+ DEBUG(1,__FUNCTION__"():start discovering..\n");
switch (ircomm_cs) {
case 0:
MOD_INC_USE_COUNT;
ASSERT( self->magic == IRCOMM_MAGIC, return;);
- DEBUG(0, __FUNCTION__"():sending connect_request...\n");
+ DEBUG(1, __FUNCTION__"():sending connect_request...\n");
self->servicetype= servicetype;
/* ircomm_control_request(self, SERVICETYPE); */ /*servictype*/
- self->max_sdu_size = SAR_DISABLE;
+ self->rx_max_sdu_size = SAR_DISABLE;
ircomm_do_event(self, IRCOMM_CONNECT_REQUEST, NULL);
}
if (!userdata){
/* FIXME: check for errors and initialize? DB */
- userdata = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ userdata = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
if (userdata == NULL)
return;
- skb_reserve(userdata,COMM_HEADER_SIZE);
+ skb_reserve(userdata,COMM_MAX_HEADER_SIZE);
}
/* enable null-modem emulation (i.e. server mode )*/
self->null_modem_mode = 1;
- self->max_sdu_size = max_sdu_size;
- if (max_sdu_size != SAR_DISABLE)
- self->max_txbuff_size = max_sdu_size;
-
+ self->rx_max_sdu_size = max_sdu_size;
+
ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata);
}
ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb);
self->control_ch_pending = 0;
- skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ skb = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
ASSERT(skb != NULL, return ;);
- skb_reserve(skb,COMM_HEADER_SIZE);
+ skb_reserve(skb,COMM_MAX_HEADER_SIZE);
self->ctrl_skb = skb;
}
static int irvtd_refcount;
struct irvtd_cb **irvtd = NULL;
-static char *revision_date = "Sun Apr 18 17:31:53 1999";
+static char *revision_date = "Wed May 26 00:49:11 1999";
/*
static void irvtd_send_xchar(struct tty_struct *tty, char ch);
static void irvtd_wait_until_sent(struct tty_struct *tty, int timeout);
-static void irvtd_start_timer( struct irvtd_cb *driver);
-static void irvtd_timer_expired(unsigned long data);
+static void irvtd_start_tx_timer( struct irvtd_cb *driver, int timeout);
+static void irvtd_tx_timer_expired(unsigned long data);
+static void irvtd_start_rx_timer( struct irvtd_cb *driver, int timeout);
+static void irvtd_rx_timer_expired(unsigned long data);
static int line_info(char *buf, struct irvtd_cb *driver);
static int irvtd_read_proc(char *buf, char **start, off_t offset, int len,
if(driver->rx_disable)
return;
- skb = skb_dequeue(&driver->rxbuff);
+ skb = skb_dequeue(&driver->rxbuff);
if(skb == NULL)
return; /* there's nothing */
if(skb_queue_len(&driver->rxbuff)< IRVTD_RX_QUEUE_LOW &&
driver->ttp_stoprx){
- irttp_flow_request(driver->comm->tsap, FLOW_START);
+ DEBUG(1, __FUNCTION__"():FLOW_START\n");
+ /*
+ * next 2 lines must follow this order since irttp_flow_request()
+ * will run its rx queue
+ */
driver->ttp_stoprx = 0;
+ irttp_flow_request(driver->comm->tsap, FLOW_START);
}
if(skb_queue_empty(&driver->rxbuff) && driver->disconnect_pend){
skb_queue_tail( &driver->rxbuff, skb );
if(skb_queue_len(&driver->rxbuff) > IRVTD_RX_QUEUE_HIGH){
+ DEBUG(1, __FUNCTION__"():FLOW_STOP\n");
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
driver->ttp_stoprx = 1;
}
irvtd_write_to_tty(driver);
+
+ if(!skb_queue_empty(&driver->rxbuff))
+ irvtd_start_rx_timer(driver,0);
return 0;
}
*/
-static void irvtd_start_timer( struct irvtd_cb *driver)
+/*
+ * (Re)arm the transmit timer: any pending tx_timer is cancelled first,
+ * then irvtd_tx_timer_expired() is scheduled to run 'timeout' jiffies
+ * from now with this driver instance as its argument.
+ */
+static void irvtd_start_tx_timer( struct irvtd_cb *driver, int timeout)
+{
+ ASSERT( driver != NULL, return;);
+ ASSERT( driver->magic == IRVTD_MAGIC, return;);
+
+ del_timer( &driver->tx_timer);
+
+ driver->tx_timer.data = (unsigned long) driver;
+ driver->tx_timer.function = &irvtd_tx_timer_expired;
+ driver->tx_timer.expires = jiffies + timeout;
+
+ add_timer( &driver->tx_timer);
+}
+
+/*
+ * (Re)arm the receive timer: cancel any pending rx_timer and schedule
+ * irvtd_rx_timer_expired() to run 'timeout' jiffies from now.
+ */
+static void irvtd_start_rx_timer( struct irvtd_cb *driver, int timeout)
{
ASSERT( driver != NULL, return;);
ASSERT( driver->magic == IRVTD_MAGIC, return;);
- del_timer( &driver->timer);
+ del_timer( &driver->rx_timer);
- driver->timer.data = (unsigned long) driver;
- driver->timer.function = &irvtd_timer_expired;
- driver->timer.expires = jiffies + (HZ / 5); /* 200msec */
+ driver->rx_timer.data = (unsigned long) driver;
+ driver->rx_timer.function = &irvtd_rx_timer_expired;
+ driver->rx_timer.expires = jiffies + timeout;
- add_timer( &driver->timer);
+ add_timer( &driver->rx_timer);
}
-static void irvtd_timer_expired(unsigned long data)
+/*
+ * tx_timer callback: push whatever is queued in the driver's tx buffer
+ * out via irvtd_send_data_request().
+ */
+static void irvtd_tx_timer_expired(unsigned long data)
{
struct irvtd_cb *driver = (struct irvtd_cb *)data;
DEBUG(4, __FUNCTION__"()\n");
irvtd_send_data_request(driver);
+}
- irvtd_write_to_tty(driver);
+/*
+ * rx_timer callback: drain queued receive skbs into the tty flip buffer
+ * while there is room; if data is still queued when the flip buffer
+ * fills up, re-arm the timer for one jiffy and retry later.
+ */
+static void irvtd_rx_timer_expired(unsigned long data)
+{
+ struct irvtd_cb *driver = (struct irvtd_cb *)data;
- /* start our timer again and again */
- irvtd_start_timer(driver);
+ ASSERT(driver != NULL,return;);
+ ASSERT(driver->magic == IRVTD_MAGIC,return;);
+ DEBUG(4, __FUNCTION__"()\n");
+
+ while(TTY_FLIPBUF_SIZE - driver->tty->flip.count
+ && !skb_queue_empty(&driver->rxbuff))
+ irvtd_write_to_tty(driver);
+
+ DEBUG(1, __FUNCTION__"(): room in flip_buffer = %d\n",
+ TTY_FLIPBUF_SIZE - driver->tty->flip.count);
+
+ if(!skb_queue_empty(&driver->rxbuff))
+ /* handle it later */
+ irvtd_start_rx_timer(driver, 1);
}
}
#endif
- DEBUG(1, __FUNCTION__"():sending %d octets\n",(int)skb->len );
+ DEBUG(1, __FUNCTION__"():len = %d, room = %d\n",(int)skb->len,
+ skb_tailroom(skb));
driver->icount.tx += skb->len;
err = ircomm_data_request(driver->comm, driver->txbuff);
if (err){
ASSERT(err == 0,;);
- DEBUG(0,"%d chars are lost\n",(int)skb->len);
+ DEBUG(1,"%d chars are lost\n",(int)skb->len);
skb_trim(skb, 0);
}
/* allocate a new frame */
- skb = driver->txbuff = dev_alloc_skb(driver->comm->max_txbuff_size);
+ skb = driver->txbuff
+ = dev_alloc_skb(driver->tx_max_sdu_size + driver->max_header_size);
if (skb == NULL){
printk(__FUNCTION__"():alloc_skb failed!\n");
} else {
- skb_reserve(skb, COMM_HEADER_SIZE);
+ skb_reserve(skb, driver->max_header_size);
}
wake_up_interruptible(&driver->tty->write_wait);
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
+
+ driver->tx_max_sdu_size = max_sdu_size;
+ driver->max_header_size = max_header_size;
/*
* set default value
*/
ASSERT(comm != NULL, return;);
ASSERT(comm->magic == IRCOMM_MAGIC, return;);
+ driver->tx_max_sdu_size = max_sdu_size;
+ driver->max_header_size = max_header_size;
DEBUG(4, __FUNCTION__ "():sending connect_response...\n");
ircomm_connect_response(comm, NULL, SAR_DISABLE );
if(cmd == TX_READY){
driver->ttp_stoptx = 0;
driver->tty->hw_stopped = driver->cts_stoptx;
- irvtd_start_timer( driver);
if(driver->cts_stoptx)
return;
+ /* push tx queue so that client can send at least 1 octet */
+ irvtd_send_data_request(driver);
/*
* driver->tty->write_wait will keep asleep if
* our txbuff is full.
if(cmd == TX_BUSY){
driver->ttp_stoptx = driver->tty->hw_stopped = 1;
- del_timer( &driver->timer);
+ del_timer( &driver->tx_timer);
return;
}
driver->blocked_open--;
- DEBUG(0, __FUNCTION__"():after blocking\n");
+ DEBUG(1, __FUNCTION__"():after blocking\n");
if (retval)
return retval;
struct notify_t irvtd_notify;
/* FIXME: it should not be hard coded */
- __u8 oct_seq[6] = { 0,1,4,1,1,1 };
+ __u8 oct_seq[6] = { 0,1,6,1,1,1 };
DEBUG(4,__FUNCTION__"()\n" );
if(driver->flags & ASYNC_INITIALIZED)
*/
skb_queue_head_init(&driver->rxbuff);
- driver->txbuff = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ driver->txbuff = dev_alloc_skb(COMM_DEFAULT_SDU_SIZE + COMM_MAX_HEADER_SIZE);
if (!driver->txbuff){
DEBUG(0,__FUNCTION__"(), alloc_skb failed!\n");
return -ENOMEM;
}
- skb_reserve(driver->txbuff, COMM_HEADER_SIZE);
+ skb_reserve(driver->txbuff, COMM_MAX_HEADER_SIZE);
irda_notify_init(&irvtd_notify);
irvtd_notify.data_indication = irvtd_receive_data;
driver->flags |= ASYNC_INITIALIZED;
- /*
- * discover a peer device
- * TODO: other servicetype(i.e. 3wire,3wireraw) support
- */
- ircomm_connect_request(driver->comm, NINE_WIRE);
-
- /*
- * TODO:we have to initialize control-channel here!
- * i.e.set something into RTS,CTS and so on....
- */
-
if (driver->tty)
clear_bit(TTY_IO_ERROR, &driver->tty->flags);
change_speed(driver);
- irvtd_start_timer( driver);
+
+ /*
+ * discover a peer device
+ */
+ if(driver->tty->termios->c_cflag & CRTSCTS)
+ ircomm_connect_request(driver->comm, NINE_WIRE);
+ else
+ ircomm_connect_request(driver->comm, THREE_WIRE);
+
+ /* irvtd_start_timer( driver); */
driver->rx_disable = 0;
driver->tx_disable = 1;
if (driver->tty)
set_bit(TTY_IO_ERROR, &driver->tty->flags);
- del_timer( &driver->timer);
+ del_timer( &driver->tx_timer);
+ del_timer( &driver->rx_timer);
irias_delete_object("IrDA:IrCOMM");
DEBUG(4, __FUNCTION__"()\n");
save_flags(flags);
- while(1){
+ while(count > 0){
cli();
skb = driver->txbuff;
ASSERT(skb != NULL, break;);
c = MIN(count, (skb_tailroom(skb)));
if (c <= 0)
- break;
+ {
+ if(!driver->ttp_stoptx)
+ {
+ irvtd_send_data_request(driver);
+ continue;
+ }
+ else
+ break;
+ }
/* write to the frame */
wrote += c;
count -= c;
buf += c;
- irvtd_send_data_request(driver);
}
restore_flags(flags);
+ irvtd_send_data_request(driver);
return (wrote);
}
DEBUG(4, __FUNCTION__"()\n");
+ again:
save_flags(flags);cli();
skb = driver->txbuff;
ASSERT(skb != NULL,return;);
+ if(!skb_tailroom(skb))
+ {
+ restore_flags(flags);
+ irvtd_send_data_request(driver);
+ goto again;
+ }
ASSERT(skb_tailroom(skb) > 0, return;);
- DEBUG(4, "irvtd_put_char(0x%02x) skb_len(%d) MAX(%d):\n",
+ DEBUG(4, "irvtd_put_char(0x%02x) skb_len(%d) room(%d):\n",
(int)ch ,(int)skb->len,
- driver->comm->max_txbuff_size - COMM_HEADER_SIZE);
+ skb_tailroom(skb));
/* append a character */
frame = skb_put(skb,1);
frame[0] = ch;
restore_flags(flags);
+ irvtd_start_tx_timer(driver,20);
return;
}
driver->comm->dte = driver->mcr;
ircomm_control_request(driver->comm, DTELINE_STATE );
+ DEBUG(1, __FUNCTION__"():FLOW_STOP\n");
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
}
driver->comm->dte = driver->mcr;
ircomm_control_request(driver->comm, DTELINE_STATE );
+ DEBUG(1, __FUNCTION__"():FLOW_START\n");
irttp_flow_request(driver->comm->tsap, FLOW_START);
}
if (driver->msr & MSR_RI)
ret += sprintf(buf+ret, "|RI");
+ ret += sprintf(buf+ret, "\n");
+ ret += sprintf(buf+ret, "rx queue:%d",
+ skb_queue_len( &driver->rxbuff));
+ ret += sprintf(buf+ret, "ttp_stoprx:%s",
+ driver->ttp_stoprx?"TRUE":"FALSE");
+
exit:
ret += sprintf(buf+ret, "\n");
return ret;
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Sep 2 20:22:08 1998
- * Modified at: Mon May 10 23:02:47 1999
+ * Modified at: Tue Jun 1 09:05:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
#ifdef CONFIG_NSC_FIR
pc87108_init();
#endif
+#ifdef CONFIG_TOSHIBA_FIR
+ toshoboe_init();
+#endif
+#ifdef CONFIG_SMC_IRCC_FIR
+ ircc_init();
+#endif
#ifdef CONFIG_ESI_DONGLE
esi_init();
#endif
#ifdef CONFIG_GIRBIL_DONGLE
girbil_init();
#endif
+/*
+ * litelink_init() must be guarded by the Litelink dongle's own config
+ * symbol. The original hunk reused CONFIG_GIRBIL_DONGLE (a copy-paste of
+ * the girbil block just above), which would initialize the Litelink
+ * driver only when the *Girbil* option is enabled and never otherwise.
+ */
+#ifdef CONFIG_LITELINK_DONGLE
+ litelink_init();
+#endif
+
return 0;
}
/* Initialize timers */
init_timer(&self->media_busy_timer);
+ self->lock = SPIN_LOCK_UNLOCKED;
+
/* A pointer to the low level implementation */
self->priv = priv;
/* Open network device */
dev_open(&self->netdev);
- MESSAGE("IrDA: Registred device %s\n", self->name);
+ MESSAGE("IrDA: Registered device %s\n", self->name);
irda_device_set_media_busy(self, FALSE);
*/
static void __irda_device_change_speed(struct irda_device *self, int speed)
{
+ int n = 0;
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
* Is is possible to change speed yet? Wait until the last byte
* has been transmitted.
*/
- if (self->wait_until_sent) {
- self->wait_until_sent(self);
-
- if (self->dongle)
- self->dongle->change_speed(self, speed);
-
- if (self->change_speed) {
- self->change_speed(self, speed);
-
- /* Update the QoS value only */
- self->qos.baud_rate.value = speed;
+ if (!self->wait_until_sent) {
+ ERROR("IrDA: wait_until_sent() "
+ "has not implemented by the IrDA device driver!\n");
+ return;
+ }
+
+ /* Make sure all transmitted data has actually been sent */
+ self->wait_until_sent(self);
+
+ /* Make sure nobody tries to transmit during the speed change */
+ while (irda_lock((void *) &self->netdev.tbusy) == FALSE) {
+ WARNING(__FUNCTION__ "(), device locked!\n");
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(MSECS_TO_JIFFIES(10));
+
+ if (n++ > 10) {
+ WARNING(__FUNCTION__ "(), breaking loop!\n");
+ break;
}
- } else {
- WARNING("IrDA: wait_until_sent() "
- "has not implemented by the IrDA device driver!\n");
}
+
+ if (self->dongle)
+ self->dongle->change_speed(self, speed);
+
+ if (self->change_speed) {
+ self->change_speed(self, speed);
+
+ /* Update the QoS value only */
+ self->qos.baud_rate.value = speed;
+ }
+ self->netdev.tbusy = FALSE;
}
/*
*/
inline void irda_device_change_speed(struct irda_device *self, int speed)
{
- DEBUG(4, __FUNCTION__ "()\n");
-
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
inline int irda_device_is_media_busy( struct irda_device *self)
{
- ASSERT( self != NULL, return FALSE;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
+ ASSERT(self != NULL, return FALSE;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
return self->media_busy;
}
inline int irda_device_is_receiving( struct irda_device *self)
{
- ASSERT( self != NULL, return FALSE;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
+ ASSERT(self != NULL, return FALSE;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return FALSE;);
- if ( self->is_receiving)
- return self->is_receiving( self);
+ if (self->is_receiving)
+ return self->is_receiving(self);
else
return FALSE;
}
-inline struct qos_info *irda_device_get_qos( struct irda_device *self)
+inline struct qos_info *irda_device_get_qos(struct irda_device *self)
{
- ASSERT( self != NULL, return NULL;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return NULL;);
+ ASSERT(self != NULL, return NULL;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return NULL;);
return &self->qos;
}
{
struct irda_device *self;
- DEBUG(4, __FUNCTION__ "()\n");
-
ASSERT(dev != NULL, return -1;);
self = (struct irda_device *) dev->priv;
return 0;
}
+
+#define SIOCSDONGLE SIOCDEVPRIVATE
static int irda_device_net_ioctl(struct device *dev, /* ioctl device */
struct ifreq *rq, /* Data passed */
int cmd) /* Ioctl number */
#endif
break;
#endif
+ case SIOCSDONGLE: /* Set dongle */
+ /* Initialize dongle */
+ irda_device_init_dongle(self, (int) rq->ifr_data);
+ break;
default:
ret = -EOPNOTSUPP;
}
ERROR("IrDA: Unable to find requested dongle\n");
return;
}
+
+ /* Check if we're already using a dongle */
+ if (self->dongle) {
+ self->dongle->close(self);
+ }
/* Set the dongle to be used by this driver */
self->dongle = node->dongle;
node->dongle->qos_init(self, &self->qos);
/* Reset dongle */
- node->dongle->reset(self, 0);
+ node->dongle->reset(self);
/* Set to default baudrate */
irda_device_change_speed(self, 9600);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Tue May 11 00:22:39 1999
+ * Modified at: Mon May 31 14:19:34 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
{
struct irlan_cb *self;
- DEBUG(4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
self = (struct irlan_cb *) instance;
irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
+ /* Ready for a new command */
+ self->client.tx_busy = FALSE;
+
+ /* Check if we have some queued commands waiting to be sent */
+ irlan_run_ctrl_tx_queue(self);
+
return 0;
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu May 6 13:42:38 1999
+ * Modified at: Fri May 14 23:08:15 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun May 9 11:48:49 1999
+ * Modified at: Mon May 31 14:25:19 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
init_timer(&self->client.kick_timer);
hashbin_insert(irlan, (QUEUE *) self, daddr, NULL);
-
+
+ skb_queue_head_init(&self->client.txq);
+
irlan_next_client_state(self, IRLAN_IDLE);
irlan_next_provider_state(self, IRLAN_IDLE);
*/
static void __irlan_close(struct irlan_cb *self)
{
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
{
struct irlan_cb *self;
- DEBUG(2, __FUNCTION__ "()\n");
-
self = (struct irlan_cb *) instance;
ASSERT(self != NULL, return;);
*/
irlan_get_unicast_addr(self);
irlan_open_unicast_addr(self);
+
+ /* Open broadcast and multicast filter by default */
+ irlan_set_broadcast_filter(self, TRUE);
+ irlan_set_multicast_filter(self, TRUE);
/* Ready to transfer Ethernet frames */
self->dev.tbusy = 0;
+
+ irlan_eth_send_gratuitous_arp(&self->dev);
}
/*
break;
}
- /* Stop IP from transmitting more packets */
- /* irlan_client_flow_indication(handle, FLOW_STOP, priv); */
-
irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
}
struct notify_t notify;
struct tsap_cb *tsap;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
notify.udata_indication = irlan_eth_receive;
notify.connect_indication = irlan_connect_indication;
notify.connect_confirm = irlan_connect_confirm;
- notify.flow_indication = irlan_eth_flow_indication;
+ /*notify.flow_indication = irlan_eth_flow_indication;*/
notify.disconnect_indication = irlan_disconnect_indication;
notify.instance = self;
strncpy(notify.name, "IrLAN data", NOTIFY_MAX_NAME);
irttp_disconnect_request(self->tsap_data, NULL, P_NORMAL);
irttp_close_tsap(self->tsap_data);
self->tsap_data = NULL;
-
}
if (self->client.tsap_ctrl) {
irttp_disconnect_request(self->client.tsap_ctrl, NULL,
irias_add_string_attrib(obj, "Name", "Linux");
#endif
irias_add_string_attrib(obj, "DeviceID", "HWP19F0");
- irias_add_integer_attrib(obj, "CompCnt", 2);
- irias_add_string_attrib(obj, "Comp#01", "PNP8294");
- irias_add_string_attrib(obj, "Comp#02", "PNP8389");
+ irias_add_integer_attrib(obj, "CompCnt", 1);
+ if (self->provider.access_type == ACCESS_PEER)
+ irias_add_string_attrib(obj, "Comp#02", "PNP8389");
+ else
+ irias_add_string_attrib(obj, "Comp#01", "PNP8294");
+
irias_add_string_attrib(obj, "Manufacturer", "Linux-IrDA Project");
irias_insert_object(obj);
}
}
+/*
+ * Function irlan_run_ctrl_tx_queue (self)
+ *
+ * Try to send the next command in the control transmit queue
+ *
+ */
+int irlan_run_ctrl_tx_queue(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+
+ if (irda_lock(&self->client.tx_busy) == FALSE)
+ return -EBUSY;
+
+ skb = skb_dequeue(&self->client.txq);
+ if (!skb) {
+ self->client.tx_busy = FALSE;
+ return 0;
+ }
+ if (self->client.tsap_ctrl == NULL) {
+ self->client.tx_busy = FALSE;
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ return irttp_data_request(self->client.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_ctrl_data_request (self, skb)
+ *
+ * This function makes sure that commands on the control channel are
+ * sent in a command/response fashion
+ */
+void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb)
+{
+ /* Queue command */
+ skb_queue_tail(&self->client.txq, skb);
+
+ /* Try to send command */
+ irlan_run_ctrl_tx_queue(self);
+}
+
/*
* Function irlan_get_provider_info (self)
*
frame[0] = CMD_GET_PROVIDER_INFO;
frame[1] = 0x00; /* Zero parameters */
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
/* self->use_udata = TRUE; */
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
void irlan_close_data_channel(struct irlan_cb *self)
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
+ /* Check if the TSAP is still there */
+ if (self->client.tsap_ctrl == NULL)
+ return;
+
skb = dev_alloc_skb(64);
if (!skb)
return;
irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
else
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
-
- irttp_data_request(self->client.tsap_ctrl, skb);
+
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
else
irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
-
- irttp_data_request(self->client.tsap_ctrl, skb);
+
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
irlan_insert_string_param(skb, "MEDIA", "802.3");
- irttp_data_request(self->client.tsap_ctrl, skb);
+ /* irttp_data_request(self->client.tsap_ctrl, skb); */
+ irlan_ctrl_data_request(self, skb);
}
/*
printk(KERN_INFO "Success\n");
break;
case 1:
- printk(KERN_WARNING "Insufficient resources\n");
+ WARNING("IrLAN: Insufficient resources\n");
break;
case 2:
- printk(KERN_WARNING "Invalid command format\n");
+ WARNING("IrLAN: Invalid command format\n");
break;
case 3:
- printk(KERN_WARNING "Command not supported\n");
+ WARNING("IrLAN: Command not supported\n");
break;
case 4:
- printk(KERN_WARNING "Parameter not supported\n");
+ WARNING("IrLAN: Parameter not supported\n");
break;
case 5:
- printk(KERN_WARNING "Value not supported\n");
+ WARNING("IrLAN: Value not supported\n");
break;
case 6:
- printk(KERN_WARNING "Not open\n");
+ WARNING("IrLAN: Not open\n");
break;
case 7:
- printk(KERN_WARNING "Authentication required\n");
+ WARNING("IrLAN: Authentication required\n");
break;
case 8:
- printk(KERN_WARNING "Invalid password\n");
+ WARNING("IrLAN: Invalid password\n");
break;
case 9:
- printk(KERN_WARNING "Protocol error\n");
+ WARNING("IrLAN: Protocol error\n");
break;
case 255:
- printk(KERN_WARNING "Asynchronous status\n");
+ WARNING("IrLAN: Asynchronous status\n");
break;
}
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 15 08:37:58 1998
- * Modified at: Mon May 10 20:23:49 1999
+ * Modified at: Mon May 31 19:57:08 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
struct irmanager_event mgr_event;
struct irlan_cb *self;
- DEBUG(0, __FUNCTION__"()\n");
+ DEBUG(2, __FUNCTION__"()\n");
ASSERT(dev != NULL, return -1;);
ether_setup(dev);
- dev->tx_queue_len = TTP_MAX_QUEUE;
+ /*
+ * Let's do all queueing in IrTTP instead of this device driver.
+ * Queueing here as well can introduce some strange latency
+ * problems, which we will avoid by setting the queue size to 0.
+ */
+ dev->tx_queue_len = 0;
if (self->provider.access_type == ACCESS_DIRECT) {
/*
{
struct irlan_cb *self;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
ASSERT(dev != NULL, return -1;);
{
struct irlan_cb *self = (struct irlan_cb *) dev->priv;
- DEBUG(0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
/* Stop device */
dev->tbusy = 1;
int irlan_eth_xmit(struct sk_buff *skb, struct device *dev)
{
struct irlan_cb *self;
+ int ret;
self = (struct irlan_cb *) dev->priv;
ASSERT(self != NULL, return 0;);
ASSERT(self->magic == IRLAN_MAGIC, return 0;);
- /* Lock transmit buffer */
- if (irda_lock((void *) &dev->tbusy) == FALSE) {
- /*
- * If we get here, some higher level has decided we are broken.
- * There should really be a "kick me" function call instead.
- */
- int tickssofar = jiffies - dev->trans_start;
-
- if (tickssofar < 5)
- return -EBUSY;
-
- dev->tbusy = 0;
- dev->trans_start = jiffies;
- }
+ /* Check if IrTTP can accept more frames */
+ if (dev->tbusy)
+ return -EBUSY;
/* skb headroom large enough to contain all IrDA-headers? */
if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
}
dev->trans_start = jiffies;
- self->stats.tx_packets++;
- self->stats.tx_bytes += skb->len;
/* Now queue the packet in the transport layer */
if (self->use_udata)
- irttp_udata_request(self->tsap_data, skb);
- else {
- if (irttp_data_request(self->tsap_data, skb) < 0) {
- /*
- * IrTTPs tx queue is full, so we just have to
- * drop the frame! You might think that we should
- * just return -1 and don't deallocate the frame,
- * but that is dangerous since it's possible that
- * we have replaced the original skb with a new
- * one with larger headroom, and that would really
- * confuse do_dev_queue_xmit() in dev.c! I have
- * tried :-) DB
- */
- dev_kfree_skb(skb);
- ++self->stats.tx_dropped;
-
- return 0;
- }
+ ret = irttp_udata_request(self->tsap_data, skb);
+ else
+ ret = irttp_data_request(self->tsap_data, skb);
+
+ if (ret < 0) {
+ /*
+ * IrTTPs tx queue is full, so we just have to
+ * drop the frame! You might think that we should
+ * just return -1 and don't deallocate the frame,
+ * but that is dangerous since it's possible that
+ * we have replaced the original skb with a new
+ * one with larger headroom, and that would really
+ * confuse do_dev_queue_xmit() in dev.c! I have
+ * tried :-) DB
+ */
+ dev_kfree_skb(skb);
+ self->stats.tx_dropped++;
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += skb->len;
}
- dev->tbusy = 0; /* Finished! */
return 0;
}
skb->dev = &self->dev;
skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */
- netif_rx(skb); /* Eat it! */
-
self->stats.rx_packets++;
self->stats.rx_bytes += skb->len;
+ netif_rx(skb); /* Eat it! */
+
return 0;
}
struct irlan_cb *self;
struct device *dev;
- DEBUG(4, __FUNCTION__ "()\n");
-
self = (struct irlan_cb *) instance;
ASSERT(self != NULL, return;);
* Send gratuitous ARP to announce that we have changed
* hardware address, so that all peers updates their ARP tables
*/
-void irlan_etc_send_gratuitous_arp(struct device *dev)
+void irlan_eth_send_gratuitous_arp(struct device *dev)
{
struct in_device *in_dev;
self = dev->priv;
- DEBUG(0, __FUNCTION__ "()\n");
- return;
+ DEBUG(2, __FUNCTION__ "()\n");
+
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLAN_MAGIC, return;);
- if (dev->flags&IFF_PROMISC) {
- /* Enable promiscuous mode */
- DEBUG(0, "Promiscous mode not implemented\n");
- /* outw(MULTICAST|PROMISC, ioaddr); */
+ /* Check if data channel has been connected yet */
+ if (self->client.state != IRLAN_DATA) {
+ DEBUG(1, __FUNCTION__ "(), delaying!\n");
+ return;
}
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ WARNING("Promiscous mode not implemented by IrLAN!\n");
+ }
else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n");
irlan_set_multicast_filter(self, FALSE);
}
- if (dev->flags & IFF_BROADCAST) {
- DEBUG(4, __FUNCTION__ "(), Setting broadcast filter\n");
+ if (dev->flags & IFF_BROADCAST)
irlan_set_broadcast_filter(self, TRUE);
- } else {
- DEBUG(4, __FUNCTION__ "(), Clearing broadcast filter\n");
+ else
irlan_set_broadcast_filter(self, FALSE);
- }
}
/*
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri Jan 29 11:16:38 1999
- * Modified at: Sat May 8 15:25:23 1999
+ * Modified at: Fri May 14 23:11:01 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
********************************************************************/
#include <linux/skbuff.h>
+#include <linux/random.h>
#include <net/irda/irlan_common.h>
/*********************************************************************
*
* Filename: irlap.c
- * Version: 0.9
- * Description: An IrDA LAP driver for Linux
- * Status: Stable.
+ * Version: 1.0
+ * Description: IrLAP implementation for Linux
+ * Status: Stable
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Fri Apr 23 10:12:29 1999
+ * Modified at: Mon May 31 21:43:55 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
*
- * This program is free software; you can redistribute iyt and/or
+ * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
********************************************************************/
#include <linux/config.h>
};
#ifdef CONFIG_PROC_FS
-int irlap_proc_read( char *, char **, off_t, int, int);
+int irlap_proc_read(char *, char **, off_t, int, int);
#endif /* CONFIG_PROC_FS */
-__initfunc(int irlap_init( void))
+__initfunc(int irlap_init(void))
{
/* Allocate master array */
- irlap = hashbin_new( HB_LOCAL);
- if ( irlap == NULL) {
- printk( KERN_WARNING "IrLAP: Can't allocate irlap hashbin!\n");
+ irlap = hashbin_new(HB_LOCAL);
+ if (irlap == NULL) {
+ printk(KERN_WARNING "IrLAP: Can't allocate irlap hashbin!\n");
return -ENOMEM;
}
#ifdef CONFIG_IRDA_COMPRESSION
- irlap_compressors = hashbin_new( HB_LOCAL);
- if ( irlap_compressors == NULL) {
- printk( KERN_WARNING "IrLAP: Can't allocate compressors hashbin!\n");
+ irlap_compressors = hashbin_new(HB_LOCAL);
+ if (irlap_compressors == NULL) {
+ printk(KERN_WARNING "IrLAP: Can't allocate compressors hashbin!\n");
return -ENOMEM;
}
#endif
void irlap_cleanup(void)
{
- ASSERT( irlap != NULL, return;);
+ ASSERT(irlap != NULL, return;);
- hashbin_delete( irlap, (FREE_FUNC) __irlap_close);
+ hashbin_delete(irlap, (FREE_FUNC) __irlap_close);
#ifdef CONFIG_IRDA_COMPRESSION
- hashbin_delete( irlap_compressors, (FREE_FUNC) kfree);
+ hashbin_delete(irlap_compressors, (FREE_FUNC) kfree);
#endif
}
* Initialize IrLAP layer
*
*/
-struct irlap_cb *irlap_open( struct irda_device *irdev)
+struct irlap_cb *irlap_open(struct irda_device *irdev)
{
struct irlap_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( irdev != NULL, return NULL;);
- ASSERT( irdev->magic == IRDA_DEVICE_MAGIC, return NULL;);
+ ASSERT(irdev != NULL, return NULL;);
+ ASSERT(irdev->magic == IRDA_DEVICE_MAGIC, return NULL;);
/* Initialize the irlap structure. */
- self = kmalloc( sizeof( struct irlap_cb), GFP_KERNEL);
- if ( self == NULL)
+ self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL);
+ if (self == NULL)
return NULL;
- memset( self, 0, sizeof(struct irlap_cb));
+ memset(self, 0, sizeof(struct irlap_cb));
self->magic = LAP_MAGIC;
/* Make a binding between the layers */
self->irdev = irdev;
self->netdev = &irdev->netdev;
- irlap_next_state( self, LAP_OFFLINE);
+ irlap_next_state(self, LAP_OFFLINE);
/* Initialize transmitt queue */
- skb_queue_head_init( &self->tx_list);
- skb_queue_head_init( &self->wx_list);
+ skb_queue_head_init(&self->tx_list);
+ skb_queue_head_init(&self->wx_list);
/* My unique IrLAP device address! */
get_random_bytes(&self->saddr, sizeof(self->saddr));
self->caddr &= 0xfe;
}
- init_timer( &self->slot_timer);
- init_timer( &self->query_timer);
- init_timer( &self->discovery_timer);
- init_timer( &self->final_timer);
- init_timer( &self->poll_timer);
- init_timer( &self->wd_timer);
- init_timer( &self->backoff_timer);
+ init_timer(&self->slot_timer);
+ init_timer(&self->query_timer);
+ init_timer(&self->discovery_timer);
+ init_timer(&self->final_timer);
+ init_timer(&self->poll_timer);
+ init_timer(&self->wd_timer);
+ init_timer(&self->backoff_timer);
- irlap_apply_default_connection_parameters( self);
+ irlap_apply_default_connection_parameters(self);
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
- hashbin_insert( irlap, (QUEUE *) self, self->saddr, NULL);
+ hashbin_insert(irlap, (QUEUE *) self, self->saddr, NULL);
- irlmp_register_link( self, self->saddr, &self->notify);
+ irlmp_register_link(self, self->saddr, &self->notify);
return self;
}
* Remove IrLAP and all allocated memory. Stop any pending timers.
*
*/
-static void __irlap_close( struct irlap_cb *self)
+static void __irlap_close(struct irlap_cb *self)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Stop timers */
- del_timer( &self->slot_timer);
- del_timer( &self->query_timer);
- del_timer( &self->discovery_timer);
- del_timer( &self->final_timer);
- del_timer( &self->poll_timer);
- del_timer( &self->wd_timer);
- del_timer( &self->backoff_timer);
-
- irlap_flush_all_queues( self);
+ del_timer(&self->slot_timer);
+ del_timer(&self->query_timer);
+ del_timer(&self->discovery_timer);
+ del_timer(&self->final_timer);
+ del_timer(&self->poll_timer);
+ del_timer(&self->wd_timer);
+ del_timer(&self->backoff_timer);
+
+ irlap_flush_all_queues(self);
self->irdev = NULL;
self->magic = 0;
- kfree( self);
+ kfree(self);
}
/*
* Remove IrLAP instance
*
*/
-void irlap_close( struct irlap_cb *self)
+void irlap_close(struct irlap_cb *self)
{
struct irlap_cb *lap;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_disconnect_indication( self, LAP_DISC_INDICATION);
+ irlap_disconnect_indication(self, LAP_DISC_INDICATION);
irlmp_unregister_link(self->saddr);
self->notify.instance = NULL;
/* Be sure that we manage to remove ourself from the hash */
- lap = hashbin_remove( irlap, self->saddr, NULL);
- if ( !lap) {
- DEBUG( 1, __FUNCTION__ "(), Didn't find myself!\n");
+ lap = hashbin_remove(irlap, self->saddr, NULL);
+ if (!lap) {
+ DEBUG(1, __FUNCTION__ "(), Didn't find myself!\n");
return;
}
- __irlap_close( lap);
+ __irlap_close(lap);
}
/*
*/
void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
irlap_do_event(self, CONNECT_RESPONSE, skb, NULL);
}
* Received some data that was sent unreliable
*
*/
-void irlap_unit_data_indication( struct irlap_cb *self, struct sk_buff *skb)
+void irlap_unit_data_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
/* Hide LAP header from IrLMP layer */
- skb_pull( skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
#ifdef CONFIG_IRDA_COMPRESSION
- if ( self->qos_tx.compression.value) {
+ if (self->qos_tx.compression.value) {
- skb = irlap_decompress_frame( self, skb);
- if ( !skb) {
- DEBUG( 1, __FUNCTION__ "(), Decompress error!\n");
+ skb = irlap_decompress_frame(self, skb);
+ if (!skb) {
+ DEBUG(1, __FUNCTION__ "(), Decompress error!\n");
return;
}
}
* Queue data for transmission, must wait until XMIT state
*
*/
-inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
+inline void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb,
int reliable)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "(), tx_list=%d\n",
- skb_queue_len( &self->tx_list));
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
#ifdef CONFIG_IRDA_COMPRESSION
- if ( self->qos_tx.compression.value) {
- skb = irlap_compress_frame( self, skb);
- if ( !skb) {
- DEBUG( 1, __FUNCTION__ "(), Compress error!\n");
+ if (self->qos_tx.compression.value) {
+ skb = irlap_compress_frame(self, skb);
+ if (!skb) {
+ DEBUG(1, __FUNCTION__ "(), Compress error!\n");
return;
}
}
#endif
- ASSERT( skb_headroom( skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
- return;);
- skb_push( skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+ ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER),
+ return;);
+ skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
/*
* Must set frame format now so that the rest of the code knows
* if its dealing with an I or an UI frame
*/
- if ( reliable)
+ if (reliable)
skb->data[1] = I_FRAME;
else {
- DEBUG( 4, __FUNCTION__ "(), queueing unreliable frame\n");
+ DEBUG(4, __FUNCTION__ "(), queueing unreliable frame\n");
skb->data[1] = UI_FRAME;
}
* Send event if this frame only if we are in the right state
* FIXME: udata should be sent first! (skb_queue_head?)
*/
- if (( self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
+ if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) {
/*
* Check if the transmit queue contains some unsent frames,
* and if so, make sure they are sent first
*/
- if ( !skb_queue_empty( &self->tx_list)) {
- skb_queue_tail( &self->tx_list, skb);
- skb = skb_dequeue( &self->tx_list);
+ if (!skb_queue_empty(&self->tx_list)) {
+ skb_queue_tail(&self->tx_list, skb);
+ skb = skb_dequeue(&self->tx_list);
- ASSERT( skb != NULL, return;);
+ ASSERT(skb != NULL, return;);
}
- irlap_do_event( self, SEND_I_CMD, skb, NULL);
+ irlap_do_event(self, SEND_I_CMD, skb, NULL);
} else
- skb_queue_tail( &self->tx_list, skb);
+ skb_queue_tail(&self->tx_list, skb);
}
/*
* Disconnect request from other device
*
*/
-void irlap_disconnect_indication( struct irlap_cb *self, LAP_REASON reason)
+void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
{
- DEBUG( 1, __FUNCTION__ "(), reason=%s\n", lap_reasons[reason]);
+ DEBUG(1, __FUNCTION__ "(), reason=%s\n", lap_reasons[reason]);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
#ifdef CONFIG_IRDA_COMPRESSION
- irda_free_compression( self);
+ irda_free_compression(self);
#endif
/* Flush queues */
- irlap_flush_all_queues( self);
+ irlap_flush_all_queues(self);
- switch( reason) {
+ switch(reason) {
case LAP_RESET_INDICATION:
- DEBUG( 1, __FUNCTION__ "(), Sending reset request!\n");
- irlap_do_event( self, RESET_REQUEST, NULL, NULL);
+ DEBUG(1, __FUNCTION__ "(), Sending reset request!\n");
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
break;
case LAP_NO_RESPONSE: /* FALLTROUGH */
case LAP_DISC_INDICATION: /* FALLTROUGH */
case LAP_FOUND_NONE: /* FALLTROUGH */
case LAP_MEDIA_BUSY:
- irlmp_link_disconnect_indication( self->notify.instance,
+ irlmp_link_disconnect_indication(self->notify.instance,
self, reason, NULL);
break;
default:
- DEBUG( 1, __FUNCTION__ "(), Reason %d not implemented!\n",
+ DEBUG(1, __FUNCTION__ "(), Reason %d not implemented!\n",
reason);
}
}
{
struct irlap_info info;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( discovery != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(discovery != NULL, return;);
- DEBUG( 4, __FUNCTION__ "(), nslots = %d\n", discovery->nslots);
+ DEBUG(4, __FUNCTION__ "(), nslots = %d\n", discovery->nslots);
- ASSERT(( discovery->nslots == 1) || ( discovery->nslots == 6) ||
- ( discovery->nslots == 8) || ( discovery->nslots == 16),
+ ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) ||
+ (discovery->nslots == 8) || (discovery->nslots == 16),
return;);
/*
* Discovery is only possible in NDM mode
*/
- if ( self->state == LAP_NDM) {
- ASSERT( self->discovery_log == NULL, return;);
- self->discovery_log= hashbin_new( HB_LOCAL);
+ if (self->state == LAP_NDM) {
+ ASSERT(self->discovery_log == NULL, return;);
+ self->discovery_log= hashbin_new(HB_LOCAL);
info.S = discovery->nslots; /* Number of slots */
info.s = 0; /* Current slot */
self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
- irlap_do_event( self, DISCOVERY_REQUEST, NULL, &info);
+ irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info);
} else {
- DEBUG( 4, __FUNCTION__
+ DEBUG(4, __FUNCTION__
"(), discovery only possible in NDM mode\n");
- irlap_discovery_confirm( self, NULL);
+ irlap_discovery_confirm(self, NULL);
}
}
* A device has been discovered in front of this station, we
* report directly to LMP.
*/
-void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
+void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- ASSERT( self->notify.instance != NULL, return;);
+ ASSERT(self->notify.instance != NULL, return;);
/*
* Check for successful discovery, since we are then allowed to clear
irda_device_set_media_busy(self->irdev, FALSE);
/* Inform IrLMP */
- irlmp_link_discovery_confirm( self->notify.instance, discovery_log);
+ irlmp_link_discovery_confirm(self->notify.instance, discovery_log);
/*
* IrLMP has now the responsibilities for the discovery_log
*/
void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( discovery != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(discovery != NULL, return;);
- ASSERT( self->notify.instance != NULL, return;);
+ ASSERT(self->notify.instance != NULL, return;);
irlmp_link_discovery_indication(self->notify.instance, discovery);
}
*/
void irlap_status_indication(int quality_of_link)
{
- switch( quality_of_link) {
+ switch(quality_of_link) {
case STATUS_NO_ACTIVITY:
- printk( KERN_INFO "IrLAP, no activity on link!\n");
+ printk(KERN_INFO "IrLAP, no activity on link!\n");
break;
case STATUS_NOISY:
- printk( KERN_INFO "IrLAP, noisy link!\n");
+ printk(KERN_INFO "IrLAP, noisy link!\n");
break;
default:
break;
*
*
*/
-void irlap_reset_indication( struct irlap_cb *self)
+void irlap_reset_indication(struct irlap_cb *self)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- if ( self->state == LAP_RESET_WAIT)
- irlap_do_event( self, RESET_REQUEST, NULL, NULL);
+ if (self->state == LAP_RESET_WAIT)
+ irlap_do_event(self, RESET_REQUEST, NULL, NULL);
else
- irlap_do_event( self, RESET_RESPONSE, NULL, NULL);
+ irlap_do_event(self, RESET_RESPONSE, NULL, NULL);
}
/*
*/
void irlap_reset_confirm(void)
{
- DEBUG( 1, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
}
/*
* S = Number of slots (0 -> S-1)
* s = Current slot
*/
-int irlap_generate_rand_time_slot( int S, int s)
+int irlap_generate_rand_time_slot(int S, int s)
{
int slot;
- ASSERT(( S - s) > 0, return 0;);
+ ASSERT((S - s) > 0, return 0;);
slot = s + jiffies % (S-s);
- ASSERT(( slot >= s) || ( slot < S), return 0;);
+ ASSERT((slot >= s) || (slot < S), return 0;);
return slot;
}
* not intuitive and you should not try to change it. If you think it
* contains bugs, please mail a patch to the author instead.
*/
-void irlap_update_nr_received( struct irlap_cb *self, int nr)
+void irlap_update_nr_received(struct irlap_cb *self, int nr)
{
struct sk_buff *skb = NULL;
int count = 0;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/*
* Remove all the ack-ed frames from the window queue.
*/
- DEBUG( 4, "--> wx_list=%d, va=%d, nr=%d\n",
- skb_queue_len( &self->wx_list), self->va, nr);
+ DEBUG(4, "--> wx_list=%d, va=%d, nr=%d\n",
+ skb_queue_len(&self->wx_list), self->va, nr);
/*
* Optimize for the common case. It is most likely that the receiver
* will acknowledge all the frames we have sent! So in that case we
* delete all frames stored in window.
*/
- if ( nr == self->vs) {
- while (( skb = skb_dequeue( &self->wx_list)) != NULL) {
+ if (nr == self->vs) {
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL) {
dev_kfree_skb(skb);
}
/* The last acked frame is the next to send minus one */
self->va = nr - 1;
} else {
/* Remove all acknowledged frames in current window */
- while (( skb_peek( &self->wx_list) != NULL) &&
- ((( self->va+1) % 8) != nr))
+ while ((skb_peek(&self->wx_list) != NULL) &&
+ (((self->va+1) % 8) != nr))
{
- skb = skb_dequeue( &self->wx_list);
+ skb = skb_dequeue(&self->wx_list);
dev_kfree_skb(skb);
self->va = (self->va + 1) % 8;
count++;
}
- DEBUG( 4, "irlap_update_nr_received(), removed %d\n", count);
- DEBUG( 4, "wx_list=%d, va=%d, nr=%d -->\n",
- skb_queue_len( &self->wx_list), self->va, nr);
+ DEBUG(4, "irlap_update_nr_received(), removed %d\n", count);
+ DEBUG(4, "wx_list=%d, va=%d, nr=%d -->\n",
+ skb_queue_len(&self->wx_list), self->va, nr);
}
/* Advance window */
- self->window = self->window_size - skb_queue_len( &self->wx_list);
+ self->window = self->window_size - skb_queue_len(&self->wx_list);
}
/*
*
* Validate the next to send (ns) field from received frame.
*/
-int irlap_validate_ns_received( struct irlap_cb *self, int ns)
+int irlap_validate_ns_received(struct irlap_cb *self, int ns)
{
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
/* ns as expected? */
- if ( ns == self->vr) {
- DEBUG( 4, __FUNCTION__ "(), expected!\n");
+ if (ns == self->vr) {
+ DEBUG(4, __FUNCTION__ "(), expected!\n");
return NS_EXPECTED;
}
/*
* Validate the next to receive (nr) field from received frame.
*
*/
-int irlap_validate_nr_received( struct irlap_cb *self, int nr)
+int irlap_validate_nr_received(struct irlap_cb *self, int nr)
{
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
/* nr as expected? */
- if ( nr == self->vs) {
- DEBUG( 4, __FUNCTION__ "(), expected!\n");
+ if (nr == self->vs) {
+ DEBUG(4, __FUNCTION__ "(), expected!\n");
return NR_EXPECTED;
}
* unexpected nr? (but within current window), first we check if the
* ns numbers of the frames in the current window wrap.
*/
- if ( self->va < self->vs) {
- if (( nr >= self->va) && ( nr <= self->vs))
+ if (self->va < self->vs) {
+ if ((nr >= self->va) && (nr <= self->vs))
return NR_UNEXPECTED;
} else {
- if (( nr >= self->va) || ( nr <= self->vs))
+ if ((nr >= self->va) || (nr <= self->vs))
return NR_UNEXPECTED;
}
* Initialize the connection state parameters
*
*/
-void irlap_initiate_connection_state( struct irlap_cb *self)
+void irlap_initiate_connection_state(struct irlap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Next to send and next to receive */
self->vs = self->vr = 0;
* Flush all queues
*
*/
-void irlap_flush_all_queues( struct irlap_cb *self)
+void irlap_flush_all_queues(struct irlap_cb *self)
{
struct sk_buff* skb;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Free transmission queue */
- while (( skb = skb_dequeue( &self->tx_list)) != NULL)
- dev_kfree_skb( skb);
+ while ((skb = skb_dequeue(&self->tx_list)) != NULL)
+ dev_kfree_skb(skb);
/* Free sliding window buffered packets */
- while (( skb = skb_dequeue( &self->wx_list)) != NULL)
- dev_kfree_skb( skb);
+ while ((skb = skb_dequeue(&self->wx_list)) != NULL)
+ dev_kfree_skb(skb);
#ifdef CONFIG_IRDA_RECYCLE_RR
- if ( self->recycle_rr_skb) {
- dev_kfree_skb( self->recycle_rr_skb);
+ if (self->recycle_rr_skb) {
+ dev_kfree_skb(self->recycle_rr_skb);
self->recycle_rr_skb = NULL;
}
#endif
ASSERT(self->magic == LAP_MAGIC, return;);
if (!self->irdev) {
- DEBUG( 1, __FUNCTION__ "(), driver missing!\n");
+ DEBUG(1, __FUNCTION__ "(), driver missing!\n");
return;
}
__u8 mask; /* Current bit tested */
int i;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/*
* Find out which compressors we support. We do this be checking that
* actually been loaded. Ths is sort of hairy code but that is what
* you get when you do a little bit flicking :-)
*/
- DEBUG( 4, __FUNCTION__ "(), comp bits 0x%02x\n",
+ DEBUG(4, __FUNCTION__ "(), comp bits 0x%02x\n",
self->qos_rx.compression.bits);
mask = 0x80; /* Start with testing MSB */
- for ( i=0;i<8;i++) {
- DEBUG( 4, __FUNCTION__ "(), testing bit %d\n", 8-i);
- if ( self->qos_rx.compression.bits & mask) {
- DEBUG( 4, __FUNCTION__ "(), bit %d is set by defalt\n",
+ for (i=0;i<8;i++) {
+ DEBUG(4, __FUNCTION__ "(), testing bit %d\n", 8-i);
+ if (self->qos_rx.compression.bits & mask) {
+ DEBUG(4, __FUNCTION__ "(), bit %d is set by default\n",
8-i);
- comp = hashbin_find( irlap_compressors,
+ comp = hashbin_find(irlap_compressors,
compression[ msb_index(mask)],
NULL);
- if ( !comp) {
+ if (!comp) {
/* Protocol not supported, so clear the bit */
- DEBUG( 4, __FUNCTION__ "(), Compression "
+ DEBUG(4, __FUNCTION__ "(), Compression "
"protocol %d has not been loaded!\n",
compression[msb_index(mask)]);
self->qos_rx.compression.bits &= ~mask;
- DEBUG( 4, __FUNCTION__
+ DEBUG(4, __FUNCTION__
"(), comp bits 0x%02x\n",
self->qos_rx.compression.bits);
}
void irlap_init_qos_capabilities(struct irlap_cb *self,
struct qos_info *qos_user)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( self->irdev != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(self->irdev != NULL, return;);
/* Start out with the maximum QoS support possible */
- irda_init_max_qos_capabilies( &self->qos_rx);
+ irda_init_max_qos_capabilies(&self->qos_rx);
#ifdef CONFIG_IRDA_COMPRESSION
- irlap_init_comp_qos_capabilities( self);
+ irlap_init_comp_qos_capabilities(self);
#endif
/* Apply drivers QoS capabilities */
- irda_qos_compute_intersection( &self->qos_rx,
- irda_device_get_qos( self->irdev));
+ irda_qos_compute_intersection(&self->qos_rx,
+ irda_device_get_qos(self->irdev));
/*
* Check for user supplied QoS parameters. The service user is only
* user may not have set all of them.
*/
if (qos_user) {
- DEBUG( 1, __FUNCTION__ "(), Found user specified QoS!\n");
+ DEBUG(1, __FUNCTION__ "(), Found user specified QoS!\n");
- if ( qos_user->baud_rate.bits)
+ if (qos_user->baud_rate.bits)
self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
- if ( qos_user->max_turn_time.bits)
+ if (qos_user->max_turn_time.bits)
self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits;
- if ( qos_user->data_size.bits)
+ if (qos_user->data_size.bits)
self->qos_rx.data_size.bits &= qos_user->data_size.bits;
- if ( qos_user->link_disc_time.bits)
+ if (qos_user->link_disc_time.bits)
self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits;
#ifdef CONFIG_IRDA_COMPRESSION
self->qos_rx.compression.bits &= qos_user->compression.bits;
/* Set disconnect time */
self->qos_rx.link_disc_time.bits &= 0x07;
- irda_qos_bits_to_value( &self->qos_rx);
+ irda_qos_bits_to_value(&self->qos_rx);
}
/*
* Use the default connection and transmission parameters
*
*/
-void irlap_apply_default_connection_parameters( struct irlap_cb *self)
+void irlap_apply_default_connection_parameters(struct irlap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_change_speed( self, 9600);
+ irlap_change_speed(self, 9600);
/* Default value in NDM */
self->bofs_count = 11;
void irlap_apply_connection_parameters(struct irlap_cb *self,
struct qos_info *qos)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_change_speed( self, qos->baud_rate.value);
+ irlap_change_speed(self, qos->baud_rate.value);
self->window_size = qos->window_size.value;
self->window = qos->window_size.value;
*/
self->window_bytes = qos->baud_rate.value
* qos->max_turn_time.value / 10000;
- DEBUG( 4, "Setting window_bytes = %d\n", self->window_bytes);
+ DEBUG(4, "Setting window_bytes = %d\n", self->window_bytes);
/*
* Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
else
self->N1 = 3000 / qos->max_turn_time.value;
- DEBUG( 4, "Setting N1 = %d\n", self->N1);
+ DEBUG(4, "Setting N1 = %d\n", self->N1);
self->N2 = qos->link_disc_time.value * 1000 / qos->max_turn_time.value;
- DEBUG( 4, "Setting N2 = %d\n", self->N2);
+ DEBUG(4, "Setting N2 = %d\n", self->N2);
/*
* Initialize timeout values, some of the rules are listed on
self->wd_timeout = self->poll_timeout * 2;
#ifdef CONFIG_IRDA_COMPRESSION
- if ( qos->compression.value) {
- DEBUG( 1, __FUNCTION__ "(), Initializing compression\n");
- irda_set_compression( self, qos->compression.value);
+ if (qos->compression.value) {
+ DEBUG(1, __FUNCTION__ "(), Initializing compression\n");
+ irda_set_compression(self, qos->compression.value);
- irlap_compressor_init( self, 0);
+ irlap_compressor_init(self, 0);
}
#endif
}
* Give some info to the /proc file system
*
*/
-int irlap_proc_read( char *buf, char **start, off_t offset, int len,
+int irlap_proc_read(char *buf, char **start, off_t offset, int len,
int unused)
{
struct irlap_cb *self;
len = 0;
- self = (struct irlap_cb *) hashbin_get_first( irlap);
- while ( self != NULL) {
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
+ self = (struct irlap_cb *) hashbin_get_first(irlap);
+ while (self != NULL) {
+ ASSERT(self != NULL, return -ENODEV;);
+ ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
- len += sprintf( buf+len, "irlap%d <-> %s ",
+ len += sprintf(buf+len, "irlap%d <-> %s ",
i++, self->irdev->name);
- len += sprintf( buf+len, "state: %s\n",
+ len += sprintf(buf+len, "state: %s\n",
irlap_state[ self->state]);
- len += sprintf( buf+len, " caddr: %#02x, ", self->caddr);
- len += sprintf( buf+len, "saddr: %#08x, ", self->saddr);
- len += sprintf( buf+len, "daddr: %#08x\n", self->daddr);
+ len += sprintf(buf+len, " caddr: %#02x, ", self->caddr);
+ len += sprintf(buf+len, "saddr: %#08x, ", self->saddr);
+ len += sprintf(buf+len, "daddr: %#08x\n", self->daddr);
- len += sprintf( buf+len, " win size: %d, ",
+ len += sprintf(buf+len, " win size: %d, ",
self->window_size);
- len += sprintf( buf+len, "win: %d, ", self->window);
- len += sprintf( buf+len, "win bytes: %d, ", self->window_bytes);
- len += sprintf( buf+len, "bytes left: %d\n", self->bytes_left);
-
- len += sprintf( buf+len, " tx queue len: %d ",
- skb_queue_len( &self->tx_list));
- len += sprintf( buf+len, "win queue len: %d ",
- skb_queue_len( &self->wx_list));
- len += sprintf( buf+len, "rbusy: %s\n", self->remote_busy ?
+ len += sprintf(buf+len, "win: %d, ", self->window);
+ len += sprintf(buf+len, "win bytes: %d, ", self->window_bytes);
+ len += sprintf(buf+len, "bytes left: %d\n", self->bytes_left);
+
+ len += sprintf(buf+len, " tx queue len: %d ",
+ skb_queue_len(&self->tx_list));
+ len += sprintf(buf+len, "win queue len: %d ",
+ skb_queue_len(&self->wx_list));
+ len += sprintf(buf+len, "rbusy: %s\n", self->remote_busy ?
"TRUE" : "FALSE");
- len += sprintf( buf+len, " retrans: %d ", self->retry_count);
- len += sprintf( buf+len, "vs: %d ", self->vs);
- len += sprintf( buf+len, "vr: %d ", self->vr);
- len += sprintf( buf+len, "va: %d\n", self->va);
+ len += sprintf(buf+len, " retrans: %d ", self->retry_count);
+ len += sprintf(buf+len, "vs: %d ", self->vs);
+ len += sprintf(buf+len, "vr: %d ", self->vr);
+ len += sprintf(buf+len, "va: %d\n", self->va);
- len += sprintf( buf+len, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
+ len += sprintf(buf+len, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");
- len += sprintf( buf+len, " tx\t%d\t",
+ len += sprintf(buf+len, " tx\t%d\t",
self->qos_tx.baud_rate.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.max_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.data_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.window_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.additional_bofs.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.min_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_tx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
- len += sprintf( buf+len, "%d",
+ len += sprintf(buf+len, "%d",
self->qos_tx.compression.value);
#endif
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
- len += sprintf( buf+len, " rx\t%d\t",
+ len += sprintf(buf+len, " rx\t%d\t",
self->qos_rx.baud_rate.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.max_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.data_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.window_size.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.additional_bofs.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.min_turn_time.value);
- len += sprintf( buf+len, "%d\t",
+ len += sprintf(buf+len, "%d\t",
self->qos_rx.link_disc_time.value);
#ifdef CONFIG_IRDA_COMPRESSION
- len += sprintf( buf+len, "%d",
+ len += sprintf(buf+len, "%d",
self->qos_rx.compression.value);
#endif
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
- self = (struct irlap_cb *) hashbin_get_next( irlap);
+ self = (struct irlap_cb *) hashbin_get_next(irlap);
}
restore_flags(flags);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Sun May 9 22:44:32 1999
+ * Modified at: Mon May 31 21:55:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* Rushes through the state machine without any delay. If state == XMIT
* then send queued data frames.
*/
-void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret;
return;
DEBUG(4, __FUNCTION__ "(), event = %s, state = %s\n",
- irlap_event[ event], irlap_state[ self->state]);
+ irlap_event[event], irlap_state[self->state]);
ret = (*state[ self->state]) (self, event, skb, info);
if (skb_queue_len(&self->tx_list)) {
/* Try to send away all queued data frames */
while ((skb = skb_dequeue(&self->tx_list)) != NULL) {
- ret = (*state[ self->state])(self, SEND_I_CMD,
- skb, NULL);
+ ret = (*state[self->state])(self, SEND_I_CMD,
+ skb, NULL);
if ( ret == -EPROTO)
break; /* Try again later! */
}
} else if (self->disconnect_pending) {
- DEBUG(0, __FUNCTION__ "(), disconnecting!\n");
self->disconnect_pending = FALSE;
ret = (*state[self->state])(self, DISCONNECT_REQUEST,
* stations.
*
*/
-static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
-
- DEBUG( 4, __FUNCTION__ "(), event=%s, vs=%d, vr=%d",
- irlap_event[ event], self->vs, self->vr);
+ DEBUG(4, __FUNCTION__ "(), event=%s, vs=%d, vr=%d",
+ irlap_event[event], self->vs, self->vr);
switch (event) {
case SEND_I_CMD:
- ASSERT( skb != NULL, return -1;);
- DEBUG( 4, __FUNCTION__ "(), Window=%d\n", self->window);
-
/*
* Only send frame if send-window > 0.
*/
- if (( self->window > 0) && ( !self->remote_busy)) {
+ if ((self->window > 0) && (!self->remote_busy)) {
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if (( skb->len+self->bofs_count) > self->bytes_left) {
- DEBUG( 4, __FUNCTION__ "(), Not allowed to "
- "transmit more bytes!\n");
- skb_queue_head( &self->tx_list, skb);
+ if ((skb->len+self->bofs_count) > self->bytes_left) {
+ DEBUG(4, __FUNCTION__ "(), Not allowed to "
+ "transmit more bytes!\n");
+ skb_queue_head(&self->tx_list, skb);
/*
* We should switch state to LAP_NRM_P, but
*/
return -EPROTO;
}
- self->bytes_left -= ( skb->len + self->bofs_count);
+ self->bytes_left -= (skb->len + self->bofs_count);
/*
* Send data with poll bit cleared only if window > 1
if (( self->window > 1) &&
skb_queue_len( &self->tx_list) > 0)
{
- DEBUG( 4, __FUNCTION__ "(), window > 1\n");
irlap_send_data_primary( self, skb);
irlap_next_state( self, LAP_XMIT_P);
} else {
- DEBUG( 4, __FUNCTION__ "(), window <= 1\n");
irlap_send_data_primary_poll( self, skb);
irlap_next_state( self, LAP_NRM_P);
int ns_status;
int nr_status;
- ASSERT(self != NULL, return -1;);
- ASSERT(self->magic == LAP_MAGIC, return -1;);
-
switch (event) {
case RECV_I_RSP: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
*/
self->fast_RR = FALSE;
#endif
-
ASSERT( info != NULL, return -1;);
ns_status = irlap_validate_ns_received(self, info->ns);
}
break;
case RECV_RR_RSP:
- DEBUG(4, __FUNCTION__ "(), RECV_RR_FRAME: "
- "Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va, self->vs,
- self->vr);
-
- ASSERT(info != NULL, return -1;);
-
/*
* If you get a RR, the remote isn't busy anymore,
* no matter what the NR
/* Resend rejected frames */
irlap_resend_rejected_frames( self, CMD_FRAME);
- /*
- * Start only if not running, DB
- * TODO: Should this one be here?
- */
- /* if ( !self->final_timer.prev) */
-/* irda_start_timer( FINAL_TIMER, self->final_timeout); */
-
- /* Keep state */
irlap_next_state( self, LAP_NRM_P);
} else if (ret == NR_INVALID) {
DEBUG(1, "irlap_state_nrm_p: received RR with "
irlap_next_state( self, LAP_RESET_WAIT);
- irlap_disconnect_indication( self,
- LAP_RESET_INDICATION);
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = TRUE;
}
if (skb)
/*
* Send frame only if send window > 1
*/
- if (( self->window > 0) && ( !self->remote_busy)) {
+ if ((self->window > 0) && ( !self->remote_busy)) {
/*
* Test if we have transmitted more bytes over the
* link than its possible to do with the current
* speed and turn-around-time.
*/
- if (( skb->len+self->bofs_count) > self->bytes_left) {
+ if ((skb->len+self->bofs_count) > self->bytes_left) {
DEBUG( 4, "IrDA: Not allowed to transmit more bytes!\n");
skb_queue_head( &self->tx_list, skb);
/*
if (( self->window > 1) &&
skb_queue_len( &self->tx_list) > 0)
{
- DEBUG( 4, __FUNCTION__ "(), window > 1\n");
irlap_send_data_secondary( self, skb);
irlap_next_state( self, LAP_XMIT_S);
} else {
- DEBUG( 4, "(), window <= 1\n");
irlap_send_data_secondary_final( self, skb);
irlap_next_state( self, LAP_NRM_S);
/*
* Check for Unexpected next to send (Ns)
*/
- if (( ns_status == NS_UNEXPECTED) &&
- ( nr_status == NR_EXPECTED))
+ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
{
/* Unexpected next to send, with final bit cleared */
if ( !info->pf) {
/*
* Unexpected Next to Receive(NR) ?
*/
- if (( ns_status == NS_EXPECTED) &&
- ( nr_status == NR_UNEXPECTED))
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
{
if ( info->pf) {
DEBUG( 4, "RECV_I_RSP: frame(s) lost\n");
irlap_update_nr_received( self, info->nr);
del_timer( &self->wd_timer);
- irlap_wait_min_turn_around( self, &self->qos_tx);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
irlap_next_state( self, LAP_XMIT_S);
} else {
self->remote_busy = FALSE;
/* Update Nr received */
- irlap_update_nr_received( self, info->nr);
- irlap_wait_min_turn_around( self, &self->qos_tx);
+ irlap_update_nr_received(self, info->nr);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
- irlap_send_rr_frame( self, RSP_FRAME);
+ irlap_send_rr_frame(self, RSP_FRAME);
- irlap_start_wd_timer( self, self->wd_timeout);
- irlap_next_state( self, LAP_NRM_S);
+ irlap_start_wd_timer(self, self->wd_timeout);
+ irlap_next_state(self, LAP_NRM_S);
}
- } else if ( nr_status == NR_UNEXPECTED) {
+ } else if (nr_status == NR_UNEXPECTED) {
self->remote_busy = FALSE;
irlap_update_nr_received( self, info->nr);
irlap_resend_rejected_frames( self, RSP_FRAME);
} else {
DEBUG(1, __FUNCTION__ "(), invalid nr not implemented!\n");
}
- if ( skb)
- dev_kfree_skb( skb);
+ if (skb)
+ dev_kfree_skb(skb);
break;
case RECV_SNRM_CMD:
ASSERT( self != NULL, return -ENODEV;);
ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
- switch( event) {
+ switch(event) {
case RESET_RESPONSE:
irlap_send_ua_response_frame( self, &self->qos_rx);
irlap_initiate_connection_state( self);
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 10:27:26 1997
- * Modified at: Sun May 9 22:55:11 1999
+ * Modified at: Mon May 31 09:29:13 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Resrved.
{
__u8 *frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
frame = skb->data;
/* Insert connection address */
/* Insert next to receive (Vr) */
frame[1] |= (self->vr << 5); /* insert nr */
-#if 0
- {
- int ns;
- ns = (frame[1] >> 1) & 0x07; /* Next to send */
-
- DEBUG(0, __FUNCTION__ "(), ns=%d\n", ns);
- }
-#endif
-
irlap_queue_xmit(self, skb);
}
* Optimize for the common case and check if the frame is an
* I(nformation) frame. Only I-frames have bit 0 set to 0
*/
- if(~control & 0x01) {
+ if (~control & 0x01) {
irlap_recv_i_frame(self, skb, &info, command);
self->stats.rx_packets++;
return 0;
* Status: Stable.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 17 20:54:32 1997
- * Modified at: Sun May 9 22:45:06 1999
+ * Modified at: Mon May 31 21:49:41 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
ASSERT(skb != NULL, return;);
ASSERT(self->lap != NULL, return;);
- DEBUG(0, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ DEBUG(2, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
self->slsap_sel, self->dlsap_sel);
self->qos = *self->lap->qos;
- lap_header_size = irlap_get_header_size(self->lap->irlap);
-
- max_seg_size = self->lap->qos->data_size.value-LMP_HEADER-
- lap_header_size;
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
DEBUG(2, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+ lap_header_size = irlap_get_header_size(self->lap->irlap);
+
max_header_size = LMP_HEADER + lap_header_size;
DEBUG(2, __FUNCTION__ "(), max_header_size=%d\n", max_header_size);
ASSERT(self->lap != NULL, return;);
self->qos = *self->lap->qos;
- lap_header_size = irlap_get_header_size(self->lap->irlap);
-
- max_seg_size = self->lap->qos->data_size.value-LMP_HEADER-
- lap_header_size;
+ max_seg_size = self->lap->qos->data_size.value-LMP_HEADER;
DEBUG(2, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+
+ lap_header_size = irlap_get_header_size(self->lap->irlap);
max_header_size = LMP_HEADER + lap_header_size;
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 02:09:59 1997
- * Modified at: Sun May 9 21:00:05 1999
+ * Modified at: Mon May 31 09:53:16 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
self->lsaps);
if (lsap == NULL) {
- DEBUG(0, "IrLMP, Sorry, no LSAP for received frame!\n");
- DEBUG(0, __FUNCTION__
+ DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
+ DEBUG(2, __FUNCTION__
"(), slsap_sel = %02x, dlsap_sel = %02x\n", slsap_sel,
dlsap_sel);
if (fp[0] & CONTROL_BIT) {
- DEBUG(0, __FUNCTION__
+ DEBUG(2, __FUNCTION__
"(), received control frame %02x\n", fp[2]);
} else {
- DEBUG(0, __FUNCTION__ "(), received data frame\n");
+ DEBUG(2, __FUNCTION__ "(), received data frame\n");
}
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
return;
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Dec 15 13:55:39 1997
- * Modified at: Mon May 10 15:28:49 1999
+ * Modified at: Fri May 14 13:46:02 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997, 1999 Dag Brattli, All Rights Reserved.
#include <net/irda/wrapper.h>
#include <net/irda/timer.h>
-extern struct proc_dir_entry proc_irda;
+extern struct proc_dir_entry *proc_irda;
struct irda_cb irda; /* One global instance */
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
- * Modified at: Mon May 10 17:12:53 1999
+ * Modified at: Mon May 31 10:29:56 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
#include <net/irda/irlmp.h>
#include <net/irda/irttp.h>
-struct irttp_cb *irttp = NULL;
+static struct irttp_cb *irttp = NULL;
static void __irttp_close_tsap(struct tsap_cb *self);
static int irttp_udata_indication(void *instance, void *sap,
struct sk_buff *skb);
static void irttp_disconnect_indication(void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *);
+ LM_REASON reason, struct sk_buff *);
static void irttp_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 header_size, struct sk_buff *skb);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
-static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
static void irttp_start_todo_timer(struct tsap_cb *self, int timeout);
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
/*
* Function irttp_init (void)
/* Check that nothing bad happens */
if ((skb->len == 0) || (!self->connected)) {
- DEBUG(4, __FUNCTION__ "(), No data, or not connected\n");
+ ERROR(__FUNCTION__ "(), No data, or not connected\n");
return -ENOTCONN;
}
* inside an IrLAP frame
*/
if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
- DEBUG(1, __FUNCTION__
- "(), SAR disabled, and data is to large for IrLAP!\n");
+ ERROR(__FUNCTION__
+ "(), SAR disabled, and data is too large for IrLAP!\n");
return -EMSGSIZE;
}
(self->tx_max_sdu_size != SAR_UNBOUND) &&
(skb->len > self->tx_max_sdu_size))
{
- DEBUG(1, __FUNCTION__ "(), SAR enabled, "
- "but data is larger than TxMaxSduSize!\n");
+ ERROR(__FUNCTION__ "(), SAR enabled, "
+ "but data is larger than TxMaxSduSize!\n");
return -EMSGSIZE;
}
/*
frame = skb_push(skb, TTP_HEADER);
frame[0] = 0x00; /* Clear more bit */
- DEBUG(4, __FUNCTION__ "(), queueing original skb\n");
skb_queue_tail(&self->tx_queue, skb);
} else {
/*
{
struct sk_buff *skb = NULL;
unsigned long flags;
- __u8 *frame;
int n;
- ASSERT(self != NULL, return;);
- ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
-
if (irda_lock(&self->tx_queue_lock) == FALSE)
return;
* More bit must be set by the data_request() or fragment()
* functions
*/
- frame = skb->data;
-
- DEBUG(4, __FUNCTION__ "(), More=%s\n", frame[0] & 0x80 ?
- "TRUE" : "FALSE" );
-
- frame[0] |= (__u8) (n & 0x7f);
+ skb->data[0] |= (n & 0x7f);
irlmp_data_request(self->lsap, skb);
self->stats.tx_packets++;
/* Check if we can accept more frames from client */
if ((self->tx_sdu_busy) &&
(skb_queue_len(&self->tx_queue) < LOW_THRESHOLD))
- {
+ {
self->tx_sdu_busy = FALSE;
if (self->notify.flow_indication)
self->notify.flow_indication(
- self->notify.instance, self,
+ self->notify.instance, self,
FLOW_START);
}
}
struct sk_buff *skb)
{
struct tsap_cb *self;
- int more;
int n;
- __u8 *frame;
-
+
self = (struct tsap_cb *) instance;
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
- ASSERT(skb != NULL, return -1;);
- frame = skb->data;
-
- n = frame[0] & 0x7f; /* Extract the credits */
- more = frame[0] & 0x80;
-
- DEBUG(3, __FUNCTION__"(), got %d credits, TSAP sel=%02x\n",
- n, self->stsap_sel);
+ n = skb->data[0] & 0x7f; /* Extract the credits */
self->stats.rx_packets++;
* Data or dataless frame? Dataless frames only contain the
* TTP_HEADER
*/
- if (skb->len == 1) {
- /* Dataless flowdata TTP-PDU */
- self->send_credit += n;
- } else {
+ if (skb->len == 1)
+ self->send_credit += n; /* Dataless flowdata TTP-PDU */
+ else {
/* Deal with inbound credit */
self->send_credit += n;
self->remote_credit--;
self->connected = TRUE;
parameters = frame[0] & 0x80;
+
+ ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
if (parameters) {
plen = frame[1];
pi = frame[2];
DEBUG(4, __FUNCTION__ "(), RxMaxSduSize=%d\n",
self->tx_max_sdu_size);
+
+ /* Remove parameters */
+ ASSERT(skb->len >= (plen+1), return;);
+ skb_pull(skb, plen+1);
}
DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
self->send_credit, self->avail_credit, self->remote_credit);
- skb_pull(skb, TTP_HEADER);
-
if (self->notify.connect_confirm) {
self->notify.connect_confirm(self->notify.instance, self, qos,
self->tx_max_sdu_size,
*
*/
void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos,
- __u32 max_seg_size, __u8 max_header_size,
+ __u32 max_seg_size, __u8 max_header_size,
struct sk_buff *skb)
{
struct tsap_cb *self;
self->send_credit = n;
self->tx_max_sdu_size = 0;
- parameters = frame[0] & 0x80;
+ parameters = frame[0] & 0x80;
+
+ ASSERT(skb->len >= TTP_HEADER, return;);
+ skb_pull(skb, TTP_HEADER);
+
if (parameters) {
DEBUG(3, __FUNCTION__ "(), Contains parameters!\n");
plen = frame[1];
"() illegal value length for max_sdu_size!\n");
self->tx_max_sdu_size = 0;
};
-
+
+ /* Remove parameters */
+ ASSERT(skb->len >= (plen+1), return;);
+ skb_pull(skb, plen+1);
DEBUG(3, __FUNCTION__ "(), MaxSduSize=%d\n",
self->tx_max_sdu_size);
DEBUG(4, __FUNCTION__ "(), initial send_credit=%d\n", n);
- skb_pull(skb, 1); /* Remove TTP header */
-
if (self->notify.connect_indication) {
self->notify.connect_indication(self->notify.instance, self,
qos, self->rx_max_sdu_size,
irttp_run_tx_queue(self);
/* Give avay some credits to peer? */
- if ((skb_queue_empty(&self->tx_queue)) &&
- (self->remote_credit < LOW_THRESHOLD) &&
- (self->avail_credit > 0))
+ if ((self->remote_credit < LOW_THRESHOLD) &&
+ (self->avail_credit > 0) && (skb_queue_empty(&self->tx_queue)))
{
- DEBUG(4, __FUNCTION__ "(), sending credit!\n");
irttp_give_credit(self);
}
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sun May 2 21:58:00 1999
+ * Modified at: Fri May 28 20:30:24 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
/*
* Function state_begin_frame (idev, byte)
*
- *
+ * Beginning of frame detected
*
*/
static void state_begin_frame(struct irda_device *idev, __u8 byte)
case CE:
/* Stuffed byte */
idev->rx_buff.state = LINK_ESCAPE;
+
+ /* Time to initialize receive buffer */
+ idev->rx_buff.data = idev->rx_buff.head;
+ idev->rx_buff.len = 0;
break;
case EOF:
/* Abort frame */
idev->stats.rx_errors++;
idev->stats.rx_frame_errors++;
break;
- default:
- /* Got first byte of frame */
+ default:
+ /* Time to initialize receive buffer */
idev->rx_buff.data = idev->rx_buff.head;
idev->rx_buff.len = 0;
-
+
idev->rx_buff.data[idev->rx_buff.len++] = byte;
idev->rx_buff.fcs = irda_fcs(INIT_FCS, byte);
/*
* Function state_inside_frame (idev, byte)
*
- *
+ * Handle bytes received within a frame
*
*/
static void state_inside_frame(struct irda_device *idev, __u8 byte)
return -ENFILE;
}
- file->f_dentry = d_alloc_root(inode, NULL);
+ file->f_dentry = d_alloc_root(inode);
if (!file->f_dentry) {
put_filp(file);
put_unused_fd(fd);
#define __KERNEL_SYSCALLS__
#include <linux/version.h>
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/udp.h>
#include <asm/uaccess.h>
sk->user_data = NULL;
#endif
sk->data_ready = xprt->old_data_ready;
+ sk->no_check = 0;
sk->state_change = xprt->old_state_change;
sk->write_space = xprt->old_write_space;
return;
}
-/*
- * Input handler for RPC replies. Called from a bottom half and hence
+/* We have set things up such that we perform the checksum of the UDP
+ * packet in parallel with the copies into the RPC client iovec. -DaveM
+ */
+static int csum_partial_copy_to_page_cache(struct iovec *iov,
+ struct sk_buff *skb,
+ int copied)
+{
+ __u8 *pkt_data = skb->data + sizeof(struct udphdr);
+ __u8 *cur_ptr = iov->iov_base;
+ __kernel_size_t cur_len = iov->iov_len;
+ unsigned int csum = skb->csum;
+ int need_csum = (skb->ip_summed != CHECKSUM_UNNECESSARY);
+ int slack = skb->len - copied - sizeof(struct udphdr);
+
+ if (need_csum)
+ csum = csum_partial(skb->h.raw, sizeof(struct udphdr), csum);
+ while (copied > 0) {
+ if (cur_len) {
+ int to_move = cur_len;
+ if (to_move > copied)
+ to_move = copied;
+ if (need_csum)
+ csum = csum_partial_copy_nocheck(pkt_data, cur_ptr,
+ to_move, csum);
+ else
+ memcpy(cur_ptr, pkt_data, to_move);
+ pkt_data += to_move;
+ copied -= to_move;
+ cur_ptr += to_move;
+ cur_len -= to_move;
+ }
+ if (cur_len <= 0) {
+ iov++;
+ cur_len = iov->iov_len;
+ cur_ptr = iov->iov_base;
+ }
+ }
+ if (need_csum) {
+ if (slack > 0)
+ csum = csum_partial(pkt_data, slack, csum);
+ if ((unsigned short)csum_fold(csum))
+ return -1;
+ }
+ return 0;
+}
+
+/* Input handler for RPC replies. Called from a bottom half and hence
* atomic.
*/
static inline void
udp_data_ready(struct sock *sk, int len)
{
- struct rpc_task *task;
struct rpc_xprt *xprt;
struct rpc_rqst *rovr;
struct sk_buff *skb;
- struct iovec iov[MAX_IOVEC];
int err, repsize, copied;
dprintk("RPC: udp_data_ready...\n");
if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
return;
- repsize = skb->len - 8; /* don't account for UDP header */
+ repsize = skb->len - sizeof(struct udphdr);
if (repsize < 4) {
printk("RPC: impossible RPC reply size %d!\n", repsize);
goto dropit;
}
/* Look up the request corresponding to the given XID */
- if (!(rovr = xprt_lookup_rqst(xprt, *(u32 *) (skb->h.raw + 8))))
+ if (!(rovr = xprt_lookup_rqst(xprt,
+ *(u32 *) (skb->h.raw + sizeof(struct udphdr)))))
goto dropit;
- task = rovr->rq_task;
- dprintk("RPC: %4d received reply\n", task->tk_pid);
- xprt_pktdump("packet data:", (u32 *) (skb->h.raw+8), repsize);
+ dprintk("RPC: %4d received reply\n", rovr->rq_task->tk_pid);
+ xprt_pktdump("packet data:",
+ (u32 *) (skb->h.raw + sizeof(struct udphdr)), repsize);
if ((copied = rovr->rq_rlen) > repsize)
copied = repsize;
- /* Okay, we have it. Copy datagram... */
- memcpy(iov, rovr->rq_rvec, rovr->rq_rnr * sizeof(iov[0]));
- /* This needs to stay tied with the usermode skb_copy_dagram... */
- memcpy_tokerneliovec(iov, skb->data+8, copied);
+ /* Suck it into the iovec, verify checksum if not done by hw. */
+ if (csum_partial_copy_to_page_cache(rovr->rq_rvec, skb, copied))
+ goto dropit;
+
+ /* Something worked... */
+ dst_confirm(skb->dst);
xprt_complete_rqst(xprt, rovr, copied);
xprt->old_write_space = inet->write_space;
if (proto == IPPROTO_UDP) {
inet->data_ready = udp_data_ready;
+ inet->no_check = UDP_CSUM_NORCV;
} else {
inet->data_ready = tcp_data_ready;
inet->state_change = tcp_state_change;