S: FIN-00330 Helsingfors
S: Finland
+N: Trond Myklebust
+E: trond.myklebust@fys.uio.no
+D: current NFS client hacker.
+S: Dagaliveien 31e
+S: N-0391 Oslo
+S: Norway
+
N: Matija Nalis
E: mnalis@jagor.srce.hr
E: mnalis@voyager.hr
IP: firewall packet netlink device
CONFIG_IP_FIREWALL_NETLINK
- If you say Y here, then the first 128 bytes of each packet that hit
- your Linux firewall and was blocked are passed on to optional user
+ If you say Y here, you can use the ipchains tool to copy all or part of
+ any packet you specify that hits your Linux firewall to optional user
space monitoring software that can then look for attacks and take
actions such as paging the administrator of the site.
and you need (to write) a program that reads from that device and
takes appropriate action.
- With the ipchains tool you can specify which packets you want to go
- to this device, as well as how many bytes from each packet.
-
IP: kernel level autoconfiguration
CONFIG_IP_PNP
This enables automatic configuration of IP addresses of devices and
This means we decide at boot time whenever we want to run in text or
graphics mode. Switching mode later on (in protected mode) is
impossible; BIOS calls work in real mode only. VESA BIOS Extentions
-Version 2.0 are required, becauce we need a linear frame buffer.
+Version 2.0 are required, because we need a linear frame buffer.
Advantages:
1. Note: LILO cannot handle hex, for booting directly with
"vga=mode-number" you have to transform the numbers to decimal.
2. Note: Some newer versions of LILO appear to work with those hex values,
- if you set the 0x infront of the numbers.
+ if you set the 0x in front of the numbers.
X11
===
the work back to the Linux community. This work is based on the
2.1.132 and 2.2.0-pre-kernel versions. I'm afraid it's still far from
complete, but we hope it will be useful. As far as we know, it is the
-first 'all-in-one\92 document about the /proc file system. It is
+first 'all-in-one' document about the /proc file system. It is
focused on the Intel x86 hardware, so if you are looking for PPC, ARM,
SPARC, APX, etc., features, you probably won't find what you are
looking for. It also only covers IPv4 networking, not IPv6 nor other
sg-big-buff
This file shows the size of the generic SCSI (sg) buffer. At this
- point, you can\92t tune it yet, but you can change it at compile time
+ point, you can't tune it yet, but you can change it at compile time
by editing include/scsi/sg.h and changing the value of
SG_BIG_BUFF.
swap_cluster
This is probably the greatest influence on system
performance. swap_cluster is the number of pages kswapd writes in
- one turn. You\92ll want this value to be large so that kswapd does
- its I/O in large chunks and the disk doesn\92t have to seek as
- often., but you don\92t want it to be too large since that would
+ one turn. You'll want this value to be large so that kswapd does
+ its I/O in large chunks and the disk doesn't have to seek as
+   often, but you don't want it to be too large since that would
flood the request queue.
overcommit_memory
accept_source_route
Should source routed packages be accepted or declined. The
default is dependent on the kernel configuration. It's 'yes' for
- routers and 'np' for hosts.
+ routers and 'no' for hosts.
bootp_relay
Accept packets with source address 0.b.c.d destined not to this
10: Newer Hauppage (Bt878)
11: Miro PCTV Pro
12: ADS Tech Channel Surfer TV (and maybe TV+FM)
+ 13: AVerMedia TVCapture 98
+ 14: Aimslab VHX
+ 15: Zoltrix TV-Max
- You may have to adjust BTTV_MAJOR to a different number depending on your
kernel version. The official number 81 does not work on some setups.
o Please direct queries about the in kernel version of this driver to
Alan Cox first not to Ralph, or better yet join the video4linux mailing
- list (mail majordomo@phunk.org with "subscribe video4linux")
+ list (mail video4linux-list-request@redhat.com with "subscribe")
L: netdev@roxanne.nuclecu.unam.mx
S: Maintained
+NFS CLIENT
+P: Trond Myklebust
+M: trond.myklebust@fys.uio.no
+L: linux-kernel@vger.rutgers.edu
+S: Maintained
+
NI5010 NETWORK DRIVER
P: Jan-Pascal van Best and Andreas Mohr
M: jvbest@qv3pluto.leidenuniv.nl (Best)
case ide_tape:
printk ("TAPE");
break;
+ case ide_optical:
+ printk ("OPTICAL");
+ drive->removable = 1;
+ break;
default:
printk("UNKNOWN (type %d)", type);
break;
#define ide_scsi 0x21
#define ide_disk 0x20
+#define ide_optical 0x7
#define ide_cdrom 0x5
#define ide_tape 0x1
#define ide_floppy 0x0
{ 3, 4, 0, 2, 15, { 2, 3, 1, 1}, { 13, 14, 11, 7, 0, 0}, 0},
/* Aimslab VHX */
{ 3, 1, 0, 2, 7, { 2, 3, 1, 1}, { 0, 1, 2, 3, 4}},
+ /* Zoltrix TV-Max */
+ { 3, 1, 0, 2,15, { 2, 3, 1, 1}, { 0, 0, 0, 0, 0}},
};
#define TVCARDS (sizeof(tvcards)/sizeof(tvcard))
* because they share the same hardware.
* Johan Myreen <jem@iki.fi> 1998-10-08.
*
+ * Code fixes to handle mouse ACKs properly.
+ * C. Scott Ananian <cananian@alumni.princeton.edu> 1999-01-29.
+ *
*/
#include <linux/config.h>
static struct aux_queue *queue; /* Mouse data buffer. */
static int aux_count = 0;
+/* used when we send commands to the mouse that expect an ACK. */
+static unsigned char mouse_reply_expected = 0;
#define AUX_INTS_OFF (KBD_MODE_KCC | KBD_MODE_DISABLE_MOUSE | KBD_MODE_SYS | KBD_MODE_KBD_INT)
#define AUX_INTS_ON (KBD_MODE_KCC | KBD_MODE_SYS | KBD_MODE_MOUSE_INT | KBD_MODE_KBD_INT)
* Controller Status register are set 0."
*/
-static inline void kb_wait(void)
+static void kb_wait(void)
{
unsigned long timeout = KBC_TIMEOUT;
return 0200;
}
+static inline void handle_mouse_event(unsigned char scancode)
+{
+#ifdef CONFIG_PSMOUSE
+ if (mouse_reply_expected) {
+ if (scancode == AUX_ACK) {
+ mouse_reply_expected--;
+ return;
+ }
+ mouse_reply_expected = 0;
+ }
+
+ add_mouse_randomness(scancode);
+ if (aux_count) {
+ int head = queue->head;
+
+ queue->buf[head] = scancode;
+ head = (head + 1) & (AUX_BUF_SIZE-1);
+ if (head != queue->tail) {
+ queue->head = head;
+ if (queue->fasync)
+ kill_fasync(queue->fasync, SIGIO);
+ wake_up_interruptible(&queue->proc_list);
+ }
+ }
+#endif
+}
+
/*
* This reads the keyboard status port, and does the
* appropriate action.
scancode = inb(KBD_DATA_REG);
if (status & KBD_STAT_MOUSE_OBF) {
-#ifdef CONFIG_PSMOUSE
- /* Mouse data. */
- if (aux_count) {
- int head = queue->head;
- queue->buf[head] = scancode;
- add_mouse_randomness(scancode);
- head = (head + 1) & (AUX_BUF_SIZE-1);
- if (head != queue->tail) {
- queue->head = head;
- if (queue->fasync)
- kill_fasync(queue->fasync, SIGIO);
- wake_up_interruptible(&queue->proc_list);
- }
- }
-#endif
+ handle_mouse_event(scancode);
} else {
if (do_acknowledge(scancode))
handle_scancode(scancode);
static int __init detect_auxiliary_port(void)
{
unsigned long flags;
- unsigned char status;
- unsigned char val;
- int loops = 5;
+ int loops = 10;
int retval = 0;
spin_lock_irqsave(&kbd_controller_lock, flags);
kb_wait();
outb(0x5a, KBD_DATA_REG); /* 0x5a is a random dummy value. */
- status = inb(KBD_STATUS_REG);
- while (!(status & KBD_STAT_OBF) && loops--) {
- mdelay(1);
- status = inb(KBD_STATUS_REG);
- }
+ do {
+ unsigned char status = inb(KBD_STATUS_REG);
- if (status & KBD_STAT_OBF) {
- val = inb(KBD_DATA_REG);
- if (status & KBD_STAT_MOUSE_OBF) {
- printk(KERN_INFO "Detected PS/2 Mouse Port.\n");
- retval = 1;
+ if (status & KBD_STAT_OBF) {
+ (void) inb(KBD_DATA_REG);
+ if (status & KBD_STAT_MOUSE_OBF) {
+ printk(KERN_INFO "Detected PS/2 Mouse Port.\n");
+ retval = 1;
+ }
+ break;
}
- }
-
+ mdelay(1);
+ } while (--loops);
spin_unlock_irqrestore(&kbd_controller_lock, flags);
return retval;
spin_unlock_irqrestore(&kbd_controller_lock, flags);
}
-static unsigned int get_from_queue(void)
+/*
+ * Send a byte to the mouse & handle returned ack
+ */
+static void aux_write_ack(int val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbd_controller_lock, flags);
+ kb_wait();
+ outb(KBD_CCMD_WRITE_MOUSE, KBD_CNTL_REG);
+ kb_wait();
+ outb(val, KBD_DATA_REG);
+ /* we expect an ACK in response. */
+ mouse_reply_expected++;
+ kb_wait();
+ spin_unlock_irqrestore(&kbd_controller_lock, flags);
+}
+
+static unsigned char get_from_queue(void)
{
- unsigned int result;
+ unsigned char result;
unsigned long flags;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&kbd_controller_lock, flags);
result = queue->buf[queue->tail];
queue->tail = (queue->tail + 1) & (AUX_BUF_SIZE-1);
- restore_flags(flags);
+ spin_unlock_irqrestore(&kbd_controller_lock, flags);
return result;
}
kbd_write(KBD_CNTL_REG, KBD_CCMD_MOUSE_ENABLE); /* Enable the
auxiliary port on
controller. */
- aux_write_dev(AUX_ENABLE_DEV); /* Enable aux device */
+ aux_write_ack(AUX_ENABLE_DEV); /* Enable aux device */
kbd_write_cmd(AUX_INTS_ON); /* Enable controller ints */
return 0;
#ifdef INITIALIZE_MOUSE
kbd_write(KBD_CNTL_REG, KBD_CCMD_MOUSE_ENABLE); /* Enable Aux. */
- aux_write_dev(AUX_SET_SAMPLE);
- aux_write_dev(100); /* 100 samples/sec */
- aux_write_dev(AUX_SET_RES);
- aux_write_dev(3); /* 8 counts per mm */
- aux_write_dev(AUX_SET_SCALE21); /* 2:1 scaling */
+ aux_write_ack(AUX_SET_SAMPLE);
+ aux_write_ack(100); /* 100 samples/sec */
+ aux_write_ack(AUX_SET_RES);
+ aux_write_ack(3); /* 8 counts per mm */
+ aux_write_ack(AUX_SET_SCALE21); /* 2:1 scaling */
#endif /* INITIALIZE_MOUSE */
kbd_write(KBD_CNTL_REG, KBD_CCMD_MOUSE_DISABLE); /* Disable aux device. */
kbd_write_cmd(AUX_INTS_OFF); /* Disable controller ints. */
#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
#define AUX_RESET 0xFF /* Reset aux device */
+#define AUX_ACK 0xFA /* Command byte ACK. */
-#define AUX_BUF_SIZE 2048
+#define AUX_BUF_SIZE 2048 /* This might be better divisible by
+ three to make overruns stay in sync
+ but then the read function would need
+ a lock etc - ick */
struct aux_queue {
unsigned long head;
wake_up_interruptible(&tty->link->write_wait);
set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
if (tty->driver.subtype == PTY_TYPE_MASTER) {
- tty_hangup(tty->link);
set_bit(TTY_OTHER_CLOSED, &tty->flags);
#ifdef CONFIG_UNIX98_PTYS
{
}
}
#endif
+ tty_vhangup(tty->link);
}
}
return(DFX_K_SUCCESS);
}
+
+/*
+ * Align an sk_buff to a boundary power of 2
+ *
+ */
+
+void my_skb_align(struct sk_buff *skb, int n)
+{
+ u32 x=(u32)skb->data; /* We only want the low bits .. */
+ u32 v;
+
+ v=(x+n-1)&~(n-1); /* Where we want to be */
+
+ skb_reserve(skb, v-x);
+}
+
\f
/*
* ================
* align to 128 bytes for compatibility with
* the old EISA boards.
*/
- newskb->data = (char *)((unsigned long)
- (newskb->data+127) & ~127);
+
+ my_skb_align(newskb,128);
bp->descr_block_virt->rcv_data[i+j].long_1 = virt_to_bus(newskb->data);
/*
* p_rcv_buff_va is only used inside the
newskb = dev_alloc_skb(NEW_SKB_SIZE);
if (newskb){
rx_in_place = 1;
-
- newskb->data = (char *)((unsigned long)(newskb->data+127) & ~127);
+
+ my_skb_align(newskb, 128);
skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
- skb->data += RCV_BUFF_K_PADDING;
+ skb_reserve(skb, RCV_BUFF_K_PADDING);
bp->p_rcv_buff_va[entry] = (char *)newskb;
bp->descr_block_virt->rcv_data[entry].long_1 = virt_to_bus(newskb->data);
} else
memcpy(skb->data, p_buff + RCV_BUFF_K_PADDING, pkt_len+3);
}
-
- skb->data += 3; /* adjust data field so that it points to FC byte */
- skb->len = pkt_len; /* pass up packet length, NOT including CRC */
+
+ skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
+ skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
skb->dev = bp->dev; /* pass up device pointer */
skb->protocol = fddi_type_trans(skb, bp->dev);
wait_for_cmd_done(ioaddr + SCBCmd);
outw(CU_DUMPSTATS, ioaddr + SCBCmd);
- /* No need to wait for the command unit to accept here. */
- if ((sp->phy[0] & 0x8000) == 0)
- mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
/*
* Request the IRQ last, after we have set up all data structures.
return -EAGAIN;
}
+ /* No need to wait for the command unit to accept here. */
+ if ((sp->phy[0] & 0x8000) == 0)
+ mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
+
MOD_INC_USE_COUNT;
/* Set the timer. The timer serves a dual purpose:
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_ULTRA_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ULTRA_CARDS) "i");
+EXPORT_NO_SYMBOLS;
+
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
int
if_down (dev);
if (! (sp->pp_flags & PP_CISCO)) {
/* Shut down the PPP link. */
+ sp->lcp.magic = jiffies;
sp->lcp.state = LCP_STATE_CLOSED;
sp->ipcp.state = IPCP_STATE_CLOSED;
sppp_clear_timeout (sp);
EXPORT_SYMBOL(sppp_open);
+int sppp_reopen (struct device *dev)
+{
+ struct sppp *sp = &((struct ppp_device *)dev)->sppp;
+ sppp_close(dev);
+ dev->flags |= IFF_RUNNING;
+ if (!(sp->pp_flags & PP_CISCO))
+ {
+ sp->lcp.magic = jiffies;
+ ++sp->pp_seq;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Give it a moment for the line to settle then go */
+ sppp_set_timeout (sp, 1);
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_reopen);
+
int sppp_change_mtu(struct device *dev, int new_mtu)
{
if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
int sppp_isempty (struct device *dev);
void sppp_flush (struct device *dev);
int sppp_open (struct device *dev);
+int sppp_reopen (struct device *dev);
int sppp_close (struct device *dev);
#endif
*
* Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses
* new PCI BIOS interface.
+ * Alan Cox <alan@redhat.com>: Fixed the out of memory
+ * handling.
*
********************************************************************/
netif_rx( skb );
}
} else {
- skb = (struct sk_buff *) head_list->buffer[9].address;
- head_list->buffer[9].address = 0;
- skb_trim( skb, head_list->frameSize );
-
+ struct sk_buff *new_skb;
+
+ /*
+ * I changed the algorithm here. What we now do
+ * is allocate the new frame. If this fails we
+ * simply recycle the frame.
+ */
+
+ new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( new_skb != NULL ) {
+ /* If this ever happened it would be a problem */
+ /* not any more - ac */
+ skb = (struct sk_buff *) head_list->buffer[9].address;
+ head_list->buffer[9].address = 0;
+ skb_trim( skb, head_list->frameSize );
#if LINUX_KERNEL_VERSION > 0x20100
priv->stats->rx_bytes += head_list->frameSize;
#endif
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
-
- skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
- if ( skb == NULL ) {
- printk( "TLAN: Couldn't allocate memory for received data.\n" );
- /* If this ever happened it would be a problem */
- } else {
- skb->dev = dev;
- skb_reserve( skb, 2 );
- t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+
+ new_skb->dev = dev;
+ skb_reserve( new_skb, 2 );
+ t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
head_list->buffer[0].address = virt_to_bus( t );
- head_list->buffer[9].address = (u32) skb;
+ head_list->buffer[9].address = (u32) new_skb;
}
+ else
+ printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
}
head_list->forward = 0;
if(status&TxEOM)
{
/* printk("%s: Tx underrun.\n", chan->dev->name); */
+ chan->stats.tx_fifo_errors++;
write_zsctrl(chan, ERR_RES);
z8530_tx_done(chan);
}
{
printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
+ if(chan->netdevice)
+ sppp_reopen(chan->netdevice);
}
else
{
{
printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
+ if(chan->netdevice)
+ sppp_reopen(chan->netdevice);
}
else
{
{
flags=claim_dma_lock();
disable_dma(c->txdma);
+ /*
+ * Check if we crapped out.
+ */
+ if(get_dma_residue(c->txdma))
+ {
+ c->stats.tx_dropped++;
+ c->stats.tx_fifo_errors++;
+ }
release_dma_lock(flags);
}
c->txcount=0;
set_dma_count(c->txdma, c->txcount);
enable_dma(c->txdma);
release_dma_lock(flags);
+ write_zsctrl(c, RES_EOM_L);
write_zsreg(c, R5, c->regs[R5]|TxENAB);
}
else
c->tx_skb=NULL;
z8530_tx_begin(c);
spin_unlock_irqrestore(&z8530_buffer_lock, flags);
+ c->stats.tx_packets++;
+ c->stats.tx_bytes+=skb->len;
dev_kfree_skb(skb);
}
skb=dev_alloc_skb(ct);
if(skb==NULL)
- printk("%s: Memory squeeze.\n", c->netdevice->name);
+ {
+ c->stats.rx_dropped++;
+ printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
+ }
else
{
skb_put(skb, ct);
memcpy(skb->data, rxb, ct);
+ c->stats.rx_packets++;
+ c->stats.rx_bytes+=ct;
}
c->dma_ready=1;
}
{
skb_put(c->skb2,c->mtu);
}
+ c->stats.rx_packets++;
+ c->stats.rx_bytes+=ct;
+
}
/*
* If we received a frame we must now process it.
c->rx_function(c,skb);
}
else
- printk("Lost a frame\n");
+ {
+ c->stats.rx_dropped++;
+ printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
+ }
}
/*
h=0;
while ( devid[h] != 0 )
{
- pci_find_device(0x1191,devid[h],pdev);
- if (pdev == NULL); {
+ pdev = pci_find_device(0x1191,devid[h],pdev);
+ if (pdev == NULL) {
h++;
index=0;
continue;
* Ver 0.6 Jan 27 98 Allow disabling of SCSI command translation layer
* for access through /dev/sg.
* Fix MODE_SENSE_6/MODE_SELECT_6/INQUIRY translation.
- * Ver 0.7 Dev 04 98 Ignore commands where lun != 0 to avoid multiple
+ * Ver 0.7 Dec 04 98 Ignore commands where lun != 0 to avoid multiple
* detection of devices with CONFIG_SCSI_MULTI_LUN
+ * Ver 0.8 Feb 05 99 Optical media need translation too.
*/
#define IDESCSI_VERSION "0.6"
if (!test_bit(PC_TRANSFORM, &pc->flags))
return;
- if (drive->media == ide_cdrom) {
+ if (drive->media == ide_cdrom || drive->media == ide_optical) {
if (c[0] == READ_6 || c[0] == WRITE_6) {
c[8] = c[4]; c[5] = c[3]; c[4] = c[2];
c[3] = c[1] & 0x1f; c[2] = 0; c[1] &= 0xe0;
if (!test_bit(PC_TRANSFORM, &pc->flags))
return;
- if (drive->media == ide_cdrom) {
+ if (drive->media == ide_cdrom || drive->media == ide_optical) {
if (pc->c[0] == MODE_SENSE_10 && sc[0] == MODE_SENSE) {
scsi_buf[0] = atapi_buf[1]; /* Mode data length */
scsi_buf[1] = atapi_buf[2]; /* Medium type */
int idescsi_reset (Scsi_Cmnd *cmd, unsigned int resetflags)
{
- return SCSI_RESET_PUNT;
+ return SCSI_RESET_SUCCESS;
}
int idescsi_bios (Disk *disk, kdev_t dev, int *parm)
status = imm_out(host_no, &cmd[l << 1], 2);
if (!status) {
- imm_disconnect(host_no);
- imm_connect(host_no, CONNECT_EPP_MAYBE);
- w_dtr(ppb, 0x40);
- w_ctr(ppb, 0x08);
- udelay(30);
- w_ctr(ppb, 0x0c);
- udelay(1000);
- imm_disconnect(host_no);
- udelay(1000);
- if (imm_hosts[host_no].mode == IMM_EPP_32) {
- imm_hosts[host_no].mode = old_mode;
- goto second_pass;
- }
+ imm_disconnect(host_no);
+ imm_connect(host_no, CONNECT_EPP_MAYBE);
+ imm_reset_pulse(IMM_BASE(host_no));
+ udelay(1000);
+ imm_disconnect(host_no);
+ udelay(1000);
+ if (imm_hosts[host_no].mode == IMM_EPP_32) {
+ imm_hosts[host_no].mode = old_mode;
+ goto second_pass;
+ }
printk("imm: Unable to establish communication, aborting driver load.\n");
return 1;
}
base = pdev->base_address[1];
base_2 = pdev->base_address[2];
irq = pdev->irq;
+ if ((base & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ base_2 = pdev->base_address[3];
#else
(void) pcibios_read_config_dword(bus, device_fn,
PCI_BASE_ADDRESS_0, &io_port);
#ifndef _PPA_H
#define _PPA_H
-#define PPA_VERSION "2.03 (for Linux 2.0.0)"
+#define PPA_VERSION "2.03 (for Linux 2.2.x)"
/*
* this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu)
*/
if (rscsi_disks[dev].sector_size == 1024)
if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
- printk("sd.c:Bad block number requested");
+ printk("sd.c:Bad block number/count requested");
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
if (rscsi_disks[dev].sector_size == 2048)
if((block & 3) || (SCpnt->request.nr_sectors & 3)) {
- printk("sd.c:Bad block number requested");
+ printk("sd.c:Bad block number/count requested");
SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
goto repeat;
}
* Audio 1 and Audio 2 at the same time.
* (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this
* includes both the ESS stuff that has been in
- * sb_*[ch] before I touched it and the ESS suppor
+ * sb_*[ch] before I touched it and the ESS support
* I added later
- * (Jan 23 1998): Full Duplex seems to work. I wrote a small
+ * (Jan 23 1999): Full Duplex seems to work. I wrote a small
* test proggy which works OK. Haven't found
* any applications to test it though. So why did
* I bother to create it anyway?? :) Just for
* ES1946 yes This is a PCI chip; not handled by this driver
*/
-#include <linux/config.h>
#include <linux/delay.h>
#include "sound_config.h"
int n=low;
if (index < 0) { /* first free */
+
+ while (*list && (*list)->unit_minor<n)
+ list=&((*list)->next);
+
while(n<top)
{
/* Found a hole ? */
}
if(n>=top)
- return -ENOMEM;
+ return -ENOENT;
} else {
n = low+(index*16);
while (*list) {
int r;
struct sound_unit *s=(struct sound_unit *)kmalloc(sizeof(struct sound_unit), GFP_KERNEL);
if(s==NULL)
- return -1;
+ return -ENOMEM;
spin_lock(&sound_loader_lock);
r=__sound_insert_unit(s,list,fops,index,low,top);
spin_unlock(&sound_loader_lock);
- if(r==-1)
+ if(r<0)
kfree(s);
return r;
}
sb = dir->i_sb;
inode->i_sb = sb;
- inode->i_flags = sb->s_flags;
+ inode->i_flags = 0;
if (!(block = affs_new_header((struct inode *)dir))) {
iput(inode);
static int nr_buffers = 0;
static int nr_buffers_type[NR_LIST] = {0,};
+static int size_buffers_type[NR_LIST] = {0,};
static int nr_buffer_heads = 0;
static int nr_unused_buffer_heads = 0;
each time we call refill */
int nref_dirt; /* Dirty buffer threshold for activating bdflush
when trying to refill buffers. */
- int dummy1; /* unused */
+ int pct_dirt; /* Max %age of mem for dirty buffers before
+ activating bdflush */
int age_buffer; /* Time for normal buffer to age before
we flush it */
int age_super; /* Time for superblock to age before we
return;
}
nr_buffers_type[bh->b_list]--;
+ size_buffers_type[bh->b_list] -= bh->b_size;
remove_from_hash_queue(bh);
remove_from_lru_list(bh);
}
(*bhp)->b_prev_free = bh;
nr_buffers_type[bh->b_list]++;
+ size_buffers_type[bh->b_list] += bh->b_size;
/* Put the buffer in new hash-queue if it has a device. */
bh->b_next = NULL;
file_buffer(buf, dispose);
if(dispose == BUF_DIRTY) {
int too_many = (nr_buffers * bdf_prm.b_un.nfract/100);
+ int too_large = (num_physpages * bdf_prm.b_un.pct_dirt/100);
/* This buffer is dirty, maybe we need to start flushing.
* If too high a percentage of the buffers are dirty...
*/
- if (nr_buffers_type[BUF_DIRTY] > too_many)
- wakeup_bdflush(0);
-
+ if (nr_buffers_type[BUF_DIRTY] > too_many ||
+ (size_buffers_type[BUF_DIRTY] + size_buffers_type[BUF_LOCKED])/PAGE_SIZE > too_large) {
+ if (nr_buffers_type[BUF_LOCKED] > 2 * bdf_prm.b_un.ndirty)
+ wakeup_bdflush(1);
+ else
+ wakeup_bdflush(0);
+ }
+
/* If this is a loop device, and
* more than half of the buffers are dirty...
* (Prevents no-free-buffers deadlock with loop device.)
#ifdef DEBUG
for(nlist = 0; nlist < NR_LIST; nlist++)
#else
- for(nlist = BUF_DIRTY; nlist <= BUF_DIRTY; nlist++)
+ for(nlist = BUF_LOCKED; nlist <= BUF_DIRTY; nlist++)
#endif
{
ndirty = 0;
}
/* Clean buffer on dirty list? Refile it */
- if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh))
- {
+ if (nlist == BUF_DIRTY && !buffer_dirty(bh) && !buffer_locked(bh)) {
+ refile_buffer(bh);
+ continue;
+ }
+
+ /* Unlocked buffer on locked list? Refile it */
+ if (nlist == BUF_LOCKED && !buffer_locked(bh)) {
refile_buffer(bh);
continue;
}
#include <asm/segment.h>
#include <asm/uaccess.h>
#include <linux/utsname.h>
+#define __NO_VERSION__
+#include <linux/module.h>
#include <linux/coda.h>
#include <linux/coda_linux.h>
coda_cache_inv_stats_get_info
};
+static void coda_proc_modcount(struct inode *inode, int fill)
+{
+ if (fill)
+ MOD_INC_USE_COUNT;
+ else
+ MOD_DEC_USE_COUNT;
+}
+
#endif
#ifdef CONFIG_PROC_FS
proc_register(&proc_root_fs,&proc_fs_coda);
+ proc_fs_coda.fill_inode = &coda_proc_modcount;
proc_register(&proc_fs_coda,&proc_coda_vfs);
proc_register(&proc_fs_coda,&proc_coda_upcall);
proc_register(&proc_fs_coda,&proc_coda_permission);
struct {
int nr_inodes;
int nr_free_inodes;
- int preshrink; /* pre-shrink dcache? */
- int dummy[4];
-} inodes_stat = {0, 0, 0,};
+ int dummy[5];
+} inodes_stat = {0, 0,};
int max_inodes;
spin_unlock(&inode_lock);
}
+/*
+ * Called with the spinlock already held..
+ */
+static void sync_all_inodes(void)
+{
+ struct super_block * sb = sb_entry(super_blocks.next);
+ for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
+ if (!sb->s_dev)
+ continue;
+ sync_list(&sb->s_dirty);
+ }
+}
+
/*
* Needed by knfsd
*/
return found;
}
-static void shrink_dentry_inodes(int goal)
-{
- int found;
-
- spin_unlock(&inode_lock);
- found = select_dcache(goal, 0);
- if (found < goal)
- found = goal;
- prune_dcache(found);
- spin_lock(&inode_lock);
-}
-
/*
* Searches the inodes list for freeable inodes,
* shrinking the dcache before (and possible after,
*/
static void try_to_free_inodes(int goal)
{
- shrink_dentry_inodes(goal);
- if (!free_inodes())
- shrink_dentry_inodes(goal);
+	/*
+	 * First try to just get rid of unused inodes.
+ *
+ * If we can't reach our goal that way, we'll have
+ * to try to shrink the dcache and sync existing
+ * inodes..
+ */
+ free_inodes();
+ goal -= inodes_stat.nr_free_inodes;
+ if (goal > 0) {
+ spin_unlock(&inode_lock);
+ prune_dcache(goal);
+ spin_lock(&inode_lock);
+ sync_all_inodes();
+ free_inodes();
+ }
}
/*
/*
* Check whether to restock the unused list.
*/
- if (inodes_stat.preshrink) {
+ if (inodes_stat.nr_inodes > max_inodes) {
struct list_head *tmp;
- try_to_free_inodes(8);
+ try_to_free_inodes(inodes_stat.nr_inodes >> 2);
tmp = inode_unused.next;
if (tmp != &inode_unused) {
inodes_stat.nr_free_inodes--;
*/
inodes_stat.nr_inodes += INODES_PER_PAGE;
inodes_stat.nr_free_inodes += INODES_PER_PAGE - 1;
- inodes_stat.preshrink = 0;
- if (inodes_stat.nr_inodes > max_inodes)
- inodes_stat.preshrink = 1;
return inode;
}
* the dcache and then try again to free some inodes.
*/
prune_dcache(inodes_stat.nr_inodes >> 2);
- inodes_stat.preshrink = 1;
spin_lock(&inode_lock);
free_inodes();
*/
static u32 nlm_cookie = 0x1234;
+static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
+{
+ memcpy(c->data, &nlm_cookie, 4);
+ memset(c->data+4, 0, 4);
+ c->len=4;
+ nlm_cookie++;
+}
+
/*
* Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
*/
struct nlm_lock *lock = &argp->lock;
memset(argp, 0, sizeof(*argp));
- argp->cookie = nlm_cookie++;
+ nlmclnt_next_cookie(&argp->cookie);
argp->state = nsm_local_state;
lock->fh = *NFS_FH(fl->fl_file->f_dentry);
lock->caller = system_utsname.nodename;
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
- call->a_args.cookie = nlm_cookie++;
+ nlmclnt_next_cookie(&call->a_args.cookie);
call->a_args.lock = *lock;
call->a_args.lock.caller = system_utsname.nodename;
/* Perform the RPC call. If an error occurs, try again */
if ((status = rpc_call(clnt, proc, argp, resp, 0)) < 0) {
dprintk("lockd: rpc_call returned error %d\n", -status);
- if (status == -ERESTARTSYS)
- return status;
- nlm_rebind_host(host);
+ switch (status) {
+ case -EPROTONOSUPPORT:
+ status = -EINVAL;
+ break;
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ENOTCONN:
+ status = -EAGAIN;
+ break;
+ case -ERESTARTSYS:
+ return signalled () ? -EINTR : status;
+ default:
+ break;
+ }
+ if (req->a_args.block)
+ nlm_rebind_host(host);
+ else
+ break;
} else
if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
dprintk("lockd: server in grace period\n");
/* Back off a little and try again */
interruptible_sleep_on_timeout(&host->h_gracewait, 15*HZ);
- } while (!signalled());
- return -ERESTARTSYS;
+ /* When the lock requested by F_SETLKW isn't available,
+ we will wait until the request can be satisfied. If
+ a signal is received during wait, we should return
+ -EINTR. */
+ if (signalled ()) {
+ status = -EINTR;
+ break;
+ }
+ } while (1);
+
+ return status;
}
/*
int status = req->a_res.status;
if (RPC_ASSASSINATED(task))
- goto die;
+ return;
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
&& status != NLM_LCK_DENIED_GRACE_PERIOD) {
printk("lockd: unexpected unlock status: %d\n", status);
}
-
-die:
- rpc_release_task(task);
}
/*
}
die:
- rpc_release_task(task);
nlm_release_host(req->a_host);
kfree(req);
return;
{
struct svc_serv *serv = rqstp->rq_server;
int err = 0;
+ unsigned long grace_period_expire;
/* Lock module and set up kernel thread */
MOD_INC_USE_COUNT;
}
#endif
- nlmsvc_grace_period += jiffies;
+ grace_period_expire = nlmsvc_grace_period + jiffies;
nlmsvc_timeout = nlm_timeout * HZ;
/*
return NULL;
}
+static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
+{
+ if(a->len != b->len)
+ return 0;
+ if(memcmp(a->data,b->data,a->len))
+ return 0;
+ return 1;
+}
+
/*
* Find a block with a given NLM cookie.
*/
static inline struct nlm_block *
-nlmsvc_find_block(u32 cookie)
+nlmsvc_find_block(struct nlm_cookie *cookie)
{
struct nlm_block *block;
for (block = nlm_blocked; block; block = block->b_next) {
- if (block->b_call.a_args.cookie == cookie)
+ if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie))
break;
}
*/
static inline struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
- struct nlm_lock *lock, u32 cookie)
+ struct nlm_lock *lock, struct nlm_cookie *cookie)
{
struct nlm_block *block;
struct nlm_host *host;
lock->fl.fl_notify = nlmsvc_notify_blocked;
if (!nlmclnt_setgrantargs(&block->b_call, lock))
goto failed_free;
- block->b_call.a_args.cookie = cookie; /* see above */
+ block->b_call.a_args.cookie = *cookie; /* see above */
dprintk("lockd: created block %p...\n", block);
*/
u32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
- struct nlm_lock *lock, int wait, u32 cookie)
+ struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
struct file_lock *conflock;
struct nlm_block *block;
unsigned long timeout;
dprintk("lockd: GRANT_MSG RPC callback\n");
- if (!(block = nlmsvc_find_block(call->a_args.cookie))) {
- dprintk("lockd: no block for cookie %x\n", call->a_args.cookie);
+ if (!(block = nlmsvc_find_block(&call->a_args.cookie))) {
+ dprintk("lockd: no block for cookie %x\n", *(u32 *)(call->a_args.cookie.data));
return;
}
block->b_incall = 0;
nlm_release_host(call->a_host);
- rpc_release_task(task);
}
/*
* block.
*/
void
-nlmsvc_grant_reply(u32 cookie, u32 status)
+nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status)
{
struct nlm_block *block;
struct nlm_file *file;
/* Now try to lock the file */
resp->status = nlmsvc_lock(rqstp, file, &argp->lock,
- argp->block, argp->cookie);
+ argp->block, &argp->cookie);
dprintk("lockd: LOCK status %ld\n", ntohl(resp->status));
nlm_release_host(host);
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
- rpc_release_task(task);
kfree(call);
}
/*
* XDR functions for basic NLM types
*/
-static inline u32 *
-nlm_decode_cookie(u32 *p, u32 *c)
+static inline u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c)
{
unsigned int len;
- if ((len = ntohl(*p++)) == 4) {
- *c = ntohl(*p++);
- } else if (len == 0) { /* hockeypux brain damage */
- *c = 0;
- } else {
+ len = ntohl(*p++);
+
+ if(len==0)
+ {
+ c->len=4;
+ memset(c->data, 0, 4); /* hockeypux brain damage */
+ }
+ else if(len<=8)
+ {
+ c->len=len;
+ memcpy(c->data, p, len);
+ p+=(len+3)>>2;
+ }
+ else
+ {
printk(KERN_NOTICE
- "lockd: bad cookie size %d (should be 4)\n", len);
+ "lockd: bad cookie size %d (only cookies under 8 bytes are supported.)\n", len);
return NULL;
}
return p;
}
static inline u32 *
-nlm_encode_cookie(u32 *p, u32 c)
+nlm_encode_cookie(u32 *p, struct nlm_cookie *c)
{
- *p++ = htonl(sizeof(c));
- *p++ = htonl(c);
+ *p++ = htonl(c->len);
+ memcpy(p, c->data, c->len);
+ p+=(c->len+3)>>2;
return p;
}
static u32 *
nlm_encode_testres(u32 *p, struct nlm_res *resp)
{
- if (!(p = nlm_encode_cookie(p, resp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
int
nlmsvc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
- if (!(p = nlm_encode_cookie(p, resp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
*p++ = xdr_zero; /* sequence argument */
int
nlmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
- if (!(p = nlm_encode_cookie(p, resp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return 0;
*p++ = resp->status;
return xdr_ressize_check(rqstp, p);
{
struct nlm_lock *lock = &argp->lock;
- if (!(p = nlm_encode_cookie(p, argp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
if (!(p = nlm_encode_lock(p, lock)))
{
struct nlm_lock *lock = &argp->lock;
- if (!(p = nlm_encode_cookie(p, argp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = argp->block? xdr_one : xdr_zero;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
{
struct nlm_lock *lock = &argp->lock;
- if (!(p = nlm_encode_cookie(p, argp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = argp->block? xdr_one : xdr_zero;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
{
struct nlm_lock *lock = &argp->lock;
- if (!(p = nlm_encode_cookie(p, argp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &argp->cookie)))
return -EIO;
if (!(p = nlm_encode_lock(p, lock)))
return -EIO;
static int
nlmclt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
- if (!(p = nlm_encode_cookie(p, resp->cookie)))
+ if (!(p = nlm_encode_cookie(p, &resp->cookie)))
return -EIO;
*p++ = resp->status;
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
* Buffer requirements for NLM
*/
#define NLM_void_sz 0
-#define NLM_cookie_sz 2
+#define NLM_cookie_sz 3 /* 1 len , 2 data */
#define NLM_caller_sz 1+QUADLEN(sizeof(system_utsname.nodename))
#define NLM_netobj_sz 1+QUADLEN(XDR_MAX_NETOBJ)
/* #define NLM_owner_sz 1+QUADLEN(NLM_MAXOWNER) */
#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
+#define BLKSSZGET _IO(0x12,104)/* get block device sector size (reserved for) */
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
* Server-side lock handling
*/
u32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
- struct nlm_lock *, int, u32);
+ struct nlm_lock *, int, struct nlm_cookie *);
u32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *,
struct nlm_lock *);
struct file_lock fl;
};
+/*
+ * NLM cookies. Technically they can be up to 1KB, but nobody
+ * uses more than 8 bytes in practice.
+ */
+
+struct nlm_cookie
+{
+ unsigned char data[8];
+ unsigned int len;
+};
+
/*
* Generic lockd arguments for all but sm_notify
*/
struct nlm_args {
- u32 cookie;
+ struct nlm_cookie cookie;
struct nlm_lock lock;
u32 block;
u32 reclaim;
* Generic lockd result
*/
struct nlm_res {
- u32 cookie;
+ struct nlm_cookie cookie;
u32 status;
struct nlm_lock lock;
};
/*
* ioctl commands
*/
-#define SMB_IOC_GETMOUNTUID _IOR('u', 1, uid_t)
+#define SMB_IOC_GETMOUNTUID _IOR('u', 1, __kernel_uid_t)
#define SMB_IOC_NEWCONN _IOW('u', 2, struct smb_conn_opt)
#ifdef __KERNEL__
struct task_struct * prev, * next;
int this_cpu;
+ run_task_queue(&tq_scheduler);
+
prev = current;
this_cpu = prev->processor;
/*
/* Do "administrative" work here while we don't hold any locks */
if (bh_active & bh_mask)
do_bottom_half();
- run_task_queue(&tq_scheduler);
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
continue;
}
unregister_proc_table(table->child, de);
+
+ /* Don't unregister directories which still have entries.. */
+ if (de->subdir)
+ continue;
}
- /* Don't unregister proc directories which still have
- entries... */
- if (!((de->mode & S_IFDIR) && de->subdir)) {
- proc_unregister(root, de->low_ino);
- table->de = NULL;
- kfree(de);
- }
+
+ /* Don't unregister proc entries that are still being used.. */
+ if (de->count)
+ continue;
+
+ proc_unregister(root, de->low_ino);
+ table->de = NULL;
+ kfree(de);
}
}
int kpiod(void * unused)
{
- struct wait_queue wait = {current};
+ struct task_struct *tsk = current;
+ struct wait_queue wait = { tsk, };
struct inode * inode;
struct dentry * dentry;
struct pio_request * p;
- current->session = 1;
- current->pgrp = 1;
- strcpy(current->comm, "kpiod");
- sigfillset(¤t->blocked);
+ tsk->session = 1;
+ tsk->pgrp = 1;
+ strcpy(tsk->comm, "kpiod");
+ sigfillset(&tsk->blocked);
init_waitqueue(&pio_wait);
+ /*
+ * Mark this task as a memory allocator - we don't want to get caught
+ * up in the regular mm freeing frenzy if we have to allocate memory
+ * in order to write stuff out.
+ */
+ tsk->flags |= PF_MEMALLOC;
lock_kernel();
NULL, NULL);
if (!pio_request_cache)
panic ("Could not create pio_request slab cache");
-
+
while (1) {
- current->state = TASK_INTERRUPTIBLE;
+ tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue(&pio_wait, &wait);
- while (!pio_first)
+ if (!pio_first)
schedule();
remove_wait_queue(&pio_wait, &wait);
- current->state = TASK_RUNNING;
+ tsk->state = TASK_RUNNING;
while (pio_first) {
p = get_pio_request();
ifeq ($(CONFIG_IRDA),y)
SUB_DIRS += irda
+# There might be some irda features that are compiled as modules
+MOD_SUB_DIRS += irda
else
ifeq ($(CONFIG_IRDA),m)
MOD_SUB_DIRS += irda
n->stamp=skb->stamp;
n->destructor = NULL;
n->security=skb->security;
+#ifdef CONFIG_IP_FIREWALL
+ n->fwmark = skb->fwmark;
+#endif
return n;
}
n->stamp=skb->stamp;
n->destructor = NULL;
n->security=skb->security;
+#ifdef CONFIG_IP_FIREWALL
+ n->fwmark = skb->fwmark;
+#endif
return n;
}
*
* IPv4 Forwarding Information Base: semantics.
*
- * Version: $Id: fib_semantics.c,v 1.11 1998/10/03 09:37:12 davem Exp $
+ * Version: $Id: fib_semantics.c,v 1.12 1999/01/26 05:33:44 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
*
* Does (reverse-masq) forwarding based on skb->fwmark value
*
- * $Id: ip_masq_mfw.c,v 1.2 1998/12/12 02:40:42 davem Exp $
+ * $Id: ip_masq_mfw.c,v 1.3 1999/01/26 05:33:47 davem Exp $
*
* Author: Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
* based on Steven Clarke's portfw
*
* The Internet Protocol (IP) output module.
*
- * Version: $Id: ip_output.c,v 1.64 1999/01/04 20:05:33 davem Exp $
+ * Version: $Id: ip_output.c,v 1.65 1999/01/21 13:37:34 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* PROC file system. It is mainly used for debugging and
* statistics.
*
- * Version: $Id: proc.c,v 1.33 1998/10/21 05:44:35 davem Exp $
+ * Version: $Id: proc.c,v 1.34 1999/02/08 11:20:34 davem Exp $
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.153 1999/01/20 07:20:03 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.155 1999/01/26 05:33:50 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.164 1999/01/04 20:36:55 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.165 1999/02/08 11:19:56 davem Exp $
*
* IPv4 specific functions
*
if (sk->ip_pmtudisc != IP_PMTUDISC_DONT && sk->dst_cache) {
if (tp->pmtu_cookie > sk->dst_cache->pmtu &&
!atomic_read(&sk->sock_readers)) {
- lock_sock(sk);
tcp_sync_mss(sk, sk->dst_cache->pmtu);
/* Resend the TCP packet because it's
* discovery.
*/
tcp_simple_retransmit(sk);
- release_sock(sk);
} /* else let the usual retransmit timer handle it */
}
}
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.94 1998/11/07 11:50:33 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.95 1999/02/08 11:20:03 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
sk->err_soft = -dst->error;
} else if (tp->pmtu_cookie > dst->pmtu
&& !atomic_read(&sk->sock_readers)) {
- lock_sock(sk);
tcp_sync_mss(sk, dst->pmtu);
tcp_simple_retransmit(sk);
- release_sock(sk);
} /* else let the usual retransmit timer handle it */
dst_release(dst);
return;
ipv6_statistics.Ip6InDelivers++;
- /* XXX We need to think more about socket locking
- * XXX wrt. backlog queues, __release_sock(), etc. -DaveM
- */
- lock_sock(sk);
-
/*
* This doesn't check if the socket has enough room for the packet.
* Either process the packet _without_ queueing it and then free it,
nsk = tcp_v6_hnd_req(sk, skb);
if (!nsk)
goto discard;
- lock_sock(nsk);
- release_sock(sk);
+
+ /*
+ * Queue it on the new socket if the new socket is active,
+ * otherwise we just short-circuit this and continue with
+ * the new socket..
+ */
+ if (atomic_read(&nsk->sock_readers)) {
+ __skb_queue_tail(&nsk->back_log, skb);
+ return 0;
+ }
sk = nsk;
}
goto reset;
if (users)
goto ipv6_pktoptions;
- release_sock(sk);
return 0;
reset:
if (users)
kfree_skb(skb);
kfree_skb(skb);
- release_sock(sk);
return 0;
ipv6_pktoptions:
if (skb)
kfree_skb(skb);
- release_sock(sk);
return 0;
}
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
+#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/unistd.h>
* we just punt connects from unprivileged ports. */
if (ntohs(sin.sin_port) >= 1024) {
printk(KERN_WARNING
- "%s: connect from unprivileged port: %08lx:%d",
+ "%s: connect from unprivileged port: %s:%d",
serv->sv_name,
- ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ in_ntoa(sin.sin_addr.s_addr), ntohs(sin.sin_port));
goto failed;
}
- dprintk("%s: connect from %08lx:%04x\n", serv->sv_name,
- ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ dprintk("%s: connect from %s:%04x\n", serv->sv_name,
+ in_ntoa(sin.sin_addr.s_addr), ntohs(sin.sin_port));
if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
goto failed;
# /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may
# differ on your system.
#
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:$PATH
echo '-- Versions installed: (if some fields are empty or looks'
echo '-- unusual then possibly you have very old versions)'
uname -a