VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 24
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
}
else
{
- int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ int len = skb->len;
+ int pad = 0;
+ int gp_start;
unsigned char *buf = skb->data;
+
+ if(len < ETH_ZLEN)
+ pad = ETH_ZLEN - len;
+
+ gp_start = 0x800 - ( len + pad );
load_it_again_sam:
lp->tx_pkt_start = gp_start;
outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
- outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
+ outsb(DATAPORT,buf,len); /* load buffer (usual thing each byte increments the pointer) */
+ if(pad)
+ {
+ while(pad--) /* Zero fill buffer tail */
+ outb(0, DATAPORT);
+ }
outw(gp_start, GP_LOW); /* the board reuses the same register */
if(lp->loading==2) /* A receive upset our load, despite our best efforts */
adapter->current_dma.direction = 1;
adapter->current_dma.start_time = jiffies;
- if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS) {
- memcpy(adapter->dma_buffer, skb->data, nlen);
+ if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
+ memcpy(adapter->dma_buffer, skb->data, skb->len);
+ memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
target = virt_to_bus(adapter->dma_buffer);
}
else {
static int el16_close(struct device *dev);
static struct net_device_stats *el16_get_stats(struct device *dev);
-static void hardware_send_packet(struct device *dev, void *buf, short length);
+static void hardware_send_packet(struct device *dev, void *buf, short length, short pad);
static void init_82586_mem(struct device *dev);
\f
outb(0x80, ioaddr + MISC_CTRL);
#ifdef CONFIG_SMP
spin_lock_irqsave(&lp->lock, flags);
- hardware_send_packet(dev, buf, length);
+ hardware_send_packet(dev, buf, skb->len, length - skb->len);
spin_unlock_irqrestore(&lp->lock, flags);
#else
- hardware_send_packet(dev, buf, length);
+ hardware_send_packet(dev, buf, skb->len, length - skb->len);
#endif
dev->trans_start = jiffies;
/* Enable the 82586 interrupt input. */
return;
}
-static void hardware_send_packet(struct device *dev, void *buf, short length)
+static void hardware_send_packet(struct device *dev, void *buf, short length, short pad)
{
struct net_local *lp = (struct net_local *)dev->priv;
short ioaddr = dev->base_addr;
ushort tx_block = lp->tx_head;
unsigned long write_ptr = dev->mem_start + tx_block;
+ static char padding[ETH_ZLEN];
/* Set the write pointer to the Tx block, and put out the header. */
writew(0x0000,write_ptr); /* Tx status */
writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
/* Output the data buffer descriptor. */
- writew(length | 0x8000,write_ptr+=2); /* Byte count parameter. */
+ writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
writew(-1,write_ptr+=2); /* No next data buffer. */
writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
/* Output the packet at the write pointer. */
memcpy_toio(write_ptr+2, buf, length);
+ if(pad)
+ memcpy_toio(write_ptr+length+2, padding, pad);
/* Set the old command link pointing to this send packet. */
writew(tx_block,dev->mem_start + lp->tx_cmd_link);
if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
printk("%s: Transmitter access conflict.\n", dev->name);
} else {
- memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);
len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+ if(len != skb->len)
+ memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
+ memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);
#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
/* We will need this to flush the buffer out */
lp->tx_ring[lp->tx_ring_head].skb=skb;
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ goto out;
+ }
np->length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
np->data = virt_to_bus(skb->data);
p->control &= ~CONTROL_EOL; /* Clear EOL on p */
+out:
dev->tbusy = 0; /* Keep feeding me */
restore_flags(flags);
ib->btx_ring [entry].length = (-len) | 0xf000;
ib->btx_ring [entry].misc = 0;
+ if(skb->len < ETH_ZLEN)
+ memset((char *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen);
/* Now, give the packet to the lance */
if (test_and_set_bit(0, (void *) &dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short length = skb->len;
dev->trans_start = jiffies;
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
tbd = lp->tbds + lp->next_tx_cmd;
struct ei_device *ei_local = (struct ei_device *) dev->priv;
int length, send_length, output_page;
unsigned long flags;
+ char scratch[ETH_ZLEN];
/*
* We normally shouldn't be called if dev->tbusy is set, but the
* isn't already sending. If it is busy, the interrupt handler will
* trigger the send later, upon receiving a Tx done interrupt.
*/
-
- ei_block_output(dev, length, skb->data, output_page);
+
+ if(length == send_length)
+ ei_block_output(dev, length, skb->data, output_page);
+ else
+ {
+ memset(scratch, 0, ETH_ZLEN);
+ memcpy(scratch, skb->data, skb->len);
+ ei_block_output(dev, ETH_ZLEN, scratch, output_page);
+ }
+
if (! ei_local->txing)
{
ei_local->txing = 1;
* reasonable hardware if you only use one Tx buffer.
*/
- ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
+ if(length == send_length)
+ ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
+ else
+ {
+ memset(scratch, 0, ETH_ZLEN);
+ memcpy(scratch, skb->data, skb->len);
+ ei_block_output(dev, ETH_ZLEN, scratch, ei_local->tx_start_page);
+ }
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
dev->trans_start = jiffies;
}
skblen = skb->len;
-
+ len = skblen;
+
+ if(len < ETH_ZLEN)
+ {
+ len = ETH_ZLEN;
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ }
+
save_flags(flags);
cli();
}
}
#endif
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
entry = lp->tx_new & lp->tx_ring_mod_mask;
ib->btx_ring [entry].length = (-len) | 0xf000;
ib->btx_ring [entry].misc = 0;
if (!dev->tbusy) {
again:
if (!test_and_set_bit(0, (void*)&dev->tbusy)) {
- unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned int length = skb->len;
unsigned int hdraddr, bufaddr;
unsigned int head;
unsigned long flags;
+
+ /* FIXME: I thought the 79c961 could do padding - RMK ??? */
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
+
head = priv->txhead;
hdraddr = priv->txhdr + (head << 3);
bufaddr = priv->txbuffer[head];
struct AriadneBoard *board = priv->board;
int entry;
unsigned long flags;
+ int len = skb->len;
/* Transmitter timeout, serious problems. */
if (dev->tbusy) {
}
/* Fill in a Tx ring entry */
+ /* FIXME: is the 79C960 new enough to do its own padding right ? */
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ len = ETH_ZLEN;
+ }
+
#if 0
printk("TX pkt type 0x%04x from ", ((u_short *)skb->data)[6]);
priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
priv->tx_ring[entry]->TMD3 = 0x0000;
- memcpyw(priv->tx_buff[entry], (u_short *)skb->data, skb->len);
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
#if 0
{
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short length = skb->len;
+ static u8 pad[ETH_ZLEN];
unsigned char *buf = skb->data;
/* We may not start transmitting unless we finish transferring
lp->tx_queue_ready = 0;
{
outw(length, ioaddr + DATAPORT);
+ /* Packet data */
outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ /* Check for dribble byte */
+ if(length & 1)
+ {
+ outw(skb->data[skb->len-1], ioaddr + DATAPORT);
+ length--;
+ }
+ /* Check for packet padding */
+ if(length != skb->len)
+ outsw(ioaddr + DATAPORT, pad, (length - skb->len + 1) >> 1);
lp->tx_queue++;
lp->tx_queue_len += length + 2;
DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
dev->name, DREG ));
+ /* The old LANCE chips doesn't automatically pad buffers to min. size. */
+ len = skb->len;
+ if(len < ETH_ZLEN)
+ len = ETH_ZLEN;
+ /* PAM-Card has a bug: Can only send packets with even number of bytes! */
+ else if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if(len > skb->len)
+ {
+ skb = skb_padto(skb, len);
+ if(skb == NULL)
+ return 0;
+ }
+
/* Block a timer-based transmit from overlapping. This could better be
done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
if (test_and_set_bit( 0, (void*)&dev->tbusy ) != 0) {
* last.
*/
- /* The old LANCE chips doesn't automatically pad buffers to min. size. */
- len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
- /* PAM-Card has a bug: Can only send packets with even number of bytes! */
- if (lp->cardtype == PAM_CARD && (len & 1))
- ++len;
-
head->length = -len;
head->misc = 0;
lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
static int net_open(struct device *dev);
static void hardware_init(struct device *dev);
-static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
+static void write_packet(short ioaddr, int length, unsigned char *packet, int pad, int mode);
static void trigger_send(short ioaddr, int length);
static int net_send_packet(struct sk_buff *skb, struct device *dev);
static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
write_reg(ioaddr, CMR1, CMR1_Xmit);
}
-static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
+static void write_packet(short ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
{
- length = (length + 1) & ~1; /* Round up to word length. */
+ if(length & 1)
+ {
+ length++;
+ pad_len++;
+ }
+
outb(EOC+MAR, ioaddr + PAR_DATA);
if ((data_mode & 1) == 0) {
/* Write the packet out, starting with the write addr. */
outb(WrAddr+MAR, ioaddr + PAR_DATA);
do {
write_byte_mode0(ioaddr, *packet++);
+ } while (--length > pad_len) ;
+ do {
+ write_byte_mode0(ioaddr, 0);
} while (--length > 0) ;
} else {
/* Write the packet out in slow mode. */
outbyte >>= 4;
outb(outbyte & 0x0f, ioaddr + PAR_DATA);
outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
- while (--length > 0)
+ while (--length > pad_len)
write_byte_mode1(ioaddr, *packet++);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, 0);
}
/* Terminate the Tx frame. End of write: ECB. */
outb(0xff, ioaddr + PAR_DATA);
write_reg_high(ioaddr, IMR, 0);
restore_flags(flags);
- write_packet(ioaddr, length, buf, dev->if_port);
+ write_packet(ioaddr, length, buf, length-skb->len, dev->if_port);
lp->pac_cnt_in_tx_buf++;
if (lp->tx_unit_busy == 0) {
struct lance_tx_head *head;
unsigned long flags;
+ /* The old LANCE chips doesn't automatically pad buffers to min. size. */
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+ /* PAM-Card has a bug: Can only send packets with even number of bytes! */
+ if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if (len > skb->len)
+ {
+ skb = skb_padto(skb, len);
+ if(skb == NULL)
+ return 0;
+ }
+
/* Transmitter timeout, serious problems. */
if (dev->tbusy) {
int tickssofar = jiffies - dev->trans_start;
* last.
*/
- /* The old LANCE chips doesn't automatically pad buffers to min. size. */
- len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
- /* PAM-Card has a bug: Can only send packets with even number of bytes! */
- if (lp->cardtype == PAM_CARD && (len & 1))
- ++len;
-
head->length = -len;
head->misc = 0;
lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
int len;
int tickssofar;
byte *buffer = skb->data;
+ int i;
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
tickssofar = jiffies - dev->trans_start;
#endif
de600_setup_address(transmit_from, RW_ADDR);
- for ( ; len > 0; --len, ++buffer)
+ for (i = 0; i < skb->len ; ++i, ++buffer)
de600_put_byte(*buffer);
+ for (; i < len; ++i)
+ de600_put_byte(0);
if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
dev->trans_start = jiffies;
}
static inline void
-de620_write_block(struct device *dev, byte *buffer, int count)
+de620_write_block(struct device *dev, byte *buffer, int count, int pad)
{
#ifndef LOWSPEED
byte uflip = NIC_Cmd ^ (DS0 | DS1);
for ( ; count > 0; --count, ++buffer) {
de620_put_byte(dev,*buffer);
}
+ for ( count = pad ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev, 0);
+ }
de620_send_command(dev,W_DUMMY);
#ifdef COUNT_LOOPS
/* trial debug output: loops per byte in de620_ready() */
return 1;
break;
}
- de620_write_block(dev, buffer, len);
+ de620_write_block(dev, buffer, skb->len, len-skb->len);
dev->trans_start = jiffies;
dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
return -1;
}
skblen = skb->len;
+ len = skblen;
+
+ if(len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ len = ETH_ZLEN;
+ }
save_and_cli(flags);
if (!TX_BUFFS_AVAIL) {
restore_flags(flags);
return -1;
}
- len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
lp->stats.tx_bytes += len;
}
return status;
} else if (skb->len > 0) {
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ }
+
/* Enforce 1 process per h/w access */
if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
printk("%s: Transmitter access conflict.\n", dev->name);
{
struct eepro_local *lp = (struct eepro_local *)dev->priv;
int ioaddr = dev->base_addr;
+ short length = skb->len;
unsigned long flags;
if (net_debug > 5)
printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
eepro_dis_int(ioaddr);
spin_lock_irqsave(&lp->lock, flags);
printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
lp->stats.tx_aborted_errors++;
} else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
if (hardware_send_packet(dev, skb->data, length))
/* we won't unset tbusy because we're out of space. */
static int eexp_xmit(struct sk_buff *buf, struct device *dev)
{
struct net_local *lp = (struct net_local *)dev->priv;
+ short length = buf->len;
unsigned long flags;
#if NET_DEBUG > 6
printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
#endif
+ if(buf->len < ETH_ZLEN)
+ {
+ buf = skb_padto(buf, ETH_ZLEN);
+ if(buf == NULL)
+ return 0;
+ length = buf->len;
+ }
+
disable_irq(dev->irq);
/*
}
else
{
- unsigned short length = (ETH_ZLEN < buf->len) ? buf->len :
- ETH_ZLEN;
unsigned short *data = (unsigned short *)buf->data;
lp->stats.tx_bytes += length;
int entry, free_count;
u32 ctrl_word;
long flags;
+
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ }
/* Block a timer-based transmit from overlapping. This could better be
done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
status = -1;
}
else {
- ushort length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ ushort length = skb->len;
unsigned char *buf = skb->data;
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
if(eth16i_debug > 0)
printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short length = skb->len;
unsigned char *buf = skb->data;
if (length > ETH_FRAME_LEN) {
dev->name, length);
return 1;
}
+
+ if (length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
if (net_debug > 4)
printk("%s: Transmitting a packet of length %lu.\n", dev->name,
if ( skb->len <= 0 ) return 0;
+ if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ }
+
/* Get Tx ring tail pointer */
if( lp->txrtail->next==lp->txrhead )
{
/* The old LANCE chips doesn't automatically pad buffers to min. size. */
if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
- lp->tx_ring[entry].length =
- -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ if(skb->len < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ goto out;
+ lp->tx_ring[entry].length = -ETH_ZLEN;
+ }
+ else
+ lp->tx_ring[entry].length = -skb->len;
} else
lp->tx_ring[entry].length = -skb->len;
dev->trans_start = jiffies;
+out:
save_flags(flags);
cli();
lp->lock = 0;
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
printk("%s: Transmitter access conflict.\n", dev->name);
} else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short length;
dev->trans_start = jiffies;
+ length = skb->len;
+
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
+
tx_cmd = (struct tx_cmd *)
kmalloc ((sizeof (struct tx_cmd)
+ sizeof (struct i596_tbd)), GFP_ATOMIC);
static int process_xmt_interrupt(struct device *dev);
#define tx_done(dev) 1
-extern void hardware_send_packet(struct device *dev, char *buf, int length);
+extern void hardware_send_packet(struct device *dev, char *buf, int length, int pad);
extern void chipset_init(struct device *dev, int startp);
static void dump_packet(void *buf, int len);
static void show_registers(struct device *dev);
} else {
int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- hardware_send_packet(dev, (unsigned char *)skb->data, length);
+ hardware_send_packet(dev, (unsigned char *)skb->data, length, length-skb->len);
dev->trans_start = jiffies;
}
dev_kfree_skb (skb);
}
}
-extern void hardware_send_packet(struct device *dev, char *buf, int length)
+extern void hardware_send_packet(struct device *dev, char *buf, int length, int pad)
{
struct ni5010_local *lp = (struct ni5010_local *)dev->priv;
int ioaddr = dev->base_addr;
if (NI5010_DEBUG > 3) dump_packet(buf, length);
- buf_offs = NI5010_BUFSIZE - length;
- lp->o_pkt_size = length;
+ buf_offs = NI5010_BUFSIZE - length - pad ;
+ lp->o_pkt_size = length + pad ;
save_flags(flags);
cli();
outw(buf_offs, IE_GP); /* Point GP at start of packet */
outsb(IE_XBUF, buf, length); /* Put data in buffer */
+ while(pad--)
+ outb(0, IE_XBUF);
+
outw(buf_offs, IE_GP); /* Rewrite where packet starts */
/* should work without that outb() (Crynwr used it) */
#endif
else
{
+ len = skb->len;
+ if(len < ETH_ZLEN)
+ {
+ len = ETH_ZLEN;
+ memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, len - skb->len);
+ }
memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
- len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
#if (NUM_XMIT_BUFFS == 1)
# ifdef NO_NOPCOMMANDS
memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
(skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
+ if(len > skb->len)
+ memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
dev_kfree_skb (skb);
save_flags(flags);
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk("%s: Transmitter access conflict.\n", dev->name);
else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short length = skb->len;
unsigned char *buf = skb->data;
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
+
hardware_send_packet(dev, buf, length);
dev->trans_start = jiffies;
lp->stats.tx_bytes += length;
* added this new entry and restarted it.
*/
memcpy((char *)td->buf_vaddr, skb->data, skblen);
+ if(len != skblen)
+ memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
td->tdma.cntinfo = ((len) & HPCDMA_BCNT) |
(HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX);
if(sp->tx_old != sp->tx_new) {
{
struct priv *p = (struct priv *) dev->priv;
struct tmd *tmdp;
+ static char pad[64];
if (dev->tbusy)
{
/* Copy data into dual ported ram */
memcpy_toio((tmdp->u.buffer & 0x00ffffff), skb->data, skb->len);
+ if(len != skb->len)
+ memcpy_toio((tmdp->u.buffer & 0x00ffffff) + skb->len, pad, len-skb->len);
writew(-len, tmdp->blen); /* set length to transmit */
printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
return 1;
}
- lp->saved_skb = skb;
- length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ length = skb->len;
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ netif_wake_queue(dev);
+ return 0;
+ }
+ length = ETH_ZLEN;
+ }
+ lp->saved_skb = skb;
/*
** The MMU wants the number of pages to be the number of 256 bytes
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
+ if (skb->len < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ {
+ dev->tbusy = 0;
+ return 0;
+ }
+ }
+
np->tx_skbuff[entry] = skb;
if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
{
int ioaddr = dev->base_addr;
struct net_local *lp = (struct net_local *)dev->priv;
+ short length = skb->len;
if (znet_debug > 4)
printk(KERN_DEBUG "%s: ZNet_send_packet(%ld).\n", dev->name, dev->tbusy);
+ if(length < ETH_ZLEN)
+ {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if(skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
/* Transmitter timeout, likely just recovery after suspending the machine. */
if (dev->tbusy) {
ushort event, tx_status, rx_offset, state;
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
else {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
unsigned char *buf = (void *)skb->data;
ushort *tx_link = zn.tx_cur - 1;
ushort rnd_len = (length + 1)>>1;
extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
+extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a)
extern unsigned char * skb_put(struct sk_buff *skb, unsigned int len);
extern unsigned char * skb_push(struct sk_buff *skb, unsigned int len);
return skb;
}
+/**
+ * skb_padto - pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Returns the buffer, which may be a replacement
+ * for the original, or NULL for out of memory - in which case
+ * the original buffer is still freed.
+ */
+
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if(size >= len)
+ return skb;
+ return skb_pad(skb, len-size);
+}
+
extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
return n;
}
+struct sk_buff *skb_copy_grow(struct sk_buff *skb, int pad, int gfp_mask)
+{
+ struct sk_buff *n;
+ unsigned long offset;
+
+ /*
+ * Allocate the copy buffer
+ */
+
+ n=alloc_skb(skb->end - skb->head + pad, gfp_mask);
+ if(n==NULL)
+ return NULL;
+
+ /*
+ * Shift between the two data areas in bytes
+ */
+
+ offset=n->head-skb->head;
+
+ /* Set the data pointer */
+ skb_reserve(n,skb->data-skb->head);
+ /* Set the tail pointer and length */
+ skb_put(n,skb->len);
+ /* Copy the bytes */
+ memcpy(n->head,skb->head,skb->end-skb->head);
+ n->csum = skb->csum;
+ n->list=NULL;
+ n->sk=NULL;
+ n->dev=skb->dev;
+ n->priority=skb->priority;
+ n->protocol=skb->protocol;
+ n->dst=dst_clone(skb->dst);
+ n->h.raw=skb->h.raw+offset;
+ n->nh.raw=skb->nh.raw+offset;
+ n->mac.raw=skb->mac.raw+offset;
+ memcpy(n->cb, skb->cb, sizeof(skb->cb));
+ n->used=skb->used;
+ n->is_clone=0;
+ atomic_set(&n->users, 1);
+ n->pkt_type=skb->pkt_type;
+ n->stamp=skb->stamp;
+ n->destructor = NULL;
+ n->security=skb->security;
+#ifdef CONFIG_IP_FIREWALL
+ n->fwmark = skb->fwmark;
+#endif
+ return n;
+}
+
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, int newheadroom)
{
struct sk_buff *n;
return n;
}
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ * May return NULL in out of memory cases.
+ */
+
+struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
+{
+ struct sk_buff *nskb;
+
+ /* If the skbuff is non linear tailroom is always zero.. */
+ if(skb_tailroom(skb) >= pad)
+ {
+ memset(skb->data+skb->len, 0, pad);
+ return skb;
+ }
+
+ nskb = skb_copy_grow(skb, pad, GFP_ATOMIC);
+ kfree_skb(skb);
+ if(nskb)
+ memset(nskb->data+nskb->len, 0, pad);
+ return nskb;
+}
+
#if 0
/*
* Tune the memory allocator for a new MTU size.