#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap) 0
+#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap) (ap->version == 1)
+#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
#endif
#ifndef PCI_VENDOR_ID_ALTEON
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
static char version[] __initdata =
- "acenic.c: v0.87 03/14/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
+ "acenic.c: v0.88 03/14/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static struct net_device *root_dev;
ap->evt_ring_dma);
ap->evt_ring = NULL;
}
+ if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size, ap->tx_ring,
+ ap->tx_ring_dma);
+ }
+ ap->tx_ring = NULL;
+
if (ap->evt_prd != NULL) {
pci_free_consistent(ap->pdev, sizeof(u32),
(void *)ap->evt_prd, ap->evt_prd_dma);
if (ap->evt_ring == NULL)
goto fail;
- size = (sizeof(struct tx_desc) * TX_RING_ENTRIES);
+ /*
+ * Only allocate a host TX ring for the Tigon II, the Tigon I
+ * has to use PCI registers for this ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
- ap->tx_ring = pci_alloc_consistent(ap->pdev, size, &ap->tx_ring_dma);
+ ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->tx_ring_dma);
- if (ap->tx_ring == NULL)
- goto fail;
+ if (ap->tx_ring == NULL)
+ goto fail;
+ }
ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
&ap->evt_prd_dma);
tigonFwReleaseFix);
writel(0, &regs->LocalCtrl);
ap->version = 1;
+ ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
break;
#endif
case 6:
writel(SRAM_BANK_512K, &regs->LocalCtrl);
writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
ap->version = 2;
+ ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
break;
default:
printk(KERN_WARNING " Unsupported Tigon version detected "
#ifdef INDEX_DEBUG
spin_lock_init(&ap->debug_lock);
- ap->last_tx = TX_RING_ENTRIES - 1;
+ ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
ap->last_std_rx = 0;
ap->last_mini_rx = 0;
#endif
*(ap->rx_ret_prd) = 0;
writel(TX_RING_BASE, ®s->WinBase);
- memset(ap->tx_ring, 0, TX_RING_ENTRIES * sizeof(struct tx_desc));
- set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
+ if (ACE_IS_TIGON_I(ap)) {
+ ap->tx_ring = (struct tx_desc *)regs->Window;
+ for (i = 0; i < (TIGON_I_TX_RING_ENTRIES *
+ sizeof(struct tx_desc) / 4); i++) {
+ writel(0, (unsigned long)ap->tx_ring + i * 4);
+ }
+
+ set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
+ } else {
+ memset(ap->tx_ring, 0,
+ MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
+
+ set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
+ }
+
+ info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
+ tmp = RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR;
- info->tx_ctrl.max_len = TX_RING_ENTRIES;
- tmp = RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_TX_HOST_RING;
+ /*
+ * The Tigon I does not like having the TX ring in host memory ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap))
+ tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
info->skb = NULL;
}
- idx = (idx + 1) % TX_RING_ENTRIES;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
} while (idx != txcsm);
if (netif_queue_stopped(dev))
* update releases enough of space, otherwise we just
* wait for device to make more work.
*/
- if (!tx_ring_full(txcsm, ap->tx_prd))
+ if (!tx_ring_full(ap, txcsm, ap->tx_prd))
ace_tx_int(dev, txcsm, idx);
}
save_flags(flags);
cli();
- for (i = 0; i < TX_RING_ENTRIES; i++) {
+ for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
dma_addr_t mapping;
struct tx_ring_info *info;
mapping = pci_unmap_addr(info, mapping);
if (mapping) {
- memset(ap->tx_ring + i, 0, sizeof(struct tx_desc));
+ if (ACE_IS_TIGON_I(ap)) {
+ writel(0, &ap->tx_ring[i].addr.addrhi);
+ writel(0, &ap->tx_ring[i].addr.addrlo);
+ writel(0, &ap->tx_ring[i].flagsize);
+ } else
+ memset(ap->tx_ring + i, 0,
+ sizeof(struct tx_desc));
pci_unmap_page(ap->pdev, mapping,
pci_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
static inline void
-ace_load_tx_bd(struct tx_desc *desc, u64 addr, u32 flagsize)
+ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc,
+ u64 addr, u32 flagsize)
{
#if !USE_TX_COAL_NOW
flagsize &= ~BD_FLG_COAL_NOW;
#endif
-
- desc->addr.addrhi = addr >> 32;
- desc->addr.addrlo = addr;
- desc->flagsize = flagsize;
+ if (!ACE_IS_TIGON_I(ap)) {
+ writel(addr >> 32, &desc->addr.addrhi);
+ writel(addr & 0xffffffff, &desc->addr.addrlo);
+ writel(flagsize, &desc->flagsize);
+ } else {
+ desc->addr.addrhi = addr >> 32;
+ desc->addr.addrlo = addr;
+ desc->flagsize = flagsize;
+ }
}
restart:
idx = ap->tx_prd;
- if (tx_ring_full(ap->tx_ret_csm, idx))
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
goto overflow;
#if MAX_SKB_FRAGS
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
desc = ap->tx_ring + idx;
- idx = (idx + 1) % TX_RING_ENTRIES;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
/* Look at ace_tx_int for explanations. */
- if (tx_ring_full(ap->tx_ret_csm, idx))
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
- ace_load_tx_bd(desc, mapping, flagsize);
+ ace_load_tx_bd(ap, desc, mapping, flagsize);
}
#if MAX_SKB_FRAGS
else {
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
- ace_load_tx_bd(ap->tx_ring + idx, mapping, flagsize);
+ ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize);
- idx = (idx + 1) % TX_RING_ENTRIES;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
flagsize = (frag->size << 16);
if (skb->ip_summed == CHECKSUM_HW)
flagsize |= BD_FLG_TCP_UDP_SUM;
- idx = (idx + 1) % TX_RING_ENTRIES;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
if (i == skb_shinfo(skb)->nr_frags - 1) {
flagsize |= BD_FLG_END;
- if (tx_ring_full(ap->tx_ret_csm, idx))
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
flagsize |= BD_FLG_COAL_NOW;
/*
}
pci_unmap_addr_set(info, mapping, mapping);
pci_unmap_len_set(info, maplen, frag->size);
- ace_load_tx_bd(desc, mapping, flagsize);
+ ace_load_tx_bd(ap, desc, mapping, flagsize);
}
}
#endif
* serialized, this is the only situation we have to
* re-test.
*/
- if (!tx_ring_full(ap->tx_ret_csm, idx))
+ if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
netif_wake_queue(dev);
}
if (copy_from_user(&ecmd, ifr->ifr_data, sizeof(ecmd)))
return -EFAULT;
switch (ecmd.cmd) {
- case ETHTOOL_GSET: {
+ case ETHTOOL_GSET:
ecmd.supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
if(copy_to_user(ifr->ifr_data, &ecmd, sizeof(ecmd)))
return -EFAULT;
return 0;
- }
- case ETHTOOL_SSET: {
+
+ case ETHTOOL_SSET:
if(!capable(CAP_NET_ADMIN))
return -EPERM;
ace_issue_cmd(regs, &cmd);
}
return 0;
- }
+
case ETHTOOL_GDRVINFO: {
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strncpy(info.driver, "acenic", sizeof(info.driver) - 1);