1) Add hw acceleration hooks for device drivers.
2) Move private declarations out of public includes.
3) Mark file local functions and data as static.
4) Use a small hash table for VLAN group lookups.
5) Correct all the locking and device ref counting.
6) No longer mark it as CONFIG_EXPERIMENTAL.
unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */
};
-/* Find a VLAN device by the MAC address of it's Ethernet device, and
- * it's VLAN ID. The default configuration is to have VLAN's scope
- * to be box-wide, so the MAC will be ignored. The mac will only be
- * looked at if we are configured to have a seperate set of VLANs per
- * each MAC addressable interface. Note that this latter option does
- * NOT follow the spec for VLANs, but may be useful for doing very
- * large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
- */
-struct net_device *find_802_1Q_vlan_dev(struct net_device* real_dev,
- unsigned short VID); /* vlan.c */
+#define VLAN_VID_MASK 0xfff
/* found in af_inet.c */
extern int (*vlan_ioctl_hook)(unsigned long arg);
-/* found in vlan_dev.c */
-struct net_device_stats* vlan_dev_get_stats(struct net_device* dev);
-int vlan_dev_rebuild_header(struct sk_buff *skb);
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type* ptype);
-int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, void *daddr, void *saddr,
- unsigned len);
-int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
-int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
-int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
-int vlan_dev_open(struct net_device* dev);
-int vlan_dev_stop(struct net_device* dev);
-int vlan_dev_init(struct net_device* dev);
-void vlan_dev_destruct(struct net_device* dev);
-void vlan_dev_copy_and_sum(struct sk_buff *dest, unsigned char *src,
- int length, int base);
-int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
-int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
-int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
-
-/* VLAN multicast stuff */
-/* Delete all of the MC list entries from this vlan device. Also deals
- * with the underlying device...
- */
-void vlan_flush_mc_list(struct net_device* dev);
-/* copy the mc_list into the vlan_info structure. */
-void vlan_copy_mc_list(struct dev_mc_list* mc_list, struct vlan_dev_info* vlan_info);
-/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
- * an entire list, and we'll iterate through it.
- */
-int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list);
-/** Taken from Gleb + Lennert's VLAN code, and modified... */
-void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
-
-int vlan_collection_add_vlan(struct vlan_collection* vc, unsigned short vlan_id,
- unsigned short flags);
-int vlan_collection_remove_vlan(struct vlan_collection* vc,
- struct net_device* vlan_dev);
-int vlan_collection_remove_vlan_id(struct vlan_collection* vc, unsigned short vlan_id);
-
-/* found in vlan.c */
-/* Our listing of VLAN group(s) */
-extern struct vlan_group* p802_1Q_vlan_list;
-
#define VLAN_NAME "vlan"
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
- * it gives constant time look-up, but it many cases it wastes memory.
+ * it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
/* inline functions */
-/* Used in vlan_skb_recv */
-static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+static inline struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
{
- if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb) {
- /* Lifted from Gleb's VLAN code... */
- memmove(skb->data - ETH_HLEN,
- skb->data - VLAN_ETH_HLEN, 12);
- skb->mac.raw += VLAN_HLEN;
- }
- }
-
- return skb;
+ return &(VLAN_DEV_INFO(dev)->dev_stats);
}
-static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev,
- struct sk_buff* skb)
+static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
+ unsigned short vlan_tag)
{
- struct vlan_priority_tci_mapping *mp =
- VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];
-
- while (mp) {
- if (mp->priority == skb->priority) {
- return mp->vlan_qos; /* This should already be shifted to mask
- * correctly with the VLAN's TCI
- */
- }
- mp = mp->next;
- }
- return 0;
-}
+ struct vlan_dev_info *vip = VLAN_DEV_INFO(dev);
-static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
- struct dev_mc_list *dmi2)
-{
- return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
- (memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
+ return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
}
-static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
+/* VLAN tx hw acceleration helpers. */
+/* Cookie stashed in skb->cb[] by the VLAN transmit path so a
+ * TX-accelerating driver can fetch the tag it must insert in hardware.
+ * NOTE(review): assumes the cookie fits within skb->cb[] -- confirm
+ * against struct sk_buff.
+ */
+struct vlan_skb_tx_cookie {
+ u32 magic; /* VLAN_TX_COOKIE_MAGIC when a tag is present */
+ u32 vlan_tag; /* VLAN id plus egress QoS bits from the priority map */
+};
+
+#define VLAN_TX_COOKIE_MAGIC 0x564c414e /* "VLAN" in ascii. */
+#define VLAN_TX_SKB_CB(__skb) ((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
+/* True if this skb carries a VLAN tag for the driver to insert. */
+#define vlan_tx_tag_present(__skb) \
+ (VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
+/* Fetch that tag; only meaningful when vlan_tx_tag_present(). */
+#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
+
+/* VLAN rx hw acceleration helper. This acts like netif_rx(). */
+static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+ unsigned short vlan_tag)
{
- struct dev_mc_list *dmi = mc_list;
- struct dev_mc_list *next;
+ struct net_device_stats *stats;
- while(dmi) {
- next = dmi->next;
- kfree(dmi);
- dmi = next;
+ skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
+ if (skb->dev == NULL) {
+ kfree_skb(skb);
+
+ /* Not NET_RX_DROP, this is not being dropped
+ * due to congestion.
+ */
+ return 0;
}
+
+ skb->dev->last_rx = jiffies;
+
+ stats = vlan_dev_get_stats(skb->dev);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
+ switch (skb->pkt_type) {
+ case PACKET_BROADCAST:
+ break;
+
+ case PACKET_MULTICAST:
+ stats->multicast++;
+ break;
+
+ case PACKET_OTHERHOST:
+ /* Our lower layer thinks this is not local, let's make sure.
+ * This allows the VLAN to have a different MAC than the underlying
+ * device, and still route correctly.
+ */
+ if (!memcmp(skb->mac.ethernet->h_dest, skb->dev->dev_addr, ETH_ALEN))
+ skb->pkt_type = PACKET_HOST;
+ break;
+ };
+
+ return netif_rx(skb);
}
#endif /* __KERNEL__ */
#endif
struct divert_blk;
+struct vlan_group;
#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
functions are available. */
#define NETIF_F_DYNALLOC 16 /* Self-destructable device. */
#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
+#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
+#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
+#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
+#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
/* Called after device is detached from network. */
void (*uninit)(struct net_device *dev);
#define HAVE_TX_TIMEOUT
void (*tx_timeout) (struct net_device *dev);
+ void (*vlan_rx_register)(struct net_device *dev,
+ struct vlan_group *grp);
+ void (*vlan_rx_add_vid)(struct net_device *dev,
+ unsigned short vid);
+ void (*vlan_rx_kill_vid)(struct net_device *dev,
+ unsigned short vid);
+
int (*hard_header_parse)(struct sk_buff *skb,
unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
*
* Fixes:
* Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
- *
+ * Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
+ * Correct all the locking - David S. Miller <davem@redhat.com>;
+ * Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
/* Global VLAN variables */
/* Our listing of VLAN group(s) */
-struct vlan_group *p802_1Q_vlan_list;
+struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+spinlock_t vlan_group_lock = SPIN_LOCK_UNLOCKED;
+#define vlan_grp_hashfn(IDX) ((((IDX) >> VLAN_GRP_HASH_SHIFT) ^ (IDX)) & VLAN_GRP_HASH_MASK)
static char vlan_fullname[] = "802.1Q VLAN Support";
static unsigned int vlan_version = 1;
-static unsigned int vlan_release = 6;
-static char vlan_copyright[] = " Ben Greear <greearb@candelatech.com>";
+static unsigned int vlan_release = 7;
+static char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
+static char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
static int vlan_device_event(struct notifier_block *, unsigned long, void *);
/* Determines interface naming scheme. */
unsigned short vlan_name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
-/* Counter for how many NON-VLAN protos we've received on a VLAN. */
-unsigned long vlan_bad_proto_recvd = 0;
-
/* DO reorder the header by default */
unsigned short vlan_default_dev_flags = 1;
printk(VLAN_INF "%s v%u.%u %s\n",
vlan_fullname, vlan_version, vlan_release, vlan_copyright);
+ printk(VLAN_INF "All bugs added by %s\n",
+ vlan_buggyright);
/* proc file system initialization */
err = vlan_proc_init();
vlan_ioctl_hook = vlan_ioctl_handler;
- printk(VLAN_INF "%s Initialization complete.\n", VLAN_NAME);
return 0;
}
-/*
- * Cleanup of groups before exit
- */
-
-static void vlan_group_cleanup(void)
-{
- struct vlan_group *grp = NULL;
- struct vlan_group *nextgroup;
-
- for (grp = p802_1Q_vlan_list; (grp != NULL);) {
- nextgroup = grp->next;
- kfree(grp);
- grp = nextgroup;
- }
- p802_1Q_vlan_list = NULL;
-}
-
/*
* Module 'remove' entry point.
* o delete /proc/net/router directory and static entries.
*/
static void __exit vlan_cleanup_module(void)
{
+ int i;
+
+ /* This table must be empty if there are no module
+ * references left.
+ */
+ for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) {
+ if (vlan_group_hash[i] != NULL)
+ BUG();
+ }
+
/* Un-register us from receiving netdevice events */
unregister_netdevice_notifier(&vlan_notifier_block);
dev_remove_pack(&vlan_packet_type);
vlan_proc_cleanup();
- vlan_group_cleanup();
vlan_ioctl_hook = NULL;
}
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
-/** Will search linearly for now, based on device index. Could
- * hash, or directly link, this some day. --Ben
- * TODO: Potential performance issue here. Linear search where N is
- * the number of 'real' devices used by VLANs.
- */
-struct vlan_group* vlan_find_group(int real_dev_ifindex)
+/* Must be invoked with vlan_group_lock held. */
+static struct vlan_group *__vlan_find_group(int real_dev_ifindex)
{
- struct vlan_group *grp = NULL;
+ struct vlan_group *grp;
- br_read_lock_bh(BR_NETPROTO_LOCK);
- for (grp = p802_1Q_vlan_list;
- ((grp != NULL) && (grp->real_dev_ifindex != real_dev_ifindex));
+ for (grp = vlan_group_hash[vlan_grp_hashfn(real_dev_ifindex)];
+ grp != NULL;
grp = grp->next) {
- /* nothing */ ;
+ if (grp->real_dev_ifindex == real_dev_ifindex)
+ break;
}
- br_read_unlock_bh(BR_NETPROTO_LOCK);
return grp;
}
-/* Find the protocol handler. Assumes VID < 0xFFF.
+/* Must hold vlan_group_lock. */
+/* Link grp at the head of the hash chain selected by its real
+ * device ifindex.
+ */
+static void __grp_hash(struct vlan_group *grp)
+{
+ struct vlan_group **head;
+
+ head = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ grp->next = *head;
+ *head = grp;
+}
+
+/* Must hold vlan_group_lock. */
+/* Unlink grp from its hash chain using a pointer-to-pointer walk,
+ * so removing the chain head needs no special case.
+ * NOTE(review): assumes grp is actually on the chain; the walk would
+ * dereference NULL otherwise -- confirm callers only unhash groups
+ * previously inserted with __grp_hash().
+ */
+static void __grp_unhash(struct vlan_group *grp)
+{
+ struct vlan_group *next, **pprev;
+
+ pprev = &vlan_group_hash[vlan_grp_hashfn(grp->real_dev_ifindex)];
+ next = *pprev;
+ while (next != grp) {
+ pprev = &next->next;
+ next = *pprev;
+ }
+ *pprev = grp->next;
+}
+
+/* Find the protocol handler. Assumes VID < VLAN_VID_MASK.
+ *
+ * Must be invoked with vlan_group_lock held.
*/
-struct net_device *find_802_1Q_vlan_dev(struct net_device *real_dev,
- unsigned short VID)
+struct net_device *__find_vlan_dev(struct net_device *real_dev,
+ unsigned short VID)
{
- struct vlan_group *grp = vlan_find_group(real_dev->ifindex);
+ struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
if (grp)
return grp->vlan_devices[VID];
return NULL;
}
-/** This method will explicitly do a dev_put on the device if do_dev_put
- * is TRUE. This gets around a difficulty with reference counting, and
- * the unregister-by-name (below). If do_locks is true, it will grab
- * a lock before un-registering. If do_locks is false, it is assumed that
- * the lock has already been grabbed externally... --Ben
+/* This returns 0 if everything went fine.
+ * It will return 1 if the group was killed as a result.
+ * A negative return indicates failure.
+ *
+ * The RTNL lock must be held.
*/
-int unregister_802_1Q_vlan_dev(int real_dev_ifindex, unsigned short vlan_id,
- int do_dev_put, int do_locks)
+static int unregister_vlan_dev(struct net_device *real_dev,
+ unsigned short vlan_id)
{
struct net_device *dev = NULL;
+ int real_dev_ifindex = real_dev->ifindex;
struct vlan_group *grp;
+ int i, ret;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": VID: %i\n", vlan_id);
#endif
/* sanity check */
- if ((vlan_id >= 0xFFF) || (vlan_id <= 0))
+ if ((vlan_id >= VLAN_VID_MASK) || (vlan_id <= 0))
return -EINVAL;
- grp = vlan_find_group(real_dev_ifindex);
+ spin_lock_bh(&vlan_group_lock);
+ grp = __vlan_find_group(real_dev_ifindex);
+ spin_unlock_bh(&vlan_group_lock);
+
+ ret = 0;
+
if (grp) {
dev = grp->vlan_devices[vlan_id];
if (dev) {
/* Remove proc entry */
vlan_proc_rem_dev(dev);
- /* Take it out of our own structures */
- grp->vlan_devices[vlan_id] = NULL;
+ /* Take it out of our own structures, but be sure to
+ * interlock with HW accelerating devices or SW vlan
+ * input packet processing.
+ */
+ if (real_dev->features &
+ (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER)) {
+ real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
+ } else {
+ br_write_lock(BR_NETPROTO_LOCK);
+ grp->vlan_devices[vlan_id] = NULL;
+ br_write_unlock(BR_NETPROTO_LOCK);
+ }
- /* Take it out of the global list of devices.
- * NOTE: This deletes dev, don't access it again!!
+ /* Caller unregisters (and if necessary, puts)
+ * VLAN device, but we get rid of the reference to
+ * real_dev here.
*/
+ dev_put(real_dev);
- if (do_dev_put)
- dev_put(dev);
+ /* If the group is now empty, kill off the
+ * group.
+ */
+ for (i = 0; i < VLAN_VID_MASK; i++)
+ if (grp->vlan_devices[i])
+ break;
- /* TODO: Please review this code. */
- if (do_locks) {
- rtnl_lock();
- unregister_netdevice(dev);
- rtnl_unlock();
- } else {
- unregister_netdevice(dev);
+ if (i == VLAN_VID_MASK) {
+ if (real_dev->features & NETIF_F_HW_VLAN_RX)
+ real_dev->vlan_rx_register(real_dev, NULL);
+
+ spin_lock_bh(&vlan_group_lock);
+ __grp_unhash(grp);
+ spin_unlock_bh(&vlan_group_lock);
+
+ ret = 1;
}
MOD_DEC_USE_COUNT;
}
}
-
- return 0;
+
+ return ret;
}
-int unregister_802_1Q_vlan_device(const char *vlan_IF_name)
+static int unregister_vlan_device(const char *vlan_IF_name)
{
struct net_device *dev = NULL;
+ int ret;
-#ifdef VLAN_DEBUG
- printk(VLAN_DBG __FUNCTION__ ": unregister VLAN by name, name -:%s:-\n",
- vlan_IF_name);
-#endif
dev = dev_get_by_name(vlan_IF_name);
+ ret = -EINVAL;
if (dev) {
if (dev->priv_flags & IFF_802_1Q_VLAN) {
- return unregister_802_1Q_vlan_dev(
- VLAN_DEV_INFO(dev)->real_dev->ifindex,
- (unsigned short)(VLAN_DEV_INFO(dev)->vlan_id),
- 1 /* do dev_put */, 1 /* do locking */);
+ rtnl_lock();
+
+ ret = unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev,
+ VLAN_DEV_INFO(dev)->vlan_id);
+
+ dev_put(dev);
+ unregister_netdevice(dev);
+
+ rtnl_unlock();
+
+ if (ret == 1)
+ ret = 0;
} else {
printk(VLAN_ERR __FUNCTION__
": ERROR: Tried to remove a non-vlan device "
"with VLAN code, name: %s priv_flags: %hX\n",
dev->name, dev->priv_flags);
dev_put(dev);
- return -EPERM;
+ ret = -EPERM;
}
} else {
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": WARNING: Could not find dev.\n");
#endif
- return -EINVAL;
+ ret = -EINVAL;
}
+
+ return ret;
}
/* Attach a VLAN device to a mac address (ie Ethernet Card).
* Returns the device that was created, or NULL if there was
* an error of some kind.
*/
-struct net_device *register_802_1Q_vlan_device(const char* eth_IF_name,
+static struct net_device *register_vlan_device(const char *eth_IF_name,
unsigned short VLAN_ID)
{
struct vlan_group *grp;
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
int malloc_size = 0;
+ int r;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": if_name -:%s:- vid: %i\n",
eth_IF_name, VLAN_ID);
#endif
- if (VLAN_ID >= 0xfff)
+ if (VLAN_ID >= VLAN_VID_MASK)
goto out_ret_null;
/* find the device relating to eth_IF_name. */
if (!real_dev)
goto out_ret_null;
- /* TODO: Make sure this device can really handle having a VLAN attached
- * to it...
+ if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ printk(VLAN_DBG __FUNCTION__ ": VLANs not supported on %s.\n",
+ real_dev->name);
+ goto out_put_dev;
+ }
+
+ if ((real_dev->features & NETIF_F_HW_VLAN_RX) &&
+ (real_dev->vlan_rx_register == NULL ||
+ real_dev->vlan_rx_kill_vid == NULL)) {
+ printk(VLAN_DBG __FUNCTION__ ": Device %s has buggy VLAN hw accel.\n",
+ real_dev->name);
+ goto out_put_dev;
+ }
+
+ if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ (real_dev->vlan_rx_add_vid == NULL ||
+ real_dev->vlan_rx_kill_vid == NULL)) {
+ printk(VLAN_DBG __FUNCTION__ ": Device %s has buggy VLAN hw accel.\n",
+ real_dev->name);
+ goto out_put_dev;
+ }
+
+ /* From this point on, all the data structures must remain
+ * consistent.
+ */
+ rtnl_lock();
+
+ /* The real device must be up and operating in order to
+ * associate a VLAN device with it.
*/
- if (find_802_1Q_vlan_dev(real_dev, VLAN_ID)) {
+ if (!(real_dev->flags & IFF_UP))
+ goto out_unlock;
+
+ spin_lock_bh(&vlan_group_lock);
+ r = (__find_vlan_dev(real_dev, VLAN_ID) != NULL);
+ spin_unlock_bh(&vlan_group_lock);
+
+ if (r) {
/* was already registered. */
printk(VLAN_DBG __FUNCTION__ ": ALREADY had VLAN registered\n");
- dev_put(real_dev);
- return NULL;
+ goto out_unlock;
}
malloc_size = (sizeof(struct net_device));
new_dev, malloc_size);
if (new_dev == NULL)
- goto out_put_dev;
+ goto out_unlock;
memset(new_dev, 0, malloc_size);
- /* set us up to not use a Qdisc, as the underlying Hardware device
+ /* Set us up to have no queue, as the underlying Hardware device
* can do all the queueing we could want.
*/
- /* new_dev->qdisc_sleeping = &noqueue_qdisc; Not needed it seems. */
- new_dev->tx_queue_len = 0; /* This should effectively give us no queue. */
+ new_dev->tx_queue_len = 0;
/* Gotta set up the fields for the device. */
#ifdef VLAN_DEBUG
/* TODO: maybe just assign it to be ETHERNET? */
new_dev->type = real_dev->type;
- /* Regular ethernet + 4 bytes (18 total). */
- new_dev->hard_header_len = VLAN_HLEN + real_dev->hard_header_len;
+ new_dev->hard_header_len = real_dev->hard_header_len;
+ if (!(real_dev->features & NETIF_F_HW_VLAN_TX)) {
+ /* Regular ethernet + 4 bytes (18 total). */
+ new_dev->hard_header_len += VLAN_HLEN;
+ }
new_dev->priv = kmalloc(sizeof(struct vlan_dev_info),
GFP_KERNEL);
new_dev->priv,
sizeof(struct vlan_dev_info));
- if (new_dev->priv == NULL) {
- kfree(new_dev);
- goto out_put_dev;
- }
+ if (new_dev->priv == NULL)
+ goto out_free_newdev;
memset(new_dev->priv, 0, sizeof(struct vlan_dev_info));
new_dev->open = vlan_dev_open;
new_dev->stop = vlan_dev_stop;
- new_dev->hard_header = vlan_dev_hard_header;
- new_dev->hard_start_xmit = vlan_dev_hard_start_xmit;
- new_dev->rebuild_header = vlan_dev_rebuild_header;
+ if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+ new_dev->hard_header = real_dev->hard_header;
+ new_dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
+ new_dev->rebuild_header = real_dev->rebuild_header;
+ } else {
+ new_dev->hard_header = vlan_dev_hard_header;
+ new_dev->hard_start_xmit = vlan_dev_hard_start_xmit;
+ new_dev->rebuild_header = vlan_dev_rebuild_header;
+ }
new_dev->hard_header_parse = real_dev->hard_header_parse;
new_dev->set_mac_address = vlan_dev_set_mac_address;
new_dev->set_multicast_list = vlan_dev_set_multicast_list;
- VLAN_DEV_INFO(new_dev)->vlan_id = VLAN_ID; /* 1 through 0xFFF */
+ VLAN_DEV_INFO(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */
VLAN_DEV_INFO(new_dev)->real_dev = real_dev;
VLAN_DEV_INFO(new_dev)->dent = NULL;
VLAN_DEV_INFO(new_dev)->flags = vlan_default_dev_flags;
/* So, got the sucker initialized, now lets place
* it into our local structure.
*/
- grp = vlan_find_group(real_dev->ifindex);
+ spin_lock_bh(&vlan_group_lock);
+ grp = __vlan_find_group(real_dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
+
+ /* Note, we are running under the RTNL semaphore
+ * so it cannot "appear" on us.
+ */
if (!grp) { /* need to add a new group */
grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL);
- VLAN_MEM_DBG("grp malloc, addr: %p size: %i\n",
- grp, sizeof(struct vlan_group));
- if (!grp) {
- kfree(new_dev->priv);
- VLAN_FMEM_DBG("new_dev->priv free, addr: %p\n",
- new_dev->priv);
- kfree(new_dev);
- VLAN_FMEM_DBG("new_dev free, addr: %p\n", new_dev);
-
- goto out_put_dev;
- }
+ if (!grp)
+ goto out_free_newdev_priv;
- printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n");
+ /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
memset(grp, 0, sizeof(struct vlan_group));
grp->real_dev_ifindex = real_dev->ifindex;
- br_write_lock_bh(BR_NETPROTO_LOCK);
- grp->next = p802_1Q_vlan_list;
- p802_1Q_vlan_list = grp;
- br_write_unlock_bh(BR_NETPROTO_LOCK);
+ spin_lock_bh(&vlan_group_lock);
+ __grp_hash(grp);
+ spin_unlock_bh(&vlan_group_lock);
+
+ if (real_dev->features & NETIF_F_HW_VLAN_RX)
+ real_dev->vlan_rx_register(real_dev, grp);
}
grp->vlan_devices[VLAN_ID] = new_dev;
+
vlan_proc_add_dev(new_dev); /* create its proc entry */
- /* TODO: Please check this: RTNL --Ben */
- rtnl_lock();
+ if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
+ real_dev->vlan_rx_add_vid(real_dev, VLAN_ID);
+
register_netdevice(new_dev);
+
rtnl_unlock();
/* NOTE: We have a reference to the real device,
#endif
return new_dev;
+out_free_newdev_priv:
+ kfree(new_dev->priv);
+
+out_free_newdev:
+ kfree(new_dev);
+
+out_unlock:
+ rtnl_unlock();
+
out_put_dev:
dev_put(real_dev);
{
struct net_device *dev = (struct net_device *)(ptr);
struct vlan_group *grp = NULL;
- int i = 0;
+ int i, flgs;
struct net_device *vlandev = NULL;
+ spin_lock_bh(&vlan_group_lock);
+ grp = __vlan_find_group(dev->ifindex);
+ spin_unlock_bh(&vlan_group_lock);
+
+ if (!grp)
+ goto out;
+
+ /* It is OK that we do not hold the group lock right now,
+ * as we run under the RTNL lock.
+ */
+
switch (event) {
case NETDEV_CHANGEADDR:
- /* Ignore for now */
- break;
-
case NETDEV_GOING_DOWN:
/* Ignore for now */
break;
case NETDEV_DOWN:
- /* TODO: Please review this code. */
- /* put all related VLANs in the down state too. */
- for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
- int flgs = 0;
-
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev ||
- (VLAN_DEV_INFO(vlandev)->real_dev != dev) ||
- (!(vlandev->flags & IFF_UP)))
- continue;
-
- flgs = vlandev->flags;
- flgs &= ~IFF_UP;
- dev_change_flags(vlandev, flgs);
- }
+ /* Put all VLANs for this dev in the down state too. */
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = grp->vlan_devices[i];
+ if (!vlandev)
+ continue;
+
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
+ dev_change_flags(vlandev, flgs & ~IFF_UP);
}
break;
case NETDEV_UP:
- /* TODO: Please review this code. */
- /* put all related VLANs in the down state too. */
- for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
- int flgs;
-
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev ||
- (VLAN_DEV_INFO(vlandev)->real_dev != dev) ||
- (vlandev->flags & IFF_UP))
- continue;
+ /* Put all VLANs for this dev in the up state too. */
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = grp->vlan_devices[i];
+ if (!vlandev)
+ continue;
- flgs = vlandev->flags;
- flgs |= IFF_UP;
- dev_change_flags(vlandev, flgs);
- }
+ flgs = vlandev->flags;
+ if (flgs & IFF_UP)
+ continue;
+
+ dev_change_flags(vlandev, flgs | IFF_UP);
}
break;
case NETDEV_UNREGISTER:
- /* TODO: Please review this code. */
- /* delete all related VLANs. */
- for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev ||
- (VLAN_DEV_INFO(vlandev)->real_dev != dev))
- continue;
-
- unregister_802_1Q_vlan_dev(
- VLAN_DEV_INFO(vlandev)->real_dev->ifindex,
- VLAN_DEV_INFO(vlandev)->vlan_id,
- 0, 0);
- vlandev = NULL;
- }
+ /* Delete all VLANs for this dev. */
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ int ret;
+
+ vlandev = grp->vlan_devices[i];
+ if (!vlandev)
+ continue;
+
+ ret = unregister_vlan_dev(dev,
+ VLAN_DEV_INFO(vlandev)->vlan_id);
+
+ unregister_netdev(vlandev);
+
+ /* Group was destroyed? */
+ if (ret == 1)
+ break;
}
break;
};
+out:
return NOTIFY_DONE;
}
* talk to: args.dev1 We also have the
* VLAN ID: args.u.VID
*/
- if (register_802_1Q_vlan_device(args.device1, args.u.VID)) {
+ if (register_vlan_device(args.device1, args.u.VID)) {
err = 0;
} else {
err = -EINVAL;
/* Here, the args.dev1 is the actual VLAN we want
* to get rid of.
*/
- err = unregister_802_1Q_vlan_device(args.device1);
+ err = unregister_vlan_device(args.device1);
break;
default:
return err;
}
-
+MODULE_LICENSE("GPL");
extern unsigned short vlan_name_type;
-/* Counter for how many NON-VLAN protos we've received on a VLAN. */
-extern unsigned long vlan_bad_proto_recvd;
-
int vlan_ioctl_handler(unsigned long arg);
-/* Add some headers for the public VLAN methods. */
-int unregister_802_1Q_vlan_device(const char* vlan_IF_name);
-struct net_device *register_802_1Q_vlan_device(const char* eth_IF_name,
- unsigned short VID);
+#define VLAN_GRP_HASH_SHIFT 5
+#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
+#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
+extern struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
+extern spinlock_t vlan_group_lock;
+
+/* Find a VLAN device by the MAC address of its Ethernet device, and
+ * its VLAN ID. The default configuration is for VLAN scope
+ * to be box-wide, so the MAC will be ignored. The MAC will only be
+ * looked at if we are configured to have a separate set of VLANs per
+ * each MAC addressable interface. Note that this latter option does
+ * NOT follow the spec for VLANs, but may be useful for doing very
+ * large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
+ *
+ * Must be invoked with vlan_group_lock held and that lock MUST NOT
+ * be dropped until a reference is obtained on the returned device.
+ * You may drop the lock earlier if you are running under the RTNL
+ * semaphore, however.
+ */
+struct net_device *__find_vlan_dev(struct net_device* real_dev,
+ unsigned short VID); /* vlan.c */
+
+/* found in vlan_dev.c */
+int vlan_dev_rebuild_header(struct sk_buff *skb);
+int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type* ptype);
+int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len);
+int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
+int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
+int vlan_dev_open(struct net_device* dev);
+int vlan_dev_stop(struct net_device* dev);
+int vlan_dev_init(struct net_device* dev);
+void vlan_dev_destruct(struct net_device* dev);
+int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
+int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
+int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
+void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
#endif /* !(__BEN_VLAN_802_1Q_INC__) */
#include <linux/if_vlan.h>
#include <net/ip.h>
-struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
-{
- return &(((struct vlan_dev_info *)(dev->priv))->dev_stats);
-}
-
-
/*
* Rebuild the Ethernet MAC header. This is called after an ARP
* (or in future other address resolution) has completed on this
return 0;
}
+/* Undo the VLAN encapsulation in place when this VLAN device has the
+ * "reorder header" option (bit 0 of vlan_dev_info->flags) set, so the
+ * frame looks as if it arrived untagged.  May copy the skb if it is
+ * shared; returns NULL if that copy fails (caller must check).
+ */
+static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+{
+ if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
+ /* Lifted from Gleb's VLAN code... */
+ /* Slide the 12 bytes of dest+src MAC addresses forward
+ * over the 4-byte VLAN tag that followed them.
+ */
+ memmove(skb->data - ETH_HLEN,
+ skb->data - VLAN_ETH_HLEN, 12);
+ skb->mac.raw += VLAN_HLEN;
+ }
+ }
+
+ return skb;
+}
+
/*
* Determine the packet's protocol ID. The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
vlan_TCI = ntohs(vhdr->h_vlan_TCI);
- vid = (vlan_TCI & 0xFFF);
+ vid = (vlan_TCI & VLAN_VID_MASK);
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": skb: %p vlan_id: %hx\n",
* and then go on as usual.
*/
- /* we have 12 bits of vlan ID. */
- /* If it's NULL, we will tag it to be junked below */
- skb->dev = find_802_1Q_vlan_dev(dev, vid);
+ /* We have 12 bits of vlan ID.
+ *
+ * We must not drop the vlan_group_lock until we hold a
+ * reference to the device (netif_rx does that) or we
+ * fail.
+ */
+ spin_lock_bh(&vlan_group_lock);
+ skb->dev = __find_vlan_dev(dev, vid);
if (!skb->dev) {
+ spin_unlock_bh(&vlan_group_lock);
+
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": ERROR: No net_device for VID: %i on dev: %s [%i]\n",
(unsigned int)(vid), dev->name, dev->ifindex);
return -1;
}
+ skb->dev->last_rx = jiffies;
+
/* Bump the rx counters for the VLAN device. */
stats = vlan_dev_get_stats(skb->dev);
stats->rx_packets++;
*/
if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
+ spin_unlock_bh(&vlan_group_lock);
+
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
skb, dev->name, VLAN_DEV_INFO(skb->dev)->real_dev->name, skb->dev->name);
/*
* Deal with ingress priority mapping.
*/
- skb->priority = VLAN_DEV_INFO(skb->dev)->ingress_priority_map[(ntohs(vhdr->h_vlan_TCI) >> 13) & 0x7];
+ skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI));
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": priority: %lu for TCI: %hu (hbo)\n",
switch (skb->pkt_type) {
case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
// stats->broadcast ++; // no such counter :-(
+ break;
+
case PACKET_MULTICAST:
stats->multicast++;
break;
+
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the underlying
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
+ spin_unlock_bh(&vlan_group_lock);
return 0;
}
/* TODO: Add a more specific counter here. */
stats->rx_errors++;
}
+ spin_unlock_bh(&vlan_group_lock);
+ return 0;
+}
+
+/* Map skb->priority to the egress QoS bits (already shifted into TCI
+ * position, per the comment on vlan_qos below) configured for this
+ * VLAN device.  Returns 0 when no mapping exists.
+ */
+static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev,
+ struct sk_buff* skb)
+{
+ /* Chains are bucketed by the low 4 bits of skb->priority. */
+ struct vlan_priority_tci_mapping *mp =
+ VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];
+
+ while (mp) {
+ if (mp->priority == skb->priority) {
+ return mp->vlan_qos; /* This should already be shifted to mask
+ * correctly with the VLAN's TCI
+ */
+ }
+ mp = mp->next;
+ }
 return 0;
}
*/
if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
+ unsigned short veth_TCI;
+
/* This is not a VLAN frame...but we can fix that! */
- unsigned short veth_TCI = 0;
VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;
#ifdef VLAN_DEBUG
veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif
- dev_queue_xmit(skb);
stats->tx_packets++; /* for statics only */
stats->tx_bytes += skb->len;
- return 0;
-}
-int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
-{
- /* TODO: gotta make sure the underlying layer can handle it,
- * maybe an IFF_VLAN_CAPABLE flag for devices?
- */
- if (VLAN_DEV_INFO(dev)->real_dev->mtu < new_mtu)
- return -ERANGE;
-
- dev->mtu = new_mtu;
+ dev_queue_xmit(skb);
- return new_mtu;
+ return 0;
}
-int vlan_dev_open(struct net_device *dev)
+/* Transmit path for VLAN interfaces whose real device inserts the 802.1Q
+ * tag in hardware: record the tag (VID | egress QoS bits) in the skb's
+ * tx cookie and queue the untagged frame on the real device.
+ * NOTE(review): the return value of dev_queue_xmit() is ignored, so tx
+ * stats count attempts rather than completions -- confirm intended.
+ */
+int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
-	if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP))
-		return -ENETDOWN;
+	struct net_device_stats *stats = vlan_dev_get_stats(dev);
+	struct vlan_skb_tx_cookie *cookie;
-	return 0;
-}
+	stats->tx_packets++;
+	stats->tx_bytes += skb->len;
-int vlan_dev_stop(struct net_device *dev)
-{
-	vlan_flush_mc_list(dev);
-	return 0;
-}
+	skb->dev = VLAN_DEV_INFO(dev)->real_dev;
+	cookie = VLAN_TX_SKB_CB(skb);
+	cookie->magic = VLAN_TX_COOKIE_MAGIC;
+	cookie->vlan_tag = (VLAN_DEV_INFO(dev)->vlan_id |
+			    vlan_dev_get_egress_qos_mask(dev, skb));
+
+	dev_queue_xmit(skb);
-int vlan_dev_init(struct net_device *dev)
-{
-	/* TODO: figure this out, maybe do nothing?? */
	return 0;
}
-void vlan_dev_destruct(struct net_device *dev)
+/* Change the MTU of a VLAN interface.  Any MTU larger than the
+ * underlying real device's MTU is rejected with -ERANGE.
+ * NOTE(review): returns the new MTU on success rather than the usual 0
+ * -- confirm callers expect this.
+ */
+int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
-	if (dev) {
-		vlan_flush_mc_list(dev);
-		if (dev->priv) {
-			dev_put(VLAN_DEV_INFO(dev)->real_dev);
-			if (VLAN_DEV_INFO(dev)->dent) {
-				printk(KERN_ERR __FUNCTION__ ": dent is NOT NULL!\n");
-
-				/* If we ever get here, there is a serious bug
-				 * that must be fixed.
-				 */
-			}
+	/* TODO: gotta make sure the underlying layer can handle it,
+	 * maybe an IFF_VLAN_CAPABLE flag for devices?
+	 */
+	if (VLAN_DEV_INFO(dev)->real_dev->mtu < new_mtu)
+		return -ERANGE;
-			kfree(dev->priv);
+	dev->mtu = new_mtu;
-			VLAN_FMEM_DBG("dev->priv free, addr: %p\n", dev->priv);
-			dev->priv = NULL;
-		}
-	}
+	return new_mtu;
}
int vlan_dev_set_ingress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
return 0;
}
+/* Nonzero when both multicast list entries carry the same hardware
+ * address (equal length and equal bytes).
+ */
+static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
+				  struct dev_mc_list *dmi2)
+{
+	return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
+		(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
+}
+
+/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
+ * an entire list, and we'll iterate through it.
+ *
+ * Returns 1 when dmi should be (re)added to the real device: either no
+ * matching address exists in mc_list, or dmi's user count exceeds the
+ * matching entry's (mc_list is presumably the cached old_mc_list copy
+ * built by vlan_copy_mc_list() -- confirm at the caller).  Returns 0
+ * when a match with an equal-or-greater user count is found.
+ */
+static int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
+{
+	struct dev_mc_list *idmi;
+
+	for (idmi = mc_list; idmi != NULL; ) {
+		if (vlan_dmi_equals(dmi, idmi)) {
+			if (dmi->dmi_users > idmi->dmi_users)
+				return 1;
+			else
+				return 0;
+		} else {
+			idmi = idmi->next;
+		}
+	}
+
+	return 1;
+}
+
+/* Free every node of a privately-allocated multicast list copy (the
+ * nodes were kmalloc'ed by vlan_copy_mc_list()).
+ */
+static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
+{
+	struct dev_mc_list *dmi = mc_list;
+	struct dev_mc_list *next;
+
+	while(dmi) {
+		next = dmi->next;
+		kfree(dmi);
+		dmi = next;
+	}
+}
+
+/* Replace vlan_info->old_mc_list with a freshly allocated snapshot of
+ * mc_list.  Allocation is GFP_ATOMIC -- presumably callers may run with
+ * bottom halves disabled; confirm.  On allocation failure the snapshot
+ * is left incomplete and a warning is logged.  Note the copy is built
+ * in reverse order.
+ */
+static void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
+{
+	struct dev_mc_list *dmi, *new_dmi;
+
+	vlan_destroy_mc_list(vlan_info->old_mc_list);
+	vlan_info->old_mc_list = NULL;
+
+	for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
+		new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
+		if (new_dmi == NULL) {
+			printk(KERN_ERR "vlan: cannot allocate memory. "
+			       "Multicast may not work properly from now.\n");
+			return;
+		}
+
+		/* Copy whole structure, then make new 'next' pointer */
+		*new_dmi = *dmi;
+		new_dmi->next = vlan_info->old_mc_list;
+		vlan_info->old_mc_list = new_dmi;
+	}
+}
+
+/* Delete every multicast address registered on the VLAN device (logging
+ * each removal) and free the cached old_mc_list snapshot.  The loop
+ * re-reads dev->mc_list each iteration because dev_mc_delete() removes
+ * the entry from that list.
+ */
+static void vlan_flush_mc_list(struct net_device *dev)
+{
+	struct dev_mc_list *dmi = dev->mc_list;
+
+	while (dmi) {
+		dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+		printk(KERN_INFO "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
+		       dev->name,
+		       dmi->dmi_addr[0],
+		       dmi->dmi_addr[1],
+		       dmi->dmi_addr[2],
+		       dmi->dmi_addr[3],
+		       dmi->dmi_addr[4],
+		       dmi->dmi_addr[5]);
+		dmi = dev->mc_list;
+	}
+
+	/* dev->mc_list is NULL by the time we get here. */
+	vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
+	VLAN_DEV_INFO(dev)->old_mc_list = NULL;
+}
+
+/* A VLAN interface may only be brought up while its underlying real
+ * device is up; otherwise fail with -ENETDOWN.
+ */
+int vlan_dev_open(struct net_device *dev)
+{
+	if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP))
+		return -ENETDOWN;
+
+	return 0;
+}
+
+/* On ifdown, drop every multicast address this VLAN device registered. */
+int vlan_dev_stop(struct net_device *dev)
+{
+	vlan_flush_mc_list(dev);
+	return 0;
+}
+
+/* Per-device init hook; currently a no-op that always succeeds. */
+int vlan_dev_init(struct net_device *dev)
+{
+	/* TODO: figure this out, maybe do nothing?? */
+	return 0;
+}
+
+/* Final teardown of a VLAN net_device: flush multicast state and free
+ * the private area.  BUG()s if the /proc dentry is still attached,
+ * since that indicates the unregister path failed to clean up.
+ * NOTE(review): unlike the old destructor, the reference held on
+ * real_dev is not dropped here -- presumably released in the
+ * unregister path; confirm.
+ */
+void vlan_dev_destruct(struct net_device *dev)
+{
+	if (dev) {
+		vlan_flush_mc_list(dev);
+		if (dev->priv) {
+			if (VLAN_DEV_INFO(dev)->dent)
+				BUG();
+
+			kfree(dev->priv);
+			dev->priv = NULL;
+		}
+	}
+}
+
/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
vlan_copy_mc_list(vlan_dev->mc_list, VLAN_DEV_INFO(vlan_dev));
}
}
-
-/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
- * an entire list, and we'll iterate through it.
- */
-int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
-{
- struct dev_mc_list *idmi;
-
- for (idmi = mc_list; idmi != NULL; ) {
- if (vlan_dmi_equals(dmi, idmi)) {
- if (dmi->dmi_users > idmi->dmi_users)
- return 1;
- else
- return 0;
- } else {
- idmi = idmi->next;
- }
- }
-
- return 1;
-}
-
-void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
-{
- struct dev_mc_list *dmi, *new_dmi;
-
- vlan_destroy_mc_list(vlan_info->old_mc_list);
- vlan_info->old_mc_list = NULL;
-
- for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
- if (new_dmi == NULL) {
- printk(KERN_ERR "vlan: cannot allocate memory. "
- "Multicast may not work properly from now.\n");
- return;
- }
-
- /* Copy whole structure, then make new 'next' pointer */
- *new_dmi = *dmi;
- new_dmi->next = vlan_info->old_mc_list;
- vlan_info->old_mc_list = new_dmi;
- }
-}
-
-void vlan_flush_mc_list(struct net_device *dev)
-{
- struct dev_mc_list *dmi = dev->mc_list;
-
- while (dmi) {
- dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
- printk(KERN_INFO "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
- dev->name,
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2],
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
- dmi = dev->mc_list;
- }
-
- /* dev->mc_list is NULL by the time we get here. */
- vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
- VLAN_DEV_INFO(dev)->old_mc_list = NULL;
-}
{
struct net_device *vlandev = NULL;
struct vlan_group *grp = NULL;
- int i = 0;
+ int h, i;
char *nm_type = NULL;
struct vlan_dev_info *dev_info = NULL;
nm_type = "UNKNOWN";
}
- cnt += sprintf(buf + cnt, "Name-Type: %s bad_proto_recvd: %lu\n",
- nm_type, vlan_bad_proto_recvd);
+ cnt += sprintf(buf + cnt, "Name-Type: %s\n", nm_type);
- for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
- /* loop through all devices for this device */
-#ifdef VLAN_DEBUG
- printk(VLAN_DBG __FUNCTION__ ": found a group, addr: %p\n",grp);
-#endif
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
- if (!vlandev)
- continue;
-#ifdef VLAN_DEBUG
- printk(VLAN_DBG __FUNCTION__
- ": found a vlan_dev, addr: %p\n", vlandev);
-#endif
- if ((cnt + 100) > VLAN_PROC_BUFSZ) {
- if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
- cnt += sprintf(buf+cnt, "%s", term_msg);
+ spin_lock_bh(&vlan_group_lock);
+ for (h = 0; h < VLAN_GRP_HASH_SIZE; h++) {
+ for (grp = vlan_group_hash[h]; grp != NULL; grp = grp->next) {
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = grp->vlan_devices[i];
+ if (!vlandev)
+ continue;
- return cnt;
- }
- if (!vlandev->priv) {
- printk(KERN_ERR __FUNCTION__
- ": ERROR: vlandev->priv is NULL\n");
- continue;
- }
+ if ((cnt + 100) > VLAN_PROC_BUFSZ) {
+ if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
+ cnt += sprintf(buf+cnt, "%s", term_msg);
- dev_info = VLAN_DEV_INFO(vlandev);
+ goto out;
+ }
-#ifdef VLAN_DEBUG
- printk(VLAN_DBG __FUNCTION__
- ": got a good vlandev, addr: %p\n",
- VLAN_DEV_INFO(vlandev));
-#endif
- cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
- vlandev->name, dev_info->vlan_id,
- dev_info->real_dev->name);
+ dev_info = VLAN_DEV_INFO(vlandev);
+ cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
+ vlandev->name,
+ dev_info->vlan_id,
+ dev_info->real_dev->name);
+ }
}
}
+out:
+ spin_unlock_bh(&vlan_group_lock);
+
return cnt;
}
int cnt = 0;
int i;
-#ifdef VLAN_DEBUG
- printk(VLAN_DBG __FUNCTION__ ": vlandev: %p\n", vlandev);
-#endif
-
- if ((vlandev == NULL) || (!vlandev->priv_flags & IFF_802_1Q_VLAN))
+ if ((vlandev == NULL) || (!(vlandev->priv_flags & IFF_802_1Q_VLAN)))
return 0;
dev_info = VLAN_DEV_INFO(vlandev);
cnt += sprintf(buf + cnt, "EGRESSS priority Mappings: ");
- for (i = 0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
mp = dev_info->egress_priority_map[i];
while (mp) {
cnt += sprintf(buf + cnt, "%lu:%hu ",
tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
fi
fi
-
- dep_tristate '802.1Q VLAN Support (EXPERIMENTAL)' CONFIG_VLAN_8021Q $CONFIG_EXPERIMENTAL
-
fi
+tristate '802.1Q VLAN Support' CONFIG_VLAN_8021Q
comment ' '
tristate 'The IPX protocol' CONFIG_IPX