#include <linux/ax25.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/list.h>
#include <asm/atomic.h>
#define AX25_T1CLAMPLO 1
} ax25_dev;
typedef struct ax25_cb {
- struct ax25_cb *next;
+ struct hlist_node ax25_node;
ax25_address source_addr, dest_addr;
ax25_digi *digipeat;
ax25_dev *ax25_dev;
struct sk_buff_head ack_queue;
struct sk_buff_head frag_queue;
unsigned char window;
- struct timer_list timer;
+ struct timer_list timer, dtimer;
struct sock *sk; /* Backlink to socket */
+ atomic_t refcount;
} ax25_cb;
#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
+#define ax25_for_each(__ax25, node, list) \
+ hlist_for_each_entry(__ax25, node, list, ax25_node)
+
+#define ax25_cb_hold(__ax25) \
+ atomic_inc(&((__ax25)->refcount))
+
+static __inline__ void ax25_cb_put(ax25_cb *ax25)
+{
+ if (atomic_dec_and_test(&ax25->refcount)) {
+ if (ax25->digipeat)
+ kfree(ax25->digipeat);
+ kfree(ax25);
+ }
+}
+
/* af_ax25.c */
-extern ax25_cb *ax25_list;
+extern struct hlist_head ax25_list;
extern spinlock_t ax25_list_lock;
-extern void ax25_free_cb(ax25_cb *);
-extern void ax25_insert_socket(ax25_cb *);
+extern void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-ax25_cb *ax25_list;
+HLIST_HEAD(ax25_list);
spinlock_t ax25_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops ax25_proto_ops;
-/*
- * Free an allocated ax25 control block. This is done to centralise
- * the MOD count code.
- */
-void ax25_free_cb(ax25_cb *ax25)
-{
- if (ax25->digipeat != NULL) {
- kfree(ax25->digipeat);
- ax25->digipeat = NULL;
- }
-
- kfree(ax25);
-}
-
static void ax25_free_sock(struct sock *sk)
{
- ax25_free_cb(ax25_sk(sk));
+ ax25_cb_put(ax25_sk(sk));
}
/*
* Socket removal during an interrupt is now safe.
*/
-static void ax25_remove_socket(ax25_cb *ax25)
+static void ax25_cb_del(ax25_cb *ax25)
{
- ax25_cb *s;
-
- spin_lock_bh(&ax25_list_lock);
- if ((s = ax25_list) == ax25) {
- ax25_list = s->next;
+ if (!hlist_unhashed(&ax25->ax25_node)) {
+ spin_lock_bh(&ax25_list_lock);
+ hlist_del_init(&ax25->ax25_node);
spin_unlock_bh(&ax25_list_lock);
- return;
- }
-
- while (s != NULL && s->next != NULL) {
- if (s->next == ax25) {
- s->next = ax25->next;
- spin_unlock_bh(&ax25_list_lock);
- return;
- }
-
- s = s->next;
+ ax25_cb_put(ax25);
}
- spin_unlock_bh(&ax25_list_lock);
}
/*
{
ax25_dev *ax25_dev;
ax25_cb *s;
+ struct hlist_node *node;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
spin_lock_bh(&ax25_list_lock);
- for (s = ax25_list; s != NULL; s = s->next) {
+ ax25_for_each(s, node, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
s->ax25_dev = NULL;
ax25_disconnect(s, ENETUNREACH);
/*
* Add a socket to the bound sockets list.
*/
-void ax25_insert_socket(ax25_cb *ax25)
+void ax25_cb_add(ax25_cb *ax25)
{
spin_lock_bh(&ax25_list_lock);
- ax25->next = ax25_list;
- ax25_list = ax25;
+ ax25_cb_hold(ax25);
+ hlist_add_head(&ax25->ax25_node, &ax25_list);
spin_unlock_bh(&ax25_list_lock);
}
struct net_device *dev, int type)
{
ax25_cb *s;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- for (s = ax25_list; s != NULL; s = s->next) {
+ ax25_for_each(s, node, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
if (s->sk && !ax25cmp(&s->source_addr, addr) &&
s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
+ sock_hold(s->sk);
spin_unlock_bh(&ax25_list_lock);
-
return s->sk;
}
}
{
struct sock *sk = NULL;
ax25_cb *s;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- for (s = ax25_list; s != NULL; s = s->next) {
+ ax25_for_each(s, node, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
s->sk->sk_type == type) {
ax25_digi *digi, struct net_device *dev)
{
ax25_cb *s;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- for (s = ax25_list; s != NULL; s = s->next) {
+ ax25_for_each(s, node, &ax25_list) {
if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
if (s->digipeat != NULL && s->digipeat->ndigi != 0)
continue;
}
+ ax25_cb_hold(s);
spin_unlock_bh(&ax25_list_lock);
return s;
{
struct sock *sk = NULL;
ax25_cb *s;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- for (s = ax25_list; s != NULL; s = s->next) {
+ ax25_for_each(s, node, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW) {
sk = s->sk;
break;
}
}
+
spin_unlock_bh(&ax25_list_lock);
return sk;
*/
static void ax25_destroy_timer(unsigned long data)
{
- ax25_destroy_socket((ax25_cb *)data);
+ ax25_cb *ax25=(ax25_cb *)data;
+ struct sock *sk;
+
+ sk=ax25->sk;
+
+ bh_lock_sock(sk);
+ sock_hold(sk);
+ ax25_destroy_socket(ax25);
+ bh_unlock_sock(sk);
+ sock_put(sk);
}
/*
{
struct sk_buff *skb;
- ax25_remove_socket(ax25);
+ ax25_cb_del(ax25);
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
kfree_skb(skb);
}
+ while ((skb = skb_dequeue(&ax25->sk->sk_write_queue)) != NULL) {
+ kfree_skb(skb);
+ }
}
if (ax25->sk != NULL) {
if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
atomic_read(&ax25->sk->sk_rmem_alloc)) {
/* Defer: outstanding buffers */
- init_timer(&ax25->timer);
- ax25->timer.expires = jiffies + 10 * HZ;
- ax25->timer.function = ax25_destroy_timer;
- ax25->timer.data = (unsigned long)ax25;
- add_timer(&ax25->timer);
+ init_timer(&ax25->dtimer);
+ ax25->dtimer.expires = jiffies + 2 * HZ;
+ ax25->dtimer.function = ax25_destroy_timer;
+ ax25->dtimer.data = (unsigned long)ax25;
+ add_timer(&ax25->dtimer);
} else {
- sock_put(ax25->sk);
+ struct sock *sk=ax25->sk;
+ ax25->sk=NULL;
+ sock_put(sk);
}
} else {
- ax25_free_cb(ax25);
+ ax25_cb_put(ax25);
}
}
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
- return -EINVAL;
+ return -EINVAL;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
return -EINVAL;
}
- return 0;
+ return 0;
}
/*
return NULL;
memset(ax25, 0x00, sizeof(*ax25));
+ atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);
(sock->state != SS_UNCONNECTED ||
sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
+ dev_put(dev);
break;
}
break;
default:
sk_free(sk);
- ax25_free_cb(ax25);
+ ax25_cb_put(ax25);
return NULL;
}
if (sk == NULL)
return 0;
+ sock_hold(sk);
lock_sock(sk);
ax25 = ax25_sk(sk);
switch (ax25->state) {
case AX25_STATE_0:
ax25_disconnect(ax25, 0);
- goto drop;
+ ax25_destroy_socket(ax25);
+ break;
case AX25_STATE_1:
case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, 0);
- goto drop;
+ ax25_destroy_socket(ax25);
+ break;
case AX25_STATE_3:
case AX25_STATE_4:
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
- goto drop;
+ ax25_destroy_socket(ax25);
}
sock->sk = NULL;
sk->sk_socket = NULL; /* Not used, but we should do this */
release_sock(sk);
- return 0;
- drop:
- release_sock(sk);
- ax25_destroy_socket(ax25);
+ sock_put(sk);
+
return 0;
}
ax25_fillin_cb(ax25, ax25_dev);
done:
- ax25_insert_socket(ax25);
+ ax25_cb_add(ax25);
sk->sk_zapped = 0;
out:
int addr_len, int flags)
{
struct sock *sk = sock->sk;
- ax25_cb *ax25 = ax25_sk(sk);
+ ax25_cb *ax25 = ax25_sk(sk), *ax25t;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
ax25_digi *digi = NULL;
int ct = 0, err = 0;
goto out;
ax25_fillin_cb(ax25, ax25->ax25_dev);
- ax25_insert_socket(ax25);
+ ax25_cb_add(ax25);
} else {
if (ax25->ax25_dev == NULL) {
err = -EHOSTUNREACH;
}
if (sk->sk_type == SOCK_SEQPACKET &&
- ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
- ax25->ax25_dev->dev)) {
+ (ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
+ ax25->ax25_dev->dev))) {
if (digi != NULL)
kfree(digi);
err = -EADDRINUSE; /* Already such a connection */
+ ax25_cb_put(ax25t);
goto out;
}
lock_sock(sk);
continue;
}
+ current->state = TASK_RUNNING;
+ remove_wait_queue(sk->sk_sleep, &wait);
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
sock->state = SS_CONNECTED;
+ err=0;
out:
release_sock(sk);
- return 0;
+ return err;
}
if (skb)
break;
- current->state = TASK_INTERRUPTIBLE;
release_sock(sk);
+ current->state = TASK_INTERRUPTIBLE;
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
if (!signal_pending(tsk)) {
schedule();
+ current->state = TASK_RUNNING;
lock_sock(sk);
continue;
}
+ current->state = TASK_RUNNING;
+ remove_wait_queue(sk->sk_sleep, &wait);
return -ERESTARTSYS;
}
current->state = TASK_RUNNING;
SOCK_DEBUG(sk, "AX.25: sendto: building packet.\n");
/* Assume the worst case */
- size = len + 3 + ax25_addr_size(dp) + AX25_BPQ_HEADER_LEN;
+ size = len + ax25->ax25_dev->dev->hard_header_len;
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb == NULL)
/* old structure? */
if (cmd == SIOCAX25GETINFOOLD) {
- static int warned;
+ static int warned = 0;
if (!warned) {
printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n",
current->comm);
int len = 0;
off_t pos = 0;
off_t begin = 0;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
* magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode
*/
- for (ax25 = ax25_list; ax25 != NULL; ax25 = ax25->next) {
+ ax25_for_each(ax25, node, &ax25_list) {
len += sprintf(buffer+len, "%8.8lx %s %s%s ",
(long) ax25,
ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name,
ax25->paclen);
if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
len += sprintf(buffer + len, " %d %d %ld\n",
atomic_read(&ax25->sk->sk_wmem_alloc),
atomic_read(&ax25->sk->sk_rmem_alloc),
ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L);
+ bh_unlock_sock(ax25->sk);
} else {
len += sprintf(buffer + len, " * * *\n");
}
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/*
* For WAIT_SABM connections we will produce an accept
*/
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
+ bh_unlock_sock(ax25->sk);
}
ax25_dama_on(ax25);
void ax25_ds_enquiry_response(ax25_cb *ax25)
{
ax25_cb *ax25o;
+ struct hlist_node *node;
/* Please note that neither DK4EG´s nor DG2FEF´s
* DAMA spec mention the following behaviour as seen
ax25_ds_set_timer(ax25->ax25_dev);
spin_lock_bh(&ax25_list_lock);
- for (ax25o = ax25_list; ax25o != NULL; ax25o = ax25o->next) {
+ ax25_for_each(ax25o, node, &ax25_list) {
if (ax25o == ax25)
continue;
{
ax25_cb *ax25;
int res = 0;
+ struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
- for (ax25 = ax25_list; ax25 != NULL ; ax25 = ax25->next)
+ ax25_for_each(ax25, node, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
{
ax25_dev *ax25_dev = (struct ax25_dev *) arg;
ax25_cb *ax25;
+ struct hlist_node *node;
if (ax25_dev == NULL || !ax25_dev->dama.slave)
return; /* Yikes! */
}
spin_lock_bh(&ax25_list_lock);
- for (ax25=ax25_list; ax25 != NULL; ax25 = ax25->next) {
+ ax25_for_each(ax25, node, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
{
+ struct sock *sk=ax25->sk;
+
+ if (sk)
+ bh_lock_sock(sk);
+
switch (ax25->state) {
case AX25_STATE_0:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
- if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->sk_state == TCP_LISTEN &&
- sock_flag(ax25->sk, SOCK_DEAD))) {
- ax25_destroy_socket(ax25);
+ if (!sk || sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN &&
+ sock_flag(sk, SOCK_DEAD))) {
+ if (sk) {
+ sock_hold(sk);
+ ax25_destroy_socket(ax25);
+ sock_put(sk);
+ bh_unlock_sock(sk);
+ } else
+ ax25_destroy_socket(ax25);
return;
}
break;
/*
* Check the state of the receive buffer.
*/
- if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->sk_rmem_alloc) <
- (ax25->sk->sk_rcvbuf / 2) &&
+ if (sk != NULL) {
+ if (atomic_read(&sk->sk_rmem_alloc) <
+ (sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
break;
}
+ if (sk)
+ bh_unlock_sock(sk);
+
ax25_start_heartbeat(ax25);
}
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
+ bh_lock_sock(ax25->sk);
}
}
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
+ bh_lock_sock(ax25->sk);
if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
else
ax25->condition |= AX25_COND_OWN_RX_BUSY;
}
+ bh_unlock_sock(ax25->sk);
}
return queued;
if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
kfree_skb(skb);
+ ax25_cb_put(ax25);
return 0;
}
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
+ bh_lock_sock(sk);
if (sk->sk_ack_backlog == sk->sk_max_ack_backlog ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
kfree_skb(skb);
+ bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
make->sk_pair = sk;
sk->sk_ack_backlog++;
+ bh_unlock_sock(sk);
+ sock_put(sk);
} else {
if (!mine) {
kfree_skb(skb);
ax25->state = AX25_STATE_3;
- ax25_insert_socket(ax25);
+ ax25_cb_add(ax25);
ax25_start_heartbeat(ax25);
ax25_start_t3timer(ax25);
ax25_address *src, *dst;
ax25_dev *ax25_dev;
ax25_route _route, *route = &_route;
+ ax25_cb *ax25;
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */
ourskb->nh.raw = ourskb->data;
- ax25_send_frame(ourskb, ax25_dev->values[AX25_VALUES_PACLEN], &src_c,
-&dst_c, route->digipeat, dev);
-
+ ax25=ax25_send_frame(
+ ourskb,
+ ax25_dev->values[AX25_VALUES_PACLEN],
+ &src_c,
+ &dst_c, route->digipeat, dev);
+ if (ax25) {
+ ax25_cb_put(ax25);
+ }
goto put;
}
}
if (digi != NULL) {
if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
- ax25_free_cb(ax25);
+ ax25_cb_put(ax25);
return NULL;
}
memcpy(ax25->digipeat, digi, sizeof(ax25_digi));
#endif
}
- ax25_insert_socket(ax25);
+ ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
if (ax25_rt->digipeat != NULL)
kfree(ax25_rt->digipeat);
kfree(ax25_rt);
+ return;
}
/*
ax25_adjust_path(addr, ax25->digipeat);
}
- if (ax25->sk != NULL)
+ if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
ax25->sk->sk_zapped = 0;
+ bh_unlock_sock(ax25->sk);
+ }
put:
ax25_put_route(ax25_rt);
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/* For WAIT_SABM connections we will produce an accept ready socket here */
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
+ bh_unlock_sock(ax25->sk);
}
}
break;
void ax25_std_heartbeat_expiry(ax25_cb *ax25)
{
+ struct sock *sk=ax25->sk;
+
+ if (sk)
+ bh_lock_sock(sk);
+
switch (ax25->state) {
case AX25_STATE_0:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
- if (!ax25->sk || sock_flag(ax25->sk, SOCK_DESTROY) ||
- (ax25->sk->sk_state == TCP_LISTEN &&
- sock_flag(ax25->sk, SOCK_DEAD))) {
- ax25_destroy_socket(ax25);
+ if (!sk || sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN &&
+ sock_flag(sk, SOCK_DEAD))) {
+ if (sk) {
+ sock_hold(sk);
+ ax25_destroy_socket(ax25);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ } else
+ ax25_destroy_socket(ax25);
return;
}
break;
/*
* Check the state of the receive buffer.
*/
- if (ax25->sk != NULL) {
- if (atomic_read(&ax25->sk->sk_rmem_alloc) <
- (ax25->sk->sk_rcvbuf / 2) &&
+ if (sk != NULL) {
+ if (atomic_read(&sk->sk_rmem_alloc) <
+ (sk->sk_rcvbuf / 2) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
+ if (sk)
+ bh_unlock_sock(sk);
+
ax25_start_heartbeat(ax25);
}
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
+ bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
+ bh_unlock_sock(ax25->sk);
}
}
{
int proto = AX25_PROTO_STD_SIMPLEX;
ax25_cb *ax25 = (ax25_cb *)param;
- struct sock *sk = ax25->sk;
if (ax25->ax25_dev)
proto = ax25->ax25_dev->values[AX25_VALUES_PROTOCOL];
- bh_lock_sock(sk);
-
switch (proto) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
break;
#endif
}
- bh_unlock_sock(sk);
}
static void ax25_t1timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
- struct sock *sk = ax25->sk;
- bh_lock_sock(sk);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
break;
#endif
}
- bh_unlock_sock(sk);
}
static void ax25_t2timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
- struct sock *sk = ax25->sk;
- bh_lock_sock(sk);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
break;
#endif
}
- bh_unlock_sock(sk);
}
static void ax25_t3timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
- struct sock *sk = ax25->sk;
- bh_lock_sock(sk);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
break;
#endif
}
- bh_unlock_sock(sk);
}
static void ax25_idletimer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
- struct sock *sk = ax25->sk;
- bh_lock_sock(sk);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
break;
#endif
}
- bh_unlock_sock(sk);
}
for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
ax25_table_size += sizeof(ctl_table);
- if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL)
+ if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
+ spin_unlock_bh(&ax25_dev_lock);
return;
+ }
memset(ax25_table, 0x00, ax25_table_size);