git.neil.brown.name Git - history.git/commitdiff
[PATCH] Rearrange setting of snd/rcv buf size to avoid locking issue
author: Neil Brown <neilb@cse.unsw.edu.au>
Tue, 13 Aug 2002 00:48:41 +0000 (17:48 -0700)
committer: Linus Torvalds <torvalds@home.transmeta.com>
Tue, 13 Aug 2002 00:48:41 +0000 (17:48 -0700)
Currently svc_sock_setbuf can be called under a spinlock,
but it can try to lock a socket, which can block....

Now when we decide that changing the size might be good we
set a flag (SK_CHNGBUF) and then later (when the next
packet arrives) we change the sizes appropriately.

include/linux/sunrpc/svcsock.h
net/sunrpc/svcsock.c

index eb118134b0c076c6363e129f20c143480da437b8..34cbc22c0732cf870ff5c994f8d029430328595e 100644 (file)
@@ -30,6 +30,7 @@ struct svc_sock {
 #define        SK_TEMP         4                       /* temp (TCP) socket */
 #define        SK_QUED         5                       /* on serv->sk_sockets */
 #define        SK_DEAD         6                       /* socket closed */
+#define        SK_CHNGBUF      7                       /* need to change snd/rcv buffer sizes */
 
        int                     sk_reserved;    /* space on outq that is reserved */
 
index 86340a0ded9574fc6721402f2ad95f1cddf42808..afda80a6fd9fedb7eb95849efe9a5e12aa26cb40 100644 (file)
@@ -481,6 +481,16 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
        u32             *data;
        int             err, len;
 
+       if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+           /* udp sockets need large rcvbuf as all pending
+            * requests are still in that buffer.  sndbuf must
+            * also be large enough that there is enough space
+            * for one reply per thread.
+            */
+           svc_sock_setbufsize(svsk->sk_sock,
+                               (serv->sv_nrthreads+3) * serv->sv_bufsz,
+                               (serv->sv_nrthreads+3) * serv->sv_bufsz);
+
        clear_bit(SK_DATA, &svsk->sk_flags);
        while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
                svc_sock_received(svsk);
@@ -564,6 +574,8 @@ svc_udp_init(struct svc_sock *svsk)
        svsk->sk_recvfrom = svc_udp_recvfrom;
        svsk->sk_sendto = svc_udp_sendto;
 
+       set_bit(SK_CHNGBUF, &svsk->sk_flags);
+
        return 0;
 }
 
@@ -771,6 +783,18 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
                return 0;
        }
 
+       if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+               /* sndbuf needs to have room for one request
+                * per thread, otherwise we can stall even when the
+                * network isn't a bottleneck.
+                * rcvbuf just needs to be able to hold a few requests.
+                * Normally they will be removed from the queue
+                * as soon as a complete request arrives.
+                */
+               svc_sock_setbufsize(svsk->sk_sock,
+                                   (serv->sv_nrthreads+3) * serv->sv_bufsz,
+                                   3 * serv->sv_bufsz);
+
        clear_bit(SK_DATA, &svsk->sk_flags);
 
        /* Receive data. If we haven't got the record length yet, get
@@ -916,17 +940,7 @@ svc_tcp_init(struct svc_sock *svsk)
                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;
 
-               /* sndbuf needs to have room for one request
-                * per thread, otherwise we can stall even when the
-                * network isn't a bottleneck.
-                * rcvbuf just needs to be able to hold a few requests.
-                * Normally they will be removed from the queue 
-                * as soon a a complete request arrives.
-                */
-               svc_sock_setbufsize(svsk->sk_sock,
-                                   (svsk->sk_server->sv_nrthreads+3) *
-                                   svsk->sk_server->sv_bufsz,
-                                   3 * svsk->sk_server->sv_bufsz);
+               set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
 
        return 0;
@@ -945,30 +959,12 @@ svc_sock_update_bufs(struct svc_serv *serv)
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk = 
                        list_entry(le, struct svc_sock, sk_list);
-               struct socket *sock = svsk->sk_sock;
-               if (sock->type == SOCK_DGRAM) {
-                       /* udp sockets need large rcvbuf as all pending
-                        * requests are still in that buffer.
-                        */
-                       svc_sock_setbufsize(sock,
-                                           (serv->sv_nrthreads+3) * serv->sv_bufsz,
-                                           (serv->sv_nrthreads+3) * serv->sv_bufsz);
-               } else if (svsk->sk_sk->state != TCP_LISTEN) {
-                       printk(KERN_ERR "RPC update_bufs: permanent sock neither UDP or TCP_LISTEN\n");
-               }
+               set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
-               struct socket *sock = svsk->sk_sock;
-               if (sock->type == SOCK_STREAM) {
-                       /* See svc_tcp_init above for rationale on buffer sizes */
-                       svc_sock_setbufsize(sock,
-                                           (serv->sv_nrthreads+3) *
-                                           serv->sv_bufsz,
-                                           3 * serv->sv_bufsz);
-               } else 
-                       printk(KERN_ERR "RPC update_bufs: temp sock not TCP\n");
+               set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
 }