itself to the server once and once only.
This patch ensures that we do this by sharing a single local identifier
(struct nfs4_client) among all mountpoints that talk to the same
server IP address.
nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
+ struct nfs4_client *clp;
struct nfs4_compound compound;
struct nfs4_op ops[4];
struct nfs_fsinfo fsinfo;
struct qstr q;
int status;
- fattr->valid = 0;
-
- if (!(server->nfs4_state = nfs4_get_client()))
+ clp = server->nfs4_state = nfs4_get_client(&server->addr.sin_addr);
+ if (!clp)
return -ENOMEM;
+ down_write(&clp->cl_sem);
+ /* Has the clientid already been initialized? */
+ if (clp->cl_state != NFS4CLNT_NEW) {
+ /* Yep, so just read the root attributes and the lease time. */
+ fattr->valid = 0;
+ nfs4_setup_compound(&compound, ops, server, "getrootfh");
+ nfs4_setup_putrootfh(&compound);
+ nfs4_setup_getrootattr(&compound, fattr, &fsinfo);
+ nfs4_setup_getfh(&compound, fhandle);
+ if ((status = nfs4_call_compound(&compound, NULL, 0)))
+ goto out_unlock;
+ goto no_setclientid;
+ }
+
/*
* SETCLIENTID.
* Until delegations are imported, we don't bother setting the program
nfs4_setup_compound(&compound, ops, server, "setclientid");
nfs4_setup_setclientid(&compound, 0, 0);
if ((status = nfs4_call_compound(&compound, NULL, 0)))
- goto out;
+ goto out_unlock;
/*
* SETCLIENTID_CONFIRM, plus root filehandle.
* We also get the lease time here.
*/
+ fattr->valid = 0;
nfs4_setup_compound(&compound, ops, server, "setclientid_confirm");
nfs4_setup_setclientid_confirm(&compound);
nfs4_setup_putrootfh(&compound);
nfs4_setup_getrootattr(&compound, fattr, &fsinfo);
nfs4_setup_getfh(&compound, fhandle);
if ((status = nfs4_call_compound(&compound, NULL, 0)))
- goto out;
-
+ goto out_unlock;
+ clp->cl_state = NFS4CLNT_OK;
+
+no_setclientid:
/*
* Now that we have instantiated the clientid and determined
* the lease time, we can initialize the renew daemon for this
* server.
+ * FIXME: we only need one renewd daemon per server.
*/
server->lease_time = fsinfo.lease_time * HZ;
if ((status = nfs4_init_renewd(server)))
- goto out;
+ goto out_unlock;
+ up_write(&clp->cl_sem);
/*
* Now we do a separate LOOKUP for each component of the mount path.
p++;
q.len = p - q.name;
+ fattr->valid = 0;
nfs4_setup_compound(&compound, ops, server, "mount");
nfs4_setup_putfh(&compound, fhandle);
nfs4_setup_lookup(&compound, &q);
}
break;
}
-
-out:
+ return status;
+out_unlock:
+ up_write(&clp->cl_sem);
+ nfs4_put_client(clp);
+ server->nfs4_state = NULL;
return status;
}
nfs4_stateid one_stateid =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static LIST_HEAD(nfs4_clientid_list);
/*
* nfs4_get_client(): returns an empty client structure
* bother putting them in a slab cache...
*/
struct nfs4_client *
-nfs4_get_client(void)
+nfs4_alloc_client(struct in_addr *addr)
{
struct nfs4_client *clp;
- if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)))
+ if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL))) {
memset(clp, 0, sizeof(*clp));
+ memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
+ init_rwsem(&clp->cl_sem);
+ INIT_LIST_HEAD(&clp->cl_state_owners);
+ spin_lock_init(&clp->cl_lock);
+ atomic_set(&clp->cl_count, 1);
+ clp->cl_state = NFS4CLNT_NEW;
+ }
return clp;
}
void
-nfs4_put_client(struct nfs4_client *clp)
+nfs4_free_client(struct nfs4_client *clp)
{
BUG_ON(!clp);
kfree(clp);
}
+/*
+ * nfs4_get_client(): find or create the nfs4_client for a server address.
+ *
+ * Allocates a candidate client before taking state_spinlock, then searches
+ * nfs4_clientid_list for an existing client with the same address. If one
+ * is found, its refcount is bumped and the speculative allocation is freed;
+ * otherwise the new client is inserted into the global list.
+ *
+ * Returns the shared client with an elevated reference count, or NULL only
+ * when allocation failed and no matching client already exists.
+ */
+struct nfs4_client *
+nfs4_get_client(struct in_addr *addr)
+{
+ struct nfs4_client *new, *clp = NULL;
+
+ /* Allocate outside the lock; we may throw this away below. */
+ new = nfs4_alloc_client(addr);
+ spin_lock(&state_spinlock);
+ list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
+ if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0)
+ goto found;
+ }
+ if (new)
+ list_add(&new->cl_servers, &nfs4_clientid_list);
+ spin_unlock(&state_spinlock);
+ return new;
+found:
+ /* Existing client for this address: take a reference, drop the spare. */
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&state_spinlock);
+ if (new)
+ nfs4_free_client(new);
+ return clp;
+}
+
+/*
+ * nfs4_put_client(): release a reference to a shared nfs4_client.
+ *
+ * atomic_dec_and_lock() only takes state_spinlock when the count drops
+ * to zero, so the unlink from nfs4_clientid_list and the final free
+ * happen atomically with respect to concurrent nfs4_get_client() lookups.
+ */
+void
+nfs4_put_client(struct nfs4_client *clp)
+{
+ if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
+ return;
+ list_del(&clp->cl_servers);
+ spin_unlock(&state_spinlock);
+ nfs4_free_client(clp);
+}
+
static inline u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/uio.h>
((err) != NFSERR_RESOURCE) && \
((err) != NFSERR_NOFILEHANDLE))
+enum nfs4_client_state {
+ NFS4CLNT_OK = 0,
+ NFS4CLNT_NEW,
+};
+
+/*
+ * The nfs4_client identifies our client state to the server.
+ */
struct nfs4_client {
- u64 cl_clientid; /* constant */
- nfs4_verifier cl_confirm;
+ struct list_head cl_servers; /* Global list of servers */
+ struct in_addr cl_addr; /* Server identifier */
+ u64 cl_clientid; /* constant */
+ nfs4_verifier cl_confirm;
+ enum nfs4_client_state cl_state;
u32 cl_lockowner_id;
+
+ /*
+ * The following rwsem ensures exclusive access to the server
+ * while we recover the state following a lease expiration.
+ */
+ struct rw_semaphore cl_sem;
+
+ struct list_head cl_state_owners;
+ spinlock_t cl_lock;
+ atomic_t cl_count;
};
/*
* the protocol.
*/
struct nfs4_state_owner {
+ struct list_head so_list; /* per-clientid list of state_owners */
u32 so_id; /* 32-bit identifier, unique */
struct semaphore so_sema;
u32 so_seqid; /* protected by so_sema */
extern int nfs4_init_renewd(struct nfs_server *server);
/* nfs4state.c */
-extern struct nfs4_client *nfs4_get_client(void);
+extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
extern struct nfs4_state_owner * nfs4_get_state_owner(struct inode *inode);
void nfs4_put_state_owner(struct inode *inode, struct nfs4_state_owner *sp);