@@ -354,6 +354,124 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
 	.release	= nfsd4_cb_notify_lock_release,
 };
 
+/*
+ * We store the NONE, READ, WRITE, and BOTH bits separately in the
+ * st_{access,deny}_bmap field of the stateid, in order to track not
+ * only what share bits are currently in force, but also what
+ * combinations of share bits previous opens have used. This allows us
+ * to enforce the recommendation of rfc 3530 14.2.19 that the server
+ * return an error if the client attempt to downgrade to a combination
+ * of share bits not explicable by closing some of its previous opens.
+ *
+ * XXX: This enforcement is actually incomplete, since we don't keep
+ * track of access/deny bit combinations; so, e.g., we allow:
+ *
+ *	OPEN allow read, deny write
+ *	OPEN allow both, deny none
+ *	DOWNGRADE allow read, deny none
+ *
+ * which we should reject.
+ */
+static unsigned int
+bmap_to_share_mode(unsigned long bmap)
+{
+	int i;
+	unsigned int access = 0;
+
+	for (i = 1; i < 4; i++) {
+		if (test_bit(i, &bmap))
+			access |= i;
+	}
+	return access;
+}
+
+/* set share access for a given stateid */
+static inline void
+set_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << access;
+
+	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
+	stp->st_access_bmap |= mask;
+}
+
+/* clear share access for a given stateid */
+static inline void
+clear_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << access;
+
+	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
+	stp->st_access_bmap &= ~mask;
+}
+
+/* test whether a given stateid has access */
+static inline bool
+test_access(u32 access, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << access;
+
+	return (bool)(stp->st_access_bmap & mask);
+}
+
+/* set share deny for a given stateid */
+static inline void
+set_deny(u32 deny, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << deny;
+
+	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
+	stp->st_deny_bmap |= mask;
+}
+
+/* clear share deny for a given stateid */
+static inline void
+clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << deny;
+
+	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
+	stp->st_deny_bmap &= ~mask;
+}
+
+/* test whether a given stateid is denying specific access */
+static inline bool
+test_deny(u32 deny, struct nfs4_ol_stateid *stp)
+{
+	unsigned char mask = 1 << deny;
+
+	return (bool)(stp->st_deny_bmap & mask);
+}
+
+static int nfs4_access_to_omode(u32 access)
+{
+	switch (access & NFS4_SHARE_ACCESS_BOTH) {
+	case NFS4_SHARE_ACCESS_READ:
+		return O_RDONLY;
+	case NFS4_SHARE_ACCESS_WRITE:
+		return O_WRONLY;
+	case NFS4_SHARE_ACCESS_BOTH:
+		return O_RDWR;
+	}
+	WARN_ON_ONCE(1);
+	return O_RDONLY;
+}
+
+static inline int
+access_permit_read(struct nfs4_ol_stateid *stp)
+{
+	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
+		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
+		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
+}
+
+static inline int
+access_permit_write(struct nfs4_ol_stateid *stp)
+{
+	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
+		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
+}
+
 static inline struct nfs4_stateowner *
 nfs4_get_stateowner(struct nfs4_stateowner *sop)
 {
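(Aside, illustration only; not part of the patch.) The helpers being moved above record every share-access mode a stateid's opens have ever requested in st_access_bmap, and bmap_to_share_mode() collapses that history back into a single NFS4_SHARE_ACCESS_* value. A minimal standalone C sketch of that collapse; the SHARE_* constants are hypothetical stand-ins with the same numeric values as the kernel's NFS4_SHARE_ACCESS_* macros:

	#include <assert.h>

	/* Same numeric values as NFS4_SHARE_ACCESS_READ/WRITE/BOTH. */
	enum { SHARE_READ = 1, SHARE_WRITE = 2, SHARE_BOTH = 3 };

	static unsigned int bmap_to_share_mode(unsigned long bmap)
	{
		unsigned int access = 0;
		int i;

		for (i = 1; i < 4; i++)		/* bits 1..3 track READ, WRITE, BOTH */
			if (bmap & (1UL << i))
				access |= i;
		return access;
	}

	int main(void)
	{
		unsigned long bmap = 0;

		bmap |= 1UL << SHARE_READ;	/* first OPEN: allow read */
		bmap |= 1UL << SHARE_BOTH;	/* later OPEN: allow both */

		/* The union of everything ever requested is READ|BOTH == BOTH. */
		assert(bmap_to_share_mode(bmap) == SHARE_BOTH);
		return 0;
	}

Because READ (1) and WRITE (2) OR together to BOTH (3), the union of all previously granted modes falls out of a plain bitwise OR over the recorded bits.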
@@ -543,14 +661,12 @@ static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
 #define FILE_HASH_BITS 8
 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
 
-static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
+static unsigned int file_hashval(struct svc_fh *fh)
 {
-	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
-}
+	struct inode *inode = d_inode(fh->fh_dentry);
 
-static unsigned int file_hashval(struct knfsd_fh *fh)
-{
-	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
+	/* XXX: why not (here & in file cache) use inode? */
+	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
 }
 
 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
@@ -1152,108 +1268,6 @@ static unsigned int clientstr_hashval(struct xdr_netobj name)
 	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
 }
 
-/*
- * We store the NONE, READ, WRITE, and BOTH bits separately in the
- * st_{access,deny}_bmap field of the stateid, in order to track not
- * only what share bits are currently in force, but also what
- * combinations of share bits previous opens have used. This allows us
- * to enforce the recommendation of rfc 3530 14.2.19 that the server
- * return an error if the client attempt to downgrade to a combination
- * of share bits not explicable by closing some of its previous opens.
- *
- * XXX: This enforcement is actually incomplete, since we don't keep
- * track of access/deny bit combinations; so, e.g., we allow:
- *
- *	OPEN allow read, deny write
- *	OPEN allow both, deny none
- *	DOWNGRADE allow read, deny none
- *
- * which we should reject.
- */
-static unsigned int
-bmap_to_share_mode(unsigned long bmap) {
-	int i;
-	unsigned int access = 0;
-
-	for (i = 1; i < 4; i++) {
-		if (test_bit(i, &bmap))
-			access |= i;
-	}
-	return access;
-}
-
-/* set share access for a given stateid */
-static inline void
-set_access(u32 access, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << access;
-
-	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
-	stp->st_access_bmap |= mask;
-}
-
-/* clear share access for a given stateid */
-static inline void
-clear_access(u32 access, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << access;
-
-	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
-	stp->st_access_bmap &= ~mask;
-}
-
-/* test whether a given stateid has access */
-static inline bool
-test_access(u32 access, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << access;
-
-	return (bool)(stp->st_access_bmap & mask);
-}
-
-/* set share deny for a given stateid */
-static inline void
-set_deny(u32 deny, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << deny;
-
-	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
-	stp->st_deny_bmap |= mask;
-}
-
-/* clear share deny for a given stateid */
-static inline void
-clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << deny;
-
-	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
-	stp->st_deny_bmap &= ~mask;
-}
-
-/* test whether a given stateid is denying specific access */
-static inline bool
-test_deny(u32 deny, struct nfs4_ol_stateid *stp)
-{
-	unsigned char mask = 1 << deny;
-
-	return (bool)(stp->st_deny_bmap & mask);
-}
-
-static int nfs4_access_to_omode(u32 access)
-{
-	switch (access & NFS4_SHARE_ACCESS_BOTH) {
-	case NFS4_SHARE_ACCESS_READ:
-		return O_RDONLY;
-	case NFS4_SHARE_ACCESS_WRITE:
-		return O_WRONLY;
-	case NFS4_SHARE_ACCESS_BOTH:
-		return O_RDWR;
-	}
-	WARN_ON_ONCE(1);
-	return O_RDONLY;
-}
-
 /*
  * A stateid that had a deny mode associated with it is being released
  * or downgraded. Recalculate the deny mode on the file.
@@ -3125,6 +3139,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 			goto out_nolock;
 		}
+		new->cl_mach_cred = true;
 		break;
 	case SP4_NONE:
 		break;
 	default:				/* checked by xdr code */
@@ -4072,7 +4087,7 @@ static struct nfs4_file *nfsd4_alloc_file(void)
 }
 
 /* OPEN Share state helper functions */
-static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
+static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
 				struct nfs4_file *fp)
 {
 	lockdep_assert_held(&state_lock);
@@ -4082,12 +4097,14 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
 	INIT_LIST_HEAD(&fp->fi_stateids);
 	INIT_LIST_HEAD(&fp->fi_delegations);
 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
-	fh_copy_shallow(&fp->fi_fhandle, fh);
+	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
 	fp->fi_deleg_file = NULL;
 	fp->fi_had_conflict = false;
 	fp->fi_share_deny = 0;
 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
+	fp->fi_aliased = false;
+	fp->fi_inode = d_inode(fh->fh_dentry);
 #ifdef CONFIG_NFSD_PNFS
 	INIT_LIST_HEAD(&fp->fi_lo_states);
 	atomic_set(&fp->fi_lo_recalls, 0);
@@ -4426,13 +4443,13 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
 
 /* search file_hashtbl[] for file */
 static struct nfs4_file *
-find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
+find_file_locked(struct svc_fh *fh, unsigned int hashval)
 {
 	struct nfs4_file *fp;
 
 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
 				lockdep_is_held(&state_lock)) {
-		if (fh_match(&fp->fi_fhandle, fh)) {
+		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
 			if (refcount_inc_not_zero(&fp->fi_ref))
 				return fp;
 		}
@@ -4440,8 +4457,32 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
 	return NULL;
 }
 
-struct nfs4_file *
-find_file(struct knfsd_fh *fh)
+static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
+				     unsigned int hashval)
 {
 	struct nfs4_file *fp;
+	struct nfs4_file *ret = NULL;
+	bool alias_found = false;
+
+	spin_lock(&state_lock);
+	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
+				 lockdep_is_held(&state_lock)) {
+		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
+			if (refcount_inc_not_zero(&fp->fi_ref))
+				ret = fp;
+		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
+			fp->fi_aliased = alias_found = true;
+	}
+	if (likely(ret == NULL)) {
+		nfsd4_init_file(fh, hashval, new);
+		new->fi_aliased = alias_found;
+		ret = new;
+	}
+	spin_unlock(&state_lock);
+	return ret;
+}
+
+static struct nfs4_file * find_file(struct svc_fh *fh)
+{
+	struct nfs4_file *fp;
 	unsigned int hashval = file_hashval(fh);
@@ -4453,7 +4494,7 @@ find_file(struct knfsd_fh *fh)
 }
 
 static struct nfs4_file *
-find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
+find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
 {
 	struct nfs4_file *fp;
 	unsigned int hashval = file_hashval(fh);
@@ -4464,15 +4505,7 @@ find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
 	if (fp)
 		return fp;
 
-	spin_lock(&state_lock);
-	fp = find_file_locked(fh, hashval);
-	if (likely(fp == NULL)) {
-		nfsd4_init_file(fh, hashval, new);
-		fp = new;
-	}
-	spin_unlock(&state_lock);
-
-	return fp;
+	return insert_file(new, fh, hashval);
 }
 
 /*
@@ -4485,7 +4518,7 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
 	struct nfs4_file *fp;
 	__be32 ret = nfs_ok;
 
-	fp = find_file(&current_fh->fh_handle);
+	fp = find_file(current_fh);
 	if (!fp)
 		return ret;
 	/* Check for conflicting share reservations */
@@ -4880,6 +4913,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
 	if (nf)
 		nfsd_file_put(nf);
 
+	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
+								access));
+	if (status)
+		goto out_put_access;
+
 	status = nfsd4_truncate(rqstp, cur_fh, open);
 	if (status)
 		goto out_put_access;
@@ -4951,6 +4989,65 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
 	return fl;
 }
 
+static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
+						struct nfs4_file *fp)
+{
+	struct nfs4_ol_stateid *st;
+	struct file *f = fp->fi_deleg_file->nf_file;
+	struct inode *ino = locks_inode(f);
+	int writes;
+
+	writes = atomic_read(&ino->i_writecount);
+	if (!writes)
+		return 0;
+	/*
+	 * There could be multiple filehandles (hence multiple
+	 * nfs4_files) referencing this file, but that's not too
+	 * common; let's just give up in that case rather than
+	 * trying to go look up all the clients using that other
+	 * nfs4_file as well:
+	 */
+	if (fp->fi_aliased)
+		return -EAGAIN;
+	/*
+	 * If there's a close in progress, make sure that we see it
+	 * clear any fi_fds[] entries before we see it decrement
+	 * i_writecount:
+	 */
+	smp_mb__after_atomic();
+
+	if (fp->fi_fds[O_WRONLY])
+		writes--;
+	if (fp->fi_fds[O_RDWR])
+		writes--;
+	if (writes > 0)
+		return -EAGAIN; /* There may be non-NFSv4 writers */
+	/*
+	 * It's possible there are non-NFSv4 write opens in progress,
+	 * but if they haven't incremented i_writecount yet then they
+	 * also haven't called break lease yet; so, they'll break this
+	 * lease soon enough. So, all that's left to check for is NFSv4
+	 * opens:
+	 */
+	spin_lock(&fp->fi_lock);
+	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
+		if (st->st_openstp == NULL /* it's an open */ &&
+		    access_permit_write(st) &&
+		    st->st_stid.sc_client != clp) {
+			spin_unlock(&fp->fi_lock);
+			return -EAGAIN;
+		}
+	}
+	spin_unlock(&fp->fi_lock);
+	/*
+	 * There's a small chance that we could be racing with another
+	 * NFSv4 open. However, any open that hasn't added itself to
+	 * the fi_stateids list also hasn't called break_lease yet; so,
+	 * they'll break this lease soon enough.
+	 */
+	return 0;
+}
+
 static struct nfs4_delegation *
 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 		struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
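(Aside, illustration only; not part of the patch.) nfsd4_check_conflicting_opens() above decides whether a read delegation is safe by discounting nfsd's own O_WRONLY/O_RDWR opens from the inode's i_writecount; any writer left over must be a local or non-NFSv4 writer, so the delegation is refused with -EAGAIN. A standalone userspace sketch of that accounting; check_conflicting_writers() is a hypothetical stand-in, not a kernel function:

	#include <assert.h>
	#include <errno.h>
	#include <stdbool.h>

	/*
	 * writers:     what the kernel would see in inode->i_writecount
	 * nfsd_wronly: nfsd itself holds a struct file opened O_WRONLY
	 * nfsd_rdwr:   nfsd itself holds a struct file opened O_RDWR
	 */
	static int check_conflicting_writers(int writers, bool nfsd_wronly, bool nfsd_rdwr)
	{
		if (!writers)
			return 0;		/* nobody is writing at all */
		if (nfsd_wronly)
			writers--;		/* discount nfsd's own write open */
		if (nfsd_rdwr)
			writers--;		/* discount nfsd's own read-write open */
		return writers > 0 ? -EAGAIN : 0; /* someone else holds a write open */
	}

	int main(void)
	{
		/* Only nfsd's own opens account for the writers: delegation is fine. */
		assert(check_conflicting_writers(2, true, true) == 0);
		/* One extra writer (e.g. a local process): no delegation. */
		assert(check_conflicting_writers(2, true, false) == -EAGAIN);
		return 0;
	}

The same -EAGAIN path is also taken when fp->fi_aliased is set, since another filehandle for the same inode may have opens that are not visible through this nfs4_file.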
@@ -4970,9 +5067,12 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 
 	nf = find_readable_file(fp);
 	if (!nf) {
-		/* We should always have a readable file here */
-		WARN_ON_ONCE(1);
-		return ERR_PTR(-EBADF);
+		/*
+		 * We probably could attempt another open and get a read
+		 * delegation, but for now, don't bother until the
+		 * client actually sends us one.
+		 */
+		return ERR_PTR(-EAGAIN);
 	}
 	spin_lock(&state_lock);
 	spin_lock(&fp->fi_lock);
@@ -5007,6 +5107,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 		locks_free_lock(fl);
 	if (status)
 		goto out_clnt_odstate;
+	status = nfsd4_check_conflicting_opens(clp, fp);
+	if (status)
+		goto out_unlock;
 
 	spin_lock(&state_lock);
 	spin_lock(&fp->fi_lock);
@@ -5088,17 +5191,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
 			goto out_no_deleg;
 		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
 			goto out_no_deleg;
-		/*
-		 * Also, if the file was opened for write or
-		 * create, there's a good chance the client's
-		 * about to write to it, resulting in an
-		 * immediate recall (since we don't support
-		 * write delegations):
-		 */
-		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
-			goto out_no_deleg;
-		if (open->op_create == NFS4_OPEN_CREATE)
-			goto out_no_deleg;
 		break;
 	default:
 		goto out_no_deleg;
@@ -5161,7 +5253,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	 * and check for delegations in the process of being recalled.
 	 * If not found, create the nfs4_file struct
 	 */
-	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
+	fp = find_or_add_file(open->op_file, current_fh);
 	if (fp != open->op_file) {
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
@@ -5502,21 +5594,6 @@ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 	return nfs_ok;
 }
 
-static inline int
-access_permit_read(struct nfs4_ol_stateid *stp)
-{
-	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
-		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
-		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
-}
-
-static inline int
-access_permit_write(struct nfs4_ol_stateid *stp)
-{
-	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
-		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
-}
-
 static
 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
 {
@@ -6288,15 +6365,6 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	return status;
 }
 
-static inline u64
-end_offset(u64 start, u64 len)
-{
-	u64 end;
-
-	end = start + len;
-	return end >= start ? end: NFS4_MAX_UINT64;
-}
-
 /* last octet in a range */
 static inline u64
 last_byte_offset(u64 start, u64 len)
@@ -6865,11 +6933,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
 {
 	struct nfsd_file *nf;
-	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
-	if (!err) {
-		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
-		nfsd_file_put(nf);
-	}
+	__be32 err;
+
+	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
+	if (err)
+		return err;
+	fh_lock(fhp); /* to block new leases till after test_lock: */
+	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
+							NFSD_MAY_READ));
+	if (err)
+		goto out;
+	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
+out:
+	fh_unlock(fhp);
+	nfsd_file_put(nf);
 	return err;
 }
 