Commit 9092131f authored by Linus Torvalds
......@@ -1268,6 +1268,7 @@ config NFS_FS
depends on INET
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
help
If you are connected to some other (usually local) Unix computer
(using SLIP, PLIP, PPP or Ethernet) and want to mount files residing
......@@ -1310,6 +1311,16 @@ config NFS_V3
If unsure, say Y.
config NFS_V3_ACL
bool "Provide client support for the NFSv3 ACL protocol extension"
depends on NFS_V3
help
Implement the NFSv3 ACL protocol extension for manipulating POSIX
Access Control Lists. The server should also be compiled with
the NFSv3 ACL protocol extension; see the CONFIG_NFSD_V3_ACL option.
If unsure, say N.
config NFS_V4
bool "Provide NFSv4 client support (EXPERIMENTAL)"
depends on NFS_FS && EXPERIMENTAL
......@@ -1353,6 +1364,7 @@ config NFSD
select LOCKD
select SUNRPC
select EXPORTFS
select NFS_ACL_SUPPORT if NFSD_V3_ACL || NFSD_V2_ACL
help
If you want your Linux box to act as an NFS *server*, so that other
computers on your local network which support NFS can access certain
......@@ -1376,6 +1388,10 @@ config NFSD
To compile the NFS server support as a module, choose M here: the
module will be called nfsd. If unsure, say N.
config NFSD_V2_ACL
bool
depends on NFSD
config NFSD_V3
bool "Provide NFSv3 server support"
depends on NFSD
......@@ -1383,6 +1399,16 @@ config NFSD_V3
If you would like to include the NFSv3 server as well as the NFSv2
server, say Y here. If unsure, say Y.
config NFSD_V3_ACL
bool "Provide server support for the NFSv3 ACL protocol extension"
depends on NFSD_V3
select NFSD_V2_ACL
help
Implement the NFSv3 ACL protocol extension for manipulating POSIX
Access Control Lists on exported file systems. NFS clients should
be compiled with the NFSv3 ACL protocol extension; see the
CONFIG_NFS_V3_ACL option. If unsure, say N.
config NFSD_V4
bool "Provide NFSv4 server support (EXPERIMENTAL)"
depends on NFSD_V3 && EXPERIMENTAL
......@@ -1427,6 +1453,15 @@ config LOCKD_V4
config EXPORTFS
tristate
config NFS_ACL_SUPPORT
tristate
select FS_POSIX_ACL
config NFS_COMMON
bool
depends on NFSD || NFS_FS
default y
config SUNRPC
tristate
......
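As a quick illustration of how a Kconfig symbol such as CONFIG_NFS_V3_ACL is consumed once selected, the standalone C sketch below shows the usual compile-time gating with #ifdef. The CONFIG_NFS_V3_ACL define and the demo_acl_support() helper are set up by hand purely for demonstration; in a real kernel build the symbol comes from the generated configuration header.

/* Minimal userspace sketch of CONFIG_* compile-time gating.
 * CONFIG_NFS_V3_ACL is defined by hand here; demo_acl_support()
 * is a hypothetical helper, not kernel code. */
#include <stdio.h>

#define CONFIG_NFS_V3_ACL 1    /* normally provided by the kernel build */

#ifdef CONFIG_NFS_V3_ACL
static int demo_acl_support(void) { return 1; }
#else
static int demo_acl_support(void) { return 0; }
#endif

int main(void)
{
    printf("NFSv3 ACL support compiled in: %d\n", demo_acl_support());
    return 0;
}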
......@@ -31,6 +31,7 @@ obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_QUOTA) += dquot.o
obj-$(CONFIG_QFMT_V1) += quota_v1.o
......
......@@ -31,7 +31,7 @@ static int reclaimer(void *ptr);
* This is the representation of a blocked client lock.
*/
struct nlm_wait {
struct nlm_wait * b_next; /* linked list */
struct list_head b_list; /* linked list */
wait_queue_head_t b_wait; /* where to wait on */
struct nlm_host * b_host;
struct file_lock * b_lock; /* local file lock */
......@@ -39,27 +39,54 @@ struct nlm_wait {
u32 b_status; /* grant callback status */
};
static struct nlm_wait * nlm_blocked;
static LIST_HEAD(nlm_blocked);
/*
* Block on a lock
* Queue up a lock for blocking so that the GRANTED request can see it
*/
int
nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl)
{
struct nlm_wait *block;
BUG_ON(req->a_block != NULL);
block = kmalloc(sizeof(*block), GFP_KERNEL);
if (block == NULL)
return -ENOMEM;
block->b_host = host;
block->b_lock = fl;
init_waitqueue_head(&block->b_wait);
block->b_status = NLM_LCK_BLOCKED;
list_add(&block->b_list, &nlm_blocked);
req->a_block = block;
return 0;
}
void nlmclnt_finish_block(struct nlm_rqst *req)
{
struct nlm_wait block, **head;
int err;
u32 pstate;
struct nlm_wait *block = req->a_block;
block.b_host = host;
block.b_lock = fl;
init_waitqueue_head(&block.b_wait);
block.b_status = NLM_LCK_BLOCKED;
block.b_next = nlm_blocked;
nlm_blocked = █
if (block == NULL)
return;
req->a_block = NULL;
list_del(&block->b_list);
kfree(block);
}
/*
* Block on a lock
*/
long nlmclnt_block(struct nlm_rqst *req, long timeout)
{
struct nlm_wait *block = req->a_block;
long ret;
/* Remember pseudo nsm state */
pstate = host->h_state;
/* A borken server might ask us to block even if we didn't
* request it. Just say no!
*/
if (!req->a_args.block)
return -EAGAIN;
/* Go to sleep waiting for GRANT callback. Some servers seem
* to lose callbacks, however, so we're going to poll from
......@@ -69,28 +96,16 @@ nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
* a 1 minute timeout would do. See the comment before
* nlmclnt_lock for an explanation.
*/
sleep_on_timeout(&block.b_wait, 30*HZ);
for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
if (*head == &block) {
*head = block.b_next;
break;
}
}
ret = wait_event_interruptible_timeout(block->b_wait,
block->b_status != NLM_LCK_BLOCKED,
timeout);
if (!signalled()) {
*statp = block.b_status;
return 0;
if (block->b_status != NLM_LCK_BLOCKED) {
req->a_res.status = block->b_status;
block->b_status = NLM_LCK_BLOCKED;
}
/* Okay, we were interrupted. Cancel the pending request
* unless the server has rebooted.
*/
if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
printk(KERN_NOTICE
"lockd: CANCEL call failed (errno %d)\n", -err);
return -ERESTARTSYS;
return ret;
}
/*
......@@ -100,27 +115,23 @@ u32
nlmclnt_grant(struct nlm_lock *lock)
{
struct nlm_wait *block;
u32 res = nlm_lck_denied;
/*
* Look up blocked request based on arguments.
* Warning: must not use cookie to match it!
*/
for (block = nlm_blocked; block; block = block->b_next) {
if (nlm_compare_locks(block->b_lock, &lock->fl))
break;
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(block->b_lock, &lock->fl)) {
/* Alright, we found a lock. Set the return status
* and wake up the caller
*/
block->b_status = NLM_LCK_GRANTED;
wake_up(&block->b_wait);
res = nlm_granted;
}
}
/* Ooops, no blocked request found. */
if (block == NULL)
return nlm_lck_denied;
/* Alright, we found the lock. Set the return status and
* wake up the caller.
*/
block->b_status = NLM_LCK_GRANTED;
wake_up(&block->b_wait);
return nlm_granted;
return res;
}
/*
......@@ -230,7 +241,7 @@ reclaimer(void *ptr)
host->h_reclaiming = 0;
/* Now, wake up all processes that sleep on a blocked lock */
for (block = nlm_blocked; block; block = block->b_next) {
list_for_each_entry(block, &nlm_blocked, b_list) {
if (block->b_host == host) {
block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
wake_up(&block->b_wait);
......
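The hunks above replace the hand-rolled singly linked nlm_blocked chain with a list_head embedded in struct nlm_wait, walked with list_for_each_entry(). Below is a minimal, self-contained userspace sketch of that embedding-plus-container_of pattern; the demo_wait and demo_blocked names and the tiny list helpers are illustrative stand-ins, not the kernel's list.h.

/* Userspace sketch of an embedded list node and typed iteration. */
#include <stdio.h>
#include <stddef.h>

struct list_node { struct list_node *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_node *h) { h->next = h->prev = h; }

static void list_add(struct list_node *n, struct list_node *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

static void list_del(struct list_node *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
}

struct demo_wait {
    struct list_node b_list;   /* embedded, like nlm_wait.b_list */
    int b_status;
};

int main(void)
{
    struct list_node demo_blocked;   /* plays the role of nlm_blocked */
    struct list_node *pos;
    struct demo_wait a = { .b_status = 1 }, b = { .b_status = 2 };

    list_init(&demo_blocked);
    list_add(&a.b_list, &demo_blocked);
    list_add(&b.b_list, &demo_blocked);

    /* Equivalent of list_for_each_entry(block, &nlm_blocked, b_list) */
    for (pos = demo_blocked.next; pos != &demo_blocked; pos = pos->next) {
        struct demo_wait *w = container_of(pos, struct demo_wait, b_list);
        printf("blocked entry with status %d\n", w->b_status);
    }

    list_del(&a.b_list);
    return 0;
}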
......@@ -21,6 +21,7 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
......@@ -553,7 +554,8 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
int status;
long timeout;
int status;
if (!host->h_monitored && nsm_monitor(host) < 0) {
printk(KERN_NOTICE "lockd: failed to monitor %s\n",
......@@ -562,15 +564,32 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
goto out;
}
do {
if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
if (resp->status != NLM_LCK_BLOCKED)
break;
status = nlmclnt_block(host, fl, &resp->status);
}
if (req->a_args.block) {
status = nlmclnt_prepare_block(req, host, fl);
if (status < 0)
goto out;
} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);
}
for(;;) {
status = nlmclnt_call(req, NLMPROC_LOCK);
if (status < 0)
goto out_unblock;
if (resp->status != NLM_LCK_BLOCKED)
break;
/* Wait on an NLM blocking lock */
timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
/* Did a reclaimer thread notify us of a server reboot? */
if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD)
continue;
if (resp->status != NLM_LCK_BLOCKED)
break;
if (timeout >= 0)
continue;
/* We were interrupted. Send a CANCEL request to the server
* and exit
*/
status = (int)timeout;
goto out_unblock;
}
if (resp->status == NLM_LCK_GRANTED) {
fl->fl_u.nfs_fl.state = host->h_state;
......@@ -579,6 +598,11 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
do_vfs_lock(fl);
}
status = nlm_stat_to_errno(resp->status);
out_unblock:
nlmclnt_finish_block(req);
/* Cancel the blocked request if it is still pending */
if (resp->status == NLM_LCK_BLOCKED)
nlmclnt_cancel(host, fl);
out:
nlmclnt_release_lockargs(req);
return status;
......
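The rewritten nlmclnt_lock() loop issues the LOCK call, and while the server answers NLM_LCK_BLOCKED it parks in nlmclnt_block(), waking when the GRANTED callback changes b_status or when NLMCLNT_POLL_TIMEOUT expires; a signal breaks out and triggers a CANCEL. The pthread sketch below mirrors only the waiting half of that logic, as a userspace analogue of wait_event_interruptible_timeout(); the DEMO_* constants and the granter thread are hypothetical. Build with: cc -pthread demo.c

/* Wait until the status leaves the BLOCKED state or a timeout expires. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define DEMO_LCK_BLOCKED 3
#define DEMO_LCK_GRANTED 0

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int b_status = DEMO_LCK_BLOCKED;

/* Stand-in for the GRANTED callback waking the waiter. */
static void *granter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    b_status = DEMO_LCK_GRANTED;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct timespec deadline;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 30;            /* like NLMCLNT_POLL_TIMEOUT (30*HZ) */

    pthread_create(&t, NULL, granter, NULL);

    pthread_mutex_lock(&lock);
    while (b_status == DEMO_LCK_BLOCKED && rc == 0)
        rc = pthread_cond_timedwait(&cond, &lock, &deadline);
    printf("woke up: status=%d, rc=%d\n", b_status, rc);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    return 0;
}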
......@@ -189,17 +189,15 @@ nlm_bind_host(struct nlm_host *host)
goto forgetit;
xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
xprt->nocong = 1; /* No congestion control for NLM */
xprt->resvport = 1; /* NLM requires a reserved port */
/* Existing NLM servers accept AUTH_UNIX only */
clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
host->h_version, RPC_AUTH_UNIX);
if (IS_ERR(clnt)) {
xprt_destroy(xprt);
if (IS_ERR(clnt))
goto forgetit;
}
clnt->cl_autobind = 1; /* turn on pmap queries */
xprt->nocong = 1; /* No congestion control for NLM */
xprt->resvport = 1; /* NLM requires a reserved port */
host->h_rpcclnt = clnt;
}
......
......@@ -115,20 +115,19 @@ nsm_create(void)
xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL);
if (IS_ERR(xprt))
return (struct rpc_clnt *)xprt;
xprt->resvport = 1; /* NSM requires a reserved port */
clnt = rpc_create_client(xprt, "localhost",
&nsm_program, SM_VERSION,
RPC_AUTH_NULL);
if (IS_ERR(clnt))
goto out_destroy;
goto out_err;
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
xprt->resvport = 1; /* NSM requires a reserved port */
return clnt;
out_destroy:
xprt_destroy(xprt);
out_err:
return clnt;
}
......
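In both nlm_bind_host() and nsm_create() the transport flags are now set before rpc_create_client(), and the explicit xprt_destroy() in the error path is dropped, which suggests client creation now disposes of the transport itself when it fails. The userspace sketch below shows that "constructor owns the resource on error" convention; struct transport, create_client() and the rest are invented names, not the SUNRPC API.

#include <stdio.h>
#include <stdlib.h>

struct transport { int proto; };
struct client { struct transport *xprt; };

/* create_client() takes ownership of 'xprt': on failure it frees the
 * transport itself, so the caller must not free it again. */
static struct client *create_client(struct transport *xprt, int fail)
{
    struct client *clnt;

    if (fail || (clnt = malloc(sizeof(*clnt))) == NULL) {
        free(xprt);               /* constructor cleans up on error */
        return NULL;
    }
    clnt->xprt = xprt;
    return clnt;
}

int main(void)
{
    struct transport *xprt = malloc(sizeof(*xprt));
    struct client *clnt = create_client(xprt, 1);

    if (clnt == NULL) {
        /* Correct error path: no free(xprt) here, mirroring the removal
         * of xprt_destroy() from nsm_create() and nlm_bind_host(). */
        fprintf(stderr, "client creation failed\n");
        return 1;
    }
    free(clnt->xprt);
    free(clnt);
    return 0;
}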
......@@ -1548,6 +1548,8 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
if (filp->f_op && filp->f_op->lock) {
error = filp->f_op->lock(filp, F_GETLK, &file_lock);
if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
file_lock.fl_ops->fl_release_private(&file_lock);
if (error < 0)
goto out;
else
......@@ -1690,6 +1692,8 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
if (filp->f_op && filp->f_op->lock) {
error = filp->f_op->lock(filp, F_GETLK, &file_lock);
if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
file_lock.fl_ops->fl_release_private(&file_lock);
if (error < 0)
goto out;
else
......@@ -1873,6 +1877,8 @@ void locks_remove_flock(struct file *filp)
.fl_end = OFFSET_MAX,
};
filp->f_op->flock(filp, F_SETLKW, &fl);
if (fl.fl_ops && fl.fl_ops->fl_release_private)
fl.fl_ops->fl_release_private(&fl);
}
lock_kernel();
......
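The three fs/locks.c hunks ensure that when ->lock() or ->flock() attaches filesystem-private data to an on-stack struct file_lock, the fl_release_private hook runs before the structure goes out of scope. A compact, runnable sketch of that cleanup pattern follows; all demo_* names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct demo_lock;
struct demo_lock_ops {
    void (*fl_release_private)(struct demo_lock *);
};

struct demo_lock {
    const struct demo_lock_ops *fl_ops;
    void *fl_private;
};

static void demo_release(struct demo_lock *fl)
{
    free(fl->fl_private);
    fl->fl_private = NULL;
}

static const struct demo_lock_ops demo_ops = {
    .fl_release_private = demo_release,
};

/* Plays the role of filp->f_op->lock(): attaches private state. */
static int demo_do_lock(struct demo_lock *fl)
{
    fl->fl_ops = &demo_ops;
    fl->fl_private = malloc(64);
    return 0;
}

int main(void)
{
    struct demo_lock file_lock = { 0 };

    demo_do_lock(&file_lock);
    /* Mirrors the new code in fcntl_getlk()/locks_remove_flock(): */
    if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
        file_lock.fl_ops->fl_release_private(&file_lock);
    return 0;
}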
......@@ -8,6 +8,7 @@ nfs-y := dir.o file.o inode.o nfs2xdr.o pagelist.o \
proc.o read.o symlink.o unlink.o write.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o
......
......@@ -14,6 +14,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "callback.h"
#define NFSDBG_FACILITY NFSDBG_CALLBACK
......
......@@ -8,6 +8,7 @@
#include <linux/config.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
......
......@@ -10,6 +10,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "callback.h"
#define CB_OP_TAGLEN_MAXSZ (512)
......@@ -410,7 +411,6 @@ static int nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp
xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
p = (uint32_t*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
rqstp->rq_res.head[0].iov_len = PAGE_SIZE;
xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
decode_compound_hdr_arg(&xdr_in, &hdr_arg);
......
......@@ -16,6 +16,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>
#include "nfs4_fs.h"
#include "delegation.h"
static struct nfs_delegation *nfs_alloc_delegation(void)
......
......@@ -32,6 +32,7 @@
#include <linux/smp_lock.h>
#include <linux/namei.h>
#include "nfs4_fs.h"
#include "delegation.h"
#define NFS_PARANOIA 1
......@@ -50,8 +51,10 @@ static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
static int nfs_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *);
static int nfs_fsync_dir(struct file *, struct dentry *, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
.read = generic_read_dir,
.readdir = nfs_readdir,
.open = nfs_opendir,
......@@ -74,6 +77,27 @@ struct inode_operations nfs_dir_inode_operations = {
.setattr = nfs_setattr,
};
#ifdef CONFIG_NFS_V3
struct inode_operations nfs3_dir_inode_operations = {
.create = nfs_create,
.lookup = nfs_lookup,
.link = nfs_link,
.unlink = nfs_unlink,
.symlink = nfs_symlink,
.mkdir = nfs_mkdir,
.rmdir = nfs_rmdir,
.mknod = nfs_mknod,
.rename = nfs_rename,
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.listxattr = nfs3_listxattr,
.getxattr = nfs3_getxattr,
.setxattr = nfs3_setxattr,
.removexattr = nfs3_removexattr,
};
#endif /* CONFIG_NFS_V3 */
#ifdef CONFIG_NFS_V4
static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
......@@ -90,6 +114,9 @@ struct inode_operations nfs4_dir_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.getxattr = nfs4_getxattr,
.setxattr = nfs4_setxattr,
.listxattr = nfs4_listxattr,
};
#endif /* CONFIG_NFS_V4 */
......@@ -116,7 +143,8 @@ typedef struct {
struct page *page;
unsigned long page_index;
u32 *ptr;
u64 target;
u64 *dir_cookie;
loff_t current_index;
struct nfs_entry *entry;
decode_dirent_t decode;
int plus;
......@@ -164,12 +192,10 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
* throught inode->i_sem or some other mechanism.
* through inode->i_sem or some other mechanism.
*/
if (page->index == 0) {
invalidate_inode_pages(inode->i_mapping);
NFS_I(inode)->readdir_timestamp = timestamp;
}
if (page->index == 0)
invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
unlock_page(page);
return 0;
error:
......@@ -202,22 +228,22 @@ void dir_page_release(nfs_readdir_descriptor_t *desc)
/*
* Given a pointer to a buffer that has already been filled by a call
* to readdir, find the next entry.
* to readdir, find the next entry with cookie '*desc->dir_cookie'.
*
* If the end of the buffer has been reached, return -EAGAIN, if not,
* return the offset within the buffer of the next entry to be
* read.
*/
static inline
int find_dirent(nfs_readdir_descriptor_t *desc, struct page *page)
int find_dirent(nfs_readdir_descriptor_t *desc)
{
struct nfs_entry *entry = desc->entry;
int loop_count = 0,
status;
while((status = dir_decode(desc)) == 0) {
dfprintk(VFS, "NFS: found cookie %Lu\n", (long long)entry->cookie);
if (entry->prev_cookie == desc->target)
dfprintk(VFS, "NFS: found cookie %Lu\n", (unsigned long long)entry->cookie);
if (entry->prev_cookie == *desc->dir_cookie)
break;
if (loop_count++ > 200) {
loop_count = 0;
......@@ -229,8 +255,44 @@ int find_dirent(nfs_readdir_descriptor_t *desc, struct page *page)
}
/*
* Find the given page, and call find_dirent() in order to try to
* return the next entry.
* Given a pointer to a buffer that has already been filled by a call
* to readdir, find the entry at offset 'desc->file->f_pos'.
*
* If the end of the buffer has been reached, return -EAGAIN, if not,
* return the offset within the buffer of the next entry to be
* read.
*/
static inline
int find_dirent_index(nfs_readdir_descriptor_t *desc)
{
struct nfs_entry *entry = desc->entry;
int loop_count = 0,
status;
for(;;) {
status = dir_decode(desc);
if (status)
break;
dfprintk(VFS, "NFS: found cookie %Lu at index %Ld\n", (unsigned long long)entry->cookie, desc->current_index);
if (desc->file->f_pos == desc->current_index) {
*desc->dir_cookie = entry->cookie;
break;
}
desc->current_index++;
if (loop_count++ > 200) {
loop_count = 0;
schedule();
}
}
dfprintk(VFS, "NFS: find_dirent_index() returns %d\n", status);
return status;
}
/*
* Find the given page, and call find_dirent() or find_dirent_index in
* order to try to return the next entry.
*/
static inline
int find_dirent_page(nfs_readdir_descriptor_t *desc)
......@@ -253,7 +315,10 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
/* NOTE: Someone else may have changed the READDIRPLUS flag */
desc->page = page;
desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
status = find_dirent(desc, page);
if (*desc->dir_cookie != 0)
status = find_dirent(desc);
else
status = find_dirent_index(desc);
if (status < 0)
dir_page_release(desc);
out:
......@@ -268,7 +333,8 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
* Recurse through the page cache pages, and return a
* filled nfs_entry structure of the next directory entry if possible.
*
* The target for the search is 'desc->target'.
* The target for the search is '*desc->dir_cookie' if non-0,
* 'desc->file->f_pos' otherwise
*/
static inline
int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
......@@ -276,7 +342,16 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
int loop_count = 0;
int res;
dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (long long)desc->target);
/* Always search-by-index from the beginning of the cache */
if (*desc->dir_cookie == 0) {
dfprintk(VFS, "NFS: readdir_search_pagecache() searching for offset %Ld\n", (long long)desc->file->f_pos);
desc->page_index = 0;
desc->entry->cookie = desc->entry->prev_cookie = 0;
desc->entry->eof = 0;
desc->current_index = 0;
} else
dfprintk(VFS, "NFS: readdir_search_pagecache() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie);
for (;;) {
res = find_dirent_page(desc);
if (res != -EAGAIN)
......@@ -313,7 +388,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
int loop_count = 0,
res;
dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)desc->target);
dfprintk(VFS, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n", (long long)entry->cookie);
for(;;) {
unsigned d_type = DT_UNKNOWN;
......@@ -333,10 +408,11 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
}
res = filldir(dirent, entry->name, entry->len,
entry->prev_cookie, fileid, d_type);
file->f_pos, fileid, d_type);
if (res < 0)
break;
file->f_pos = desc->target = entry->cookie;
file->f_pos++;
*desc->dir_cookie = entry->cookie;
if (dir_decode(desc) != 0) {
desc->page_index ++;
break;
......@@ -349,7 +425,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
dir_page_release(desc);
if (dentry != NULL)
dput(dentry);
dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (long long)desc->target, res);
dfprintk(VFS, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", (unsigned long long)*desc->dir_cookie, res);
return res;
}
......@@ -375,14 +451,14 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
struct page *page = NULL;
int status;
dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (long long)desc->target);
dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n", (unsigned long long)*desc->dir_cookie);
page = alloc_page(GFP_HIGHUSER);
if (!page) {
status = -ENOMEM;
goto out;
}
desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->target,
desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, *desc->dir_cookie,
page,
NFS_SERVER(inode)->dtsize,
desc->plus);
......@@ -391,7 +467,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
if (desc->error >= 0) {
if ((status = dir_decode(desc)) == 0)
desc->entry->prev_cookie = desc->target;
desc->entry->prev_cookie = *desc->dir_cookie;
} else
status = -EIO;
if (status < 0)
......@@ -412,8 +488,9 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
goto out;
}
/* The file offset position is now represented as a true offset into the
* page cache as is the case in most of the other filesystems.
/* The file offset position represents the dirent entry number. A
last cookie cache takes care of the common case of reading the
whole directory.
*/
static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
......@@ -435,15 +512,15 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
/*
* filp->f_pos points to the file offset in the page cache.
* but if the cache has meanwhile been zapped, we need to
* read from the last dirent to revalidate f_pos
* itself.
* filp->f_pos points to the dirent entry number.
* *desc->dir_cookie has the cookie for the next entry. We have
* to either find the entry with the appropriate number or
* revalidate the cookie.
*/
memset(desc, 0, sizeof(*desc));
desc->file = filp;
desc->target = filp->f_pos;
desc->dir_cookie = &((struct nfs_open_context *)filp->private_data)->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = NFS_USE_READDIRPLUS(inode);
......@@ -455,9 +532,10 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
while(!desc->entry->eof) {
res = readdir_search_pagecache(desc);
if (res == -EBADCOOKIE) {
/* This means either end of directory */
if (desc->entry->cookie != desc->target) {
if (*desc->dir_cookie && desc->entry->cookie != *desc->dir_cookie) {
/* Or that the server has 'lost' a cookie */
res = uncached_readdir(desc, dirent, filldir);
if (res >= 0)
......@@ -490,6 +568,28 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
return 0;
}
loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
down(&filp->f_dentry->d_inode->i_sem);
switch (origin) {
case 1:
offset += filp->f_pos;
case 0:
if (offset >= 0)
break;
default:
offset = -EINVAL;
goto out;
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
}
out:
up(&filp->f_dentry->d_inode->i_sem);
return offset;
}
/*
* All directory operations under NFS are synchronous, so fsync()
* is a dummy operation.
......
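The readdir rework turns f_pos into a plain dirent counter and keeps the real resume point, the NFS cookie, in the open context as dir_cookie: find_dirent() resumes by cookie when one is remembered, find_dirent_index() counts entries up to f_pos otherwise, and nfs_llseek_dir() zeroes the cookie on seek so the index search runs again. The standalone sketch below walks a tiny in-memory directory the same two ways; the demo_entry table and find_next() are hypothetical, not NFS wire data.

#include <stdio.h>
#include <stdint.h>

struct demo_entry { uint64_t cookie; const char *name; };

static const struct demo_entry dir[] = {
    { 101, "." }, { 102, ".." }, { 237, "file-a" }, { 512, "file-b" },
};
#define NENTRIES (sizeof(dir) / sizeof(dir[0]))

/* Returns the index of the next entry to emit, or -1 at end of directory. */
static int find_next(uint64_t *dir_cookie, long f_pos)
{
    size_t i;

    if (*dir_cookie != 0) {
        /* find_dirent(): resume after the entry whose cookie we remembered */
        for (i = 0; i < NENTRIES; i++)
            if (dir[i].cookie == *dir_cookie)
                return (i + 1 < NENTRIES) ? (int)(i + 1) : -1;
        return -1;                     /* server "lost" the cookie */
    }
    /* find_dirent_index(): start over and count up to f_pos */
    return (f_pos < (long)NENTRIES) ? (int)f_pos : -1;
}

int main(void)
{
    uint64_t dir_cookie = 0;           /* as after nfs_llseek_dir() resets it */
    long f_pos = 0;
    int i;

    while ((i = find_next(&dir_cookie, f_pos)) >= 0) {
        printf("%ld: %s (cookie %llu)\n", f_pos, dir[i].name,
               (unsigned long long)dir[i].cookie);
        dir_cookie = dir[i].cookie;    /* remember where to resume */
        f_pos++;                       /* f_pos is now a plain entry count */
    }
    return 0;
}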
......@@ -517,7 +517,7 @@ static ssize_t nfs_direct_write_seg(struct inode *inode,
result = tot_bytes;
out:
nfs_end_data_update_defer(inode);
nfs_end_data_update(inode);
nfs_writedata_free(wdata);
return result;
......
......@@ -71,6 +71,18 @@ struct inode_operations nfs_file_inode_operations = {
.setattr = nfs_setattr,
};
#ifdef CONFIG_NFS_V3
struct inode_operations nfs3_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.listxattr = nfs3_listxattr,
.getxattr = nfs3_getxattr,
.setxattr = nfs3_setxattr,
.removexattr = nfs3_removexattr,
};
#endif /* CONFIG_NFS_v3 */
/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode) (0)
......@@ -115,6 +127,21 @@ nfs_file_release(struct inode *inode, struct file *filp)
return NFS_PROTO(inode)->file_release(inode, filp);
}
/**
* nfs_revalidate_file - Revalidate the page cache & related metadata
* @inode - pointer to inode struct
* @file - pointer to file
*/
static int nfs_revalidate_file(struct inode *inode, struct file *filp)
{
int retval = 0;
if ((NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
nfs_revalidate_mapping(inode, filp->f_mapping);
return 0;
}
/**
* nfs_revalidate_size - Revalidate the file size
* @inode - pointer to inode struct
......@@ -137,7 +164,8 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
goto force_reval;
if (nfsi->npages != 0)
return 0;
return nfs_revalidate_inode(server, inode);
if (!(NFS_FLAGS(inode) & NFS_INO_REVAL_PAGECACHE) && !nfs_attribute_timeout(inode))
return 0;
force_reval:
return __nfs_revalidate_inode(server, inode);
}
......@@ -198,7 +226,7 @@ nfs_file_read(struct kiocb *iocb, char __user * buf, size_t count, loff_t pos)
dentry->d_parent->d_name.name, dentry->d_name.name,
(unsigned long) count, (unsigned long) pos);
result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
result = nfs_revalidate_file(inode, iocb->ki_filp);
if (!result)
result = generic_file_aio_read(iocb, buf, count, pos);
return result;
......@@ -216,7 +244,7 @@ nfs_file_sendfile(struct file *filp, loff_t *ppos, size_t count,
dentry->d_parent->d_name.name, dentry->d_name.name,
(unsigned long) count, (unsigned long long) *ppos);
res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
res = nfs_revalidate_file(inode, filp);
if (!res)
res = generic_file_sendfile(filp, ppos, count, actor, target);
return res;
......@@ -232,7 +260,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
dfprintk(VFS, "nfs: mmap(%s/%s)\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
status = nfs_revalidate_inode(NFS_SERVER(inode), inode);
status = nfs_revalidate_file(inode, file);
if (!status)
status = generic_file_mmap(file, vma);
return status;
......@@ -321,9 +349,15 @@ nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t
result = -EBUSY;
if (IS_SWAPFILE(inode))
goto out_swapfile;
result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (result)
goto out;
/*
* O_APPEND implies that we must revalidate the file length.
*/
if (iocb->ki_filp->f_flags & O_APPEND) {
result = nfs_revalidate_file_size(inode, iocb->ki_filp);
if (result)
goto out;
}
nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
result = count;
if (!count)
......
......@@ -50,6 +50,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#define IDMAP_HASH_SZ 128
......
(This diff has been collapsed; content not shown.)
......@@ -80,9 +80,7 @@ mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version,
clnt = rpc_create_client(xprt, hostname,
&mnt_program, version,
RPC_AUTH_UNIX);
if (IS_ERR(clnt)) {
xprt_destroy(xprt);
} else {
if (!IS_ERR(clnt)) {
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
......
#include <linux/fs.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/xattr_acl.h>
#include <linux/nfsacl.h>
#define NFSDBG_FACILITY NFSDBG_PROC
ssize_t nfs3_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct inode *inode = dentry->d_inode;
struct posix_acl *acl;
int pos=0, len=0;
# define output(s) do { \
if (pos + sizeof(s) <= size) { \
memcpy(buffer + pos, s, sizeof(s)); \
pos += sizeof(s); \
} \
len += sizeof(s); \
} while(0)
acl = nfs3_proc_getacl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
output("system.posix_acl_access");
posix_acl_release(acl);
}
if (S_ISDIR(inode->i_mode)) {
acl = nfs3_proc_getacl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
output("system.posix_acl_default");
posix_acl_release(acl);
}
}
# undef output
if (!buffer || len <= size)
return len;
return -ERANGE;
}
ssize_t nfs3_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
struct inode *inode = dentry->d_inode;
struct posix_acl *acl;
int type, error = 0;
if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0)
type = ACL_TYPE_ACCESS;
else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0)
type = ACL_TYPE_DEFAULT;
else
return -EOPNOTSUPP;
acl = nfs3_proc_getacl(inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
else if (acl) {
if (type == ACL_TYPE_ACCESS && acl->a_count == 0)
error = -ENODATA;
else
error = posix_acl_to_xattr(acl, buffer, size);
posix_acl_release(acl);
} else
error = -ENODATA;
return error;
}
int nfs3_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
struct posix_acl *acl;
int type, error;
if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0)
type = ACL_TYPE_ACCESS;
else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0)
type = ACL_TYPE_DEFAULT;
else
return -EOPNOTSUPP;
acl = posix_acl_from_xattr(value, size);
if (IS_ERR(acl))
return PTR_ERR(acl);
error = nfs3_proc_setacl(inode, type, acl);
posix_acl_release(acl);
return error;
}
int nfs3_removexattr(struct dentry *dentry, const char *name)
{
struct inode *inode = dentry->d_inode;
int type;
if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0)
type = ACL_TYPE_ACCESS;
else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0)
type = ACL_TYPE_DEFAULT;
else
return -EOPNOTSUPP;
return nfs3_proc_setacl(inode, type, NULL);
}
static void __nfs3_forget_cached_acls(struct nfs_inode *nfsi)
{
if (!IS_ERR(nfsi->acl_access)) {
posix_acl_release(nfsi->acl_access);
nfsi->acl_access = ERR_PTR(-EAGAIN);
}
if (!IS_ERR(nfsi->acl_default)) {
posix_acl_release(nfsi->acl_default);
nfsi->acl_default = ERR_PTR(-EAGAIN);
}
}
void nfs3_forget_cached_acls(struct inode *inode)
{
dprintk("NFS: nfs3_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id,
inode->i_ino);
spin_lock(&inode->i_lock);
__nfs3_forget_cached_acls(NFS_I(inode));
spin_unlock(&inode->i_lock);
}
static struct posix_acl *nfs3_get_cached_acl(struct inode *inode, int type)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct posix_acl *acl = ERR_PTR(-EINVAL);
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
acl = nfsi->acl_access;
break;
case ACL_TYPE_DEFAULT:
acl = nfsi->acl_default;
break;
default:
goto out;
}
if (IS_ERR(acl))
acl = ERR_PTR(-EAGAIN);
else
acl = posix_acl_dup(acl);
out:
spin_unlock(&inode->i_lock);
dprintk("NFS: nfs3_get_cached_acl(%s/%ld, %d) = %p\n", inode->i_sb->s_id,
inode->i_ino, type, acl);
return acl;
}
static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_inode *nfsi = NFS_I(inode);
dprintk("nfs3_cache_acls(%s/%ld, %p, %p)\n", inode->i_sb->s_id,
inode->i_ino, acl, dfacl);
spin_lock(&inode->i_lock);
__nfs3_forget_cached_acls(NFS_I(inode));
nfsi->acl_access = posix_acl_dup(acl);
nfsi->acl_default = posix_acl_dup(dfacl);
spin_unlock(&inode->i_lock);
}
struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_fattr fattr;
struct page *pages[NFSACL_MAXPAGES] = { };
struct nfs3_getaclargs args = {
.fh = NFS_FH(inode),
/* The xdr layer may allocate pages here. */
.pages = pages,
};
struct nfs3_getaclres res = {
.fattr = &fattr,
};
struct posix_acl *acl;
int status, count;
if (!nfs_server_capable(inode, NFS_CAP_ACLS))
return ERR_PTR(-EOPNOTSUPP);
status = nfs_revalidate_inode(server, inode);
if (status < 0)
return ERR_PTR(status);
acl = nfs3_get_cached_acl(inode, type);
if (acl != ERR_PTR(-EAGAIN))
return acl;
acl = NULL;
/*
* Only get the access acl when explicitly requested: We don't
* need it for access decisions, and only some applications use
* it. Applications which request the access acl first are not
* penalized from this optimization.
*/
if (type == ACL_TYPE_ACCESS)
args.mask |= NFS_ACLCNT|NFS_ACL;
if (S_ISDIR(inode->i_mode))
args.mask |= NFS_DFACLCNT|NFS_DFACL;
if (args.mask == 0)
return NULL;
dprintk("NFS call getacl\n");
status = rpc_call(server->client_acl, ACLPROC3_GETACL,
&args, &res, 0);
dprintk("NFS reply getacl: %d\n", status);
/* pages may have been allocated at the xdr layer. */
for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++)
__free_page(args.pages[count]);
switch (status) {
case 0:
status = nfs_refresh_inode(inode, &fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
dprintk("NFS_V3_ACL extension not supported; disabling\n");
server->caps &= ~NFS_CAP_ACLS;
case -ENOTSUPP:
status = -EOPNOTSUPP;
default:
goto getout;
}
if ((args.mask & res.mask) != args.mask) {
status = -EIO;
goto getout;
}
if (res.acl_access != NULL) {
if (posix_acl_equiv_mode(res.acl_access, NULL) == 0) {
posix_acl_release(res.acl_access);
res.acl_access = NULL;
}
}
nfs3_cache_acls(inode, res.acl_access, res.acl_default);
switch(type) {
case ACL_TYPE_ACCESS:
acl = res.acl_access;
res.acl_access = NULL;
break;
case ACL_TYPE_DEFAULT:
acl = res.acl_default;
res.acl_default = NULL;
}
getout:
posix_acl_release(res.acl_access);
posix_acl_release(res.acl_default);
if (status != 0) {
posix_acl_release(acl);
acl = ERR_PTR(status);
}
return acl;
}
static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_fattr fattr;
struct page *pages[NFSACL_MAXPAGES] = { };
struct nfs3_setaclargs args = {
.inode = inode,
.mask = NFS_ACL,
.acl_access = acl,
.pages = pages,
};
int status, count;
status = -EOPNOTSUPP;
if (!nfs_server_capable(inode, NFS_CAP_ACLS))
goto out;
/* We are doing this here, because XDR marshalling can only
return -ENOMEM. */
status = -ENOSPC;
if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
goto out;
if (dfacl != NULL && dfacl->a_count > NFS_ACL_MAX_ENTRIES)
goto out;
if (S_ISDIR(inode->i_mode)) {
args.mask |= NFS_DFACL;
args.acl_default = dfacl;
}
dprintk("NFS call setacl\n");
nfs_begin_data_update(inode);
status = rpc_call(server->client_acl, ACLPROC3_SETACL,
&args, &fattr, 0);
NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS;
nfs_end_data_update(inode);
dprintk("NFS reply setacl: %d\n", status);
/* pages may have been allocated at the xdr layer. */
for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++)
__free_page(args.pages[count]);
switch (status) {
case 0:
status = nfs_refresh_inode(inode, &fattr);
break;
case -EPFNOSUPPORT:
case -EPROTONOSUPPORT:
dprintk("NFS_V3_ACL SETACL RPC not supported"
"(will not retry)\n");
server->caps &= ~NFS_CAP_ACLS;
case -ENOTSUPP:
status = -EOPNOTSUPP;
}
out:
return status;
}
int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
{
struct posix_acl *alloc = NULL, *dfacl = NULL;
int status;
if (S_ISDIR(inode->i_mode)) {
switch(type) {
case ACL_TYPE_ACCESS:
alloc = dfacl = nfs3_proc_getacl(inode,
ACL_TYPE_DEFAULT);
if (IS_ERR(alloc))
goto fail;
break;
case ACL_TYPE_DEFAULT:
dfacl = acl;
alloc = acl = nfs3_proc_getacl(inode,
ACL_TYPE_ACCESS);
if (IS_ERR(alloc))
goto fail;
break;
default:
return -EINVAL;
}
} else if (type != ACL_TYPE_ACCESS)
return -EINVAL;
if (acl == NULL) {
alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(alloc))
goto fail;
}
status = nfs3_proc_setacls(inode, acl, dfacl);
posix_acl_release(alloc);
return status;
fail:
return PTR_ERR(alloc);
}
int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
mode_t mode)
{
struct posix_acl *dfacl, *acl;
int error = 0;
dfacl = nfs3_proc_getacl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(dfacl)) {
error = PTR_ERR(dfacl);
return (error == -EOPNOTSUPP) ? 0 : error;
}
if (!dfacl)
return 0;
acl = posix_acl_clone(dfacl, GFP_KERNEL);
error = -ENOMEM;
if (!acl)
goto out_release_dfacl;
error = posix_acl_create_masq(acl, &mode);
if (error < 0)
goto out_release_acl;
error = nfs3_proc_setacls(inode, acl, S_ISDIR(inode->i_mode) ?
dfacl : NULL);
out_release_acl:
posix_acl_release(acl);
out_release_dfacl:
posix_acl_release(dfacl);
return error;
}
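From userspace, the handlers in this new nfs3acl.c are reached through the ordinary extended-attribute system calls on the names system.posix_acl_access and system.posix_acl_default, which is what getfacl and setfacl use. Here is a small runnable example that lists a file's xattrs and asks for the size of its access ACL; the /mnt/nfs/somefile path is only a placeholder.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
    const char *path = (argc > 1) ? argv[1] : "/mnt/nfs/somefile";
    char names[1024];
    ssize_t len, i;

    len = listxattr(path, names, sizeof(names));
    if (len < 0) {
        perror("listxattr");
        return 1;
    }
    /* names is a sequence of NUL-terminated strings, e.g.
     * "system.posix_acl_access\0system.posix_acl_default\0" */
    for (i = 0; i < len; i += strlen(names + i) + 1)
        printf("xattr: %s\n", names + i);

    /* Ask only for the size of the access ACL, as getfacl(1) would. */
    len = getxattr(path, "system.posix_acl_access", NULL, 0);
    if (len < 0)
        perror("getxattr(system.posix_acl_access)");
    else
        printf("access ACL xattr is %zd bytes\n", len);
    return 0;
}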
......@@ -17,6 +17,7 @@
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
#include <linux/smp_lock.h>
#include <linux/nfs_mount.h>
#define NFSDBG_FACILITY NFSDBG_PROC
......@@ -45,7 +46,7 @@ static inline int
nfs3_rpc_call_wrapper(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[proc],
.rpc_proc = &clnt->cl_procinfo[proc],
.rpc_argp = argp,
.rpc_resp = resp,
};
......@@ -313,7 +314,8 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
.fh = &fhandle,
.fattr = &fattr
};
int status;
mode_t mode = sattr->ia_mode;
int status;
dprintk("NFS call create %s\n", dentry->d_name.name);
arg.createmode = NFS3_CREATE_UNCHECKED;
......@@ -323,6 +325,8 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
arg.verifier[1] = current->pid;
}
sattr->ia_mode &= ~current->fs->umask;
again:
dir_attr.valid = 0;
fattr.valid = 0;
......@@ -369,6 +373,9 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
nfs_refresh_inode(dentry->d_inode, &fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
}
if (status != 0)
goto out;
status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
out:
dprintk("NFS reply create: %d\n", status);
return status;
......@@ -538,15 +545,24 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
.fh = &fhandle,
.fattr = &fattr
};
int status;
int mode = sattr->ia_mode;
int status;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
dir_attr.valid = 0;
fattr.valid = 0;
sattr->ia_mode &= ~current->fs->umask;
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0);
nfs_refresh_inode(dir, &dir_attr);
if (status == 0)
status = nfs_instantiate(dentry, &fhandle, &fattr);
if (status != 0)
goto out;
status = nfs_instantiate(dentry, &fhandle, &fattr);
if (status != 0)
goto out;
status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
out:
dprintk("NFS reply mkdir: %d\n", status);
return status;
}
......@@ -641,6 +657,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
.fh = &fh,
.fattr = &fattr
};
mode_t mode = sattr->ia_mode;
int status;
switch (sattr->ia_mode & S_IFMT) {
......@@ -653,12 +670,20 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name,
MAJOR(rdev), MINOR(rdev));
sattr->ia_mode &= ~current->fs->umask;
dir_attr.valid = 0;
fattr.valid = 0;
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0);
nfs_refresh_inode(dir, &dir_attr);
if (status == 0)
status = nfs_instantiate(dentry, &fh, &fattr);
if (status != 0)
goto out;
status = nfs_instantiate(dentry, &fh, &fattr);
if (status != 0)
goto out;
status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
out:
dprintk("NFS reply mknod: %d\n", status);
return status;
}
......@@ -825,7 +850,8 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
struct nfs_rpc_ops nfs_v3_clientops = {
.version = 3, /* protocol version */
.dentry_ops = &nfs_dentry_operations,
.dir_inode_ops = &nfs_dir_inode_operations,
.dir_inode_ops = &nfs3_dir_inode_operations,
.file_inode_ops = &nfs3_file_inode_operations,
.getroot = nfs3_proc_get_root,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
......@@ -856,4 +882,5 @@ struct nfs_rpc_ops nfs_v3_clientops = {
.file_open = nfs_open,
.file_release = nfs_release,
.lock = nfs3_proc_lock,
.clear_acl_cache = nfs3_forget_cached_acls,
};
......@@ -21,6 +21,7 @@
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
#define NFSDBG_FACILITY NFSDBG_XDR
......@@ -79,6 +80,11 @@ extern int nfs_stat_to_errno(int);
#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
#define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2)
#define ACL3_getaclargs_sz (NFS3_fh_sz+1)
#define ACL3_setaclargs_sz (NFS3_fh_sz+1+2*(2+5*3))
#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+2*(2+5*3))
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
/*
* Map file type to S_IFMT bits
*/
......@@ -627,6 +633,74 @@ nfs3_xdr_commitargs(struct rpc_rqst *req, u32 *p, struct nfs_writeargs *args)
return 0;
}
#ifdef CONFIG_NFS_V3_ACL
/*
* Encode GETACL arguments
*/
static int
nfs3_xdr_getaclargs(struct rpc_rqst *req, u32 *p,
struct nfs3_getaclargs *args)
{
struct rpc_auth *auth = req->rq_task->tk_auth;
unsigned int replen;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->mask);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
if (args->mask & (NFS_ACL | NFS_DFACL)) {
/* Inline the page array */
replen = (RPC_REPHDRSIZE + auth->au_rslack +
ACL3_getaclres_sz) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT);
}
return 0;
}
/*
* Encode SETACL arguments
*/
static int
nfs3_xdr_setaclargs(struct rpc_rqst *req, u32 *p,
struct nfs3_setaclargs *args)
{
struct xdr_buf *buf = &req->rq_snd_buf;
unsigned int base, len_in_head, len = nfsacl_size(
(args->mask & NFS_ACL) ? args->acl_access : NULL,
(args->mask & NFS_DFACL) ? args->acl_default : NULL);
int count, err;
p = xdr_encode_fhandle(p, NFS_FH(args->inode));
*p++ = htonl(args->mask);
base = (char *)p - (char *)buf->head->iov_base;
/* put as much of the acls into head as possible. */
len_in_head = min_t(unsigned int, buf->head->iov_len - base, len);
len -= len_in_head;
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p + (len_in_head >> 2));
for (count = 0; (count << PAGE_SHIFT) < len; count++) {
args->pages[count] = alloc_page(GFP_KERNEL);
if (!args->pages[count]) {
while (count)
__free_page(args->pages[--count]);
return -ENOMEM;
}
}
xdr_encode_pages(buf, args->pages, 0, len);
err = nfsacl_encode(buf, base, args->inode,
(args->mask & NFS_ACL) ?
args->acl_access : NULL, 1, 0);
if (err > 0)
err = nfsacl_encode(buf, base + err, args->inode,
(args->mask & NFS_DFACL) ?
args->acl_default : NULL, 1,
NFS_ACL_DEFAULT);
return (err > 0) ? 0 : err;
}
#endif /* CONFIG_NFS_V3_ACL */
/*
* NFS XDR decode functions
*/
......@@ -978,6 +1052,54 @@ nfs3_xdr_commitres(struct rpc_rqst *req, u32 *p, struct nfs_writeres *res)
return 0;
}
#ifdef CONFIG_NFS_V3_ACL
/*
* Decode GETACL reply
*/
static int
nfs3_xdr_getaclres(struct rpc_rqst *req, u32 *p,
struct nfs3_getaclres *res)
{
struct xdr_buf *buf = &req->rq_rcv_buf;
int status = ntohl(*p++);
struct posix_acl **acl;
unsigned int *aclcnt;
int err, base;
if (status != 0)
return -nfs_stat_to_errno(status);
p = xdr_decode_post_op_attr(p, res->fattr);
res->mask = ntohl(*p++);
if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
return -EINVAL;
base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base;
acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL;
aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL;
err = nfsacl_decode(buf, base, aclcnt, acl);
acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL;
aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL;
if (err > 0)
err = nfsacl_decode(buf, base + err, aclcnt, acl);
return (err > 0) ? 0 : err;
}
/*
* Decode setacl reply.
*/
static int
nfs3_xdr_setaclres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
{
int status = ntohl(*p++);
if (status)
return -nfs_stat_to_errno(status);
xdr_decode_post_op_attr(p, fattr);
return 0;
}
#endif /* CONFIG_NFS_V3_ACL */
#ifndef MAX
# define MAX(a, b) (((a) > (b))? (a) : (b))
#endif
......@@ -1021,3 +1143,28 @@ struct rpc_version nfs_version3 = {
.procs = nfs3_procedures
};
#ifdef CONFIG_NFS_V3_ACL
static struct rpc_procinfo nfs3_acl_procedures[] = {
[ACLPROC3_GETACL] = {
.p_proc = ACLPROC3_GETACL,
.p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
.p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2,
.p_timer = 1,
},
[ACLPROC3_SETACL] = {
.p_proc = ACLPROC3_SETACL,
.p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
.p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2,
.p_timer = 0,
},
};
struct rpc_version nfsacl_version3 = {
.number = 3,
.nrprocs = sizeof(nfs3_acl_procedures)/
sizeof(nfs3_acl_procedures[0]),
.procs = nfs3_acl_procedures,
};
#endif /* CONFIG_NFS_V3_ACL */
/*
* linux/fs/nfs/nfs4_fs.h
*
* Copyright (C) 2005 Trond Myklebust
*
* NFSv4-specific filesystem definitions and declarations
*/
#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H
#ifdef CONFIG_NFS_V4
struct idmap;
/*
* In a seqid-mutating op, this macro controls which error return
* values trigger incrementation of the seqid.
*
* from rfc 3010:
* The client MUST monotonically increment the sequence number for the
* CLOSE, LOCK, LOCKU, OPEN, OPEN_CONFIRM, and OPEN_DOWNGRADE
* operations. This is true even in the event that the previous
* operation that used the sequence number received an error. The only
* exception to this rule is if the previous operation received one of
* the following errors: NFSERR_STALE_CLIENTID, NFSERR_STALE_STATEID,
* NFSERR_BAD_STATEID, NFSERR_BAD_SEQID, NFSERR_BADXDR,
* NFSERR_RESOURCE, NFSERR_NOFILEHANDLE.
*
*/
#define seqid_mutating_err(err) \
(((err) != NFSERR_STALE_CLIENTID) && \
((err) != NFSERR_STALE_STATEID) && \
((err) != NFSERR_BAD_STATEID) && \
((err) != NFSERR_BAD_SEQID) && \
((err) != NFSERR_BAD_XDR) && \
((err) != NFSERR_RESOURCE) && \
((err) != NFSERR_NOFILEHANDLE))
enum nfs4_client_state {
NFS4CLNT_OK = 0,
};
/*
* The nfs4_client identifies our client state to the server.
*/
struct nfs4_client {
struct list_head cl_servers; /* Global list of servers */
struct in_addr cl_addr; /* Server identifier */
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm;
unsigned long cl_state;
u32 cl_lockowner_id;
/*
* The following rwsem ensures exclusive access to the server
* while we recover the state following a lease expiration.
*/
struct rw_semaphore cl_sem;
struct list_head cl_delegations;
struct list_head cl_state_owners;
struct list_head cl_unused;
int cl_nunused;
spinlock_t cl_lock;
atomic_t cl_count;
struct rpc_clnt * cl_rpcclient;
struct rpc_cred * cl_cred;
struct list_head cl_superblocks; /* List of nfs_server structs */
unsigned long cl_lease_time;
unsigned long cl_last_renewal;
struct work_struct cl_renewd;
struct work_struct cl_recoverd;
wait_queue_head_t cl_waitq;
struct rpc_wait_queue cl_rpcwaitq;
/* used for the setclientid verifier */
struct timespec cl_boot_time;
/* idmapper */
struct idmap * cl_idmap;
/* Our own IP address, as a null-terminated string.
* This is used to generate the clientid, and the callback address.
*/
char cl_ipaddr[16];
unsigned char cl_id_uniquifier;
};
/*
* NFS4 state_owners and lock_owners are simply labels for ordered
* sequences of RPC calls. Their sole purpose is to provide once-only
* semantics by allowing the server to identify replayed requests.
*
* The ->so_sema is held during all state_owner seqid-mutating operations:
* OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
* so_seqid.
*/
struct nfs4_state_owner {
struct list_head so_list; /* per-clientid list of state_owners */
struct nfs4_client *so_client;
u32 so_id; /* 32-bit identifier, unique */
struct semaphore so_sema;
u32 so_seqid; /* protected by so_sema */
atomic_t so_count;
struct rpc_cred *so_cred; /* Associated cred */
struct list_head so_states;
struct list_head so_delegations;
};
/*
* struct nfs4_state maintains the client-side state for a given
* (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
*
* OPEN:
* In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
* we need to know how many files are open for reading or writing on a
* given inode. This information too is stored here.
*
* LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
*/
struct nfs4_lock_state {
struct list_head ls_locks; /* Other lock stateids */
struct nfs4_state * ls_state; /* Pointer to open state */
fl_owner_t ls_owner; /* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
int ls_flags;
u32 ls_seqid;
u32 ls_id;
nfs4_stateid ls_stateid;
atomic_t ls_count;
};
/* bits for nfs4_state->flags */
enum {
LK_STATE_IN_USE,
NFS_DELEGATED_STATE,
};
struct nfs4_state {
struct list_head open_states; /* List of states for the same state_owner */
struct list_head inode_states; /* List of states for the same inode */
struct list_head lock_states; /* List of subservient lock stateids */
struct nfs4_state_owner *owner; /* Pointer to the open owner */
struct inode *inode; /* Pointer to the inode */
unsigned long flags; /* Do we hold any locks? */
struct semaphore lock_sema; /* Serializes file locking operations */
spinlock_t state_lock; /* Protects the lock_states list */
nfs4_stateid stateid;
unsigned int nreaders;
unsigned int nwriters;
int state; /* State on the server (R,W, or RW) */
atomic_t count;
};
struct nfs4_exception {
long timeout;
int retry;
};
struct nfs4_state_recovery_ops {
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
};
extern struct dentry_operations nfs4_dentry_operations;
extern struct inode_operations nfs4_dir_inode_operations;
/* inode.c */
extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_map_errors(int err);
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
extern int nfs4_proc_async_renew(struct nfs4_client *);
extern int nfs4_proc_renew(struct nfs4_client *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
extern const u32 nfs4_pathconf_bitmap[2];
extern const u32 nfs4_fsinfo_bitmap[2];
/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs4_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs4_client *);
extern void nfs4_renew_state(void *);
/* nfs4state.c */
extern void init_nfsv4_state(struct nfs_server *);
extern void destroy_nfsv4_state(struct nfs_server *);
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
extern int nfs4_init_client(struct nfs4_client *clp);
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, mode_t);
extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
extern const nfs4_stateid zero_stateid;
/* nfs4xdr.c */
extern uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus);
extern struct rpc_procinfo nfs4_procedures[];
struct nfs4_mount_data;
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
#else
#define init_nfsv4_state(server) do { } while (0)
#define destroy_nfsv4_state(server) do { } while (0)
#define nfs4_put_state_owner(inode, owner) do { } while (0)
#define nfs4_put_open_state(state) do { } while (0)
#define nfs4_close_state(a, b) do { } while (0)
#endif /* CONFIG_NFS_V4 */
#endif /* __LINUX_FS_NFS_NFS4_FS.H */
......@@ -48,6 +48,7 @@
#include <linux/smp_lock.h>
#include <linux/namei.h>
#include "nfs4_fs.h"
#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PROC
......@@ -62,8 +63,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
extern struct rpc_procinfo nfs4_procedures[];
extern nfs4_stateid zero_stateid;
/* Prevent leaks of NFSv4 errors into userland */
int nfs4_map_errors(int err)
{
......@@ -104,7 +103,7 @@ const u32 nfs4_statfs_bitmap[2] = {
| FATTR4_WORD1_SPACE_TOTAL
};
u32 nfs4_pathconf_bitmap[2] = {
const u32 nfs4_pathconf_bitmap[2] = {
FATTR4_WORD0_MAXLINK
| FATTR4_WORD0_MAXNAME,
0
......@@ -124,7 +123,7 @@ static void nfs4_setup_readdir(u64 cookie, u32 *verifier, struct dentry *dentry,
BUG_ON(readdir->count < 80);
if (cookie > 2) {
readdir->cookie = (cookie > 2) ? cookie : 0;
readdir->cookie = cookie;
memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
return;
}
......@@ -270,14 +269,9 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
int err;
do {
err = _nfs4_open_reclaim(sp, state);
switch (err) {
case 0:
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
return err;
}
err = nfs4_handle_exception(server, err, &exception);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
......@@ -509,6 +503,20 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
goto out_nodeleg;
}
static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
{
struct nfs_server *server = NFS_SERVER(dentry->d_inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_open_expired(sp, state, dentry);
if (err == -NFS4ERR_DELAY)
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
......@@ -521,7 +529,7 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
continue;
get_nfs_open_context(ctx);
spin_unlock(&state->inode->i_lock);
status = _nfs4_open_expired(sp, state, ctx->dentry);
status = nfs4_do_open_expired(sp, state, ctx->dentry);
put_nfs_open_context(ctx);
return status;
}
......@@ -748,11 +756,10 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
fattr->valid = 0;
if (state != NULL)
if (state != NULL) {
msg.rpc_cred = state->owner->so_cred;
if (sattr->ia_valid & ATTR_SIZE)
nfs4_copy_stateid(&arg.stateid, state, NULL);
else
nfs4_copy_stateid(&arg.stateid, state, current->files);
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
return rpc_call_sync(server->client, &msg, 0);
......@@ -1116,47 +1123,31 @@ static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
struct inode * inode = dentry->d_inode;
int size_change = sattr->ia_valid & ATTR_SIZE;
struct nfs4_state *state = NULL;
int need_iput = 0;
struct rpc_cred *cred;
struct inode *inode = dentry->d_inode;
struct nfs4_state *state;
int status;
fattr->valid = 0;
if (size_change) {
struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
if (IS_ERR(cred))
return PTR_ERR(cred);
cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
if (IS_ERR(cred))
return PTR_ERR(cred);
/* Search for an existing WRITE delegation first */
state = nfs4_open_delegated(inode, FMODE_WRITE, cred);
if (!IS_ERR(state)) {
/* NB: nfs4_open_delegated() bumps the inode->i_count */
iput(inode);
} else {
/* Search for an existing open(O_WRITE) stateid */
state = nfs4_find_state(inode, cred, FMODE_WRITE);
if (state == NULL) {
state = nfs4_open_delegated(dentry->d_inode,
FMODE_WRITE, cred);
if (IS_ERR(state))
state = nfs4_do_open(dentry->d_parent->d_inode,
dentry, FMODE_WRITE,
NULL, cred);
need_iput = 1;
}
put_rpccred(cred);
if (IS_ERR(state))
return PTR_ERR(state);
if (state->inode != inode) {
printk(KERN_WARNING "nfs: raced in setattr (%p != %p), returning -EIO\n", inode, state->inode);
status = -EIO;
goto out;
}
}
status = nfs4_do_setattr(NFS_SERVER(inode), fattr,
NFS_FH(inode), sattr, state);
out:
if (state) {
inode = state->inode;
if (state != NULL)
nfs4_close_state(state, FMODE_WRITE);
if (need_iput)
iput(inode);
}
put_rpccred(cred);
return status;
}
......@@ -1731,6 +1722,10 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
};
int status;
dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __FUNCTION__,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
lock_kernel();
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
......@@ -1738,6 +1733,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
if (status == 0)
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
unlock_kernel();
dprintk("%s: returns %d\n", __FUNCTION__, status);
return status;
}
......@@ -2163,6 +2159,193 @@ nfs4_proc_file_release(struct inode *inode, struct file *filp)
return 0;
}
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
return (server->caps & NFS_CAP_ACLS)
&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
* it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
* the stack.
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
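/*
 * Map a kernel buffer onto an array of struct page pointers, returning the
 * offset of the buffer within its first page in *pgbase.  This lets the XDR
 * layer send or receive the (possibly page-spanning) ACL data in place,
 * without an intermediate copy.
 */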
static void buf_to_pages(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
const void *p = buf;
*pgbase = offset_in_page(buf);
p -= *pgbase;
while (p < buf + buflen) {
*(pages++) = virt_to_page(p);
p += PAGE_CACHE_SIZE;
}
}
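/*
 * Cached copy of the most recently fetched NFSv4 ACL.  When the ACL fits in
 * a page the data itself is kept (cached == 1); otherwise only its length is
 * recorded (cached == 0), which is still enough to answer zero-length
 * getxattr() probes for the attribute size without another round trip.
 */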
struct nfs4_cached_acl {
int cached;
size_t len;
char data[0];
};
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
kfree(nfsi->nfs4_acl);
nfsi->nfs4_acl = acl;
spin_unlock(&inode->i_lock);
}
static void nfs4_zap_acl_attr(struct inode *inode)
{
nfs4_set_cached_acl(inode, NULL);
}
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_cached_acl *acl;
int ret = -ENOENT;
spin_lock(&inode->i_lock);
acl = nfsi->nfs4_acl;
if (acl == NULL)
goto out;
if (buf == NULL) /* user is just asking for length */
goto out_len;
if (acl->cached == 0)
goto out;
ret = -ERANGE; /* see getxattr(2) man page */
if (acl->len > buflen)
goto out;
memcpy(buf, acl->data, acl->len);
out_len:
ret = acl->len;
out:
spin_unlock(&inode->i_lock);
return ret;
}
static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
struct nfs4_cached_acl *acl;
if (buf && acl_len <= PAGE_SIZE) {
acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
memcpy(acl->data, buf, acl_len);
} else {
acl = kmalloc(sizeof(*acl), GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 0;
}
acl->len = acl_len;
out:
nfs4_set_cached_acl(inode, acl);
}
static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
size_t resp_len = buflen;
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &resp_len,
};
struct page *localpage = NULL;
int ret;
if (buflen < PAGE_SIZE) {
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
localpage = alloc_page(GFP_KERNEL);
if (localpage == NULL)
return -ENOMEM;
resp_buf = page_address(localpage);
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
if (ret)
goto out_free;
if (resp_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, resp_len);
else
nfs4_write_cached_acl(inode, resp_buf, resp_len);
if (buf) {
ret = -ERANGE;
if (resp_len > buflen)
goto out_free;
if (localpage)
memcpy(buf, resp_buf, resp_len);
}
ret = resp_len;
out_free:
if (localpage)
__free_page(localpage);
return ret;
}
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_setaclargs arg = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
.rpc_resp = NULL,
};
int ret;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
if (ret == 0)
nfs4_write_cached_acl(inode, buf, buflen);
return ret;
}
static int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
{
......@@ -2448,14 +2631,11 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
down_read(&clp->cl_sem);
nlo.clientid = clp->cl_clientid;
down(&state->lock_sema);
lsp = nfs4_find_lock_state(state, request->fl_owner);
if (lsp)
nlo.id = lsp->ls_id;
else {
spin_lock(&clp->cl_lock);
nlo.id = nfs4_alloc_lockowner_id(clp);
spin_unlock(&clp->cl_lock);
}
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
nlo.id = lsp->ls_id;
arg.u.lockt = &nlo;
status = rpc_call_sync(server->client, &msg, 0);
if (!status) {
......@@ -2476,8 +2656,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
request->fl_pid = 0;
status = 0;
}
if (lsp)
nfs4_put_lock_state(lsp);
out:
up(&state->lock_sema);
up_read(&clp->cl_sem);
return status;
......@@ -2537,28 +2716,26 @@ static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock
};
struct nfs4_lock_state *lsp;
struct nfs_locku_opargs luargs;
int status = 0;
int status;
down_read(&clp->cl_sem);
down(&state->lock_sema);
lsp = nfs4_find_lock_state(state, request->fl_owner);
if (!lsp)
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
/* We might have lost the locks! */
if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
luargs.seqid = lsp->ls_seqid;
memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
arg.u.locku = &luargs;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
nfs4_increment_lock_seqid(status, lsp);
}
if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
goto out;
luargs.seqid = lsp->ls_seqid;
memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
arg.u.locku = &luargs;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
nfs4_increment_lock_seqid(status, lsp);
if (status == 0) {
if (status == 0)
memcpy(&lsp->ls_stateid, &res.u.stateid,
sizeof(lsp->ls_stateid));
nfs4_notify_unlck(state, request, lsp);
}
nfs4_put_lock_state(lsp);
out:
up(&state->lock_sema);
if (status == 0)
......@@ -2584,7 +2761,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_lock_state *lsp;
struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
struct nfs_lockargs arg = {
.fh = NFS_FH(inode),
.type = nfs4_lck_type(cmd, request),
......@@ -2606,9 +2783,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
};
int status;
lsp = nfs4_get_lock_state(state, request->fl_owner);
if (lsp == NULL)
return -ENOMEM;
if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
struct nfs4_state_owner *owner = state->owner;
struct nfs_open_to_lock otl = {
......@@ -2630,38 +2804,57 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
* seqid mutating errors */
nfs4_increment_seqid(status, owner);
up(&owner->so_sema);
if (status == 0) {
lsp->ls_flags |= NFS_LOCK_INITIALIZED;
lsp->ls_seqid++;
}
} else {
struct nfs_exist_lock el = {
.seqid = lsp->ls_seqid,
};
memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
largs.u.exist_lock = &el;
largs.new_lock_owner = 0;
arg.u.lock = &largs;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
/* increment seqid on success, and on seqid mutating errors */
nfs4_increment_lock_seqid(status, lsp);
}
/* increment seqid on success, and on seqid mutating errors */
nfs4_increment_lock_seqid(status, lsp);
/* save the returned stateid. */
if (status == 0) {
if (status == 0)
memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
lsp->ls_flags |= NFS_LOCK_INITIALIZED;
if (!reclaim)
nfs4_notify_setlk(state, request, lsp);
} else if (status == -NFS4ERR_DENIED)
else if (status == -NFS4ERR_DENIED)
status = -EAGAIN;
nfs4_put_lock_state(lsp);
return status;
}
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
return _nfs4_do_setlk(state, F_SETLK, request, 1);
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_do_setlk(state, F_SETLK, request, 1);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
return _nfs4_do_setlk(state, F_SETLK, request, 0);
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_do_setlk(state, F_SETLK, request, 0);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
......@@ -2671,7 +2864,9 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
down_read(&clp->cl_sem);
down(&state->lock_sema);
status = _nfs4_do_setlk(state, cmd, request, 0);
status = nfs4_set_lock_state(state, request);
if (status == 0)
status = _nfs4_do_setlk(state, cmd, request, 0);
up(&state->lock_sema);
if (status == 0) {
/* Note: we always want to sleep here! */
......@@ -2729,10 +2924,53 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (signalled())
break;
} while(status < 0);
return status;
}
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
size_t buflen, int flags)
{
struct inode *inode = dentry->d_inode;
if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
return -EOPNOTSUPP;
if (!S_ISREG(inode->i_mode) &&
(!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
return -EPERM;
return nfs4_proc_set_acl(inode, buf, buflen);
}
/* The getxattr man page suggests returning -ENODATA for unknown attributes,
* and that's what we'll do for e.g. user attributes that haven't been set.
* But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
* attributes in kernel-managed attribute namespaces. */
ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
size_t buflen)
{
struct inode *inode = dentry->d_inode;
if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
return -EOPNOTSUPP;
return nfs4_proc_get_acl(inode, buf, buflen);
}
ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
{
size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
if (buf && buflen < len)
return -ERANGE;
if (buf)
memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
return len;
}
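/*
 * Illustrative user-space usage (an example for clarity, not part of the
 * patch): the ACL is read and written through the generic xattr syscalls
 * using the name defined above, e.g.
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0)
 *		getxattr(path, "system.nfs4_acl", buf, len);
 *
 * Passing a NULL buffer queries only the attribute length, which the
 * cached-length path in nfs4_read_cached_acl() can answer without another
 * round trip to the server.
 */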
struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
......@@ -2743,10 +2981,20 @@ struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = {
.recover_lock = nfs4_lock_expired,
};
static struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
.setattr = nfs_setattr,
.getxattr = nfs4_getxattr,
.setxattr = nfs4_setxattr,
.listxattr = nfs4_listxattr,
};
struct nfs_rpc_ops nfs_v4_clientops = {
.version = 4, /* protocol version */
.dentry_ops = &nfs4_dentry_operations,
.dir_inode_ops = &nfs4_dir_inode_operations,
.file_inode_ops = &nfs4_file_inode_operations,
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
......@@ -2777,6 +3025,7 @@ struct nfs_rpc_ops nfs_v4_clientops = {
.file_open = nfs4_proc_file_open,
.file_release = nfs4_proc_file_release,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
};
/*
......
......@@ -53,6 +53,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
......
......@@ -46,24 +46,18 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#define OPENOWNER_POOL_SIZE 8
static DEFINE_SPINLOCK(state_spinlock);
nfs4_stateid zero_stateid;
#if 0
nfs4_stateid one_stateid =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#endif
const nfs4_stateid zero_stateid;
static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);
static void nfs4_recover_state(void *);
extern void nfs4_renew_state(void *);
void
init_nfsv4_state(struct nfs_server *server)
......@@ -116,6 +110,7 @@ nfs4_alloc_client(struct in_addr *addr)
INIT_LIST_HEAD(&clp->cl_superblocks);
init_waitqueue_head(&clp->cl_waitq);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_OK;
return clp;
......@@ -137,7 +132,7 @@ nfs4_free_client(struct nfs4_client *clp)
if (clp->cl_cred)
put_rpccred(clp->cl_cred);
nfs_idmap_delete(clp);
if (clp->cl_rpcclient)
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
kfree(clp);
nfs_callback_down();
......@@ -365,7 +360,7 @@ nfs4_alloc_open_state(void)
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
init_MUTEX(&state->lock_sema);
rwlock_init(&state->state_lock);
spin_lock_init(&state->state_lock);
return state;
}
......@@ -547,16 +542,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
return NULL;
}
struct nfs4_lock_state *
nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
struct nfs4_lock_state *lsp;
read_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, fl_owner);
read_unlock(&state->state_lock);
return lsp;
}
/*
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
......@@ -573,14 +558,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
return NULL;
lsp->ls_flags = 0;
lsp->ls_seqid = 0; /* arbitrary */
lsp->ls_id = -1;
memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
atomic_set(&lsp->ls_count, 1);
lsp->ls_owner = fl_owner;
INIT_LIST_HEAD(&lsp->ls_locks);
spin_lock(&clp->cl_lock);
lsp->ls_id = nfs4_alloc_lockowner_id(clp);
spin_unlock(&clp->cl_lock);
INIT_LIST_HEAD(&lsp->ls_locks);
return lsp;
}
......@@ -590,121 +574,112 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
*
* The caller must be holding state->lock_sema and clp->cl_sem
*/
struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
struct nfs4_lock_state * lsp;
struct nfs4_lock_state *lsp, *new = NULL;
lsp = nfs4_find_lock_state(state, owner);
if (lsp == NULL)
lsp = nfs4_alloc_lock_state(state, owner);
for(;;) {
spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, owner);
if (lsp != NULL)
break;
if (new != NULL) {
new->ls_state = state;
list_add(&new->ls_locks, &state->lock_states);
set_bit(LK_STATE_IN_USE, &state->flags);
lsp = new;
new = NULL;
break;
}
spin_unlock(&state->state_lock);
new = nfs4_alloc_lock_state(state, owner);
if (new == NULL)
return NULL;
}
spin_unlock(&state->state_lock);
kfree(new);
return lsp;
}
/*
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
* Release reference to lock_state, and free it if we see that
* it is no longer in use
*/
void
nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
if (test_bit(LK_STATE_IN_USE, &state->flags)) {
struct nfs4_lock_state *lsp;
struct nfs4_state *state;
lsp = nfs4_find_lock_state(state, fl_owner);
if (lsp) {
memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
nfs4_put_lock_state(lsp);
return;
}
}
memcpy(dst, &state->stateid, sizeof(*dst));
if (lsp == NULL)
return;
state = lsp->ls_state;
if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
return;
list_del(&lsp->ls_locks);
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
kfree(lsp);
}
/*
* Called with state->lock_sema and clp->cl_sem held.
*/
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
if (status == NFS_OK || seqid_mutating_err(-status))
lsp->ls_seqid++;
}
struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
/*
* Check to see if the request lock (type FL_UNLK) affects the fl lock.
*
* fl and request must have the same posix owner
*
* return:
* 0 -> fl not affected by request
* 1 -> fl consumed by request
*/
dst->fl_u.nfs4_fl.owner = lsp;
atomic_inc(&lsp->ls_count);
}
static int
nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
static void nfs4_fl_release_lock(struct file_lock *fl)
{
if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
return 1;
return 0;
nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
/*
* Post an initialized lock_state on the state->lock_states list.
*/
void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
static struct file_lock_operations nfs4_fl_lock_ops = {
.fl_copy_lock = nfs4_fl_copy_lock,
.fl_release_private = nfs4_fl_release_lock,
};
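/*
 * Attach a lock state to the file_lock.  The fl_ops above tie the lock
 * state's reference count to the life of the file_lock: fl_copy_lock takes
 * an extra reference when the VFS copies the lock, and fl_release_private
 * drops it when the lock is freed.
 */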
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
if (!list_empty(&lsp->ls_locks))
return;
atomic_inc(&lsp->ls_count);
write_lock(&state->state_lock);
list_add(&lsp->ls_locks, &state->lock_states);
set_bit(LK_STATE_IN_USE, &state->flags);
write_unlock(&state->state_lock);
struct nfs4_lock_state *lsp;
if (fl->fl_ops != NULL)
return 0;
lsp = nfs4_get_lock_state(state, fl->fl_owner);
if (lsp == NULL)
return -ENOMEM;
fl->fl_u.nfs4_fl.owner = lsp;
fl->fl_ops = &nfs4_fl_lock_ops;
return 0;
}
/*
* to decide to 'reap' lock state:
* 1) search i_flock for file_locks with fl.lock_state equal to ls.
* 2) determine if unlock will consume found lock.
* if so, reap
*
* else, don't reap.
*
/*
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
*/
void
nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
struct inode *inode = state->inode;
struct file_lock *fl;
struct nfs4_lock_state *lsp;
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & FL_POSIX))
continue;
if (fl->fl_owner != lsp->ls_owner)
continue;
/* Exit if we find at least one lock which is not consumed */
if (nfs4_check_unlock(fl,request) == 0)
return;
}
memcpy(dst, &state->stateid, sizeof(*dst));
if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
return;
write_lock(&state->state_lock);
list_del_init(&lsp->ls_locks);
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
write_unlock(&state->state_lock);
spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, fl_owner);
if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
spin_unlock(&state->state_lock);
nfs4_put_lock_state(lsp);
}
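/*
 * Note on the above: the open stateid is copied first and is only overridden
 * by a byte-range lock stateid once the server has confirmed that lock
 * (NFS_LOCK_INITIALIZED), so reads and writes never carry an uninitialized
 * lock stateid.
 */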
/*
* Release reference to lock_state, and free it if we see that
* it is no longer in use
*/
void
nfs4_put_lock_state(struct nfs4_lock_state *lsp)
* Called with state->lock_sema and clp->cl_sem held.
*/
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
if (!atomic_dec_and_test(&lsp->ls_count))
return;
BUG_ON (!list_empty(&lsp->ls_locks));
kfree(lsp);
if (status == NFS_OK || seqid_mutating_err(-status))
lsp->ls_seqid++;
}
/*
......
......@@ -51,6 +51,7 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_XDR
......@@ -82,12 +83,16 @@ static int nfs_stat_to_errno(int);
#define encode_getfh_maxsz (op_encode_hdr_maxsz)
#define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \
((3+NFS4_FHSIZE) >> 2))
#define encode_getattr_maxsz (op_encode_hdr_maxsz + 3)
#define nfs4_fattr_bitmap_maxsz 3
#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_fattr_bitmap_maxsz (36 + 2 * nfs4_name_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + 3 + \
nfs4_fattr_bitmap_maxsz)
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
3 + 3 + 3 + 2 * nfs4_name_maxsz))
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
#define encode_savefh_maxsz (op_encode_hdr_maxsz)
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2)
......@@ -122,11 +127,11 @@ static int nfs_stat_to_errno(int);
#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
1 + nfs4_name_maxsz + \
nfs4_path_maxsz + \
nfs4_fattr_bitmap_maxsz)
nfs4_fattr_maxsz)
#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8)
#define encode_create_maxsz (op_encode_hdr_maxsz + \
2 + nfs4_name_maxsz + \
nfs4_fattr_bitmap_maxsz)
nfs4_fattr_maxsz)
#define decode_create_maxsz (op_decode_hdr_maxsz + 8)
#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
......@@ -205,7 +210,7 @@ static int nfs_stat_to_errno(int);
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
op_encode_hdr_maxsz + 4 + \
nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
......@@ -360,6 +365,20 @@ static int nfs_stat_to_errno(int);
encode_delegreturn_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
decode_delegreturn_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
op_decode_hdr_maxsz + \
nfs4_fattr_bitmap_maxsz + 1)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
op_encode_hdr_maxsz + 4 + \
nfs4_fattr_bitmap_maxsz + 1)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
static struct {
unsigned int mode;
......@@ -459,7 +478,7 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s
* In the worst-case, this would be
* 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
* = 36 bytes, plus any contribution from variable-length fields
* such as owner/group/acl's.
* such as owner/group.
*/
len = 16;
......@@ -660,8 +679,6 @@ static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1
static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask)
{
extern u32 nfs4_fattr_bitmap[];
return encode_getattr_two(xdr,
bitmask[0] & nfs4_fattr_bitmap[0],
bitmask[1] & nfs4_fattr_bitmap[1]);
......@@ -669,8 +686,6 @@ static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask)
static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask)
{
extern u32 nfs4_fsinfo_bitmap[];
return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0],
bitmask[1] & nfs4_fsinfo_bitmap[1]);
}
......@@ -969,7 +984,6 @@ static int encode_putrootfh(struct xdr_stream *xdr)
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
{
extern nfs4_stateid zero_stateid;
nfs4_stateid stateid;
uint32_t *p;
......@@ -1000,6 +1014,10 @@ static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args)
static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req)
{
struct rpc_auth *auth = req->rq_task->tk_auth;
uint32_t attrs[2] = {
FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID,
FATTR4_WORD1_MOUNTED_ON_FILEID,
};
int replen;
uint32_t *p;
......@@ -1010,13 +1028,20 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
WRITE32(readdir->count >> 1); /* We're not doing readdirplus */
WRITE32(readdir->count);
WRITE32(2);
if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) {
WRITE32(0);
WRITE32(FATTR4_WORD1_MOUNTED_ON_FILEID);
} else {
WRITE32(FATTR4_WORD0_FILEID);
WRITE32(0);
}
/* Switch to mounted_on_fileid if the server supports it */
if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
attrs[0] &= ~FATTR4_WORD0_FILEID;
else
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
dprintk("%s: cookie = %Lu, verifier = 0x%x%x, bitmap = 0x%x%x\n",
__FUNCTION__,
(unsigned long long)readdir->cookie,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1],
attrs[0] & readdir->bitmask[0],
attrs[1] & readdir->bitmask[1]);
/* set up reply kvec
* toplevel_status + taglen + rescount + OP_PUTFH + status
......@@ -1025,6 +1050,9 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
replen = (RPC_REPHDRSIZE + auth->au_rslack + 9) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen, readdir->pages,
readdir->pgbase, readdir->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
__FUNCTION__, replen, readdir->pages,
readdir->pgbase, readdir->count);
return 0;
}
......@@ -1088,6 +1116,25 @@ static int encode_renew(struct xdr_stream *xdr, const struct nfs4_client *client
return 0;
}
static int
encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
{
uint32_t *p;
RESERVE_SPACE(4+sizeof(zero_stateid.data));
WRITE32(OP_SETATTR);
WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
RESERVE_SPACE(2*4);
WRITE32(1);
WRITE32(FATTR4_WORD0_ACL);
if (arg->acl_len % 4)
return -EINVAL;
RESERVE_SPACE(4);
WRITE32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
return 0;
}
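/*
 * Note: SETACL is carried as a SETATTR with the zero (anonymous) stateid and
 * a one-word bitmap selecting only FATTR4_WORD0_ACL; the ACL data itself is
 * appended from the caller's pages via xdr_write_pages() rather than copied
 * into the head buffer.
 */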
static int
encode_savefh(struct xdr_stream *xdr)
{
......@@ -1631,6 +1678,34 @@ static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, uint32_t *p, struct nfs_se
return status;
}
/*
* Encode a GETACL request
*/
static int
nfs4_xdr_enc_getacl(struct rpc_rqst *req, uint32_t *p,
struct nfs_getaclargs *args)
{
struct xdr_stream xdr;
struct rpc_auth *auth = req->rq_task->tk_auth;
struct compound_hdr hdr = {
.nops = 2,
};
int replen, status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
if (status)
goto out;
status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0);
/* set up reply buffer: */
replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2;
xdr_inline_pages(&req->rq_rcv_buf, replen,
args->acl_pages, args->acl_pgbase, args->acl_len);
out:
return status;
}
/*
* Encode a WRITE request
*/
......@@ -1697,7 +1772,6 @@ static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, uint32_t *p, struct nfs4_fs
*/
static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, uint32_t *p, const struct nfs4_pathconf_arg *args)
{
extern u32 nfs4_pathconf_bitmap[2];
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 2,
......@@ -1718,7 +1792,6 @@ static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, uint32_t *p, const struct
*/
static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, uint32_t *p, const struct nfs4_statfs_arg *args)
{
extern u32 nfs4_statfs_bitmap[];
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 2,
......@@ -3003,6 +3076,11 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
return status;
READ_BUF(8);
COPYMEM(readdir->verifier.data, 8);
dprintk("%s: verifier = 0x%x%x\n",
__FUNCTION__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
hdrlen = (char *) p - (char *) iov->iov_base;
recvd = rcvbuf->len - hdrlen;
......@@ -3017,12 +3095,14 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
for (nr = 0; *p++; nr++) {
if (p + 3 > end)
goto short_pkt;
dprintk("cookie = %Lu, ", *((unsigned long long *)p));
p += 2; /* cookie */
len = ntohl(*p++); /* filename length */
if (len > NFS4_MAXNAMLEN) {
printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
goto err_unmap;
}
dprintk("filename = %*s\n", len, (char *)p);
p += XDR_QUADLEN(len);
if (p + 1 > end)
goto short_pkt;
......@@ -3042,6 +3122,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
kunmap_atomic(kaddr, KM_USER0);
return 0;
short_pkt:
dprintk("%s: short packet at entry %d\n", __FUNCTION__, nr);
entry[0] = entry[1] = 0;
/* truncate listing ? */
if (!nr) {
......@@ -3127,6 +3208,47 @@ static int decode_renew(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_RENEW);
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t *acl_len)
{
uint32_t *savep;
uint32_t attrlen,
bitmap[2] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
*acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
goto out;
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
int hdrlen, recvd;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
printk(KERN_WARNING "NFS: server cheating in getattr"
" acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
if (attrlen <= *acl_len)
xdr_read_pages(xdr, attrlen);
*acl_len = attrlen;
}
out:
return status;
}
static int
decode_savefh(struct xdr_stream *xdr)
{
......@@ -3418,6 +3540,71 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4
}
/*
* Encode an SETACL request
*/
static int
nfs4_xdr_enc_setacl(struct rpc_rqst *req, uint32_t *p, struct nfs_setaclargs *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 2,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
if (status)
goto out;
status = encode_setacl(&xdr, args);
out:
return status;
}
/*
* Decode SETACL response
*/
static int
nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, uint32_t *p, void *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
status = decode_setattr(&xdr, res);
out:
return status;
}
/*
* Decode GETACL response
*/
static int
nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, uint32_t *p, size_t *acl_len)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
status = decode_getacl(&xdr, rqstp, acl_len);
out:
return status;
}
/*
* Decode CLOSE response
......@@ -3895,6 +4082,12 @@ uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus)
}
len = XDR_QUADLEN(ntohl(*p++)); /* attribute buffer length */
if (len > 0) {
if (bitmap[0] & FATTR4_WORD0_RDATTR_ERROR) {
bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
/* Ignore the return value of rdattr_error for now */
p++;
len--;
}
if (bitmap[0] == 0 && bitmap[1] == FATTR4_WORD1_MOUNTED_ON_FILEID)
xdr_decode_hyper(p, &entry->ino);
else if (bitmap[0] == FATTR4_WORD0_FILEID)
......@@ -3934,6 +4127,8 @@ static struct {
{ NFS4ERR_DQUOT, EDQUOT },
{ NFS4ERR_STALE, ESTALE },
{ NFS4ERR_BADHANDLE, EBADHANDLE },
{ NFS4ERR_BADOWNER, EINVAL },
{ NFS4ERR_BADNAME, EINVAL },
{ NFS4ERR_BAD_COOKIE, EBADCOOKIE },
{ NFS4ERR_NOTSUPP, ENOTSUPP },
{ NFS4ERR_TOOSMALL, ETOOSMALL },
......@@ -4019,6 +4214,8 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(READDIR, enc_readdir, dec_readdir),
PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
};
struct rpc_version nfs_version4 = {
......
......@@ -124,6 +124,7 @@ enum {
Opt_soft, Opt_hard, Opt_intr,
Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac,
Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
Opt_acl, Opt_noacl,
/* Error token */
Opt_err
};
......@@ -158,6 +159,8 @@ static match_table_t __initdata tokens = {
{Opt_udp, "udp"},
{Opt_tcp, "proto=tcp"},
{Opt_tcp, "tcp"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_err, NULL}
};
......@@ -266,6 +269,12 @@ static int __init root_nfs_parse(char *name, char *buf)
case Opt_tcp:
nfs_data.flags |= NFS_MOUNT_TCP;
break;
case Opt_acl:
nfs_data.flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_noacl:
nfs_data.flags |= NFS_MOUNT_NOACL;
break;
default :
return 0;
}
......
......@@ -107,10 +107,37 @@ void nfs_unlock_request(struct nfs_page *req)
smp_mb__before_clear_bit();
clear_bit(PG_BUSY, &req->wb_flags);
smp_mb__after_clear_bit();
wake_up_all(&req->wb_context->waitq);
wake_up_bit(&req->wb_flags, PG_BUSY);
nfs_release_request(req);
}
/**
* nfs_set_page_writeback_locked - Lock a request for writeback
* @req: request to lock for writeback
*/
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
if (!nfs_lock_request(req))
return 0;
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
return 1;
}
/**
* nfs_clear_page_writeback - Unlock request and wake up sleepers
*/
void nfs_clear_page_writeback(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
spin_lock(&nfsi->req_lock);
radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
spin_unlock(&nfsi->req_lock);
nfs_unlock_request(req);
}
/**
* nfs_clear_request - Free up all resources allocated to the request
* @req: request whose resources are being freed
......@@ -150,34 +177,15 @@ nfs_release_request(struct nfs_page *req)
nfs_page_free(req);
}
/**
* nfs_list_add_request - Insert a request into a sorted list
* @req: request
* @head: head of list into which to insert the request.
*
* Note that the wb_list is sorted by page index in order to facilitate
* coalescing of requests.
* We use an insertion sort that is optimized for the case of appended
* writes.
*/
void
nfs_list_add_request(struct nfs_page *req, struct list_head *head)
static int nfs_wait_bit_interruptible(void *word)
{
struct list_head *pos;
int ret = 0;
#ifdef NFS_PARANOIA
if (!list_empty(&req->wb_list)) {
printk(KERN_ERR "NFS: Add to list failed!\n");
BUG();
}
#endif
list_for_each_prev(pos, head) {
struct nfs_page *p = nfs_list_entry(pos);
if (p->wb_index < req->wb_index)
break;
}
list_add(&req->wb_list, pos);
req->wb_list_head = head;
if (signal_pending(current))
ret = -ERESTARTSYS;
else
schedule();
return ret;
}
/**
......@@ -190,12 +198,22 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
int
nfs_wait_on_request(struct nfs_page *req)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct rpc_clnt *clnt = NFS_CLIENT(inode);
if (!NFS_WBACK_BUSY(req))
return 0;
return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
sigset_t oldmask;
int ret = 0;
if (!test_bit(PG_BUSY, &req->wb_flags))
goto out;
/*
* Note: the call to rpc_clnt_sigmask() suffices to ensure that we
* are not interrupted if intr flag is not set
*/
rpc_clnt_sigmask(clnt, &oldmask);
ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
rpc_clnt_sigunmask(clnt, &oldmask);
out:
return ret;
}
/**
......@@ -243,6 +261,62 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
return npages;
}
#define NFS_SCAN_MAXENTRIES 16
/**
* nfs_scan_lock_dirty - Scan the radix tree for dirty requests
* @nfsi: NFS inode
* @dst: Destination list
* @idx_start: lower bound of page->index to scan
* @npages: idx_start + npages sets the upper bound to scan.
*
* Moves elements from one of the inode request lists.
* If the number of requests is set to 0, the entire address_space
* starting at index idx_start, is scanned.
* The requests are *not* checked to ensure that they form a contiguous set.
* You must be holding the inode's req_lock when calling this function
*/
int
nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
unsigned long idx_start, unsigned int npages)
{
struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
struct nfs_page *req;
unsigned long idx_end;
int found, i;
int res;
res = 0;
if (npages == 0)
idx_end = ~0;
else
idx_end = idx_start + npages - 1;
for (;;) {
found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
(void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
NFS_PAGE_TAG_DIRTY);
if (found <= 0)
break;
for (i = 0; i < found; i++) {
req = pgvec[i];
if (req->wb_index > idx_end)
goto out;
idx_start = req->wb_index + 1;
if (nfs_set_page_writeback_locked(req)) {
radix_tree_tag_clear(&nfsi->nfs_page_tree,
req->wb_index, NFS_PAGE_TAG_DIRTY);
nfs_list_remove_request(req);
nfs_list_add_request(req, dst);
res++;
}
}
}
out:
return res;
}
/**
* nfs_scan_list - Scan a list for matching requests
* @head: One of the NFS inode request lists
......@@ -280,7 +354,7 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
if (req->wb_index > idx_end)
break;
if (!nfs_lock_request(req))
if (!nfs_set_page_writeback_locked(req))
continue;
nfs_list_remove_request(req);
nfs_list_add_request(req, dst);
......
......@@ -622,6 +622,7 @@ struct nfs_rpc_ops nfs_v2_clientops = {
.version = 2, /* protocol version */
.dentry_ops = &nfs_dentry_operations,
.dir_inode_ops = &nfs_dir_inode_operations,
.file_inode_ops = &nfs_file_inode_operations,
.getroot = nfs_proc_get_root,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
......
......@@ -173,7 +173,6 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
nfs_lock_request(new);
nfs_list_add_request(new, &one_request);
nfs_pagein_one(&one_request, inode);
return 0;
......@@ -185,7 +184,6 @@ static void nfs_readpage_release(struct nfs_page *req)
nfs_clear_request(req);
nfs_release_request(req);
nfs_unlock_request(req);
dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
req->wb_context->dentry->d_inode->i_sb->s_id,
......@@ -553,7 +551,6 @@ readpage_async_filler(void *data, struct page *page)
}
if (len < PAGE_CACHE_SIZE)
memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
nfs_lock_request(new);
nfs_list_add_request(new, desc->head);
return 0;
}
......
......@@ -220,7 +220,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
ClearPageError(page);
io_error:
nfs_end_data_update_defer(inode);
nfs_end_data_update(inode);
nfs_writedata_free(wdata);
return written ? written : result;
}
......@@ -352,7 +352,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (err < 0)
goto out;
}
err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc));
err = nfs_commit_inode(inode, wb_priority(wbc));
if (err > 0) {
wbc->nr_to_write -= err;
err = 0;
......@@ -401,7 +401,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
nfsi->npages--;
if (!nfsi->npages) {
spin_unlock(&nfsi->req_lock);
nfs_end_data_update_defer(inode);
nfs_end_data_update(inode);
iput(inode);
} else
spin_unlock(&nfsi->req_lock);
......@@ -446,6 +446,8 @@ nfs_mark_request_dirty(struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&nfsi->req_lock);
radix_tree_tag_set(&nfsi->nfs_page_tree,
req->wb_index, NFS_PAGE_TAG_DIRTY);
nfs_list_add_request(req, &nfsi->dirty);
nfsi->ndirty++;
spin_unlock(&nfsi->req_lock);
......@@ -503,13 +505,12 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
spin_lock(&nfsi->req_lock);
next = idx_start;
while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
if (req->wb_index > idx_end)
break;
next = req->wb_index + 1;
if (!NFS_WBACK_BUSY(req))
continue;
BUG_ON(!NFS_WBACK_BUSY(req));
atomic_inc(&req->wb_count);
spin_unlock(&nfsi->req_lock);
......@@ -538,12 +539,15 @@ static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
int res;
res = nfs_scan_list(&nfsi->dirty, dst, idx_start, npages);
nfsi->ndirty -= res;
sub_page_state(nr_dirty,res);
if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
int res = 0;
if (nfsi->ndirty != 0) {
res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
nfsi->ndirty -= res;
sub_page_state(nr_dirty,res);
if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
}
return res;
}
......@@ -562,11 +566,14 @@ static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
int res;
res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
nfsi->ncommit -= res;
if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
int res = 0;
if (nfsi->ncommit != 0) {
res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
nfsi->ncommit -= res;
if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
}
return res;
}
#endif
......@@ -750,7 +757,7 @@ int nfs_updatepage(struct file *file, struct page *page,
* is entirely in cache, it may be more efficient to avoid
* fragmenting write requests.
*/
if (PageUptodate(page) && inode->i_flock == NULL) {
if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
loff_t end_offs = i_size_read(inode) - 1;
unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
......@@ -821,7 +828,7 @@ static void nfs_writepage_release(struct nfs_page *req)
#else
nfs_inode_remove_request(req);
#endif
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
......@@ -952,7 +959,7 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
nfs_writedata_free(data);
}
nfs_mark_request_dirty(req);
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
return -ENOMEM;
}
......@@ -1002,7 +1009,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_dirty(req);
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
}
......@@ -1029,7 +1036,7 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_dirty(req);
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
}
return error;
}
......@@ -1121,7 +1128,7 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
nfs_inode_remove_request(req);
#endif
next:
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
}
}
......@@ -1210,36 +1217,24 @@ static void nfs_commit_rpcsetup(struct list_head *head,
struct nfs_write_data *data, int how)
{
struct rpc_task *task = &data->task;
struct nfs_page *first, *last;
struct nfs_page *first;
struct inode *inode;
loff_t start, end, len;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
list_splice_init(head, &data->pages);
first = nfs_list_entry(data->pages.next);
last = nfs_list_entry(data->pages.prev);
inode = first->wb_context->dentry->d_inode;
/*
* Determine the offset range of requests in the COMMIT call.
* We rely on the fact that data->pages is an ordered list...
*/
start = req_offset(first);
end = req_offset(last) + last->wb_bytes;
len = end - start;
/* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1))
len = 0;
data->inode = inode;
data->cred = first->wb_context->cred;
data->args.fh = NFS_FH(data->inode);
data->args.offset = start;
data->args.count = len;
data->res.count = len;
/* Note: we always request a commit of the entire inode */
data->args.offset = 0;
data->args.count = 0;
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
......@@ -1278,7 +1273,7 @@ nfs_commit_list(struct list_head *head, int how)
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_commit(req);
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
}
......@@ -1324,7 +1319,7 @@ nfs_commit_done(struct rpc_task *task)
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
next:
nfs_unlock_request(req);
nfs_clear_page_writeback(req);
res++;
}
sub_page_state(nr_unstable,res);
......@@ -1342,16 +1337,23 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
spin_lock(&nfsi->req_lock);
res = nfs_scan_dirty(inode, &head, idx_start, npages);
spin_unlock(&nfsi->req_lock);
if (res)
error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
if (res) {
struct nfs_server *server = NFS_SERVER(inode);
/* For single writes, FLUSH_STABLE is more efficient */
if (res == nfsi->npages && nfsi->npages <= server->wpages) {
if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
how |= FLUSH_STABLE;
}
error = nfs_flush_list(&head, server->wpages, how);
}
if (error < 0)
return error;
return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
unsigned int npages, int how)
int nfs_commit_inode(struct inode *inode, int how)
{
struct nfs_inode *nfsi = NFS_I(inode);
LIST_HEAD(head);
......@@ -1359,15 +1361,13 @@ int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
error = 0;
spin_lock(&nfsi->req_lock);
res = nfs_scan_commit(inode, &head, idx_start, npages);
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&nfsi->req_lock);
if (res) {
res += nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&nfsi->req_lock);
error = nfs_commit_list(&head, how);
} else
spin_unlock(&nfsi->req_lock);
if (error < 0)
return error;
if (error < 0)
return error;
}
return res;
}
#endif
......@@ -1389,7 +1389,7 @@ int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
error = nfs_flush_inode(inode, idx_start, npages, how);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (error == 0)
error = nfs_commit_inode(inode, idx_start, npages, how);
error = nfs_commit_inode(inode, how);
#endif
} while (error > 0);
return error;
......
#
# Makefile for Linux filesystem routines that are shared by client and server.
#
obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
/*
* fs/nfs_common/nfsacl.c
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
*/
/*
* The Solaris nfsacl protocol represents some ACLs slightly differently
* than POSIX 1003.1e draft 17 does (and we do):
*
* - Minimal ACLs always have an ACL_MASK entry, so they have
* four instead of three entries.
* - The ACL_MASK entry in such minimal ACLs always has the same
* permissions as the ACL_GROUP_OBJ entry. (In extended ACLs
* the ACL_MASK and ACL_GROUP_OBJ entries may differ.)
* - The identifier fields of the ACL_USER_OBJ and ACL_GROUP_OBJ
* entries contain the identifiers of the owner and owning group.
* (In POSIX ACLs we always set them to ACL_UNDEFINED_ID).
* - ACL entries in the kernel are kept sorted in ascending order
* of (e_tag, e_id). Solaris ACLs are unsorted.
*/
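/*
 * Example of the first two points: a POSIX minimal ACL such as
 *
 *	ACL_USER_OBJ:rw-  ACL_GROUP_OBJ:r--  ACL_OTHER:r--
 *
 * is sent on the wire with four entries, the extra ACL_MASK carrying the
 * same r-- permissions as ACL_GROUP_OBJ (see the "fake up ACL_MASK entry"
 * branch in xdr_nfsace_encode below); posix_acl_from_nfsacl() strips such a
 * redundant ACL_MASK again on decode.
 */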
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sunrpc/xdr.h>
#include <linux/nfsacl.h>
#include <linux/nfs3.h>
#include <linux/sort.h>
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(nfsacl_encode);
EXPORT_SYMBOL(nfsacl_decode);
struct nfsacl_encode_desc {
struct xdr_array2_desc desc;
unsigned int count;
struct posix_acl *acl;
int typeflag;
uid_t uid;
gid_t gid;
};
static int
xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
{
struct nfsacl_encode_desc *nfsacl_desc =
(struct nfsacl_encode_desc *) desc;
u32 *p = (u32 *) elem;
if (nfsacl_desc->count < nfsacl_desc->acl->a_count) {
struct posix_acl_entry *entry =
&nfsacl_desc->acl->a_entries[nfsacl_desc->count++];
*p++ = htonl(entry->e_tag | nfsacl_desc->typeflag);
switch(entry->e_tag) {
case ACL_USER_OBJ:
*p++ = htonl(nfsacl_desc->uid);
break;
case ACL_GROUP_OBJ:
*p++ = htonl(nfsacl_desc->gid);
break;
case ACL_USER:
case ACL_GROUP:
*p++ = htonl(entry->e_id);
break;
default: /* Solaris depends on that! */
*p++ = 0;
break;
}
*p++ = htonl(entry->e_perm & S_IRWXO);
} else {
const struct posix_acl_entry *pa, *pe;
int group_obj_perm = ACL_READ|ACL_WRITE|ACL_EXECUTE;
FOREACH_ACL_ENTRY(pa, nfsacl_desc->acl, pe) {
if (pa->e_tag == ACL_GROUP_OBJ) {
group_obj_perm = pa->e_perm & S_IRWXO;
break;
}
}
/* fake up ACL_MASK entry */
*p++ = htonl(ACL_MASK | nfsacl_desc->typeflag);
*p++ = htonl(0);
*p++ = htonl(group_obj_perm);
}
return 0;
}
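/*
 * Each ACL entry is thus encoded as three 32-bit words -- type (with the
 * optional NFS_ACL_DEFAULT flag), identifier, and permission bits -- which
 * is what the .elem_size = 12 in the encode and decode descriptors accounts
 * for.
 */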
unsigned int
nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
struct posix_acl *acl, int encode_entries, int typeflag)
{
int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;
struct nfsacl_encode_desc nfsacl_desc = {
.desc = {
.elem_size = 12,
.array_len = encode_entries ? entries : 0,
.xcode = xdr_nfsace_encode,
},
.acl = acl,
.typeflag = typeflag,
.uid = inode->i_uid,
.gid = inode->i_gid,
};
int err;
if (entries > NFS_ACL_MAX_ENTRIES ||
xdr_encode_word(buf, base, entries))
return -EINVAL;
err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc);
if (!err)
err = 8 + nfsacl_desc.desc.elem_size *
nfsacl_desc.desc.array_len;
return err;
}
struct nfsacl_decode_desc {
struct xdr_array2_desc desc;
unsigned int count;
struct posix_acl *acl;
};
static int
xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem)
{
struct nfsacl_decode_desc *nfsacl_desc =
(struct nfsacl_decode_desc *) desc;
u32 *p = (u32 *) elem;
struct posix_acl_entry *entry;
if (!nfsacl_desc->acl) {
if (desc->array_len > NFS_ACL_MAX_ENTRIES)
return -EINVAL;
nfsacl_desc->acl = posix_acl_alloc(desc->array_len, GFP_KERNEL);
if (!nfsacl_desc->acl)
return -ENOMEM;
nfsacl_desc->count = 0;
}
entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++];
entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT;
entry->e_id = ntohl(*p++);
entry->e_perm = ntohl(*p++);
switch(entry->e_tag) {
case ACL_USER_OBJ:
case ACL_USER:
case ACL_GROUP_OBJ:
case ACL_GROUP:
case ACL_OTHER:
if (entry->e_perm & ~S_IRWXO)
return -EINVAL;
break;
case ACL_MASK:
/* Solaris sometimes sets additional bits in the mask */
entry->e_perm &= S_IRWXO;
break;
default:
return -EINVAL;
}
return 0;
}
static int
cmp_acl_entry(const void *x, const void *y)
{
const struct posix_acl_entry *a = x, *b = y;
if (a->e_tag != b->e_tag)
return a->e_tag - b->e_tag;
else if (a->e_id > b->e_id)
return 1;
else if (a->e_id < b->e_id)
return -1;
else
return 0;
}
/*
* Convert from a Solaris ACL to a POSIX 1003.1e draft 17 ACL.
*/
static int
posix_acl_from_nfsacl(struct posix_acl *acl)
{
struct posix_acl_entry *pa, *pe,
*group_obj = NULL, *mask = NULL;
if (!acl)
return 0;
sort(acl->a_entries, acl->a_count, sizeof(struct posix_acl_entry),
cmp_acl_entry, NULL);
/* Clear undefined identifier fields and find the ACL_GROUP_OBJ
and ACL_MASK entries. */
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch(pa->e_tag) {
case ACL_USER_OBJ:
pa->e_id = ACL_UNDEFINED_ID;
break;
case ACL_GROUP_OBJ:
pa->e_id = ACL_UNDEFINED_ID;
group_obj = pa;
break;
case ACL_MASK:
mask = pa;
/* fall through */
case ACL_OTHER:
pa->e_id = ACL_UNDEFINED_ID;
break;
}
}
if (acl->a_count == 4 && group_obj && mask &&
mask->e_perm == group_obj->e_perm) {
/* remove bogus ACL_MASK entry */
memmove(mask, mask+1, (3 - (mask - acl->a_entries)) *
sizeof(struct posix_acl_entry));
acl->a_count = 3;
}
return 0;
}
unsigned int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl)
{
struct nfsacl_decode_desc nfsacl_desc = {
.desc = {
.elem_size = 12,
.xcode = pacl ? xdr_nfsace_decode : NULL,
},
};
u32 entries;
int err;
if (xdr_decode_word(buf, base, &entries) ||
entries > NFS_ACL_MAX_ENTRIES)
return -EINVAL;
err = xdr_decode_array2(buf, base + 4, &nfsacl_desc.desc);
if (err)
return err;
if (pacl) {
if (entries != nfsacl_desc.desc.array_len ||
posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) {
posix_acl_release(nfsacl_desc.acl);
return -EINVAL;
}
*pacl = nfsacl_desc.acl;
}
if (aclcnt)
*aclcnt = entries;
return 8 + nfsacl_desc.desc.elem_size *
nfsacl_desc.desc.array_len;
}
......@@ -6,7 +6,9 @@ obj-$(CONFIG_NFSD) += nfsd.o
nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
nfs4acl.o nfs4callback.o
nfsd-objs := $(nfsd-y)
/*
* linux/fs/nfsd/nfsacl.c
*
* Process version 2 NFSACL requests.
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
*/
#include <linux/sunrpc/svc.h>
#include <linux/nfs.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/xdr3.h>
#include <linux/posix_acl.h>
#include <linux/nfsacl.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define RETURN_STATUS(st) { resp->status = (st); return (st); }
/*
* NULL call.
*/
static int
nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
return nfs_ok;
}
/*
* Get the Access and/or Default ACL of a file.
*/
static int nfsacld_proc_getacl(struct svc_rqst * rqstp,
struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
{
svc_fh *fh;
struct posix_acl *acl;
int nfserr = 0;
dprintk("nfsd: GETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));
fh = fh_copy(&resp->fh, &argp->fh);
if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP)))
RETURN_STATUS(nfserr_inval);
if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
RETURN_STATUS(nfserr_inval);
resp->mask = argp->mask;
if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
acl = nfsd_get_posix_acl(fh, ACL_TYPE_ACCESS);
if (IS_ERR(acl)) {
int err = PTR_ERR(acl);
if (err == -ENODATA || err == -EOPNOTSUPP)
acl = NULL;
else {
nfserr = nfserrno(err);
goto fail;
}
}
if (acl == NULL) {
/* Solaris returns the inode's minimum ACL. */
struct inode *inode = fh->fh_dentry->d_inode;
acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
}
resp->acl_access = acl;
}
if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
/* Check how Solaris handles requests for the Default ACL
of a non-directory! */
acl = nfsd_get_posix_acl(fh, ACL_TYPE_DEFAULT);
if (IS_ERR(acl)) {
int err = PTR_ERR(acl);
if (err == -ENODATA || err == -EOPNOTSUPP)
acl = NULL;
else {
nfserr = nfserrno(err);
goto fail;
}
}
resp->acl_default = acl;
}
/* resp->acl_{access,default} are released in nfsaclsvc_release_getacl. */
RETURN_STATUS(0);
fail:
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
RETURN_STATUS(nfserr);
}
/*
* Set the Access and/or Default ACL of a file.
*/
static int nfsacld_proc_setacl(struct svc_rqst * rqstp,
struct nfsd3_setaclargs *argp,
struct nfsd_attrstat *resp)
{
svc_fh *fh;
int nfserr = 0;
dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));
fh = fh_copy(&resp->fh, &argp->fh);
nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
if (!nfserr) {
nfserr = nfserrno( nfsd_set_posix_acl(
fh, ACL_TYPE_ACCESS, argp->acl_access) );
}
if (!nfserr) {
nfserr = nfserrno( nfsd_set_posix_acl(
fh, ACL_TYPE_DEFAULT, argp->acl_default) );
}
/* argp->acl_{access,default} may have been allocated in
nfsaclsvc_decode_setaclargs. */
posix_acl_release(argp->acl_access);
posix_acl_release(argp->acl_default);
return nfserr;
}
/*
* Check file attributes
*/
static int nfsacld_proc_getattr(struct svc_rqst * rqstp,
struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
{
dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
return fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
}
/*
* Check file access
*/
static int nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
struct nfsd3_accessres *resp)
{
int nfserr;
dprintk("nfsd: ACCESS(2acl) %s 0x%x\n",
SVCFH_fmt(&argp->fh),
argp->access);
fh_copy(&resp->fh, &argp->fh);
resp->access = argp->access;
nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
return nfserr;
}
/*
* XDR decode functions
*/
static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_getaclargs *argp)
{
if (!(p = nfs2svc_decode_fh(p, &argp->fh)))
return 0;
argp->mask = ntohl(*p); p++;
return xdr_argsize_check(rqstp, p);
}
static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_setaclargs *argp)
{
struct kvec *head = rqstp->rq_arg.head;
unsigned int base;
int n;
if (!(p = nfs2svc_decode_fh(p, &argp->fh)))
return 0;
argp->mask = ntohl(*p++);
if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) ||
!xdr_argsize_check(rqstp, p))
return 0;
base = (char *)p - (char *)head->iov_base;
n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
(argp->mask & NFS_ACL) ?
&argp->acl_access : NULL);
if (n > 0)
n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
(argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL);
return (n > 0);
}
static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_fhandle *argp)
{
if (!(p = nfs2svc_decode_fh(p, &argp->fh)))
return 0;
return xdr_argsize_check(rqstp, p);
}
static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_accessargs *argp)
{
if (!(p = nfs2svc_decode_fh(p, &argp->fh)))
return 0;
argp->access = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
}
/*
* XDR encode functions
*/
/* GETACL */
static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_getaclres *resp)
{
	struct dentry *dentry = resp->fh.fh_dentry;
	struct inode *inode;
	int w = nfsacl_size(
		(resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
		(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
	struct kvec *head = rqstp->rq_res.head;
	unsigned int base;
	int n;

	if (dentry == NULL || dentry->d_inode == NULL)
		return 0;
	inode = dentry->d_inode;
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
*p++ = htonl(resp->mask);
if (!xdr_ressize_check(rqstp, p))
return 0;
base = (char *)p - (char *)head->iov_base;
rqstp->rq_res.page_len = w;
while (w > 0) {
if (!svc_take_res_page(rqstp))
return 0;
w -= PAGE_SIZE;
}
n = nfsacl_encode(&rqstp->rq_res, base, inode,
resp->acl_access,
resp->mask & NFS_ACL, 0);
if (n > 0)
n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
resp->acl_default,
resp->mask & NFS_DFACL,
NFS_ACL_DEFAULT);
if (n <= 0)
return 0;
return 1;
}
static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, u32 *p,
struct nfsd_attrstat *resp)
{
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
return xdr_ressize_check(rqstp, p);
}
/* ACCESS */
static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_accessres *resp)
{
p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
*p++ = htonl(resp->access);
return xdr_ressize_check(rqstp, p);
}
/*
* XDR release functions
*/
static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_getaclres *resp)
{
fh_put(&resp->fh);
posix_acl_release(resp->acl_access);
posix_acl_release(resp->acl_default);
return 1;
}
static int nfsaclsvc_release_fhandle(struct svc_rqst *rqstp, u32 *p,
struct nfsd_fhandle *resp)
{
fh_put(&resp->fh);
return 1;
}
#define nfsaclsvc_decode_voidargs NULL
#define nfsaclsvc_encode_voidres NULL
#define nfsaclsvc_release_void NULL
#define nfsd3_fhandleargs nfsd_fhandle
#define nfsd3_attrstatres nfsd_attrstat
#define nfsd3_voidres nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };
#define PROC(name, argt, rest, relt, cache, respsize) \
{ (svc_procfunc) nfsacld_proc_##name, \
(kxdrproc_t) nfsaclsvc_decode_##argt##args, \
(kxdrproc_t) nfsaclsvc_encode_##rest##res, \
(kxdrproc_t) nfsaclsvc_release_##relt, \
sizeof(struct nfsd3_##argt##args), \
sizeof(struct nfsd3_##rest##res), \
0, \
cache, \
respsize, \
}
#define ST 1		/* status */
#define AT 21		/* attributes */
#define pAT (1+AT)	/* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3)	/* Access Control List */
static struct svc_procedure nfsd_acl_procedures2[] = {
PROC(null, void, void, void, RC_NOCACHE, ST),
PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)),
PROC(setacl, setacl, attrstat, fhandle, RC_NOCACHE, ST+AT),
PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
PROC(access, access, access, fhandle, RC_NOCACHE, ST+AT+1),
};
struct svc_version nfsd_acl_version2 = {
.vs_vers = 2,
.vs_nproc = 5,
.vs_proc = nfsd_acl_procedures2,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
};
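For reference, the entries in nfsd_acl_procedures2 are generated by the PROC macro defined above. As an illustration derived purely from that macro (it is not additional code in this commit), the getacl row expands to roughly the following initializer:

/* Illustrative expansion of PROC(getacl, getacl, getacl, getacl,
 * RC_NOCACHE, ST+1+2*(1+ACL)), derived from the macro above. */
{
	(svc_procfunc) nfsacld_proc_getacl,
	(kxdrproc_t) nfsaclsvc_decode_getaclargs,
	(kxdrproc_t) nfsaclsvc_encode_getaclres,
	(kxdrproc_t) nfsaclsvc_release_getacl,
	sizeof(struct nfsd3_getaclargs),
	sizeof(struct nfsd3_getaclres),
	0,
	RC_NOCACHE,
	ST+1+2*(1+ACL),
},

The final field is the estimated reply size in 32-bit XDR words; per the defines above, each ACL is budgeted as one entry-count word plus three words per entry, up to NFS_ACL_MAX_ENTRIES entries.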
......@@ -71,6 +71,12 @@ decode_fh(u32 *p, struct svc_fh *fhp)
return p + XDR_QUADLEN(size);
}
/* Helper function for NFSv3 ACL code */
u32 *nfs3svc_decode_fh(u32 *p, struct svc_fh *fhp)
{
return decode_fh(p, fhp);
}
static inline u32 *
encode_fh(u32 *p, struct svc_fh *fhp)
{
......@@ -233,6 +239,13 @@ encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
return p;
}
/* Helper for NFSv3 ACLs */
u32 *
nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
{
return encode_post_op_attr(rqstp, p, fhp);
}
/*
 * Encode weak cache consistency data
*/
......
......@@ -430,7 +430,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
clnt = rpc_create_client(xprt, hostname, program, 1, RPC_AUTH_UNIX);
if (IS_ERR(clnt)) {
dprintk("NFSD: couldn't create callback client\n");
goto out_xprt;
goto out_err;
}
clnt->cl_intr = 0;
clnt->cl_softrtry = 1;
......@@ -465,8 +465,6 @@ nfsd4_probe_callback(struct nfs4_client *clp)
out_clnt:
rpc_shutdown_client(clnt);
goto out_err;
out_xprt:
xprt_destroy(xprt);
out_err:
dprintk("NFSD: warning: no callback path to client %.*s\n",
(int)clp->cl_name.len, clp->cl_name.data);
......
......@@ -591,6 +591,7 @@ nfserrno (int errno)
{ nfserr_dropit, -ENOMEM },
{ nfserr_badname, -ESRCH },
{ nfserr_io, -ETXTBSY },
{ nfserr_notsupp, -EOPNOTSUPP },
{ -1, -EIO }
};
int i;
......
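The hunk above extends the errno-to-NFS-status table consulted by nfserrno(). As a rough sketch of that lookup (not part of this diff; the table name nfs_errtbl and the field names nfserr/syserr are assumptions for illustration):

/* Sketch only: table-driven errno -> NFS status translation that the
 * entry added above plugs into. Names are assumed for illustration. */
int nfserrno(int errno)
{
	int i;

	for (i = 0; nfs_errtbl[i].nfserr != -1; i++) {
		if (nfs_errtbl[i].syserr == errno)
			return nfs_errtbl[i].nfserr;
	}
	/* Unknown errno: fall back to a generic I/O error, matching the
	 * { -1, -EIO } terminator shown in the hunk. */
	return nfserr_io;
}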
......@@ -49,6 +49,12 @@ decode_fh(u32 *p, struct svc_fh *fhp)
return p + (NFS_FHSIZE >> 2);
}
/* Helper function for NFSv2 ACL code */
u32 *nfs2svc_decode_fh(u32 *p, struct svc_fh *fhp)
{
return decode_fh(p, fhp);
}
static inline u32 *
encode_fh(u32 *p, struct svc_fh *fhp)
{
......@@ -190,6 +196,11 @@ encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
return p;
}
/* Helper function for NFSv2 ACL code */
u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
{
return encode_fattr(rqstp, p, fhp);
}
/*
* XDR decode functions
......
......@@ -674,6 +674,7 @@ struct file_lock {
struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
union {
struct nfs_lock_info nfs_fl;
struct nfs4_lock_info nfs4_fl;
} fl_u;
};
......
......@@ -382,6 +382,8 @@ enum {
NFSPROC4_CLNT_READDIR,
NFSPROC4_CLNT_SERVER_CAPS,
NFSPROC4_CLNT_DELEGRETURN,
NFSPROC4_CLNT_GETACL,
NFSPROC4_CLNT_SETACL,
};
#endif
......
......@@ -10,6 +10,7 @@
struct nfs_server {
struct rpc_clnt * client; /* RPC client handle */
struct rpc_clnt * client_sys; /* 2nd handle for FSINFO */
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nfs_rpc_ops * rpc_ops; /* NFS protocol vector */
struct backing_dev_info backing_dev_info;
int flags; /* various flags */
......