Commit 92737230 authored by Trond Myklebust

NLM: Add nlmclnt_release_call

Add a helper function to simplify the freeing of NLM client requests.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent e4cd038a
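The hunks below replace the open-coded teardown of an NLM request (drop the host reference, free the lock arguments, kfree() the struct) with a single nlm_release_call() helper, and make nlm_alloc_call() take ownership of the caller's host reference. A condensed sketch of the resulting call-site pattern, pieced together from the diff itself rather than a standalone build:

	/* Before: every call site tore the request down by hand. */
	nlm_release_host(req->a_host);
	nlmclnt_release_lockargs(req);
	kfree(req);

	/* After: allocation takes over the host reference (and drops it if the
	 * allocation fails); one helper releases the host ref, the lock
	 * arguments and the request itself. */
	call = nlm_alloc_call(nlm_get_host(host));
	if (call == NULL)
		return -ENOMEM;
	/* ... issue the NLM call ... */
	nlm_release_call(call);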
@@ -152,9 +152,8 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req)
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
struct nfs_server *nfssrv = NFS_SERVER(inode);
struct nlm_host *host;
struct nlm_rqst reqst, *call = &reqst;
struct nlm_rqst *call;
sigset_t oldset;
unsigned long flags;
int status, proto, vers;
@@ -168,23 +167,17 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
/* Retrieve transport protocol from NFS client */
proto = NFS_CLIENT(inode)->cl_xprt->prot;
if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers);
if (host == NULL)
return -ENOLCK;
/* Create RPC client handle if not there, and copy soft
* and intr flags from NFS client. */
if (host->h_rpcclnt == NULL) {
struct rpc_clnt *clnt;
call = nlm_alloc_call(host);
if (call == NULL)
return -ENOMEM;
/* Bind an rpc client to this host handle (does not
* perform a portmapper lookup) */
if (!(clnt = nlm_bind_host(host))) {
status = -ENOLCK;
goto done;
}
clnt->cl_softrtry = nfssrv->client->cl_softrtry;
clnt->cl_intr = nfssrv->client->cl_intr;
}
nlmclnt_locks_init_private(fl, host);
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
/* Keep the old signal mask */
spin_lock_irqsave(&current->sighand->siglock, flags);
@@ -197,26 +190,10 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
&& (current->flags & PF_EXITING)) {
sigfillset(&current->blocked); /* Mask all signals */
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
call = nlmclnt_alloc_call();
if (!call) {
status = -ENOMEM;
goto out_restore;
}
call->a_flags = RPC_TASK_ASYNC;
} else {
spin_unlock_irqrestore(&current->sighand->siglock, flags);
memset(call, 0, sizeof(*call));
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
}
call->a_host = host;
nlmclnt_locks_init_private(fl, host);
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
spin_unlock_irqrestore(&current->sighand->siglock, flags);
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
@@ -229,24 +206,26 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
else
status = -EINVAL;
out_restore:
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
done:
dprintk("lockd: clnt proc returns %d\n", status);
nlm_release_host(host);
return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
/*
* Allocate an NLM RPC call struct
*
* Note: the caller must hold a reference to host. In case of failure,
* this reference will be released.
*/
struct nlm_rqst *
nlmclnt_alloc_call(void)
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
struct nlm_rqst *call;
@@ -255,16 +234,30 @@ nlmclnt_alloc_call(void)
if (call != NULL) {
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = host;
return call;
}
if (signalled())
break;
printk("nlmclnt_alloc_call: failed, waiting for memory\n");
printk("nlm_alloc_call: failed, waiting for memory\n");
schedule_timeout_interruptible(5*HZ);
}
nlm_release_host(host);
return NULL;
}
void nlm_release_call(struct nlm_rqst *call)
{
nlm_release_host(call->a_host);
nlmclnt_release_lockargs(call);
kfree(call);
}
static void nlmclnt_rpc_release(void *data)
{
return nlm_release_call(data);
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
DEFINE_WAIT(wait);
@@ -361,7 +354,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
/*
* Generic NLM call, async version.
*/
int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
@@ -369,48 +362,23 @@ int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
};
int status;
dprintk("lockd: call procedure %d on %s (async)\n",
(int)proc, host->h_name);
/* If we have no RPC client yet, create one. */
if ((clnt = nlm_bind_host(host)) == NULL)
return -ENOLCK;
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* bootstrap and kick off the async RPC call */
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
return status;
}
static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
struct nlm_args *argp = &req->a_args;
struct nlm_res *resp = &req->a_res;
struct rpc_message msg = {
.rpc_argp = argp,
.rpc_resp = resp,
};
int status;
int status = -ENOLCK;
dprintk("lockd: call procedure %d on %s (async)\n",
(int)proc, host->h_name);
/* If we have no RPC client yet, create one. */
if ((clnt = nlm_bind_host(host)) == NULL)
return -ENOLCK;
clnt = nlm_bind_host(host);
if (clnt == NULL)
goto out_err;
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* Increment host refcount */
nlm_get_host(host);
/* bootstrap and kick off the async RPC call */
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
if (status < 0)
nlm_release_host(host);
if (status == 0)
return 0;
out_err:
nlm_release_call(req);
return status;
}
@@ -423,26 +391,28 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
int status;
status = nlmclnt_call(req, NLMPROC_TEST);
nlmclnt_release_lockargs(req);
if (status < 0)
return status;
goto out;
status = req->a_res.status;
if (status == NLM_LCK_GRANTED) {
fl->fl_type = F_UNLCK;
} if (status == NLM_LCK_DENIED) {
/*
* Report the conflicting lock back to the application.
*/
fl->fl_start = req->a_res.lock.fl.fl_start;
fl->fl_end = req->a_res.lock.fl.fl_start;
fl->fl_type = req->a_res.lock.fl.fl_type;
fl->fl_pid = 0;
} else {
return nlm_stat_to_errno(req->a_res.status);
switch (req->a_res.status) {
case NLM_LCK_GRANTED:
fl->fl_type = F_UNLCK;
break;
case NLM_LCK_DENIED:
/*
* Report the conflicting lock back to the application.
*/
fl->fl_start = req->a_res.lock.fl.fl_start;
fl->fl_end = req->a_res.lock.fl.fl_start;
fl->fl_type = req->a_res.lock.fl.fl_type;
fl->fl_pid = 0;
break;
default:
status = nlm_stat_to_errno(req->a_res.status);
}
return 0;
out:
nlm_release_call(req);
return status;
}
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
@@ -560,7 +530,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
if (resp->status == NLM_LCK_BLOCKED)
nlmclnt_cancel(host, req->a_args.block, fl);
out:
nlmclnt_release_lockargs(req);
nlm_release_call(req);
return status;
}
@@ -623,32 +593,24 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
*/
do_vfs_lock(fl);
if (req->a_flags & RPC_TASK_ASYNC) {
status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
&nlmclnt_unlock_ops);
/* Hrmf... Do the unlock early since locks_remove_posix()
* really expects us to free the lock synchronously */
if (status < 0) {
nlmclnt_release_lockargs(req);
kfree(req);
}
return status;
}
if (req->a_flags & RPC_TASK_ASYNC)
return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
status = nlmclnt_call(req, NLMPROC_UNLOCK);
nlmclnt_release_lockargs(req);
if (status < 0)
return status;
goto out;
status = 0;
if (resp->status == NLM_LCK_GRANTED)
return 0;
goto out;
if (resp->status != NLM_LCK_DENIED_NOLOCKS)
printk("lockd: unexpected unlock status: %d\n", resp->status);
/* What to do now? I'm out of my depth... */
return -ENOLCK;
status = -ENOLCK;
out:
nlm_release_call(req);
return status;
}
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
@@ -670,9 +632,6 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
if (status != NLM_LCK_GRANTED)
printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
nlm_release_host(req->a_host);
nlmclnt_release_lockargs(req);
kfree(req);
return;
retry_rebind:
nlm_rebind_host(req->a_host);
@@ -682,6 +641,7 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
static const struct rpc_call_ops nlmclnt_unlock_ops = {
.rpc_call_done = nlmclnt_unlock_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
@@ -703,20 +663,15 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
req = nlmclnt_alloc_call();
req = nlm_alloc_call(nlm_get_host(host));
if (!req)
return -ENOMEM;
req->a_host = host;
req->a_flags = RPC_TASK_ASYNC;
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status < 0) {
nlmclnt_release_lockargs(req);
kfree(req);
}
status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
spin_lock_irqsave(&current->sighand->siglock, flags);
current->blocked = oldset;
@@ -757,9 +712,6 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
}
die:
nlm_release_host(req->a_host);
nlmclnt_release_lockargs(req);
kfree(req);
return;
retry_cancel:
@@ -773,6 +725,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
static const struct rpc_call_ops nlmclnt_cancel_ops = {
.rpc_call_done = nlmclnt_cancel_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
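The client-side hunks above also change ownership on the asynchronous paths: nlm_async_call() now releases the request itself whenever the RPC cannot be started, and the unlock/cancel rpc_call_ops gain an .rpc_release hook (nlmclnt_rpc_release) so the RPC layer frees the request once the task completes. Condensed from the cancel path above, the caller therefore no longer frees anything on error:

	req = nlm_alloc_call(nlm_get_host(host));
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	/* No kfree()/release here: on failure nlm_async_call() has already
	 * called nlm_release_call(req); on success .rpc_release does so when
	 * the RPC task finishes. */
	status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);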
@@ -480,43 +480,37 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
struct nlm_host *host;
struct nlm_rqst *call;
if (!(call = nlmclnt_alloc_call()))
host = nlmsvc_lookup_host(rqstp);
if (host == NULL)
return rpc_system_err;
host = nlmsvc_lookup_host(rqstp);
if (!host) {
kfree(call);
call = nlm_alloc_call(host);
if (call == NULL)
return rpc_system_err;
}
call->a_flags = RPC_TASK_ASYNC;
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
goto error;
if (nlm_async_call(call, proc, &nlm4svc_callback_ops) < 0)
return rpc_system_err;
return rpc_success;
error:
kfree(call);
nlm_release_host(host);
return rpc_system_err;
}
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
-task->tk_status);
}
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
kfree(call);
static void nlm4svc_callback_release(void *data)
{
nlm_release_call(data);
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
.rpc_call_done = nlm4svc_callback_exit,
.rpc_release = nlm4svc_callback_release,
};
/*
@@ -117,12 +117,12 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end, lock->fl.fl_type);
for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) {
fl = &block->b_call.a_args.lock.fl;
fl = &block->b_call->a_args.lock.fl;
dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
block->b_file, fl->fl_pid,
(long long)fl->fl_start,
(long long)fl->fl_end, fl->fl_type,
nlmdbg_cookie2a(&block->b_call.a_args.cookie));
nlmdbg_cookie2a(&block->b_call->a_args.cookie));
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
if (remove) {
*head = block->b_next;
@@ -156,7 +156,7 @@ nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin)
for (block = nlm_blocked; block; block = block->b_next) {
dprintk("cookie: head of blocked queue %p, block %p\n",
nlm_blocked, block);
if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie)
if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)
&& nlm_cmp_addr(sin, &block->b_host->h_addr))
break;
}
@@ -182,28 +182,30 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
{
struct nlm_block *block;
struct nlm_host *host;
struct nlm_rqst *call;
struct nlm_rqst *call = NULL;
/* Create host handle for callback */
host = nlmsvc_lookup_host(rqstp);
if (host == NULL)
return NULL;
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
/* Allocate memory for block, and initialize arguments */
if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL)))
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (block == NULL)
goto failed;
memset(block, 0, sizeof(*block));
locks_init_lock(&block->b_call.a_args.lock.fl);
locks_init_lock(&block->b_call.a_res.lock.fl);
kref_init(&block->b_count);
if (!nlmsvc_setgrantargs(&block->b_call, lock))
if (!nlmsvc_setgrantargs(call, lock))
goto failed_free;
/* Set notifier function for VFS, and init args */
block->b_call.a_args.lock.fl.fl_flags |= FL_SLEEP;
block->b_call.a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
block->b_call.a_args.cookie = *cookie; /* see above */
call->a_args.lock.fl.fl_flags |= FL_SLEEP;
call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
call->a_args.cookie = *cookie; /* see above */
dprintk("lockd: created block %p...\n", block);
@@ -217,16 +219,16 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
file->f_blocks = block;
/* Set up RPC arguments for callback */
call = &block->b_call;
call->a_host = host;
block->b_call = call;
call->a_flags = RPC_TASK_ASYNC;
call->a_block = block;
return block;
failed_free:
kfree(block);
failed:
nlm_release_host(host);
nlm_release_call(call);
return NULL;
}
@@ -242,7 +244,7 @@ static int nlmsvc_unlink_block(struct nlm_block *block)
dprintk("lockd: unlinking block %p...\n", block);
/* Remove block from list */
status = posix_unblock_lock(block->b_file->f_file, &block->b_call.a_args.lock.fl);
status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
nlmsvc_remove_block(block);
return status;
}
@@ -263,9 +265,8 @@ static void nlmsvc_free_block(struct kref *kref)
}
}
if (block->b_host)
nlm_release_host(block->b_host);
nlmsvc_freegrantargs(&block->b_call);
nlmsvc_freegrantargs(block->b_call);
nlm_release_call(block->b_call);
kfree(block);
}
@@ -316,10 +317,8 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
if (!data) {
nlmsvc_freegrantargs(call);
if (!data)
return 0;
}
call->a_args.lock.oh.data = (u8 *) data;
}
@@ -329,17 +328,8 @@ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
struct file_lock *fl = &call->a_args.lock.fl;
/*
* Check whether we allocated memory for the owner.
*/
if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
}
if (fl->fl_ops && fl->fl_ops->fl_release_private)
fl->fl_ops->fl_release_private(fl);
if (fl->fl_lmops && fl->fl_lmops->fl_release_private)
fl->fl_lmops->fl_release_private(fl);
}
/*
@@ -371,9 +361,9 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
block = nlmsvc_lookup_block(file, lock, 0);
if (block == NULL) {
if (newblock != NULL)
lock = &newblock->b_call.a_args.lock;
lock = &newblock->b_call->a_args.lock;
} else
lock = &block->b_call.a_args.lock;
lock = &block->b_call->a_args.lock;
error = posix_lock_file(file->f_file, &lock->fl);
lock->fl.fl_flags &= ~FL_SLEEP;
@@ -523,7 +513,7 @@ nlmsvc_notify_blocked(struct file_lock *fl)
dprintk("lockd: VFS unblock notification for block %p\n", fl);
for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) {
if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
nlmsvc_insert_block(block, 0);
svc_wake_up(block->b_daemon);
return;
@@ -558,7 +548,7 @@ static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call.a_args.lock;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
dprintk("lockd: grant blocked lock %p\n", block);
@@ -606,7 +596,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
/* Call the client */
kref_get(&block->b_count);
if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
if (nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
&nlmsvc_grant_ops) < 0)
nlmsvc_release_block(block);
out_unlock:
@@ -624,7 +614,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
struct nlm_block *block = container_of(call, struct nlm_block, b_call);
struct nlm_block *block = call->a_block;
unsigned long timeout;
dprintk("lockd: GRANT_MSG RPC callback\n");
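In the server-side locking hunks above, struct nlm_block no longer embeds its GRANTED callback request. nlmsvc_create_block() allocates the request with the same nlm_alloc_call() helper, the block keeps a pointer to it, and the request carries an a_block back-pointer so nlmsvc_grant_callback() no longer needs container_of(). Condensed from the hunks above and the lockd.h change further down:

	host = nlmsvc_lookup_host(rqstp);
	if (host == NULL)
		return NULL;
	call = nlm_alloc_call(host);		/* consumes the host reference */
	if (call == NULL)
		return NULL;
	/* ... allocate and link the block ... */
	block->b_call = call;			/* was an embedded struct nlm_rqst */
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;			/* replaces container_of() in the grant callback */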
@@ -505,43 +505,36 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
struct nlm_host *host;
struct nlm_rqst *call;
if (!(call = nlmclnt_alloc_call()))
host = nlmsvc_lookup_host(rqstp);
if (host == NULL)
return rpc_system_err;
host = nlmsvc_lookup_host(rqstp);
if (!host) {
kfree(call);
call = nlm_alloc_call(host);
if (call == NULL)
return rpc_system_err;
}
call->a_flags = RPC_TASK_ASYNC;
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
goto error;
if (nlm_async_call(call, proc, &nlmsvc_callback_ops) < 0)
return rpc_system_err;
return rpc_success;
error:
nlm_release_host(host);
kfree(call);
return rpc_system_err;
}
static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
-task->tk_status);
}
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
task->tk_pid, -task->tk_status);
}
nlm_release_host(call->a_host);
kfree(call);
static void nlmsvc_callback_release(void *data)
{
nlm_release_call(data);
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
.rpc_call_done = nlmsvc_callback_exit,
.rpc_release = nlmsvc_callback_release,
};
/*
@@ -86,8 +86,9 @@ struct nlm_rqst {
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
struct nlm_res a_res; /* result */
struct nlm_block * a_block;
unsigned int a_retries; /* Retry count */
char a_owner[NLMCLNT_OHSIZE];
u8 a_owner[NLMCLNT_OHSIZE];
};
/*
@@ -115,7 +116,7 @@ struct nlm_block {
struct kref b_count; /* Reference count */
struct nlm_block * b_next; /* linked list (all blocks) */
struct nlm_block * b_fnext; /* linked list (per file) */
struct nlm_rqst b_call; /* RPC args & callback info */
struct nlm_rqst * b_call; /* RPC args & callback info */
struct svc_serv * b_daemon; /* NLM service */
struct nlm_host * b_host; /* host handle for RPC clnt */
unsigned long b_when; /* next re-xmit */
@@ -147,7 +148,9 @@ extern unsigned long nlmsvc_timeout;
/*
* Lockd client functions
*/
struct nlm_rqst * nlmclnt_alloc_call(void);
struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
void nlm_release_call(struct nlm_rqst *);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
void nlmclnt_finish_block(struct nlm_wait *block);
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -172,7 +175,6 @@ extern struct nlm_host *nlm_find_client(void);
/*
* Server-side lock handling
*/
int nlmsvc_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
u32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_lock *, int, struct nlm_cookie *);
u32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);