/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

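/*
 * CB_GETATTR: the server asks for the attributes it cannot see while we
 * hold a write delegation, i.e. the cached file size and change attribute.
 */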
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

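/*
 * CB_RECALL: the server wants a delegation back.  Find the inode by
 * filehandle and let the state manager return the delegation asynchronously.
 */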
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;
	
	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
				/* Drop both locks so iput() can sleep, then retake and rescan */
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
			}
			return inode;
		}
	}

	return NULL;
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
				/* Drop both locks so iput() can sleep, then retake and rescan */
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				goto restart;
			}
			return inode;
		}
	}

	return NULL;
}

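/*
 * Lookup a layout inode, matching first on the recall stateid and then
 * falling back to the filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */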
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (!inode)
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

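/*
 * Handle a CB_LAYOUTRECALL of type FILE: commit any outstanding layout
 * changes, validate the recall stateid, and mark the matching layout
 * segments for return.
 */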
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!ino)
		goto out;

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	nfs_iput_and_deactive(ino);
	return rv;
}

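/*
 * Handle CB_LAYOUTRECALL of type FSID or ALL by tearing down every layout
 * held for the given fsid, or for the whole client.
 */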
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

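/*
 * CB_LAYOUTRECALL entry point; cps->clp has been set up by the preceding
 * CB_SEQUENCE operation.
 */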
__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

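/*
 * CB_NOTIFY_DEVICEID: drop each notified device ID from the deviceid cache
 * of a server that uses the matching layout type.
 */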
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

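/*
 * CB_SEQUENCE: validate the session and slot, check for replayed or
 * misordered requests, and record the slot in cps for the remaining
 * operations in this compound.
 */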
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

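/*
 * CB_RECALL_ANY: the server wants resources back.  Expire unused
 * delegations of the requested types and, if asked, recall all layouts.
 */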
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

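/*
 * CB_NOTIFY_LOCK: a previously denied lock may now be available; wake up
 * any waiters on this client's lock waitqueue.
 */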
__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
				 struct cb_process_state *cps)
{
	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */