/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
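	/* Outstanding writebacks mean the change attribute will advance once
	 * they are flushed, so report one more than the delegated value. */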
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
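				/* iput() may sleep, so drop the locks around
				 * it; that invalidates this list walk, hence
				 * the restart afterwards. */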
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				goto restart;
			}
			return inode;
		}
	}

	return NULL;
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				goto restart;
			}
			return inode;
		}
	}

	return NULL;
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (!inode)
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!ino)
		goto out;

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
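	/* i.e. nothing in the recalled range was marked for return, so act as
	 * a forgetful client and report that no matching layout is held. */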
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	nfs_iput_and_deactive(ino);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

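		/* Reuse the server matched on the previous iteration if its
		 * layout driver handles this notification's layout type;
		 * otherwise search the client's superblocks for one that does. */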
		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

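	/* A replayed request without a cached reply is not an error for
	 * CB_SEQUENCE itself; stash the status so that process_op can fail
	 * the next operation in the compound with it instead. */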
	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
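	/* Translate the RCA4_* mask bits into the delegation types to expire;
	 * the file layout bit below triggers a recall of all layouts. */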
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */