// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

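/*
 * CB_GETATTR: return the cached size and change attribute for a file we
 * hold a write delegation on, so the server can answer another client's
 * GETATTR without recalling the delegation.
 */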
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

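/*
 * CB_RECALL: the server wants the delegation identified by the filehandle
 * and stateid returned; hand the actual return off to an asynchronous
 * helper so the callback can reply immediately.
 */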
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				goto restart;
			}
			return inode;
		}
	}

	return NULL;
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				continue;
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				goto restart;
			}
			return inode;
		}
	}

	return NULL;
}

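/*
 * Find the inode that owns a layout, preferring a stateid match and
 * falling back to a filehandle search, all under the client's cl_lock.
 */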
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (!inode)
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

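/*
 * Handle a CB_LAYOUTRECALL for a single file: validate the recall stateid,
 * mark matching layout segments for return, and report either NFS4_OK or
 * NFS4ERR_NOMATCHING_LAYOUT if we have already forgotten those segments.
 */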
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!ino)
		goto out;

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	nfs_iput_and_deactive(ino);
	return rv;
}

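/*
 * Handle a CB_LAYOUTRECALL of type FSID or ALL by destroying every matching
 * layout; reply NFS4ERR_NOMATCHING_LAYOUT, or NFS4ERR_DELAY if the server
 * needs to retry.
 */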
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

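/*
 * Record that a layout recall ran (cl_callback_count is a seqcount other
 * paths can sample), then dispatch by recall type.
 */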
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	write_seqcount_begin(&clp->cl_callback_count);
	write_seqcount_end(&clp->cl_callback_count);
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

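/* CB_LAYOUTRECALL entry point: requires an established v4.1 session. */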
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

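/*
 * CB_NOTIFY_DEVICEID: for each notified device, find a server that uses the
 * matching layout driver and drop the cached device ID.
 */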
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  The caller updates the slot's
 * sequence number on success.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

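/*
 * CB_SEQUENCE: validate the session and backchannel slot, check the
 * sequence ID and any referring call lists, and record the slot for the
 * remainder of the compound.
 */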
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

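/* Reject any CB_RECALL_ANY mask bits outside the recall types we know. */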
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

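/*
 * CB_RECALL_ANY: the server is asking for some state back; expire unused
 * read/write delegations and recall all layouts, as selected by the
 * type mask.
 */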
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

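/*
 * CB_NOTIFY_LOCK: a previously unavailable lock may now be free; wake any
 * lock waiters for this client so they can retry.
 */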
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */