// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
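	/* Pending writebacks imply a further change once they are flushed */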
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;
	
	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
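			/*
			 * The superblock is shutting down: iput() may sleep,
			 * so drop the locks around it and have the server
			 * retry the callback later.
			 */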
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

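	/* Push out any pending layoutcommit before acting on the recall */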
	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
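	/*
	 * Bump the callback sequence count so that readers of
	 * cl_callback_count can tell a layout recall has been processed.
	 */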
	write_seqcount_begin(&clp->cl_callback_count);
	write_seqcount_end(&clp->cl_callback_count);
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
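		/* Is the original call still being processed on this slot? */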
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
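			/* Still no reply to the referring call after a short wait? */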
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

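	/* The session must have been created with a backchannel */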
	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
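	/* Translate the recall-any mask into delegation types to expire */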
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */