// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

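/*
 * CB_GETATTR: the server asks for the attribute values (change attribute
 * and size) that the client may be caching under a write delegation.
 */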
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

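/*
 * CB_RECALL: the server recalls a delegation; the actual return is
 * handed off to an asynchronous worker.
 */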
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

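/*
 * Look up the inode a layout recall refers to: try the layout stateid
 * first, then fall back to the filehandle.
 */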
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

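/*
 * Handle CB_LAYOUTRECALL of type RETURN_FILE: mark the matching layout
 * segments of a single inode for return.
 */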
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

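/*
 * Handle CB_LAYOUTRECALL of type RETURN_FSID or RETURN_ALL by destroying
 * the affected layouts.
 */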
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	write_seqcount_begin(&clp->cl_callback_count);
	write_seqcount_end(&clp->cl_callback_count);
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

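/* CB_LAYOUTRECALL: dispatch to single-file or bulk layout draining */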
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

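/*
 * CB_NOTIFY_DEVICEID: invalidate the cached device IDs named in the
 * notification.
 */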
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

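/*
 * CB_SEQUENCE: validate the backchannel slot and sequence ID, and check
 * for referring calls, before the remaining operations are processed.
 */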
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

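/*
 * CB_RECALL_ANY: ask the client to return unused delegations and/or
 * layouts of the types named in the mask.
 */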
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

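/*
 * CB_NOTIFY_LOCK: wake up anyone waiting for the lock described by the
 * callback arguments.
 */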
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */