/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
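	/* Outstanding page cache requests mean there are local changes the
	 * server has not yet seen, so report a change attribute one ahead
	 * of the delegation's value.
	 */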
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;
	
	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (nfsi->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				goto restart;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}

	return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
		struct nfs_fh *fh)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return lo;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid still not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_DELAY;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh);
	if (!lo) {
		trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
				&args->cbl_stateid, -rv);
		goto out;
	}

	ino = lo->plh_inode;
	pnfs_layoutcommit_inode(ino, false);


	spin_lock(&ino->i_lock);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	/* Embrace your forgetfulness! */
	rv = NFS4ERR_NOMATCHING_LAYOUT;

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	iput(ino);
out:
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	dprintk("%s returning %i\n", __func__, res);
	return res;

}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
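		/* Find a server that uses the layout type named in this
		 * notification so the matching deviceid cache can be purged.
		 */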
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n",
		__func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr);

	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %u is a replay\n",
			__func__, args->csa_sequenceid);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
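	/* Mark the slot busy for the duration of this callback; it is
	 * released again once the compound has been processed.
	 */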
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
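	/* Map the RCA4 type mask onto the delegation types to expire and
	 * decide whether all layouts need to be recalled.
	 */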
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
				 struct cb_process_state *cps)
{
	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */