/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

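/*
 * CB_GETATTR: the server is querying the attributes of a file for which
 * this client holds a delegation.  Reply with the locally cached change
 * attribute, size and timestamps, but only if we hold a write delegation;
 * otherwise the reply carries NFS4ERR_BADHANDLE.
 */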
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

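/*
 * CB_RECALL: the server is recalling a delegation.  Look up the inode by
 * filehandle and schedule an asynchronous return of the delegation.
 */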
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;
	
	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
				continue;
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				break;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}

	return NULL;
}

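/* Wrapper that takes clp->cl_lock and the RCU read lock around the search */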
static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh, stateid);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return lo;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static bool pnfs_check_stateid_sequence(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (newseq > oldseq + 1)
		return false;
	return true;
}

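/*
 * Handle a CB_LAYOUTRECALL of type RETURN_FILE: look up the layout by
 * filehandle, validate the recall stateid and mark any matching layout
 * segments for return.
 */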
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!lo) {
		trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
				&args->cbl_stateid, -rv);
		goto out;
	}

	ino = lo->plh_inode;

	spin_lock(&ino->i_lock);
	if (!pnfs_check_stateid_sequence(lo, &args->cbl_stateid)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
		rv = NFS4_OK;
		goto unlock;
	}

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
	pnfs_mark_layout_returned_if_empty(lo);
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	iput(ino);
out:
	return rv;
}

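/*
 * Handle a CB_LAYOUTRECALL of type RETURN_FSID or RETURN_ALL by tearing
 * down all affected layouts held by this client.
 */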
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

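/* Dispatch a layout recall to the single-file or bulk draining path */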
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	dprintk("%s returning %i\n", __func__, res);
	return res;
}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

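/*
 * CB_NOTIFY_DEVICEID: find a server that uses the layout type named in
 * each notification and drop the affected deviceid from the cache.
 */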
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n",
		__func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr);

	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %u is a replay\n",
			__func__, args->csa_sequenceid);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

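/*
 * CB_SEQUENCE: validate the callback session, slot and sequence number
 * before the remaining operations in the compound are processed.
 */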
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;
	slot = tbl->slots + args->csa_slotid;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

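/* Only the recall-type bits covered by RCA4_TYPE_MASK_ALL are valid */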
static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

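/*
 * CB_RECALL_ANY: the server wants us to give back delegations and/or
 * layouts of the types set in craa_type_mask.
 */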
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
#endif /* CONFIG_NFS_V4_1 */